Dataset schema (one row per source file):

  repo_name        string   (lengths 5-92)
  path             string   (lengths 4-221)
  copies           string   (19 values)
  size             string   (lengths 4-6)
  content          string   (lengths 766-896k)
  license          string   (15 values)
  hash             int64    (-9,223,277,421,539,062,000 to 9,223,102,107B)
  line_mean        float64  (6.51-99.9)
  line_max         int64    (32-997)
  alpha_frac       float64  (0.25-0.96)
  autogenerated    bool     (1 class)
  ratio            float64  (1.5-13.6)
  config_test      bool     (2 classes)
  has_no_keywords  bool     (2 classes)
  few_assignments  bool     (1 class)
repo_name: gonadarian/kagen
path: kagen/khan.py
copies: 1
size: 1955
content:

import os
import csv
import json
import pymongo

from kagen import utils
from kagen.utils import config
from datetime import datetime

logger = utils.get_logger("khan")


def work():
    khan = utils.get_conn_khan()
    db = utils.get_conn_mongo()
    dtf = "%Y-%m-%dT%H:%M:%SZ"

    doc = utils.get_response_json(khan, "/api/v1/playlists")
    for item in doc:
        item["_id"] = item["id"]
    for playlist in doc:
        playlist["backup_timestamp"] = datetime.strptime(playlist["backup_timestamp"], dtf)
    db.playlists.drop()
    db.playlists.insert(doc)
    logger.info("loaded {} items in playlists collection".format(len(doc)))

    doc = utils.get_response_json(khan, "/api/v1/playlists/library")
    db.playlists_library.drop()
    db.playlists_library.insert(doc)
    logger.info("loaded {} items in playlists_library collection".format(len(doc)))

    doc = utils.get_response_json(khan, "/api/v1/playlists/library/list")
    for playlist in doc:
        playlist["_id"] = playlist["id"]
        playlist["backup_timestamp"] = datetime.strptime(playlist["backup_timestamp"], dtf)
    db.playlists_library_list.drop()
    db.playlists_library_list.insert(doc)
    logger.info("loaded {} items in playlists_library_list collection".format(len(doc)))

    videos = []
    ids = []
    for playlist in doc:
        for video in playlist["videos"]:
            video_id = video["id"]
            if video_id not in ids:
                video["_id"] = video_id
                videos.append(video)
                ids.append(video_id)
                video["date_added"] = datetime.strptime(video["date_added"], dtf)
                video["backup_timestamp"] = datetime.strptime(video["backup_timestamp"], dtf)
    db.video_list.drop()
    db.video_list.insert(videos)
    logger.info("loaded {} items in video_list collection".format(len(videos)))


@utils.entry_point
def main():
    logger.info("START khan")
    work()
    logger.info("DONE khan")

license: mit
hash: 5,795,363,018,765,925,000
line_mean: 31.583333
line_max: 91
alpha_frac: 0.640409
autogenerated: false
ratio: 3.528881
config_test: false
has_no_keywords: false
few_assignments: false
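The khan.py row above rebuilds each MongoDB collection with drop() followed by the legacy Collection.insert() call, which pymongo 4.x no longer provides. As a side note rather than part of the dataset row, here is a minimal sketch of the same drop-and-reload step using insert_many(); the database handle and the playlists payload are assumed to come from the row's own utils helpers.

from datetime import datetime


def reload_playlists(db, playlists, dtf="%Y-%m-%dT%H:%M:%SZ"):
    # mirror khan.py's work(): key documents by the API id and parse the backup timestamp
    for playlist in playlists:
        playlist["_id"] = playlist["id"]
        playlist["backup_timestamp"] = datetime.strptime(playlist["backup_timestamp"], dtf)
    db.playlists.drop()
    db.playlists.insert_many(playlists)  # insert() was removed in pymongo 4.x
    return len(playlists)


# usage sketch (connection details are hypothetical):
# import pymongo
# client = pymongo.MongoClient("mongodb://localhost:27017")
# print(reload_playlists(client["kagen"], fetched_playlists))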
repo_name: irvined1982/olweb-clients
path: bin/bkill.py
copies: 1
size: 3674
content:

#!/usr/bin/env python
# Copyright 2014 David Irvine
#
# This file is part of olwclients
#
# olwclients is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# olwclients is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with olwclients. If not, see <http://www.gnu.org/licenses/>.
import argparse
from olwclient import *
import getpass
import re
import sys

parser = argparse.ArgumentParser(description='Displays information about hosts')
OpenLavaConnection.configure_argument_list(parser)
parser.add_argument("-J", dest="job_name", default=None,
                    help="Operates only on jobs with the specified job_name. The -J option is ignored if a job ID \
other than 0 is specified in the job_ID option.")
parser.add_argument("-m", dest="host_name", default=None,
                    help="Operates only on jobs dispatched to the specified host or host group.")
parser.add_argument("-q", dest="queue_name", default=None,
                    help="Operates only on jobs in the specified queue.")
parser.add_argument("-u", dest="user_name", default=getpass.getuser(),
                    help="Operates only on jobs submitted by the specified user or user group (see bugroup(1)), or by \
all users if the reserved user name all is specified.")
parser.add_argument("job_ids", nargs='+', type=str, default=None,
                    help='Operates only on jobs that are specified by job_ID or "job_ID[index]", where \
"job_ID[index]" specifies selected job array elements (see bjobs(1)). For job arrays, quotation \
marks must enclose the job ID and index, and index must be enclosed in square brackets.')
parser.add_argument("-s", dest="signal", default="kill", choices=["kill", "suspend", "resume", "requeue"],
                    help="Sends the specified signal to specified jobs. Signals can be one of: kill, suspend, resume, \
requeue,")
args = parser.parse_args()

connection = OpenLavaConnection(args)

if 0 in args.job_ids or "0" in args.job_ids:
    jobs = Job.get_job_list(connection,
                            user_name=args.user_name,
                            host_name=args.host_name,
                            queue_name=args.queue_name,
                            job_name=args.job_name,
                            )
else:
    jobs = []
    for job_id in args.job_ids:
        try:
            jid = int(job_id)
            aid = 0
        except ValueError:
            # Capture groups added: the original pattern had none, so the
            # subsequent group(1) call could never succeed for array jobs.
            match = re.search(r'(\d+)\[(\d+)\]', job_id)
            if match:
                jid = match.group(1)
                aid = match.group(2)
            else:
                print "Invalid job id: %s" % job_id
                sys.exit(1)
        jobs.append(Job(connection, job_id=jid, array_index=aid))

try:
    for job in jobs:
        try:
            print "Sending %s signal to job: %s[%s]" % (args.signal, job.job_id, job.array_index)
            getattr(job, args.signal)()
        except PermissionDeniedError, e:
            print "Unable to perform action on job: %s[%s]: %s" % (job.job_id, job.array_index, e.message)
except RemoteServerError, e:
    print "Unable to display job information: %s" % e.message
    sys.exit(1)

license: gpl-2.0
hash: 4,289,308,900,084,577,300
line_mean: 42.223529
line_max: 119
alpha_frac: 0.619488
autogenerated: false
ratio: 3.963323
config_test: false
has_no_keywords: false
few_assignments: false
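A few invocation examples for the bkill.py client above, inferred only from its argparse definitions; the user, host, and job IDs are made up.

python bkill.py 0                                    # act on all of the current user's matching jobs
python bkill.py -s suspend -u alice -m comp001 1234  # suspend job 1234 submitted by alice on comp001
python bkill.py -s requeue "1500[7]"                 # requeue element 7 of job array 1500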
repo_name: panosl/helios
path: helios/orders/forms.py
copies: 1
size: 1114
content:

from django import forms
from helios.shipping.models import ShippingMethodRegions


class ShippingChoiceField(forms.ModelChoiceField):
    def label_from_instance(self, obj):
        return u'%s, %s - %s' % (obj.method.name, obj.method.shipper, obj.cost)


# todo this needs to be handled either here
# or in the checkout view in the store app
class ShippingOrderForm(forms.Form):
    def __init__(self, customer, *args, **kwargs):
        super(ShippingOrderForm, self).__init__(*args, **kwargs)
        methods = [region.shippingmethodregions_set.all()
                   for region in customer.country.shippingregion_set.all()]
        methods = [method[0] for method in methods]
        self.fields['shipping_choice'].queryset = ShippingMethodRegions.objects.filter(
            id__in=[method.id for method in methods])

    shipping_choice = ShippingChoiceField(
        queryset=ShippingMethodRegions.objects.all(),
        empty_label=None,
        widget=forms.RadioSelect(attrs={
            'class': 'order',
            'onclick': '$("#shipping_choice").submit()',
        })
    )


class OrderForm(forms.Form):
    pass

license: bsd-3-clause
hash: 1,921,637,111,047,108,400
line_mean: 34.935484
line_max: 128
alpha_frac: 0.670557
autogenerated: false
ratio: 3.776271
config_test: false
has_no_keywords: false
few_assignments: false
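The "# todo" comment in the forms.py row above notes that the shipping choice still has to be wired up in the checkout view of the store app. Purely as an illustration (not code from the helios project), here is a minimal sketch of how the form could be used there; the view name, the customer lookup, and the template path are assumptions.

from django.shortcuts import render

from helios.orders.forms import ShippingOrderForm


def checkout(request):
    customer = request.user.customer  # hypothetical: however the store app resolves the customer

    if request.method == 'POST':
        form = ShippingOrderForm(customer, request.POST)
        if form.is_valid():
            shipping = form.cleaned_data['shipping_choice']
            # ... attach the chosen ShippingMethodRegions row to the order here ...
    else:
        form = ShippingOrderForm(customer)

    return render(request, 'store/checkout.html', {'form': form})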
repo_name: buaawp/pums
path: mock/migrations/0001_initial.py
copies: 1
size: 3879
content:
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='LtMockModule', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(default='module', max_length=128)), ('description', models.CharField(max_length=1024, blank=True)), ], ), migrations.CreateModel( name='LtMockProject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(default='project', max_length=128)), ('description', models.CharField(max_length=1024, blank=True)), ('user', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)), ], ), migrations.CreateModel( name='LtMockRequest', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(default='defaultName', max_length=128)), ('method', models.CharField(default='GET', max_length=20)), ('address', models.CharField(default='defaultUrl', max_length=2048)), ('params', models.CharField(max_length=1648, blank=True)), ('module', models.ForeignKey(to='mock.LtMockModule')), ('project', models.ForeignKey(to='mock.LtMockProject')), ], ), migrations.CreateModel( name='LtMockRequestHeader', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('key', models.CharField(default='defaultkey', max_length=128)), ('value', models.CharField(max_length=1024, blank=True)), ], ), migrations.CreateModel( name='LtMockRequestParam', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('key', models.CharField(default='defaultkey', max_length=128)), ('value', models.CharField(max_length=1024, blank=True)), ], ), migrations.CreateModel( name='LtMockResponse', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(default='defaultresponse', max_length=128)), ('template', models.CharField(max_length=2048, blank=True)), ('sample', models.CharField(max_length=2048, blank=True)), ], ), migrations.AddField( model_name='ltmockrequest', name='requestheader', field=models.ForeignKey(blank=True, to='mock.LtMockRequestHeader', null=True), ), migrations.AddField( model_name='ltmockrequest', name='requestparam', field=models.ForeignKey(blank=True, to='mock.LtMockRequestParam', null=True), ), migrations.AddField( model_name='ltmockrequest', name='response', field=models.ForeignKey(blank=True, to='mock.LtMockResponse', null=True), ), migrations.AddField( model_name='ltmockmodule', name='project', field=models.ForeignKey(to='mock.LtMockProject'), ), ]
license: mit
hash: 6,251,896,528,812,043,000
line_mean: 42.58427
line_max: 114
alpha_frac: 0.564836
autogenerated: false
ratio: 4.453502
config_test: false
has_no_keywords: false
few_assignments: false
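The 0001_initial.py row above is an initial migration for the mock app. As a readability aid only (this is not code from the buaawp/pums repo), the sketch below shows the models.py that two of its CreateModel operations correspond to; the field options are read straight off the migration, while the class layout and the pre-2.0 Django style (ForeignKey without on_delete) are assumptions.

from django.conf import settings
from django.db import models


class LtMockProject(models.Model):
    # mirrors CreateModel(name='LtMockProject', ...)
    name = models.CharField(default='project', max_length=128)
    description = models.CharField(max_length=1024, blank=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)


class LtMockModule(models.Model):
    # mirrors CreateModel(name='LtMockModule', ...) plus the trailing AddField for project
    name = models.CharField(default='module', max_length=128)
    description = models.CharField(max_length=1024, blank=True)
    project = models.ForeignKey(LtMockProject)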
repo_name: cossatot/culpable
path: culpable/magnitudes.py
copies: 1
size: 22751
content:
import numpy as np from .stats import Pdf, pdf_from_samples, multiply_pdfs, divide_pdfs """ Scaling relationships and related equations for earthquake magnitude calculations. """ """ Normalized slip distribution from Biasi and Weldon, 2006 """ Dn_x = np.array( [ 0. , 0.03852144, 0.07704287, 0.11556431, 0.15408574, 0.19260718, 0.23112861, 0.26965005, 0.30817149, 0.34669292, 0.38521436, 0.42373579, 0.46225723, 0.50077866, 0.5393001 , 0.57782153, 0.61634297, 0.65486441, 0.69338584, 0.73190728, 0.77042871, 0.80895015, 0.84747158, 0.88599302, 0.92451446, 0.96303589, 1.00155733, 1.04007876, 1.0786002 , 1.11712163, 1.15564307, 1.19416451, 1.23268594, 1.27120738, 1.30972881, 1.34825025, 1.38677168, 1.42529312, 1.46381456, 1.50233599, 1.54085743, 1.57937886, 1.6179003 , 1.65642173, 1.69494317, 1.7334646 , 1.77198604, 1.81050748, 1.84902891, 1.88755035, 1.92607178, 1.96459322, 2.00311465, 2.04163609, 2.08015753, 2.11867896, 2.1572004 , 2.19572183, 2.23424327, 2.2727647 , 2.31128614, 2.34980758, 2.38832901, 2.42685045, 2.46537188, 2.50389332, 2.54241475, 2.58093619, 2.61945762, 2.65797906, 2.6965005 , 2.73502193, 2.77354337, 2.8120648 , 2.85058624, 2.88910767, 2.92762911, 2.96615055, 3.00467198, 3.04319342, 3.08171485, 3.12023629, 3.15875772, 3.19727916, 3.2358006 , 3.27432203, 3.31284347, 3.3513649 , 3.38988634, 3.42840777, 3.46692921, 3.50545064, 3.54397208, 3.58249352, 3.62101495, 3.65953639, 3.69805782, 3.73657926, 3.77510069, 3.81362213]) Dn_y = np.array( [ 3.56431234e-01, 4.07514412e-01, 4.49469325e-01, 4.80250978e-01, 4.99600050e-01, 5.08967345e-01, 5.11056831e-01, 5.09135209e-01, 5.06305810e-01, 5.04929021e-01, 5.06305202e-01, 5.10647854e-01, 5.17294850e-01, 5.25056042e-01, 5.32585263e-01, 5.38688051e-01, 5.42518154e-01, 5.43657945e-01, 5.42107125e-01, 5.38215229e-01, 5.32589131e-01, 5.25993774e-01, 5.19250549e-01, 5.13129949e-01, 5.08236899e-01, 5.04898081e-01, 5.03074847e-01, 5.02334004e-01, 5.01903866e-01, 5.00822254e-01, 4.98152675e-01, 4.93216557e-01, 4.85776256e-01, 4.76112653e-01, 4.64970884e-01, 4.53387277e-01, 4.42445033e-01, 4.33023117e-01, 4.25598012e-01, 4.20136711e-01, 4.16092401e-01, 4.12492219e-01, 4.08093894e-01, 4.01583982e-01, 3.91790171e-01, 3.77880214e-01, 3.59519131e-01, 3.36956396e-01, 3.11019404e-01, 2.83002312e-01, 2.54461304e-01, 2.26954105e-01, 2.01783046e-01, 1.79805426e-01, 1.61356306e-01, 1.46292387e-01, 1.34126853e-01, 1.24201482e-01, 1.15842979e-01, 1.08470898e-01, 1.01650879e-01, 9.51051805e-02, 8.86970782e-02, 8.24006991e-02, 7.62618151e-02, 7.03540397e-02, 6.47382510e-02, 5.94357659e-02, 5.44230300e-02, 4.96471997e-02, 4.50527124e-02, 4.06047119e-02, 3.62987575e-02, 3.21550847e-02, 2.82040784e-02, 2.44727150e-02, 2.09786579e-02, 1.77325398e-02, 1.47440829e-02, 1.20266593e-02, 9.59725861e-03, 7.47225770e-03, 5.66159378e-03, 4.16411755e-03, 2.96568107e-03, 2.04006393e-03, 1.35194170e-03, 8.60866657e-04, 5.25372416e-04, 3.06545806e-04, 1.70626053e-04, 9.04155999e-05, 4.55329491e-05, 2.17590136e-05, 9.85449333e-06, 4.22528115e-06, 1.71367970e-06, 6.56980895e-07, 2.37946616e-07, 8.13790788e-08]) Dn = Pdf(Dn_x, Dn_y) Dn_sb = multiply_pdfs(Dn, Pdf([Dn_x.min(), Dn_x.max()], [Dn_x.min(), Dn_x.max()])) """ Probability distribution for an earthquake breaking the surface given Gutenberg-Richter prior; to be used as a p(M) prior for paleoseismic magnitudes from Biasi and Weldon 2006 """ gr_pm_x = [5.000, 5.001, 5.057, 5.097, 5.192, 5.300, 5.392, 5.499, 5.597, 5.753, 5.922, 6.021, 6.211, 6.353, 6.533, 6.604, 6.771, 6.999, 7.280, 7.507, 7.726, 7.953, 8.182] gr_pm_y = 
[0.000, 0.030, 0.050, 0.063, 0.081, 0.089, 0.089, 0.085, 0.079, 0.067, 0.054, 0.047, 0.035, 0.027, 0.020, 0.018, 0.013, 0.008, 0.005, 0.003, 0.002, 9.785e-4, 0.00] """ Conversion functions """ def _exp_10(x): return 10**x log_fn = {'e': np.log, '10': np.log10} exp_fn = {'e': np.exp, '10': _exp_10} M_from_D_coeffs = {'BW_2006': {'a': 6.94, 'b': 1.14, 'log_base': '10'}, # WC_1994 are for Average Displacement, not max. 'WC_1994_all': {'a': 6.93, 'b': 0.82, 'log_base': '10'}, 'WC_1994_SS': {'a': 7.04, 'b': 0.89, 'log_base': '10'}, 'WC_1994_R': {'a': 6.64, 'b': 0.13, 'log_base': '10'}, 'WC_1994_N': {'a': 6.78, 'b': 0.65, 'log_base': '10'}, } M_from_L_coeffs = {'Stirling_2002_instr': {'a': 5.45, 'a_err': 0.08, 'b': 0.95, 'b_err': 0.06, 'log_base': '10'}, 'Stirling_2002_pre_instr': {'a': 5.89, 'a_err': 0.11, 'b': 0.79, 'b_err': 0.06, 'log_base': '10'}, 'WC_1994_all': {'a': 5.08, 'a_err': 0.1, 'b': 1.16, 'b_err': 0.07, 'log_base': '10'}, 'WC_1994_SS': {'a': 5.16, 'a_err': 0.13, 'b': 1.12, 'b_err': 0.08, 'log_base': '10'}, 'WC_1994_R': {'a': 5.00, 'a_err': 0.22, 'b': 1.22, 'b_err': 0.16, 'log_base': '10'}, 'WC_1994_N': {'a': 4.86, 'a_err': 0.34, 'b': 1.32, 'b_err': 0.26, 'log_base': '10'}, } def M_from_D(D, ref='BW_2006', a=None, b=None, base='e'): """ Moment magnitude from displacement, using the specified scaling (keyword 'ref', or parameters 'a', 'b' and 'log'. General relationship is M = a + b * log(D). Parameters ---------- D : Scalar or vector values for displacement (in meters) ref : string indicating scaling relationship. 'BW_2006' is Biasi and Weldon (2006) (default). 'WC_1994_all' is Wells and Coppersmith (1994) for all events. 'WC_1994_SS' is Wells and Coppersmith (1994) for strike-slip events. 'WC_1994_R' is Wells and Coppersmith (1994) for reverse events. 'WC_1994_N' is Wells and Coppersmith (1994) for normal events. `ref=None` will allow you to enter your own coefficients and base. a : Scalar, or vector of same length as D. b : Scalar, or vector of same length as D. base : String, base for logarithm, default 'e'. 'e' is natural log. '10' is log10. Returns ------- M : Scalar or vector of calculated magnitude, with shape of D. """ if ref is not None: # consider warning if ref is not None and a, b, log are inputs a = M_from_D_coeffs[ref]['a'] b = M_from_D_coeffs[ref]['b'] base = M_from_D_coeffs[ref]['log_base'] else: pass return a + b * log_fn[base](D) def D_from_M(M, ref='BW_2006', a=None, b=None, base='e'): """ Moment magnitude from displacement, using the specified scaling (keyword 'ref', or parameters 'a', 'b' and 'base'. General relationship is D = base ** ((M - a) / b) Parameters ---------- M : Scalar or vector values for moment magnitude ref : string indicating scaling relationship. 'BW_2006' is Biasi and Weldon (2006) (default). 'WC_1994_all' is Wells and Coppersmith (1994) for all events. 'WC_1994_SS' is Wells and Coppersmith (1994) for strike-slip events. 'WC_1994_R' is Wells and Coppersmith (1994) for reverse events. 'WC_1994_N' is Wells and Coppersmith (1994) for normal events. `ref=None` will allow you to enter your own coefficients and base. a : Scalar, or vector of same length as M. b : Scalar, or vector of same length as M. base : String, base for exponent, default 'e'. 'e' is e. '10' is 10. Returns ------- D : Scalar or vector of calculated displacement (in meters), with shape of M. 
""" if ref is not None: a = M_from_D_coeffs[ref]['a'] b = M_from_D_coeffs[ref]['b'] base = M_from_D_coeffs[ref]['log_base'] return exp_fn[base]((M - a) / b) def M_from_L(L, ref='Stirling_2002_instr', unit='km', a=None, b=None, base='e', a_err=None, b_err=None, mc=False): """ Moment magnitude from length, using the specified scaling (keyword 'ref', or parameters 'a', 'b' and 'log'. General relationship is M = a + b * log(D). Parameters ---------- D : Scalar or vector values for displacement (in meters) ref : string indicating scaling relationship. 'Stirling_2002_instr' is from Stirling et al. 2002, instrumental data. 'WC_1994_all' is Wells and Coppersmith (1994) for all events. 'WC_1994_SS' is Wells and Coppersmith (1994) for strike-slip events. 'WC_1994_R' is Wells and Coppersmith (1994) for reverse events. 'WC_1994_N' is Wells and Coppersmith (1994) for normal events. `ref=None` will allow you to enter your own coefficients and base. unit : Unit of length measure. Default is 'km'. 'm' also works. a : Scalar, or vector of same length as D. a_err : Standard error of `a`. Scalar. b : Scalar, or vector of same length as D. b_err : Standard error of `b`. Scalar. log : String, base for logarithm, default 'e'. 'e' is natural log. '10' is log10. mc : Boolean that indicates whether to sample the coefficents a and b including uncertainties `a_err` and `b_err` through Monte Carlo techniques. Returns ------- M : Scalar or vector of calculated magnitude, with shape of L. """ # unit conversion if unit == 'm': L = L * 1000. if ref is not None: a = M_from_L_coeffs[ref]['a'] b = M_from_L_coeffs[ref]['b'] base = M_from_L_coeffs[ref]['log_base'] try: a_err = M_from_L_coeffs[ref]['a_err'] b_err = M_from_L_coeffs[ref]['b_err'] except KeyError: pass if mc == True: A = a if a_err is None else np.random.normal(a, a_err, len(L)) B = b if b_err is None else np.random.normal(b, b_err, len(L)) else: A = a B = b return A + B * log_fn[base](L) """ Estimation functions """ def p_D_M(D, M, ref='BW_2006', sample_bias_corr=False): """ Likelihood of predicted D given M, as defined by Biasi and Weldon (2006). Parameters ---------- D : Scalar or array of displacement values (in meters). M : Scalar or array of magnitudes. ref: Displacement-magnitude scaling reference (string). 'BW_2006' is Biasi and Weldon (2006). 'WC_1994_all' is Wells and Coppersmith (1994). Returns ------- p_D_M : Calculated likelihood. If scalar, simply returns the likelihood. If not, returns an improper pdf (a `culpable.stats.Pdf`) which is an interpolation class. Actual likelihoods are `p_D_M.y`, and corresponding magnitudes (i.e. the prior p_M) are `p_D_M.x`. """ D_ave = D_from_M(M, ref=ref) D = np.abs(D) if sample_bias_corr == True: Dn_ = Dn_sb else: Dn_ = Dn if np.isscalar(D): D_score = D / D_ave p_D_M = Dn_(D_score) else: D_score = np.array([d / D_ave for d in D]) p_D_M = Dn_(D_score) p_D_M = np.mean(p_D_M, axis=0) if np.isscalar(p_D_M): p_D_M = np.float(p_D_M) else: p_D_M = Pdf(M, p_D_M, normalize=True) return p_D_M def _make_p_M_x(p_M_min=5., p_M_max=8.5, M_step=0.1, n_M=None): """ Makes the X values (i.e., the magnitudes) for a p_M distribution. """ if n_M is not None: p_M_x = np.linspace(p_M_min, p_M_max, num=n_M) else: if M_step is None: M_step = 0.1 # in case it's passed as None from another function p_M_x = np.arange(p_M_min, p_M_max + M_step, M_step) return p_M_x def make_p_M_uniform(p_M_min=5., p_M_max=8.5, M_step=0.1, n_M=None): """ Creates a uniform PDF between the minimum and maximum magnitudes given by p_M_min and p_M_max. 
Parameters ---------- p_M_min : Minimum magnitude. p_M_max : Maximum magnitude. M_step : Width of steps in interpolation (no effect on final results). n_M : number of points in interpolation (no effect on final results). Returns ------- p_M : Pdf function with a uniform distribution between p_M_min and p_M_max """ p_M_x = _make_p_M_x(p_M_min=p_M_min, p_M_max=p_M_max, M_step=M_step, n_M=n_M) return Pdf(p_M_x, np.ones(len(p_M_x)) * 1 / len(p_M_x)) def make_p_M_gr_surface_break(p_M_min=5., p_M_max=8.5, M_step=0.1, n_M=None): """ Creates a PDF based on a Gutenberg-Richter distribution that is then modified to account for the decreasing likelihood of surface rupture with decreasing magnitude (distribution from Biasi and Weldon 2006, figure 8b. Returns: -------- p_M : Pdf class with a modified Gutenberg-Richter distribution. """ p_M_x = _make_p_M_x(p_M_min=p_M_min, p_M_max=p_M_max, M_step=M_step, n_M=n_M) p_M_gr_sb = Pdf(gr_pm_x, gr_pm_y) p_M_gr_sb_y = p_M_gr_sb(p_M_x) return Pdf(p_M_x, p_M_gr_sb_y) def make_p_M(p_M_type='uniform', p_M_min=None, p_M_max=None, M_step=None, n_M=None): """ Creates the a PDF of magnitudes to use as the prior p(M). Parameters ---------- p_M_type : Type of prior. Current values are 'uniform' and 'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's correction for the likelihood of events of different sizes breaking the surface, as reported in BW 2006). p_M_min : Minimum magnitude. p_M_max : Maximum magnitude. M_step : Width of steps in interpolation (no effect on final results). n_M : number of points in interpolation (no effect on final results). Returns ------- p_M : Pdf function with a uniform distribution between p_M_min and p_M_max """ if p_M_type == 'uniform': p_M = make_p_M_uniform(p_M_min=p_M_min, p_M_max=p_M_max, M_step=M_step, n_M=n_M) elif p_M_type == 'GR_surface_break': p_M = make_p_M_gr_surface_break(p_M_min=p_M_min, p_M_max=p_M_max, M_step=M_step, n_M=n_M) return p_M def p_M_D(D, p_M=None, p_M_min=None, p_M_max=None, M_step=None, n_M=None, ref='BW_2006', p_M_type='uniform', sample_bias_corr=False): """ Calculates p(M|D), the posterior probability of an earthquake having a magnitude of M given observed displacement D, based on Biasi and Weldon 2006 (but with optional sample bias correction). Either a `p_M` Pdf object should be passed, or the additional parameters necessary to construct one; see `make_p_M`. Parameters ---------- D : Scalar or vector of displacements in meters (floats). p_M : Prior magnitude distribution p(M), in the Pdf class from culpable.stats. p_M_type : Type of prior. Current values are 'uniform' and 'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's correction for the likelihood of events of different sizes breaking the surface, as reported in BW 2006). p_M_min : Minimum prior magnitude; only needed if `p_M` is not given. p_M_max : Maximum prior magnitude; only needed if `p_M` is not given. M_step : Spacing for `p_M`; only needed if `p_M` is not given. n_M : number of points for `p_M`; only needed if `p_M` is not given. ref : Reference for magnitude-displacement scaling relationships. See `M_from_D` for a list of implemented relationships. sample_bias_correction: Boolean indicating whether to correct for preferential sampling of scarps proportionally to the offset at a point relative to the min and max offsets. Returns ------ p_M_D : Pdf function of the posterior magnitude estimation p(M|D). 
""" if p_M is None: p_M = make_p_M(p_M_type=p_M_type, p_M_min=p_M_min, p_M_max=p_M_max, M_step=M_step, n_M=n_M) else: #TODO: maybe add some logic for dealing with non `Pdf` priors pass p_D = Pdf(p_M.x, [np.trapz(Dn_y, Dn_x * D_from_M(M, ref=ref)) for M in p_M.x]) p_D_M_ = p_D_M(D, p_M.x, ref=ref, sample_bias_corr=sample_bias_corr) p_M_D_ = multiply_pdfs(p_M, p_D_M_, step=M_step) p_M_D_ = divide_pdfs(p_M_D_, p_D, step=M_step) return p_M_D_ def p_M_L(L, p_M=None, p_M_min=None, p_M_max=None, M_step=None, n_M=None, p_M_type='uniform', ref='WC_1994_all', mc=True): """ Calculates p(M|L), the posterior probability of an earthquake having a magnitude of M given observed length L. Either a `p_M` Pdf object should be passed, or the additional parameters necessary to construct one; see `make_p_M`. Parameters ---------- L : Scalar or vector of lengths in kilometers (floats). p_M : Prior magnitude distribution p(M), in the Pdf class from culpable.stats. p_M_type : Type of prior. Current values are 'uniform' and 'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's correction for the likelihood of events of different sizes breaking the surface, as reported in BW 2006). p_M_min : Minimum prior magnitude; only needed if `p_M` is not given. p_M_max : Maximum prior magnitude; only needed if `p_M` is not given. M_step : Spacing for `p_M`; only needed if `p_M` is not given. n_M : number of points for `p_M`; only needed if `p_M` is not given. ref : Reference for magnitude-length scaling relationships. See `M_from_L` for a list of implemented relationships. mc : Boolean that describes whether to propagate the uncertainty (standard errors) in the scaling relationship to the posterior using a Monte Carlo simulation. Returns ------ p_M_D : Pdf function of the posterior magnitude estimation p(M|D). """ if p_M is None: p_M = make_p_M(p_M_type=p_M_type, p_M_min=p_M_min, p_M_max=p_M_max, M_step=M_step, n_M=n_M) p_M_L_samples = M_from_L(L, ref=ref, mc=mc) p_M_L_ = pdf_from_samples(p_M_L_samples, x_min=p_M.x.min(), x_max=p_M.x.max()) p_M_L_ = multiply_pdfs(p_M, p_M_L_) return p_M_L_ def p_M_DL(D, L, p_M=None, p_M_min=None, p_M_max=None, M_step=None, n_M=None, p_M_type='uniform', D_ref='BW_2006', L_ref='WC_1994_all', L_mc=True, sample_bias_corr=False): """ Calculates p(M|D,L), the posterior probability of an earthquake having a magnitude of M given observed offset/displacement D and rupture length L. Either a `p_M` Pdf object should be passed, or the additional parameters necessary to construct one; see `make_p_M`. Parameters ---------- D : Scalar or vector of displacement in meters (floats). L : Scalar or vector of lengths in kilometers (floats). p_M : Prior magnitude distribution p(M), in the Pdf class from culpable.stats. p_M_type : Type of prior. Current values are 'uniform' and 'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's correction for the likelihood of events of different sizes breaking the surface, as reported in BW 2006). p_M_min : Minimum prior magnitude; only needed if `p_M` is not given. p_M_min : Minimum prior magnitude; only needed if `p_M` is not given. M_step : Spacing for `p_M`; only needed if `p_M` is not given. n_M : number of points for `p_M`; only needed if `p_M` is not given. D_ref : Reference for magnitude-displacement scaling relationships. See `M_from_D` for a list of implemented relationships. L_ref : Reference for magnitude-length scaling relationships. See `M_from_L` for a list of implemented relationships. 
mc : Boolean that describes whether to propagate the uncertainty (standard errors) in the scaling relationship to the posterior using a Monte Carlo simulation. sample_bias_correction: Boolean indicating whether to correct for preferential sampling of scarps proportionally to the offset at a point relative to the min and max offsets. Returns ------ p_M_D : Pdf function of the posterior magnitude estimation p(M|D). """ if p_M is None: p_M = make_p_M(p_M_type=p_M_type, p_M_min=p_M_min, p_M_max=p_M_max, M_step=M_step, n_M=n_M) p_M_D_ = p_M_D(D, p_M, ref=D_ref, sample_bias_corr=sample_bias_corr) p_M_L_samples = M_from_L(L, ref=L_ref, mc=L_mc) p_M_L_ = pdf_from_samples(p_M_L_samples, x_min=p_M.x.min(), x_max=p_M.x.max()) return multiply_pdfs(p_M_L_, p_M_D_)
license: mit
hash: 3,321,407,271,756,784,000
line_mean: 33.628615
line_max: 79
alpha_frac: 0.551976
autogenerated: false
ratio: 2.902283
config_test: false
has_no_keywords: false
few_assignments: false
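The magnitudes.py row above documents the displacement scaling as M = a + b * log(D) and lists the Biasi and Weldon (2006) coefficients in M_from_D_coeffs as a = 6.94, b = 1.14 with base-10 logs. As a standalone numerical check (not part of the package), the sketch below evaluates that relationship directly: a 2 m average displacement maps to M ≈ 7.28.

import numpy as np


def m_from_d_bw2006(D, a=6.94, b=1.14):
    """Magnitude from displacement D (meters), Biasi & Weldon (2006): M = a + b * log10(D)."""
    return a + b * np.log10(D)


print(m_from_d_bw2006(2.0))                        # ~7.28
print(m_from_d_bw2006(np.array([0.5, 1.0, 5.0])))  # ~[6.60, 6.94, 7.74]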
repo_name: huwiki/featured-feeds
path: rsslib.py
copies: 1
size: 4281
content:
#!/usr/bin/python # -*- coding: iso-8859-2 -*- import sys, os import re, string import time, datetime, calendar, locale import urllib import cPickle import xml.sax.saxutils locale.setlocale(locale.LC_TIME, 'en_GB') currenttimestamp = time.strftime(u'%a, %d %b %Y %H:%M:%S +0000', time.gmtime()) locale.setlocale(locale.LC_TIME, 'hu_HU') # general settings settings = { 'rss_webmaster': u'[email protected] (Tisza Gergõ)', 'program_name': 'WpFeedMaker', 'program_version': '1.0', 'program_contact': '[email protected]', } # helpers def encode_title(s): s = s[0:1].upper() + s[1:] s = re.sub(' ', '_', s) return urllib.quote(s.encode('utf-8')) def date_vars(date, extend = {}): if date.isocalendar()[2] < 4: n = 1 else: n = 2 iso = date.isocalendar() dict = { 'year': iso[0], 'years1': iso[0] % 5, 'years2': iso[0] % 5 + 5, 'month': date.month, 'monthname': calendar.month_name[date.month].decode('iso-8859-2'), 'day' : date.day, 'week': iso[1], 'dow' : iso[2], 'n' : n, } dict.update(extend) return dict # Subclassing of URLopener - sets "User-agent: ", which Wikipedia requires to be set # to something else than the default "Python-urllib" class MyURLopener(urllib.URLopener): version = settings['program_name'] + "/" + settings['program_version'] + " " + settings['program_contact'] # Caching of HTML from Wikipedia class CacheItem: def __init__(self, html, date, fetchtime): self.html = html self.date = date self.fetchtime = fetchtime class WPCache: def __init__(self, settings): self.settings = settings self.url_opener = MyURLopener() self.filename = self.settings['cache_filename'] if (os.path.exists(self.filename)): file = open(self.filename) self.cache = cPickle.load(file) file.close() else: self.cache = {} def get_html(self, url, date): if url in self.cache: return self.cache[url].html else: html = self.url_opener.open(url).read() cacheitem = CacheItem(html, date, time.gmtime()) self.cache[url] = cacheitem return html # Weed out old entries, so cache doesn't get big def too_old(self, date): return (datetime.date.today() - date).days > self.settings['time_range'] def weed_out_old(self): self.cache = dict([x for x in self.cache.items() if not self.too_old(x[1].date)]) def save(self): self.weed_out_old() file = open(self.filename, "w") p = cPickle.Pickler(file) p.dump(self.cache) class WPFeed: def __init__(self, settings): self.settings = settings self.cache = WPCache(self.settings) def get_html(self, url, date, clean = True): html = self.cache.get_html(url, date) if clean: html = re.sub('\s*<!--[\s\S]*?-->\s*', '', html) return html def rss_item(self, item): return """<item> <title>%(title)s</title> <link>%(url)s</link> <guid isPermaLink="true">%(url)s</guid> <description>%(escaped_content)s</description> </item> """ % { 'title': xml.sax.saxutils.escape(item['title']), 'url': item['url'], 'escaped_content': xml.sax.saxutils.escape(item['content']), } def rss(self, items): self.xml = """<?xml version="1.0" encoding="UTF-8"?> <rss version="2.0" xmlns:blogChannel="http://backend.userland.com/blogChannelModule"> <channel> <title>%(rss_title)s</title> <link>%(rss_link)s</link> <description>%(rss_description)s</description> <language>hu</language> <copyright>CC-BY-SA-3.0</copyright> <lastBuildDate>%(build_date)s</lastBuildDate> <docs>http://blogs.law.harvard.edu/tech/rss</docs> <webMaster>%(webmaster)s</webMaster> <generator>%(generator)s</generator> %(items)s </channel> </rss> """ % { 'rss_title': self.settings['rss_title'], 'rss_link': self.settings['rss_link'], 'rss_description': 
self.settings['rss_description'], 'webmaster': settings['rss_webmaster'], 'build_date': currenttimestamp, 'items': '\n'.join(map(self.rss_item, items)), 'generator': settings['program_name'] + ' ' + settings['program_version'], } def save(self): file = open(self.settings['output_filename'], "w") file.write(self.xml.encode('utf-8')) file.close() self.cache.save() def main(): print "This file cannot be invoked directly" sys.exit(1) if __name__ == '__main__': main()
license: mit
hash: -2,498,234,806,636,101,000
line_mean: 25.103659
line_max: 107
alpha_frac: 0.640972
autogenerated: false
ratio: 2.772668
config_test: false
has_no_keywords: false
few_assignments: false
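rsslib.py above is a Python 2-era library (its main() only prints "This file cannot be invoked directly"), so a separate feed script is expected to supply the settings dict and the item list. Here is a minimal sketch of such a caller, limited to the settings keys the library actually reads (rss_title, rss_link, rss_description, cache_filename, output_filename, time_range); the source page URL and item content are invented.

# -*- coding: iso-8859-2 -*-
# Python 2, matching rsslib.py itself
import datetime

import rsslib

settings = {
    'rss_title': 'Featured articles',
    'rss_link': 'https://hu.wikipedia.org/',
    'rss_description': 'Daily featured article feed',
    'cache_filename': 'featured.cache',
    'output_filename': 'featured.xml',
    'time_range': 14,  # days of fetched HTML kept in the cache
}

feed = rsslib.WPFeed(settings)
today = datetime.date.today()
url = 'https://hu.wikipedia.org/wiki/Example'  # hypothetical source page
items = [{
    'title': 'Featured article of the day',
    'url': url,
    'content': feed.get_html(url, today),
}]
feed.rss(items)   # builds the RSS document in memory
feed.save()       # writes output_filename and persists the HTML cache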
repo_name: mgedmin/objgraph
path: objgraph.py
copies: 1
size: 43531
content:
""" Tools for drawing Python object reference graphs with graphviz. You can find documentation online at https://mg.pov.lt/objgraph/ Copyright (c) 2008-2017 Marius Gedminas <[email protected]> and contributors Released under the MIT licence. """ # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. from __future__ import print_function import codecs import collections import gc import inspect import itertools import operator import os import re import subprocess import sys import tempfile import types try: # Python 2.x compatibility from StringIO import StringIO except ImportError: # pragma: PY3 from io import StringIO try: from types import InstanceType except ImportError: # pragma: PY3 # Python 3.x compatibility InstanceType = None __author__ = "Marius Gedminas ([email protected])" __copyright__ = "Copyright (c) 2008-2017 Marius Gedminas and contributors" __license__ = "MIT" __version__ = '3.5.1.dev0' __date__ = '2020-10-11' try: basestring except NameError: # pragma: PY3 # Python 3.x compatibility basestring = str try: iteritems = dict.iteritems except AttributeError: # pragma: PY3 # Python 3.x compatibility iteritems = dict.items IS_INTERACTIVE = False try: # pragma: nocover import graphviz if 'TerminalInteractiveShell' not in get_ipython().__class__.__name__: # So far I know two shells where it's inappropriate to use inline # graphics, because they're text only: # - ipython uses a TerminalInteractiveShell # - pycharm's console uses PyDevTerminalInteractiveShell IS_INTERACTIVE = True except (NameError, ImportError): pass def _isinstance(object, classinfo): """Return whether an object is an instance of a class or its subclass. Differs from the builtin isinstance() implementation in that it does not depend on the ``__class__`` attribute which is proxied by mock.Mock(spec=...). """ return issubclass(type(object), classinfo) def count(typename, objects=None): """Count objects tracked by the garbage collector with a given class name. The class name can optionally be fully qualified. Example: >>> count('dict') 42 >>> count('mymodule.MyClass') 2 .. note:: The Python garbage collector does not track simple objects like int or str. See https://docs.python.org/3/library/gc.html#gc.is_tracked for more information. Instead of looking through all objects tracked by the GC, you may specify your own collection, e.g. >>> count('MyClass', get_leaking_objects()) 3 See also: :func:`get_leaking_objects`. .. versionchanged:: 1.7 New parameter: ``objects``. .. versionchanged:: 1.8 Accepts fully-qualified type names (i.e. 
'package.module.ClassName') as well as short type names (i.e. 'ClassName'). """ if objects is None: objects = gc.get_objects() try: if '.' in typename: return sum(1 for o in objects if _long_typename(o) == typename) else: return sum(1 for o in objects if _short_typename(o) == typename) finally: del objects # clear cyclic references to frame def typestats(objects=None, shortnames=True, filter=None): """Count the number of instances for each type tracked by the GC. Note that the GC does not track simple objects like int or str. Note that classes with the same name but defined in different modules will be lumped together if ``shortnames`` is True. If ``filter`` is specified, it should be a function taking one argument and returning a boolean. Objects for which ``filter(obj)`` returns ``False`` will be ignored. Example: >>> typestats() {'list': 12041, 'tuple': 10245, ...} >>> typestats(get_leaking_objects()) {'MemoryError': 1, 'tuple': 2795, 'RuntimeError': 1, 'list': 47, ...} .. versionadded:: 1.1 .. versionchanged:: 1.7 New parameter: ``objects``. .. versionchanged:: 1.8 New parameter: ``shortnames``. .. versionchanged:: 3.1.3 New parameter: ``filter``. """ if objects is None: objects = gc.get_objects() try: if shortnames: typename = _short_typename else: typename = _long_typename stats = {} for o in objects: if filter and not filter(o): continue n = typename(o) stats[n] = stats.get(n, 0) + 1 return stats finally: del objects # clear cyclic references to frame def most_common_types(limit=10, objects=None, shortnames=True, filter=None): """Count the names of types with the most instances. Returns a list of (type_name, count), sorted most-frequent-first. Limits the return value to at most ``limit`` items. You may set ``limit`` to None to avoid that. If ``filter`` is specified, it should be a function taking one argument and returning a boolean. Objects for which ``filter(obj)`` returns ``False`` will be ignored. The caveats documented in :func:`typestats` apply. Example: >>> most_common_types(limit=2) [('list', 12041), ('tuple', 10245)] .. versionadded:: 1.4 .. versionchanged:: 1.7 New parameter: ``objects``. .. versionchanged:: 1.8 New parameter: ``shortnames``. .. versionchanged:: 3.1.3 New parameter: ``filter``. """ stats = sorted( typestats(objects, shortnames=shortnames, filter=filter).items(), key=operator.itemgetter(1), reverse=True) if limit: stats = stats[:limit] return stats def show_most_common_types( limit=10, objects=None, shortnames=True, file=None, filter=None): """Print the table of types of most common instances. If ``filter`` is specified, it should be a function taking one argument and returning a boolean. Objects for which ``filter(obj)`` returns ``False`` will be ignored. The caveats documented in :func:`typestats` apply. Example: >>> show_most_common_types(limit=5) tuple 8959 function 2442 wrapper_descriptor 1048 dict 953 builtin_function_or_method 800 .. versionadded:: 1.1 .. versionchanged:: 1.7 New parameter: ``objects``. .. versionchanged:: 1.8 New parameter: ``shortnames``. .. versionchanged:: 3.0 New parameter: ``file``. .. versionchanged:: 3.1.3 New parameter: ``filter``. """ if file is None: file = sys.stdout stats = most_common_types(limit, objects, shortnames=shortnames, filter=filter) width = max(len(name) for name, count in stats) for name, count in stats: file.write('%-*s %i\n' % (width, name, count)) def growth(limit=10, peak_stats={}, shortnames=True, filter=None): """Count the increase in peak object since last call. 
Returns a list of (type_name, total_count, increase_delta), descending order by increase_delta. Limits the output to ``limit`` largest deltas. You may set ``limit`` to None to see all of them. Uses and updates ``peak_stats``, a dictionary from type names to previously seen peak object counts. Usually you don't need to pay attention to this argument. If ``filter`` is specified, it should be a function taking one argument and returning a boolean. Objects for which ``filter(obj)`` returns ``False`` will be ignored. The caveats documented in :func:`typestats` apply. Example: >>> growth(2) [(tuple, 12282, 10), (dict, 1922, 7)] .. versionadded:: 3.3.0 """ gc.collect() stats = typestats(shortnames=shortnames, filter=filter) deltas = {} for name, count in iteritems(stats): old_count = peak_stats.get(name, 0) if count > old_count: deltas[name] = count - old_count peak_stats[name] = count deltas = sorted(deltas.items(), key=operator.itemgetter(1), reverse=True) if limit: deltas = deltas[:limit] return [(name, stats[name], delta) for name, delta in deltas] def show_growth(limit=10, peak_stats=None, shortnames=True, file=None, filter=None): """Show the increase in peak object counts since last call. if ``peak_stats`` is None, peak object counts will recorded in func `growth`, and your can record the counts by yourself with set ``peak_stats`` to a dictionary. The caveats documented in :func:`growth` apply. Example: >>> show_growth() wrapper_descriptor 970 +14 tuple 12282 +10 dict 1922 +7 ... .. versionadded:: 1.5 .. versionchanged:: 1.8 New parameter: ``shortnames``. .. versionchanged:: 2.1 New parameter: ``file``. .. versionchanged:: 3.1.3 New parameter: ``filter``. """ if peak_stats is None: result = growth(limit, shortnames=shortnames, filter=filter) else: result = growth(limit, peak_stats, shortnames, filter) if result: if file is None: file = sys.stdout width = max(len(name) for name, _, _ in result) for name, count, delta in result: file.write('%-*s%9d %+9d\n' % (width, name, count, delta)) def get_new_ids(skip_update=False, limit=10, sortby='deltas', shortnames=None, file=None, _state={}): """Find and display new objects allocated since last call. Shows the increase in object counts since last call to this function and returns the memory address ids for new objects. Returns a dictionary mapping object type names to sets of object IDs that have been created since the last time this function was called. ``skip_update`` (bool): If True, returns the same dictionary that was returned during the previous call without updating the internal state or examining the objects currently in memory. ``limit`` (int): The maximum number of rows that you want to print data for. Use 0 to suppress the printing. Use None to print everything. ``sortby`` (str): This is the column that you want to sort by in descending order. Possible values are: 'old', 'current', 'new', 'deltas' ``shortnames`` (bool): If True, classes with the same name but defined in different modules will be lumped together. If False, all type names will be qualified with the module name. If None (default), ``get_new_ids`` will remember the value from previous calls, so it's enough to prime this once. By default the primed value is True. ``_state`` (dict): Stores old, current, and new_ids in memory. It is used by the function to store the internal state between calls. Never pass in this argument unless you know what you're doing. The caveats documented in :func:`growth` apply. 
When one gets new_ids from :func:`get_new_ids`, one can use :func:`at_addrs` to get a list of those objects. Then one can iterate over the new objects, print out what they are, and call :func:`show_backrefs` or :func:`show_chain` to see where they are referenced. Example: >>> _ = get_new_ids() # store current objects in _state >>> _ = get_new_ids() # current_ids become old_ids in _state >>> a = [0, 1, 2] # list we don't know about >>> b = [3, 4, 5] # list we don't know about >>> new_ids = get_new_ids(limit=3) # we see new lists ====================================================================== Type Old_ids Current_ids New_ids Count_Deltas ====================================================================== list 324 326 +3 +2 dict 1125 1125 +0 +0 wrapper_descriptor 1001 1001 +0 +0 ====================================================================== >>> new_lists = at_addrs(new_ids['list']) >>> a in new_lists True >>> b in new_lists True .. versionadded:: 3.4 """ if not _state: _state['old'] = collections.defaultdict(set) _state['current'] = collections.defaultdict(set) _state['new'] = collections.defaultdict(set) _state['shortnames'] = True new_ids = _state['new'] if skip_update: return new_ids old_ids = _state['old'] current_ids = _state['current'] if shortnames is None: shortnames = _state['shortnames'] else: _state['shortnames'] = shortnames gc.collect() objects = gc.get_objects() for class_name in old_ids: old_ids[class_name].clear() for class_name, ids_set in current_ids.items(): old_ids[class_name].update(ids_set) for class_name in current_ids: current_ids[class_name].clear() for o in objects: if shortnames: class_name = _short_typename(o) else: class_name = _long_typename(o) id_number = id(o) current_ids[class_name].add(id_number) for class_name in new_ids: new_ids[class_name].clear() rows = [] keys_to_remove = [] for class_name in current_ids: num_old = len(old_ids[class_name]) num_current = len(current_ids[class_name]) if num_old == 0 and num_current == 0: # remove the key from our dicts if we don't have any old or # current class_name objects keys_to_remove.append(class_name) continue new_ids_set = current_ids[class_name] - old_ids[class_name] new_ids[class_name].update(new_ids_set) num_new = len(new_ids_set) num_delta = num_current - num_old row = (class_name, num_old, num_current, num_new, num_delta) rows.append(row) for key in keys_to_remove: del old_ids[key] del current_ids[key] del new_ids[key] index_by_sortby = {'old': 1, 'current': 2, 'new': 3, 'deltas': 4} rows.sort(key=operator.itemgetter(index_by_sortby[sortby], 0), reverse=True) if limit is not None: rows = rows[:limit] if not rows: return new_ids if file is None: file = sys.stdout width = max(len(row[0]) for row in rows) print('='*(width+13*4), file=file) print('%-*s%13s%13s%13s%13s' % (width, 'Type', 'Old_ids', 'Current_ids', 'New_ids', 'Count_Deltas'), file=file) print('='*(width+13*4), file=file) for row_class, old, current, new, delta in rows: print('%-*s%13d%13d%+13d%+13d' % (width, row_class, old, current, new, delta), file=file) print('='*(width+13*4), file=file) return new_ids def get_leaking_objects(objects=None): """Return objects that do not have any referents. These could indicate reference-counting bugs in C code. Or they could be legitimate. Note that the GC does not track simple objects like int or str. .. 
versionadded:: 1.7 """ if objects is None: gc.collect() objects = gc.get_objects() try: ids = set(id(i) for i in objects) for i in objects: ids.difference_update(id(j) for j in gc.get_referents(i)) # this then is our set of objects without referrers return [i for i in objects if id(i) in ids] finally: del objects, i # clear cyclic references to frame def by_type(typename, objects=None): """Return objects tracked by the garbage collector with a given class name. Example: >>> by_type('MyClass') [<mymodule.MyClass object at 0x...>] Note that the GC does not track simple objects like int or str. .. versionchanged:: 1.7 New parameter: ``objects``. .. versionchanged:: 1.8 Accepts fully-qualified type names (i.e. 'package.module.ClassName') as well as short type names (i.e. 'ClassName'). """ if objects is None: objects = gc.get_objects() try: if '.' in typename: return [o for o in objects if _long_typename(o) == typename] else: return [o for o in objects if _short_typename(o) == typename] finally: del objects # clear cyclic references to frame def at(addr): """Return an object at a given memory address. The reverse of id(obj): >>> at(id(obj)) is obj True Note that this function does not work on objects that are not tracked by the GC (e.g. ints or strings). """ for o in gc.get_objects(): if id(o) == addr: return o return None def at_addrs(address_set): """Return a list of objects for a given set of memory addresses. The reverse of [id(obj1), id(obj2), ...]. Note that objects are returned in an arbitrary order. When one gets ``new_ids`` from :func:`get_new_ids`, one can use this function to get a list of those objects. Then one can iterate over the new objects, print out what they are, and call :func:`show_backrefs` or :func:`show_chain` to see where they are referenced. >>> a = [0, 1, 2] >>> new_ids = get_new_ids() >>> new_lists = at_addrs(new_ids['list']) >>> a in new_lists True Note that this function does not work on objects that are not tracked by the GC (e.g. ints or strings). .. versionadded:: 3.4 """ res = [] for o in gc.get_objects(): if id(o) in address_set: res.append(o) return res def find_ref_chain(obj, predicate, max_depth=20, extra_ignore=()): """Find a shortest chain of references leading from obj. The end of the chain will be some object that matches your predicate. ``predicate`` is a function taking one argument and returning a boolean. ``max_depth`` limits the search depth. ``extra_ignore`` can be a list of object IDs to exclude those objects from your search. Example: >>> find_ref_chain(obj, lambda x: isinstance(x, MyClass)) [obj, ..., <MyClass object at ...>] Returns ``[obj]`` if such a chain could not be found. .. versionadded:: 1.7 """ return _find_chain(obj, predicate, gc.get_referents, max_depth=max_depth, extra_ignore=extra_ignore)[::-1] def find_backref_chain(obj, predicate, max_depth=20, extra_ignore=()): """Find a shortest chain of references leading to obj. The start of the chain will be some object that matches your predicate. ``predicate`` is a function taking one argument and returning a boolean. ``max_depth`` limits the search depth. ``extra_ignore`` can be a list of object IDs to exclude those objects from your search. Example: >>> find_backref_chain(obj, is_proper_module) [<module ...>, ..., obj] Returns ``[obj]`` if such a chain could not be found. .. versionchanged:: 1.5 Returns ``obj`` instead of ``None`` when a chain could not be found. 
""" return _find_chain(obj, predicate, gc.get_referrers, max_depth=max_depth, extra_ignore=extra_ignore) def show_backrefs(objs, max_depth=3, extra_ignore=(), filter=None, too_many=10, highlight=None, filename=None, extra_info=None, refcounts=False, shortnames=True, output=None, extra_node_attrs=None): """Generate an object reference graph ending at ``objs``. The graph will show you what objects refer to ``objs``, directly and indirectly. ``objs`` can be a single object, or it can be a list of objects. If unsure, wrap the single object in a new list. ``filename`` if specified, can be the name of a .dot or a image file, whose extension indicates the desired output format; note that output to a specific format is entirely handled by GraphViz: if the desired format is not supported, you just get the .dot file. If ``filename`` and ``output`` are not specified, ``show_backrefs`` will try to display the graph inline (if you're using IPython), otherwise it'll try to produce a .dot file and spawn a viewer (xdot). If xdot is not available, ``show_backrefs`` will convert the .dot file to a .png and print its name. ``output`` if specified, the GraphViz output will be written to this file object. ``output`` and ``filename`` should not both be specified. Use ``max_depth`` and ``too_many`` to limit the depth and breadth of the graph. Use ``filter`` (a predicate) and ``extra_ignore`` (a list of object IDs) to remove undesired objects from the graph. Use ``highlight`` (a predicate) to highlight certain graph nodes in blue. Use ``extra_info`` (a function taking one argument and returning a string) to report extra information for objects. Use ``extra_node_attrs`` (a function taking the current object as argument, returning a dict of strings) to add extra attributes to the nodes. See https://www.graphviz.org/doc/info/attrs.html for a list of possible node attributes. Specify ``refcounts=True`` if you want to see reference counts. These will mostly match the number of arrows pointing to an object, but can be different for various reasons. Specify ``shortnames=False`` if you want to see fully-qualified type names ('package.module.ClassName'). By default you get to see only the class name part. Examples: >>> show_backrefs(obj) >>> show_backrefs([obj1, obj2]) >>> show_backrefs(obj, max_depth=5) >>> show_backrefs(obj, filter=lambda x: not inspect.isclass(x)) >>> show_backrefs(obj, highlight=inspect.isclass) >>> show_backrefs(obj, extra_ignore=[id(locals())]) >>> show_backrefs(obj, extra_node_attrs=lambda x: dict(URL=str(id(x)))) .. versionchanged:: 1.3 New parameters: ``filename``, ``extra_info``. .. versionchanged:: 1.5 New parameter: ``refcounts``. .. versionchanged:: 1.8 New parameter: ``shortnames``. .. versionchanged:: 2.0 New parameter: ``output``. .. versionchanged:: 3.5 New parameter: ``extra_node_attrs``. """ # For show_backrefs(), it makes sense to stop when reaching a # module because you'll end up in sys.modules and explode the # graph with useless clutter. That's why we're specifying # cull_func here, but not in show_graph(). 
return _show_graph(objs, max_depth=max_depth, extra_ignore=extra_ignore, filter=filter, too_many=too_many, highlight=highlight, edge_func=gc.get_referrers, swap_source_target=False, filename=filename, output=output, extra_info=extra_info, refcounts=refcounts, shortnames=shortnames, cull_func=is_proper_module, extra_node_attrs=extra_node_attrs) def show_refs(objs, max_depth=3, extra_ignore=(), filter=None, too_many=10, highlight=None, filename=None, extra_info=None, refcounts=False, shortnames=True, output=None, extra_node_attrs=None): """Generate an object reference graph starting at ``objs``. The graph will show you what objects are reachable from ``objs``, directly and indirectly. ``objs`` can be a single object, or it can be a list of objects. If unsure, wrap the single object in a new list. ``filename`` if specified, can be the name of a .dot or a image file, whose extension indicates the desired output format; note that output to a specific format is entirely handled by GraphViz: if the desired format is not supported, you just get the .dot file. If ``filename`` and ``output`` is not specified, ``show_refs`` will try to display the graph inline (if you're using IPython), otherwise it'll try to produce a .dot file and spawn a viewer (xdot). If xdot is not available, ``show_refs`` will convert the .dot file to a .png and print its name. ``output`` if specified, the GraphViz output will be written to this file object. ``output`` and ``filename`` should not both be specified. Use ``max_depth`` and ``too_many`` to limit the depth and breadth of the graph. Use ``filter`` (a predicate) and ``extra_ignore`` (a list of object IDs) to remove undesired objects from the graph. Use ``highlight`` (a predicate) to highlight certain graph nodes in blue. Use ``extra_info`` (a function returning a string) to report extra information for objects. Use ``extra_node_attrs`` (a function taking the current object as argument, returning a dict of strings) to add extra attributes to the nodes. See https://www.graphviz.org/doc/info/attrs.html for a list of possible node attributes. Specify ``refcounts=True`` if you want to see reference counts. Examples: >>> show_refs(obj) >>> show_refs([obj1, obj2]) >>> show_refs(obj, max_depth=5) >>> show_refs(obj, filter=lambda x: not inspect.isclass(x)) >>> show_refs(obj, highlight=inspect.isclass) >>> show_refs(obj, extra_ignore=[id(locals())]) >>> show_refs(obj, extra_node_attrs=lambda x: dict(URL=str(id(x)))) .. versionadded:: 1.1 .. versionchanged:: 1.3 New parameters: ``filename``, ``extra_info``. .. versionchanged:: 1.5 Follows references from module objects instead of stopping. New parameter: ``refcounts``. .. versionchanged:: 1.8 New parameter: ``shortnames``. .. versionchanged:: 2.0 New parameter: ``output``. .. versionchanged:: 3.5 New parameter: ``extra_node_attrs``. """ return _show_graph(objs, max_depth=max_depth, extra_ignore=extra_ignore, filter=filter, too_many=too_many, highlight=highlight, edge_func=gc.get_referents, swap_source_target=True, filename=filename, extra_info=extra_info, refcounts=refcounts, shortnames=shortnames, output=output, extra_node_attrs=extra_node_attrs) def show_chain(*chains, **kw): """Show a chain (or several chains) of object references. Useful in combination with :func:`find_ref_chain` or :func:`find_backref_chain`, e.g. >>> show_chain(find_backref_chain(obj, is_proper_module)) You can specify if you want that chain traced backwards or forwards by passing a ``backrefs`` keyword argument, e.g. 
>>> show_chain(find_ref_chain(obj, is_proper_module), ... backrefs=False) Ideally this shouldn't matter, but for some objects :func:`gc.get_referrers` and :func:`gc.get_referents` are not perfectly symmetrical. You can specify ``highlight``, ``extra_info``, ``refcounts``, ``shortnames``, ``filename`` or ``output`` arguments like for :func:`show_backrefs` or :func:`show_refs`. .. versionadded:: 1.5 .. versionchanged:: 1.7 New parameter: ``backrefs``. .. versionchanged:: 2.0 New parameter: ``output``. """ backrefs = kw.pop('backrefs', True) chains = [chain for chain in chains if chain] # remove empty ones def in_chains(x, ids=set(map(id, itertools.chain(*chains)))): return id(x) in ids max_depth = max(map(len, chains)) - 1 if backrefs: show_backrefs([chain[-1] for chain in chains], max_depth=max_depth, filter=in_chains, **kw) else: show_refs([chain[0] for chain in chains], max_depth=max_depth, filter=in_chains, **kw) def is_proper_module(obj): """ Returns ``True`` if ``obj`` can be treated like a garbage collector root. That is, if ``obj`` is a module that is in ``sys.modules``. >>> import types >>> is_proper_module([]) False >>> is_proper_module(types) True >>> is_proper_module(types.ModuleType('foo')) False .. versionadded:: 1.8 """ return ( inspect.ismodule(obj) and obj is sys.modules.get(getattr(obj, '__name__', None)) ) # # Internal helpers # def _find_chain(obj, predicate, edge_func, max_depth=20, extra_ignore=()): queue = [obj] depth = {id(obj): 0} parent = {id(obj): None} ignore = set(extra_ignore) ignore.add(id(extra_ignore)) ignore.add(id(queue)) ignore.add(id(depth)) ignore.add(id(parent)) ignore.add(id(ignore)) ignore.add(id(sys._getframe())) # this function ignore.add(id(sys._getframe(1))) # find_chain/find_backref_chain gc.collect() while queue: target = queue.pop(0) if predicate(target): chain = [target] while parent[id(target)] is not None: target = parent[id(target)] chain.append(target) return chain tdepth = depth[id(target)] if tdepth < max_depth: referrers = edge_func(target) ignore.add(id(referrers)) for source in referrers: if id(source) in ignore: continue if id(source) not in depth: depth[id(source)] = tdepth + 1 parent[id(source)] = target queue.append(source) return [obj] # not found def _show_graph(objs, edge_func, swap_source_target, max_depth=3, extra_ignore=(), filter=None, too_many=10, highlight=None, filename=None, extra_info=None, refcounts=False, shortnames=True, output=None, cull_func=None, extra_node_attrs=None): if not _isinstance(objs, (list, tuple)): objs = [objs] is_interactive = False if filename and output: raise ValueError('Cannot specify both output and filename.') elif output: f = output elif filename and filename.endswith('.dot'): f = codecs.open(filename, 'w', encoding='utf-8') dot_filename = filename elif IS_INTERACTIVE and not filename: is_interactive = True f = StringIO() else: fd, dot_filename = tempfile.mkstemp(prefix='objgraph-', suffix='.dot', text=True) f = os.fdopen(fd, "w") if getattr(f, 'encoding', None): # pragma: PY3 # Python 3 will wrap the file in the user's preferred encoding # Re-wrap it for utf-8 import io f = io.TextIOWrapper(f.detach(), 'utf-8') f.write('digraph ObjectGraph {\n' ' node[shape=box, style=filled, fillcolor=white];\n') queue = [] depth = {} ignore = set(extra_ignore) ignore.add(id(objs)) ignore.add(id(extra_ignore)) ignore.add(id(queue)) ignore.add(id(depth)) ignore.add(id(ignore)) ignore.add(id(sys._getframe())) # this function ignore.add(id(sys._getframe().f_locals)) ignore.add(id(sys._getframe(1))) # 
show_refs/show_backrefs ignore.add(id(sys._getframe(1).f_locals)) for obj in objs: f.write(' %s[fontcolor=red];\n' % (_obj_node_id(obj))) depth[id(obj)] = 0 queue.append(obj) del obj gc.collect() nodes = 0 while queue: nodes += 1 # The names "source" and "target" are reversed here because # originally there was just show_backrefs() and we were # traversing the reference graph backwards. target = queue.pop(0) tdepth = depth[id(target)] f.write(' %s[label="%s"%s];\n' % (_obj_node_id(target), _obj_label(target, extra_info, refcounts, shortnames), _obj_attrs(target, extra_node_attrs))) h, s, v = _gradient((0, 0, 1), (0, 0, .3), tdepth, max_depth) if inspect.ismodule(target): h = .3 s = 1 if highlight and highlight(target): h = .6 s = .6 v = 0.5 + v * 0.5 f.write(' %s[fillcolor="%g,%g,%g"];\n' % (_obj_node_id(target), h, s, v)) if v < 0.5: f.write(' %s[fontcolor=white];\n' % (_obj_node_id(target))) if hasattr(getattr(target, '__class__', None), '__del__'): f.write(' %s->%s_has_a_del[color=red,style=dotted,' 'len=0.25,weight=10];\n' % (_obj_node_id(target), _obj_node_id(target))) f.write(' %s_has_a_del[label="__del__",shape=doublecircle,' 'height=0.25,color=red,fillcolor="0,.5,1",fontsize=6];\n' % (_obj_node_id(target))) if tdepth >= max_depth: continue if cull_func is not None and cull_func(target): continue neighbours = edge_func(target) ignore.add(id(neighbours)) n = 0 skipped = 0 for source in neighbours: if id(source) in ignore: continue if filter and not filter(source): continue if n >= too_many: skipped += 1 continue if swap_source_target: srcnode, tgtnode = target, source else: srcnode, tgtnode = source, target elabel = _edge_label(srcnode, tgtnode, shortnames) f.write(' %s -> %s%s;\n' % (_obj_node_id(srcnode), _obj_node_id(tgtnode), elabel)) if id(source) not in depth: depth[id(source)] = tdepth + 1 queue.append(source) n += 1 del source del neighbours if skipped > 0: h, s, v = _gradient((0, 1, 1), (0, 1, .3), tdepth + 1, max_depth) if swap_source_target: label = "%d more references" % skipped edge = "%s->too_many_%s" % (_obj_node_id(target), _obj_node_id(target)) else: label = "%d more backreferences" % skipped edge = "too_many_%s->%s" % (_obj_node_id(target), _obj_node_id(target)) f.write(' %s[color=red,style=dotted,len=0.25,weight=10];\n' % edge) f.write(' too_many_%s[label="%s",shape=box,height=0.25,' 'color=red,fillcolor="%g,%g,%g",fontsize=6];\n' % (_obj_node_id(target), label, h, s, v)) f.write(' too_many_%s[fontcolor=white];\n' % (_obj_node_id(target))) f.write("}\n") if output: return if is_interactive: return graphviz.Source(f.getvalue()) else: # The file should only be closed if this function was in charge of # opening the file. f.close() print("Graph written to %s (%d nodes)" % (dot_filename, nodes)) _present_graph(dot_filename, filename) def _present_graph(dot_filename, filename=None): """Present a .dot file to the user in the requested fashion. If ``filename`` is provided, runs ``dot`` to convert the .dot file into the desired format, determined by the filename extension. If ``filename`` is not provided, tries to launch ``xdot``, a graphical .dot file viewer. If ``xdot`` is not present on the system, converts the graph to a PNG. 
""" if filename == dot_filename: # nothing to do, the user asked for a .dot file and got it return if not filename and _program_in_path('xdot'): print("Spawning graph viewer (xdot)") subprocess.Popen(['xdot', dot_filename], close_fds=True) elif _program_in_path('dot'): if not filename: print("Graph viewer (xdot) not found, generating a png instead") filename = dot_filename[:-4] + '.png' stem, ext = os.path.splitext(filename) cmd = ['dot', '-T' + ext[1:], '-o' + filename, dot_filename] dot = subprocess.Popen(cmd, close_fds=False) dot.wait() if dot.returncode != 0: # XXX: shouldn't this go to stderr or a log? print('dot failed (exit code %d) while executing "%s"' % (dot.returncode, ' '.join(cmd))) else: print("Image generated as %s" % filename) else: if not filename: print("Graph viewer (xdot) and image renderer (dot) not found," " not doing anything else") else: print("Image renderer (dot) not found, not doing anything else") def _obj_node_id(obj): return ('o%d' % id(obj)).replace('-', '_') def _obj_attrs(obj, extra_node_attrs): if extra_node_attrs is not None: attrs = extra_node_attrs(obj) return ", " + ", ".join('%s="%s"' % (name, _quote(value)) for name, value in sorted(iteritems(attrs)) if value is not None) else: return "" def _obj_label(obj, extra_info=None, refcounts=False, shortnames=True): if shortnames: label = [_short_typename(obj)] else: label = [_long_typename(obj)] if refcounts: label[0] += ' [%d]' % (sys.getrefcount(obj) - 4) # Why -4? To ignore the references coming from # obj_label's frame (obj) # show_graph's frame (target variable) # sys.getrefcount()'s argument # something else that doesn't show up in gc.get_referrers() label.append(_safe_repr(obj)) if extra_info: label.append(str(extra_info(obj))) return _quote('\n'.join(label)) def _quote(s): return (s.replace("\\", "\\\\") .replace("\"", "\\\"") .replace("\n", "\\n") .replace("\0", "\\\\0")) def _get_obj_type(obj): objtype = type(obj) if type(obj) == InstanceType: # pragma: PY2 -- no old-style classes on PY3 objtype = obj.__class__ return objtype def _short_typename(obj): return _get_obj_type(obj).__name__ def _long_typename(obj): objtype = _get_obj_type(obj) name = objtype.__name__ module = getattr(objtype, '__module__', None) if module: return '%s.%s' % (module, name) else: return name def _safe_repr(obj): try: return _short_repr(obj) except Exception: return '(unrepresentable)' def _name_or_repr(value): try: result = value.__name__ except AttributeError: result = repr(value)[:40] if _isinstance(result, basestring): return result else: return repr(value)[:40] def _short_repr(obj): if _isinstance(obj, (type, types.ModuleType, types.BuiltinMethodType, types.BuiltinFunctionType)): return _name_or_repr(obj) if _isinstance(obj, types.MethodType): name = _name_or_repr(obj.__func__) if obj.__self__: return name + ' (bound)' else: # pragma: PY2 -- no unbound methods on Python 3 return name # NB: types.LambdaType is an alias for types.FunctionType! 
if _isinstance(obj, types.LambdaType) and obj.__name__ == '<lambda>': return 'lambda: %s:%s' % (os.path.basename(obj.__code__.co_filename), obj.__code__.co_firstlineno) if _isinstance(obj, types.FrameType): return '%s:%s' % (obj.f_code.co_filename, obj.f_lineno) if _isinstance(obj, (tuple, list, dict, set)): return '%d items' % len(obj) return repr(obj)[:40] def _gradient(start_color, end_color, depth, max_depth): if max_depth == 0: # avoid division by zero return start_color h1, s1, v1 = start_color h2, s2, v2 = end_color f = float(depth) / max_depth h = h1 * (1-f) + h2 * f s = s1 * (1-f) + s2 * f v = v1 * (1-f) + v2 * f return h, s, v def _edge_label(source, target, shortnames=True): if (_isinstance(target, dict) and target is getattr(source, '__dict__', None)): return ' [label="__dict__",weight=10]' if _isinstance(source, types.FrameType): if target is source.f_locals: return ' [label="f_locals",weight=10]' if target is source.f_globals: return ' [label="f_globals",weight=10]' if _isinstance(source, types.MethodType): try: if target is source.__self__: return ' [label="__self__",weight=10]' if target is source.__func__: return ' [label="__func__",weight=10]' except AttributeError: # pragma: nocover # Python < 2.6 compatibility if target is source.im_self: return ' [label="im_self",weight=10]' if target is source.im_func: return ' [label="im_func",weight=10]' if _isinstance(source, types.FunctionType): for k in dir(source): if target is getattr(source, k): return ' [label="%s",weight=10]' % _quote(k) if _isinstance(source, dict): for k, v in iteritems(source): if v is target: if _isinstance(k, basestring) and _is_identifier(k): return ' [label="%s",weight=2]' % _quote(k) else: if shortnames: tn = _short_typename(k) else: tn = _long_typename(k) return ' [label="%s"]' % _quote(tn + "\n" + _safe_repr(k)) return '' _is_identifier = re.compile('[a-zA-Z_][a-zA-Z_0-9]*$').match def _program_in_path(program): # XXX: Consider using distutils.spawn.find_executable or shutil.which path = os.environ.get("PATH", os.defpath).split(os.pathsep) path = [os.path.join(dir, program) for dir in path] path = [True for file in path if os.path.isfile(file) or os.path.isfile(file + '.exe')] return bool(path)
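# Illustrative usage sketch (editor's addition, not part of the original
# module): assumes the code above is importable as `objgraph`; the Canary
# class and the `cache` dict are made-up stand-ins for a leaked object.
import objgraph

class Canary(object):
    pass

leaked = Canary()
cache = {'keep': leaked}   # module-level reference that keeps `leaked` alive

# Draw what keeps `leaked` reachable; with `dot` installed the .png is
# rendered, otherwise a .dot file is produced as described in _present_graph.
objgraph.show_backrefs([leaked], max_depth=4, filename='leaked-backrefs.png')

# Trace a single reference chain back to a module-level root and display it.
chain = objgraph.find_backref_chain(leaked, objgraph.is_proper_module)
objgraph.show_chain(chain)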
mit
6,699,779,760,792,617,000
33.575854
79
0.588202
false
3.878731
false
false
false
acg/lwpb
python/pbsplit.py
1
1605
#!/usr/bin/env python ''' pbsplit - split a protobuf stream into multiple files ''' import sys import getopt import lwpb import lwpb.stream import lwpb.codec def shift(L): e = L[0] ; del L[0:1] ; return e def main(): typename = "" skip = 0 count = -1 splitsize = 1000 # in number of records pb2file = None infile = "-" fin = sys.stdin template = None opts, args = getopt.getopt(sys.argv[1:], 'p:m:s:c:t:z:') for o, a in opts: if o == '-p': pb2file = a elif o == '-m': typename = a elif o == '-s': skip = int(a) elif o == '-c': count = int(a) elif o == '-t': template = a elif o == '-z': splitsize = int(a) if len(args): infile = shift(args) fin = file(infile) if template == None: template = infile+".%05u" codec = lwpb.codec.MessageCodec(pb2file=pb2file, typename=typename) reader = lwpb.stream.StreamReader(fin, codec=codec) writer = None fout = None outfile = None splitnum = 0 splitwritten = 0 written = 0 for record in reader: if reader.current_number < skip: continue if count >= 0 and written >= count: break if fout == None: outfile = template % splitnum fout = file(outfile, 'w') writer = lwpb.stream.StreamWriter(fout, codec=codec) splitwritten = 0 writer.write_raw( reader.current_raw ) written += 1 splitwritten += 1 if splitwritten >= splitsize: fout.close() fout = None splitnum += 1 if fout: fout.close() return 0 if __name__ == '__main__': sys.exit(main())
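# Illustrative invocation sketch (editor's addition, not part of the original
# script). Flag meanings follow the getopt loop above; the descriptor file,
# message type and input file names are made up.
import subprocess

subprocess.check_call([
    "python", "pbsplit.py",
    "-p", "messages.pb2",      # compiled protobuf descriptor (.pb2)
    "-m", "mypkg.LogRecord",   # fully qualified message type name
    "-z", "50000",             # records per output split
    "-t", "part.%05u",         # output filename template (one file per split)
    "logs.pb",                 # input stream; stdin is used when omitted
])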
apache-2.0
1,052,393,894,243,850,500
16.637363
69
0.576947
false
3.336798
false
false
false
bitmovin/bitmovin-python
examples/encoding/create_progressive_webm_encoding_with_vp9_and_opus_codecs.py
1
4978
import datetime from bitmovin import Bitmovin, Encoding, HTTPSInput, S3Output, \ StreamInput, SelectionMode, Stream, EncodingOutput, ACLEntry, ACLPermission, \ MuxingStream, CloudRegion, ProgressiveWebMMuxing, VP9CodecConfiguration, OpusCodecConfiguration, VP9Quality from bitmovin.errors import BitmovinError API_KEY = '<INSERT_YOUR_API_KEY>' # https://<INSERT_YOUR_HTTP_HOST>/<INSERT_YOUR_HTTP_PATH> HTTPS_INPUT_HOST = '<INSERT_YOUR_HTTPS_HOST>' HTTPS_INPUT_PATH = '<INSERT_YOUR_HTTPS_PATH>' S3_OUTPUT_ACCESSKEY = '<INSERT_YOUR_ACCESS_KEY>' S3_OUTPUT_SECRETKEY = '<INSERT_YOUR_SECRET_KEY>' S3_OUTPUT_BUCKETNAME = '<INSERT_YOUR_BUCKET_NAME>' date_component = str(datetime.datetime.now()).replace(' ', '_').replace(':', '-').split('.')[0].replace('_', '__') OUTPUT_BASE_PATH = '/output/base/path/{}/'.format(date_component) def main(): bitmovin = Bitmovin(api_key=API_KEY) https_input = HTTPSInput(name='create_simple_encoding HTTPS input', host=HTTPS_INPUT_HOST) https_input = bitmovin.inputs.HTTPS.create(https_input).resource s3_output = S3Output(access_key=S3_OUTPUT_ACCESSKEY, secret_key=S3_OUTPUT_SECRETKEY, bucket_name=S3_OUTPUT_BUCKETNAME, name='Sample S3 Output') s3_output = bitmovin.outputs.S3.create(s3_output).resource encoding = Encoding(name='example webm encoding', cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1, encoder_version='BETA') encoding = bitmovin.encodings.Encoding.create(encoding).resource video_codec_configuration_1080p = VP9CodecConfiguration(name='example_video_codec_configuration_1080p', bitrate=4800000, rate=25.0, width=1920, height=1080, tile_columns=2, quality=VP9Quality.GOOD) video_codec_configuration_1080p = bitmovin.codecConfigurations.VP9.create(video_codec_configuration_1080p).resource audio_codec_configuration = OpusCodecConfiguration(name='example_audio_codec_configuration_english', bitrate=128000, rate=48000) audio_codec_configuration = bitmovin.codecConfigurations.Opus.create(audio_codec_configuration).resource video_input_stream = StreamInput(input_id=https_input.id, input_path=HTTPS_INPUT_PATH, selection_mode=SelectionMode.AUTO) audio_input_stream = StreamInput(input_id=https_input.id, input_path=HTTPS_INPUT_PATH, selection_mode=SelectionMode.AUTO) video_stream_1080p = Stream(codec_configuration_id=video_codec_configuration_1080p.id, input_streams=[video_input_stream], name='Sample Stream 1080p') video_stream_1080p = bitmovin.encodings.Stream.create(object_=video_stream_1080p, encoding_id=encoding.id).resource audio_stream = Stream(codec_configuration_id=audio_codec_configuration.id, input_streams=[audio_input_stream], name='Sample Stream AUDIO') audio_stream = bitmovin.encodings.Stream.create(object_=audio_stream, encoding_id=encoding.id).resource audio_muxing_stream = MuxingStream(audio_stream.id) video_muxing_stream_1080p = MuxingStream(video_stream_1080p.id) acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ) webm_muxing_output = EncodingOutput(output_id=s3_output.id, output_path=OUTPUT_BASE_PATH, acl=[acl_entry]) webm_muxing = ProgressiveWebMMuxing(streams=[video_muxing_stream_1080p, audio_muxing_stream], filename='myfile.webm', outputs=[webm_muxing_output], name='Sample WebM Muxing 1080p') webm_muxing = bitmovin.encodings.Muxing.ProgressiveWebM.create(object_=webm_muxing, encoding_id=encoding.id).resource bitmovin.encodings.Encoding.start(encoding_id=encoding.id) try: bitmovin.encodings.Encoding.wait_until_finished(encoding_id=encoding.id) except BitmovinError as bitmovin_error: print("Exception occurred while waiting for encoding to finish: 
{}".format(bitmovin_error)) print("File successfully encoded") if __name__ == '__main__': main()
unlicense
-3,270,803,099,738,486,000
48.78
119
0.57955
false
4.211506
true
false
false
f3at/feat
src/feat/models/value.py
1
25453
# F3AT - Flumotion Asynchronous Autonomous Agent Toolkit # Copyright (C) 2010,2011 Flumotion Services, S.A. # All rights reserved. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # See "LICENSE.GPL" in the source distribution for more information. # Headers in this file shall remain intact. from zope.interface import implements, classImplements from feat.common import annotate, container from feat.models import meta as models_meta, action from feat.models.interface import IValueInfo, NotSupported, IValueOptions from feat.models.interface import IValidator, IValueRange, ValueTypes from feat.models.interface import IEncodingInfo, IModel, IReference from feat.models.interface import IValueOption, IResponse, MissingParameters from feat.models.interface import UnknownParameters, InvalidParameters from feat.models.interface import IValueCollection, IValueList from feat.interface.serialization import ISnapshotable meta = models_meta.meta def label(lable): """ Annotates the IValueInfo label. @param lable: label of the IValueInfo being defined. @type lable: str or unicode """ _annotate("label", lable) def desc(desc): """ Annotates the IValueInfo description. @param desc: description of the IValueInfo being defined. @type desc: str or unicode """ _annotate("desc", desc) def value_type(vtype): """ Annotates the IValueInfo value type. @param vtype: type of the IValueInfo being defined. @type vtype: ValueTypes """ _annotate("value_type", vtype) def default(default): """ Annotates the IValueInfo default value, will be validated at instance creation time. @param default: default value of the IValueInfo being defined. @type default: Any """ _annotate("default", default) def option(value, is_default=False, label=None): """ Annotates a possible value for IValueOptions, will be validated at instance creation time. @param value: a possible value for the IValueOptions being defined. @type value: Any @param is_default: if the option should be the default value. @type is_default: bool @param label: option label or None; if none the string representation of the value will be used as label. @type label: str or unicode or None """ _annotate("option", value, is_default=is_default, label=label) def options_only(): """ Annotates to enforce the value to be one of the specified options. """ _annotate("options_only") def allows(value_info): """ Annotate an allowed value info for a collection. @param value_info: an allowed value for the collection. @type value_info: IValueInfo """ _annotate("allows", value_info) def is_ordered(flag): """Annotate a collection to be ordered. @param flag: if the collection order is important. @type flag: bool """ _annotate("is_ordered", flag) def min_size(size): """ Annotate a collection minimum size. @param size: the collection minimum size. @type size: int """ _annotate("min_size", size) def max_size(size): """ Annotate a collection maximum size. 
@param size: the collection maximum size. @type size: int """ _annotate("max_size", size) def _annotate(name, *args, **kwargs): method_name = "annotate_" + name annotate.injectClassCallback(name, 4, method_name, *args, **kwargs) class BaseValue(models_meta.Metadata): implements(IValueInfo, IValidator) _class_label = None _class_desc = None _class_value_type = None _class_use_default = False _class_default = None ### IValueInfo ### @property def label(self): return self._class_label @property def desc(self): return self._class_desc @property def value_type(self): return self._class_value_type @property def use_default(self): return self._class_use_default @property def default(self): return self._class_default def __eq__(self, other): if not IValueInfo.providedBy(other): return NotSupported other = IValueInfo(other) if self.value_type != other.value_type: return False if self.use_default != other.use_default: return False if self.use_default and (self._default != other.default): return False if IValueOptions.providedBy(self) != IValueOptions.providedBy(other): return False if IValueOptions.providedBy(self): other = IValueOptions(other) other_options = set(other.iter_options()) self_options = set(self.iter_options()) if other_options != self_options: return False if self.is_restricted != other.is_restricted: return False if IValueRange.providedBy(self) != IValueRange.providedBy(other): return False if IValueRange.providedBy(self): other = IValueRange(other) if (self.minimum != other.minimum or self.maximum != other.maximum or self.increment != other.increment): return False return True def __ne__(self, other): eq = self.__eq__(other) return eq if eq is NotSupported else not eq ### IValidator ### def validate(self, value): if value is None and self.use_default: value = self.default return value def publish(self, value): if value is None and self.use_default: value = self.default return value def as_string(self, value): return unicode(self.publish(value)) ### annotations ### @classmethod def annotate_label(cls, label): """@see: feat.models.value.label""" cls._class_label = label @classmethod def annotate_desc(cls, desc): """@see: feat.models.value.desc""" cls._class_desc = desc @classmethod def annotate_value_type(cls, value_type): """@see: feat.models.value.value_type""" if value_type not in ValueTypes: raise ValueError(value_type) cls._class_value_type = value_type @classmethod def annotate_default(cls, default): """@see: feat.models.value.default""" cls._class_use_default = True cls._class_default = default class Binary(BaseValue): implements(IEncodingInfo) value_type(ValueTypes.binary) def __init__(self, mime_type=None, encoding=None): self._mime_type = mime_type self._encoding = encoding ### IEncodingInfo ### @property def mime_type(self): return self._mime_type @property def encoding(self): return self._encoding class InterfaceValue(BaseValue): _value_interface = None def __init__(self, value_interface=None): if type(self)._value_interface is None: self._value_interface = value_interface def validate(self, value): new_value = BaseValue.validate(self, value) if not self._value_interface.providedBy(value): raise ValueError(value) return new_value def publish(self, value): new_value = BaseValue.publish(self, value) if not self._value_interface.providedBy(value): raise ValueError("%r does not provide %r interface" % (value, self._value_interface)) return new_value class Response(InterfaceValue): """Definition of a model value.""" _value_interface = IResponse value_type(ValueTypes.model) 
class Model(InterfaceValue): """Definition of a model value.""" _value_interface = IModel value_type(ValueTypes.model) class Reference(InterfaceValue): """Definition of a model value.""" _value_interface = IReference value_type(ValueTypes.reference) class Struct(BaseValue): """Definition of a model value.""" _value_interface = ISnapshotable value_type(ValueTypes.struct) class Value(BaseValue): _class_options = None _class_options_only = False def __init__(self, *args, **kwargs): label = self._class_label desc = self._class_desc self._label = unicode(label) if label is not None else None self._desc = unicode(desc) if desc is not None else None self._value_type = self._class_value_type self._options_only = False self._options = [] if self._class_options is not None: for v, l in self._class_options: self._add_option(v, l) self._options_only = self._class_options_only self._use_default = self._class_use_default self._default = None if self._use_default: self._default = self._validate_default(self._class_default) if "default" in kwargs: if len(args) > 0: raise ValueError("If the default value is specified " "as a keyword, no argument are allowed") self._set_default(kwargs.pop("default")) else: if len(args) > 1: raise ValueError("Only default value is " "supported as argument") if len(args) > 0: self._set_default(args[0]) if kwargs: raise ValueError("Unsupported keyword arguments") ### IValueInfo ### @property def label(self): return self._label @property def desc(self): return self._desc @property def value_type(self): return self._value_type @property def use_default(self): return self._use_default @property def default(self): return self._default ### IValidator ### def validate(self, value): value = BaseValue.validate(self, value) if self._options_only and not self._has_option(value): raise ValueError("Value not allowed: %r" % (value, )) return value def publish(self, value): value = BaseValue.validate(self, value) if self._options_only and not self._has_option(value): raise ValueError("Value not allowed: %r" % (value, )) return value ### IValueOptions ### @property def is_restricted(self): return self._options_only def count_options(self): return len(self._options) def iter_options(self): return iter(self._options) def has_option(self, value): try: return self._has_option(self._validate_option(value)) except ValueError: return False def get_option(self, value): value = unicode(value) try: return next((o for o in self._options if o.value == value)) except StopIteration: return None ### protected ### def _validate_default(self, value): return self.validate(value) def _validate_option(self, value): return self.validate(value) def _has_option(self, value): try: next((o for o in self._options if o.value == value)) return True except StopIteration: return False def _set_default(self, default): self._default = self._validate_default(default) self._use_default = True def _add_option(self, value, label=None): # Disable options_only to be able to validate the value options_only = self._options_only self._options_only = False try: self._validate_option(value) option = ValueOption(value, label) self._options.append(option) finally: self._options_only = options_only ### annotations ### @classmethod def annotate_option(cls, value, is_default=False, label=None): """@see: feat.models.value.option""" if cls._class_options is None: cls._class_options = container.MroList("_mro_options") classImplements(cls, IValueOptions) if is_default: cls._class_default = value cls._class_use_default = True 
cls._class_options.append((value, label)) @classmethod def annotate_options_only(cls): """@see: feat.models.value.options_only""" cls._class_options_only = True class ValueOption(object): """Pair of value/label defining a possible option. @see: feat.models.interface.IValueOption""" implements(IValueOption) def __init__(self, value, label=None): self._value = value self._label = unicode(label) if label is not None else unicode(value) ### IValueOption ### @property def value(self): return self._value @property def label(self): return self._label def __eq__(self, other): if not IValueOption.providedBy(other): return False return (self._value == other.value and self._label == other.label) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self._value) ^ hash(self._label) class String(Value): """String value definition.""" value_type(ValueTypes.string) ### overridden ### def validate(self, value): """ Accepts: str, unicode Returns: unicode """ val = value if isinstance(val, str): #FIXME: unsafe decoding val = unicode(value) val = super(String, self).validate(val) if not isinstance(val, unicode): raise ValueError("Not a string: %r" % (value, )) return val def publish(self, value): """ Accepts: unicode, str Returns: unicode """ val = value if isinstance(val, str): #FIXME: unsafe decoding val = unicode(value) val = super(String, self).publish(val) if not isinstance(val, unicode): raise ValueError("Not a string: %r" % (value, )) return val class Float(Value): value_type(ValueTypes.number) def validate(self, value): """ Accepts: float, int, long, str, unicode Returns: float """ if isinstance(value, (str, unicode, int, long)): value = float(value) value = super(Float, self).validate(value) if not isinstance(value, (float)): raise ValueError("Not an float: %r" % (value, )) return value def publish(self, value): """ Accepts: float Returns: float """ value = super(Float, self).publish(value) if isinstance(value, int): value = float(value) return value class Integer(Value): """Definition of an basic integer value.""" value_type(ValueTypes.integer) ### overridden ### def validate(self, value): """ Accepts: int, long, str, unicode Returns: int, long """ if isinstance(value, (str, unicode, float)): value = int(value) value = super(Integer, self).validate(value) if not isinstance(value, (int, long)): raise ValueError("Not an integer: %r" % (value, )) return value def publish(self, value): """ Accepts: int, long Returns: int, long """ value = super(Integer, self).publish(value) if isinstance(value, float): value = int(value) if not isinstance(value, (int, long)): raise ValueError("Not an integer: %r" % (value, )) return value class Boolean(Value): """Definition of an basic integer value.""" value_type(ValueTypes.boolean) option(True, label="True") option(False, label="False") options_only() ### overridden ### def validate(self, value): """ Accepts: str, unicode, bool Returns: bool """ if isinstance(value, bool): return value if isinstance(value, (str, unicode)): if value.lower() == "true": value = True elif value.lower() == "false": value = False else: raise ValueError("Not a boolean: %r" % (value, )) value = super(Boolean, self).validate(value) if not isinstance(value, bool): raise ValueError("Not a boolean: %r" % (value, )) return value def publish(self, value): value = super(Boolean, self).publish(value) if not isinstance(value, bool): raise ValueError("Not a boolean: %r" % (value, )) return value class Enum(Value): """Definition of integer value with a fixed set of 
possible values taken from an enumeration.""" value_type(ValueTypes.string) options_only() implements(IValueOptions) def __init__(self, enum, *args, **kwargs): self._enum = enum Value.__init__(self, *args, **kwargs) for i in enum: self._add_option(i) ### IValidator ### def validate(self, value): if value is None and self._use_default: value = self._default if isinstance(value, (str, unicode, int)): if value in self._enum: return self._enum[value] if isinstance(value, int): if value in self._enum: return unicode(self._enum[value].name) raise ValueError(value) def publish(self, value): if value is None and self._use_default: value = self._default if isinstance(value, (str, unicode)): if value in self._enum: return unicode(value) if isinstance(value, int): if value in self._enum: return unicode(self._enum[value].name) raise ValueError(value) ### overridden ### def _validate_option(self, value): return unicode(self.validate(value).name) def _add_option(self, value, label=None): if isinstance(value, self._enum): value = unicode(value.name) return Value._add_option(self, value, label) class FixedValues(Value): ''' String value of one of defined options. Use: FixedValue(["option1", "option2", ...]) ''' value_type(ValueTypes.string) options_only() implements(IValueOptions) def __init__(self, values, *args, **kwargs): Value.__init__(self, *args, **kwargs) for v in values: self._add_option(v) class Structure(Value): implements(IValueList) value_type(ValueTypes.struct) _fields = container.MroList("_mro_fields") def validate(self, value): if not isinstance(value, dict): raise ValueError("Expected dictionary, got %r" % (value, )) fields = self.fields params = set(value.keys()) expected = set([p.name for p in fields]) required = set([p.name for p in fields if p.is_required]) missing = required - params if missing: raise MissingParameters("", params=missing) unknown = params - expected if unknown: raise UnknownParameters("", params=unknown) param_index = dict([(p.name, p) for p in fields]) validated = {} errors = {} for param_name, param_value in value.iteritems(): param_name = str(param_name) info = param_index[param_name].value_info try: valval = IValidator(info).validate(param_value) validated[param_name] = valval except ValueError, e: errors[param_name] = e if errors: raise InvalidParameters("", params=errors) for param in fields: if not param.is_required: info = param.value_info if param.name not in validated and info.use_default: validated[str(param.name)] = info.default return validated def publish(self, value): def getter(value, name): try: if isinstance(value, dict): return value[name] else: return getattr(value, name) except (KeyError, AttributeError) as e: raise ValueError(str(e)) result = dict() for field in self.fields: try: v = getter(value, field.name) result[field.name] = field.value_info.publish(v) except ValueError: if field.is_required: raise if field.value_info.use_default: result[field.name] = field.value_info.publish( field.value_info.default) return result ### IValueList ### @property def fields(self): inverted_result = [] already_added = set() for p in reversed(self._fields): if p.name not in already_added: inverted_result.append(p) already_added.add(p.name) return list(reversed(inverted_result)) ### annotations ### @classmethod def annotate_param(cls, name, value_info, is_required=True, label=None, desc=None): name = unicode(name) param = action.Param(name, value_info, is_required=is_required, label=label, desc=desc) cls._fields.append(param) field = action.param class 
MetaCollection(type(Value)): @staticmethod def new(name, allowed_types=[], min_size=None, max_size=None, is_ordered=True): cls = MetaCollection(name, (Collection, ), {}) for value_info in allowed_types: cls.annotate_allows(value_info) cls.annotate_is_ordered(is_ordered) if min_size is not None: cls.annotate_min_size(min_size) if max_size is not None: cls.annotate_max_size(max_size) return cls class Collection(Value): implements(IValueCollection) _class_allowed_types = container.MroList("_mro_allowed_types") _class_is_ordered = True _class_min_size = None _class_max_size = None value_type(ValueTypes.collection) ### IValueCollection ### @property def allowed_types(self): return list(self._class_allowed_types) @property def is_ordered(self): return self._class_is_ordered @property def min_size(self): return self._class_min_size @property def max_size(self): return self._class_max_size ### overridden ### def validate(self, value): return self._convert(value, "validate") def publish(self, value): return self._convert(value, "publish") ### annotations ### @classmethod def annotate_allows(cls, value_info): """@see: feat.models.value.allows""" value_info = _validate_value_info(value_info) cls._class_allowed_types.append(value_info) @classmethod def annotate_is_ordered(cls, flag): """@see: feat.models.value.is_ordered""" cls._class_is_ordered = _validate_flag(flag) @classmethod def annotate_min_size(cls, size): """@see: feat.models.value.min_size""" cls._class_min_size = _validate_size(size) @classmethod def annotate_max_size(cls, size): """@see: feat.models.value.max_size""" cls._class_max_size = _validate_size(size) ### private ### def _convert(self, value, method_name): if isinstance(value, (str, unicode)): raise ValueError(value) try: all_values = list(value) except TypeError: raise ValueError(value) result = [] if self._class_min_size is not None: if len(all_values) < self._class_min_size: raise ValueError(value) if self._class_max_size is not None: if len(all_values) > self._class_max_size: raise ValueError(value) allowed_types = list(self._class_allowed_types) for v in all_values: for allowed in allowed_types: try: result.append(getattr(allowed, method_name)(v)) break except (ValueError, InvalidParameters), e: continue else: raise ValueError(value) return result ### private ### def _validate_value_info(value_info): return IValueInfo(value_info) def _validate_size(size): return int(size) def _validate_flag(flag): return bool(flag)
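# Illustrative sketch (editor's addition, not part of the original module):
# declaring value definitions with the annotations defined above, in the same
# style as the built-in Boolean class; the Severity/Tags examples are made up
# and assume the module's helpers (value_type, option, ...) are in scope.
class Severity(Value):
    value_type(ValueTypes.string)
    label("Severity")
    desc("Importance of a log entry")
    option(u"info", is_default=True)
    option(u"warning")
    option(u"error")
    options_only()

severity = Severity()
severity.validate(u"warning")   # accepted, returns u"warning"
severity.validate(None)         # falls back to the default, u"info"
# severity.validate(u"bogus")   # would raise ValueError: options_only() is set

# An unordered collection of 1 to 5 strings, built with MetaCollection.new:
Tags = MetaCollection.new("Tags", allowed_types=[String()],
                          min_size=1, max_size=5, is_ordered=False)
Tags().validate([u"alpha", u"beta"])   # returns [u"alpha", u"beta"]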
gpl-2.0
6,844,360,681,269,452,000
26.606291
78
0.591011
false
4.179475
false
false
false
beiko-lab/gengis
bin/Lib/site-packages/numpy/ma/mrecords.py
1
28557
""":mod:`numpy.ma..mrecords` Defines the equivalent of :class:`numpy.recarrays` for masked arrays, where fields can be accessed as attributes. Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes and the masking of individual fields. :author: Pierre Gerard-Marchant """ #!!!: * We should make sure that no field is called '_mask','mask','_fieldmask', #!!!: or whatever restricted keywords. #!!!: An idea would be to no bother in the first place, and then rename the #!!!: invalid fields with a trailing underscore... #!!!: Maybe we could just overload the parser function ? __author__ = "Pierre GF Gerard-Marchant" import sys import numpy as np from numpy import bool_, dtype, \ ndarray, recarray, array as narray import numpy.core.numerictypes as ntypes from numpy.core.records import fromarrays as recfromarrays, \ fromrecords as recfromrecords _byteorderconv = np.core.records._byteorderconv _typestr = ntypes._typestr import numpy.ma as ma from numpy.ma import MAError, MaskedArray, masked, nomask, masked_array, \ getdata, getmaskarray, filled _check_fill_value = ma.core._check_fill_value import warnings __all__ = ['MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords', 'fromtextfile', 'addfield', ] reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype'] def _getformats(data): "Returns the formats of each array of arraylist as a comma-separated string." if hasattr(data, 'dtype'): return ",".join([desc[1] for desc in data.dtype.descr]) formats = '' for obj in data: obj = np.asarray(obj) formats += _typestr[obj.dtype.type] if issubclass(obj.dtype.type, ntypes.flexible): formats += `obj.itemsize` formats += ',' return formats[:-1] def _checknames(descr, names=None): """Checks that the field names of the descriptor ``descr`` are not some reserved keywords. If this is the case, a default 'f%i' is substituted. If the argument `names` is not None, updates the field names to valid names. """ ndescr = len(descr) default_names = ['f%i' % i for i in range(ndescr)] if names is None: new_names = default_names else: if isinstance(names, (tuple, list)): new_names = names elif isinstance(names, str): new_names = names.split(',') else: raise NameError("illegal input names %s" % `names`) nnames = len(new_names) if nnames < ndescr: new_names += default_names[nnames:] ndescr = [] for (n, d, t) in zip(new_names, default_names, descr.descr): if n in reserved_fields: if t[0] in reserved_fields: ndescr.append((d, t[1])) else: ndescr.append(t) else: ndescr.append((n, t[1])) return np.dtype(ndescr) def _get_fieldmask(self): mdescr = [(n, '|b1') for n in self.dtype.names] fdmask = np.empty(self.shape, dtype=mdescr) fdmask.flat = tuple([False] * len(mdescr)) return fdmask class MaskedRecords(MaskedArray, object): """ *IVariables*: _data : {recarray} Underlying data, as a record array. _mask : {boolean array} Mask of the records. A record is masked when all its fields are masked. _fieldmask : {boolean recarray} Record array of booleans, setting the mask of each individual field of each record. _fill_value : {record} Filling values for each field. """ #............................................ 
def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, formats=None, names=None, titles=None, byteorder=None, aligned=False, mask=nomask, hard_mask=False, fill_value=None, keep_mask=True, copy=False, **options): # self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, strides=strides, formats=formats, names=names, titles=titles, byteorder=byteorder, aligned=aligned,) # mdtype = ma.make_mask_descr(self.dtype) if mask is nomask or not np.size(mask): if not keep_mask: self._mask = tuple([False] * len(mdtype)) else: mask = np.array(mask, copy=copy) if mask.shape != self.shape: (nd, nm) = (self.size, mask.size) if nm == 1: mask = np.resize(mask, self.shape) elif nm == nd: mask = np.reshape(mask, self.shape) else: msg = "Mask and data not compatible: data size is %i, " + \ "mask size is %i." raise MAError(msg % (nd, nm)) copy = True if not keep_mask: self.__setmask__(mask) self._sharedmask = True else: if mask.dtype == mdtype: _mask = mask else: _mask = np.array([tuple([m] * len(mdtype)) for m in mask], dtype=mdtype) self._mask = _mask return self #...................................................... def __array_finalize__(self, obj): # Make sure we have a _fieldmask by default .. _mask = getattr(obj, '_mask', None) if _mask is None: objmask = getattr(obj, '_mask', nomask) _dtype = ndarray.__getattribute__(self, 'dtype') if objmask is nomask: _mask = ma.make_mask_none(self.shape, dtype=_dtype) else: mdescr = ma.make_mask_descr(_dtype) _mask = narray([tuple([m] * len(mdescr)) for m in objmask], dtype=mdescr).view(recarray) # Update some of the attributes _dict = self.__dict__ _dict.update(_mask=_mask) self._update_from(obj) if _dict['_baseclass'] == ndarray: _dict['_baseclass'] = recarray return def _getdata(self): "Returns the data as a recarray." return ndarray.view(self, recarray) _data = property(fget=_getdata) def _getfieldmask(self): "Alias to mask" return self._mask _fieldmask = property(fget=_getfieldmask) def __len__(self): "Returns the length" # We have more than one record if self.ndim: return len(self._data) # We have only one record: return the nb of fields return len(self.dtype) def __getattribute__(self, attr): try: return object.__getattribute__(self, attr) except AttributeError: # attr must be a fieldname pass fielddict = ndarray.__getattribute__(self, 'dtype').fields try: res = fielddict[attr][:2] except (TypeError, KeyError): raise AttributeError("record array has no attribute %s" % attr) # So far, so good... _localdict = ndarray.__getattribute__(self, '__dict__') _data = ndarray.view(self, _localdict['_baseclass']) obj = _data.getfield(*res) if obj.dtype.fields: raise NotImplementedError("MaskedRecords is currently limited to"\ "simple records...") # Get some special attributes # Reset the object's mask hasmasked = False _mask = _localdict.get('_mask', None) if _mask is not None: try: _mask = _mask[attr] except IndexError: # Couldn't find a mask: use the default (nomask) pass hasmasked = _mask.view((np.bool, (len(_mask.dtype) or 1))).any() if (obj.shape or hasmasked): obj = obj.view(MaskedArray) obj._baseclass = ndarray obj._isfield = True obj._mask = _mask # Reset the field values _fill_value = _localdict.get('_fill_value', None) if _fill_value is not None: try: obj._fill_value = _fill_value[attr] except ValueError: obj._fill_value = None else: obj = obj.item() return obj def __setattr__(self, attr, val): "Sets the attribute attr to the value val." # Should we call __setmask__ first ? 
if attr in ['mask', 'fieldmask']: self.__setmask__(val) return # Create a shortcut (so that we don't have to call getattr all the time) _localdict = object.__getattribute__(self, '__dict__') # Check whether we're creating a new field newattr = attr not in _localdict try: # Is attr a generic attribute ? ret = object.__setattr__(self, attr, val) except: # Not a generic attribute: exit if it's not a valid field fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} optinfo = ndarray.__getattribute__(self, '_optinfo') or {} if not (attr in fielddict or attr in optinfo): exctype, value = sys.exc_info()[:2] raise exctype, value else: # Get the list of names ...... fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} # Check the attribute if attr not in fielddict: return ret if newattr: # We just added this one try: # or this setattr worked on an internal # attribute. object.__delattr__(self, attr) except: return ret # Let's try to set the field try: res = fielddict[attr][:2] except (TypeError, KeyError): raise AttributeError("record array has no attribute %s" % attr) # if val is masked: _fill_value = _localdict['_fill_value'] if _fill_value is not None: dval = _localdict['_fill_value'][attr] else: dval = val mval = True else: dval = filled(val) mval = getmaskarray(val) obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res) _localdict['_mask'].__setitem__(attr, mval) return obj def __getitem__(self, indx): """Returns all the fields sharing the same fieldname base. The fieldname base is either `_data` or `_mask`.""" _localdict = self.__dict__ _mask = ndarray.__getattribute__(self, '_mask') _data = ndarray.view(self, _localdict['_baseclass']) # We want a field ........ if isinstance(indx, basestring): #!!!: Make sure _sharedmask is True to propagate back to _fieldmask #!!!: Don't use _set_mask, there are some copies being made... #!!!: ...that break propagation #!!!: Don't force the mask to nomask, that wrecks easy masking obj = _data[indx].view(MaskedArray) obj._mask = _mask[indx] obj._sharedmask = True fval = _localdict['_fill_value'] if fval is not None: obj._fill_value = fval[indx] # Force to masked if the mask is True if not obj.ndim and obj._mask: return masked return obj # We want some elements .. # First, the data ........ obj = np.array(_data[indx], copy=False).view(mrecarray) obj._mask = np.array(_mask[indx], copy=False).view(recarray) return obj #.... def __setitem__(self, indx, value): "Sets the given record to value." MaskedArray.__setitem__(self, indx, value) if isinstance(indx, basestring): self._mask[indx] = ma.getmaskarray(value) def __str__(self): "Calculates the string representation." if self.size > 1: mstr = ["(%s)" % ",".join([str(i) for i in s]) for s in zip(*[getattr(self, f) for f in self.dtype.names])] return "[%s]" % ", ".join(mstr) else: mstr = ["%s" % ",".join([str(i) for i in s]) for s in zip([getattr(self, f) for f in self.dtype.names])] return "(%s)" % ", ".join(mstr) # def __repr__(self): "Calculates the repr representation." _names = self.dtype.names fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,) reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names] reprstr.insert(0, 'masked_records(') reprstr.extend([fmt % (' fill_value', self.fill_value), ' )']) return str("\n".join(reprstr)) # #...................................................... def view(self, dtype=None, type=None): """Returns a view of the mrecarray.""" # OK, basic copy-paste from MaskedArray.view... 
if dtype is None: if type is None: output = ndarray.view(self) else: output = ndarray.view(self, type) # Here again... elif type is None: try: if issubclass(dtype, ndarray): output = ndarray.view(self, dtype) dtype = None else: output = ndarray.view(self, dtype) # OK, there's the change except TypeError: dtype = np.dtype(dtype) # we need to revert to MaskedArray, but keeping the possibility # ...of subclasses (eg, TimeSeriesRecords), so we'll force a type # ...set to the first parent if dtype.fields is None: basetype = self.__class__.__bases__[0] output = self.__array__().view(dtype, basetype) output._update_from(self) else: output = ndarray.view(self, dtype) output._fill_value = None else: output = ndarray.view(self, dtype, type) # Update the mask, just like in MaskedArray.view if (getattr(output, '_mask', nomask) is not nomask): mdtype = ma.make_mask_descr(output.dtype) output._mask = self._mask.view(mdtype, ndarray) output._mask.shape = output.shape return output def harden_mask(self): "Forces the mask to hard" self._hardmask = True def soften_mask(self): "Forces the mask to soft" self._hardmask = False def copy(self): """Returns a copy of the masked record.""" _localdict = self.__dict__ copied = self._data.copy().view(type(self)) copied._mask = self._mask.copy() return copied def tolist(self, fill_value=None): """Copy the data portion of the array to a hierarchical python list and returns that list. Data items are converted to the nearest compatible Python type. Masked values are converted to fill_value. If fill_value is None, the corresponding entries in the output list will be ``None``. """ if fill_value is not None: return self.filled(fill_value).tolist() result = narray(self.filled().tolist(), dtype=object) mask = narray(self._mask.tolist()) result[mask] = None return result.tolist() #-------------------------------------------- # Pickling def __getstate__(self): """Return the internal state of the masked array, for pickling purposes. """ state = (1, self.shape, self.dtype, self.flags.fnc, self._data.tostring(), self._mask.tostring(), self._fill_value, ) return state # def __setstate__(self, state): """Restore the internal state of the masked array, for pickling purposes. ``state`` is typically the output of the ``__getstate__`` output, and is a 5-tuple: - class name - a tuple giving the shape of the data - a typecode for the data - a binary string for the data - a binary string for the mask. """ (ver, shp, typ, isf, raw, msk, flv) = state ndarray.__setstate__(self, (shp, typ, isf, raw)) mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr]) self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk)) self.fill_value = flv # def __reduce__(self): """Return a 3-tuple for pickling a MaskedArray. """ return (_mrreconstruct, (self.__class__, self._baseclass, (0,), 'b',), self.__getstate__()) def _mrreconstruct(subtype, baseclass, baseshape, basetype,): """Internal function that builds a new MaskedArray from the information stored in a pickle. 
""" _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype) # _data._mask = ndarray.__new__(ndarray, baseshape, 'b1') # return _data _mask = ndarray.__new__(ndarray, baseshape, 'b1') return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) mrecarray = MaskedRecords #####--------------------------------------------------------------------------- #---- --- Constructors --- #####--------------------------------------------------------------------------- def fromarrays(arraylist, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None, fill_value=None): """Creates a mrecarray from a (flat) list of masked arrays. Parameters ---------- arraylist : sequence A list of (masked) arrays. Each element of the sequence is first converted to a masked array if needed. If a 2D array is passed as argument, it is processed line by line dtype : {None, dtype}, optional Data type descriptor. shape : {None, integer}, optional Number of records. If None, shape is defined from the shape of the first array in the list. formats : {None, sequence}, optional Sequence of formats for each individual field. If None, the formats will be autodetected by inspecting the fields and selecting the highest dtype possible. names : {None, sequence}, optional Sequence of the names of each field. fill_value : {None, sequence}, optional Sequence of data to be used as filling values. Notes ----- Lists of tuples should be preferred over lists of lists for faster processing. """ datalist = [getdata(x) for x in arraylist] masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist] _array = recfromarrays(datalist, dtype=dtype, shape=shape, formats=formats, names=names, titles=titles, aligned=aligned, byteorder=byteorder).view(mrecarray) _array._mask.flat = zip(*masklist) if fill_value is not None: _array.fill_value = fill_value return _array #.............................................................................. def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None, fill_value=None, mask=nomask): """Creates a MaskedRecords from a list of records. Parameters ---------- reclist : sequence A list of records. Each element of the sequence is first converted to a masked array if needed. If a 2D array is passed as argument, it is processed line by line dtype : {None, dtype}, optional Data type descriptor. shape : {None,int}, optional Number of records. If None, ``shape`` is defined from the shape of the first array in the list. formats : {None, sequence}, optional Sequence of formats for each individual field. If None, the formats will be autodetected by inspecting the fields and selecting the highest dtype possible. names : {None, sequence}, optional Sequence of the names of each field. fill_value : {None, sequence}, optional Sequence of data to be used as filling values. mask : {nomask, sequence}, optional. External mask to apply on the data. Notes ----- Lists of tuples should be preferred over lists of lists for faster processing. """ # Grab the initial _fieldmask, if needed: _mask = getattr(reclist, '_mask', None) # Get the list of records..... 
try: nfields = len(reclist[0]) except TypeError: nfields = len(reclist[0].dtype) if isinstance(reclist, ndarray): # Make sure we don't have some hidden mask if isinstance(reclist, MaskedArray): reclist = reclist.filled().view(ndarray) # Grab the initial dtype, just in case if dtype is None: dtype = reclist.dtype reclist = reclist.tolist() mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats, names=names, titles=titles, aligned=aligned, byteorder=byteorder).view(mrecarray) # Set the fill_value if needed if fill_value is not None: mrec.fill_value = fill_value # Now, let's deal w/ the mask if mask is not nomask: mask = np.array(mask, copy=False) maskrecordlength = len(mask.dtype) if maskrecordlength: mrec._mask.flat = mask elif len(mask.shape) == 2: mrec._mask.flat = [tuple(m) for m in mask] else: mrec.__setmask__(mask) if _mask is not None: mrec._mask[:] = _mask return mrec def _guessvartypes(arr): """Tries to guess the dtypes of the str_ ndarray `arr`, by testing element-wise conversion. Returns a list of dtypes. The array is first converted to ndarray. If the array is 2D, the test is performed on the first line. An exception is raised if the file is 3D or more. """ vartypes = [] arr = np.asarray(arr) if len(arr.shape) == 2 : arr = arr[0] elif len(arr.shape) > 2: raise ValueError("The array should be 2D at most!") # Start the conversion loop ....... for f in arr: try: int(f) except ValueError: try: float(f) except ValueError: try: val = complex(f) except ValueError: vartypes.append(arr.dtype) else: vartypes.append(np.dtype(complex)) else: vartypes.append(np.dtype(float)) else: vartypes.append(np.dtype(int)) return vartypes def openfile(fname): "Opens the file handle of file `fname`" # A file handle ................... if hasattr(fname, 'readline'): return fname # Try to open the file and guess its type try: f = open(fname) except IOError: raise IOError("No such file: '%s'" % fname) if f.readline()[:2] != "\\x": f.seek(0, 0) return f f.close() raise NotImplementedError("Wow, binary file") def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='', varnames=None, vartypes=None): """Creates a mrecarray from data stored in the file `filename`. Parameters ---------- filename : {file name/handle} Handle of an opened file. delimitor : {None, string}, optional Alphanumeric character used to separate columns in the file. If None, any (group of) white spacestring(s) will be used. commentchar : {'#', string}, optional Alphanumeric character used to mark the start of a comment. missingchar : {'', string}, optional String indicating missing data, and used to create the masks. varnames : {None, sequence}, optional Sequence of the variable names. If None, a list will be created from the first non empty line of the file. vartypes : {None, sequence}, optional Sequence of the variables dtypes. If None, it will be estimated from the first non-commented line. Ultra simple: the varnames are in the header, one line""" # Try to open the file ...................... f = openfile(fname) # Get the first non-empty line as the varnames while True: line = f.readline() firstline = line[:line.find(commentchar)].strip() _varnames = firstline.split(delimitor) if len(_varnames) > 1: break if varnames is None: varnames = _varnames # Get the data .............................. _variables = masked_array([line.strip().split(delimitor) for line in f if line[0] != commentchar and len(line) > 1]) (_, nfields) = _variables.shape f.close() # Try to guess the dtype .................... 
if vartypes is None: vartypes = _guessvartypes(_variables[0]) else: vartypes = [np.dtype(v) for v in vartypes] if len(vartypes) != nfields: msg = "Attempting to %i dtypes for %i fields!" msg += " Reverting to default." warnings.warn(msg % (len(vartypes), nfields)) vartypes = _guessvartypes(_variables[0]) # Construct the descriptor .................. mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)] mfillv = [ma.default_fill_value(f) for f in vartypes] # Get the data and the mask ................. # We just need a list of masked_arrays. It's easier to create it like that: _mask = (_variables.T == missingchar) _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f) for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)] return fromarrays(_datalist, dtype=mdescr) #.................................................................... def addfield(mrecord, newfield, newfieldname=None): """Adds a new field to the masked record array, using `newfield` as data and `newfieldname` as name. If `newfieldname` is None, the new field name is set to 'fi', where `i` is the number of existing fields. """ _data = mrecord._data _mask = mrecord._mask if newfieldname is None or newfieldname in reserved_fields: newfieldname = 'f%i' % len(_data.dtype) newfield = ma.array(newfield) # Get the new data ............ # Create a new empty recarray newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)]) newdata = recarray(_data.shape, newdtype) # Add the exisintg field [newdata.setfield(_data.getfield(*f), *f) for f in _data.dtype.fields.values()] # Add the new field newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname]) newdata = newdata.view(MaskedRecords) # Get the new mask ............. # Create a new empty recarray newmdtype = np.dtype([(n, bool_) for n in newdtype.names]) newmask = recarray(_data.shape, newmdtype) # Add the old masks [newmask.setfield(_mask.getfield(*f), *f) for f in _mask.dtype.fields.values()] # Add the mask of the new field newmask.setfield(getmaskarray(newfield), *newmask.dtype.fields[newfieldname]) newdata._mask = newmask return newdata
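# Illustrative sketch (editor's addition, not part of the original module):
# building a masked record array from two masked columns with fromarrays()
# above; the column names, data and fill values are made up.
import numpy.ma as ma

temperature = ma.array([21.5, 22.0, -999.0], mask=[0, 0, 1])
station = ma.array(['A', 'B', 'C'], mask=[0, 1, 0])

rec = fromarrays([temperature, station],
                 names='temperature,station',
                 fill_value=(-999.0, '?'))

rec.temperature    # masked array: the third value stays masked
rec[0]             # first record, (21.5, 'A')
rec.tolist()       # masked fields are returned as None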
gpl-3.0
-4,077,217,941,820,372,500
37.226648
91
0.536366
false
4.228787
false
false
false
schleichdi2/OPENNFR-6.1-CORE
opennfr-openembedded-core/meta/lib/oeqa/utils/qemurunner.py
1
24225
# Copyright (C) 2013 Intel Corporation # # Released under the MIT license (see COPYING.MIT) # This module provides a class for starting qemu images using runqemu. # It's used by testimage.bbclass. import subprocess import os import sys import time import signal import re import socket import select import errno import string import threading import codecs from oeqa.utils.dump import HostDumper import logging logger = logging.getLogger("BitBake.QemuRunner") logger.addHandler(logging.StreamHandler()) # Get Unicode non printable control chars control_range = list(range(0,32))+list(range(127,160)) control_chars = [chr(x) for x in control_range if chr(x) not in string.printable] re_control_char = re.compile('[%s]' % re.escape("".join(control_chars))) class QemuRunner: def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, boottime, dump_dir, dump_host_cmds, use_kvm): # Popen object for runqemu self.runqemu = None # pid of the qemu process that runqemu will start self.qemupid = None # target ip - from the command line or runqemu output self.ip = None # host ip - where qemu is running self.server_ip = None # target ip netmask self.netmask = None self.machine = machine self.rootfs = rootfs self.display = display self.tmpdir = tmpdir self.deploy_dir_image = deploy_dir_image self.logfile = logfile self.boottime = boottime self.logged = False self.thread = None self.use_kvm = use_kvm self.runqemutime = 60 self.host_dumper = HostDumper(dump_host_cmds, dump_dir) def create_socket(self): try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.setblocking(0) sock.bind(("127.0.0.1",0)) sock.listen(2) port = sock.getsockname()[1] logger.info("Created listening socket for qemu serial console on: 127.0.0.1:%s" % port) return (sock, port) except socket.error: sock.close() raise def log(self, msg): if self.logfile: # It is needed to sanitize the data received from qemu # because is possible to have control characters msg = msg.decode("utf-8", errors='ignore') msg = re_control_char.sub('', msg) with codecs.open(self.logfile, "a", encoding="utf-8") as f: f.write("%s" % msg) def getOutput(self, o): import fcntl fl = fcntl.fcntl(o, fcntl.F_GETFL) fcntl.fcntl(o, fcntl.F_SETFL, fl | os.O_NONBLOCK) return os.read(o.fileno(), 1000000).decode("utf-8") def handleSIGCHLD(self, signum, frame): if self.runqemu and self.runqemu.poll(): if self.runqemu.returncode: logger.info('runqemu exited with code %d' % self.runqemu.returncode) logger.info("Output from runqemu:\n%s" % self.getOutput(self.runqemu.stdout)) self.stop() self._dump_host() raise SystemExit def start(self, qemuparams = None, get_ip = True, extra_bootparams = None, runqemuparams='', launch_cmd=None, discard_writes=True): if self.display: os.environ["DISPLAY"] = self.display # Set this flag so that Qemu doesn't do any grabs as SDL grabs # interact badly with screensavers. 
os.environ["QEMU_DONT_GRAB"] = "1" if not os.path.exists(self.rootfs): logger.error("Invalid rootfs %s" % self.rootfs) return False if not os.path.exists(self.tmpdir): logger.error("Invalid TMPDIR path %s" % self.tmpdir) return False else: os.environ["OE_TMPDIR"] = self.tmpdir if not os.path.exists(self.deploy_dir_image): logger.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image) return False else: os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image if not launch_cmd: launch_cmd = 'runqemu %s %s ' % ('snapshot' if discard_writes else '', runqemuparams) if self.use_kvm: logger.info('Using kvm for runqemu') launch_cmd += ' kvm' else: logger.info('Not using kvm for runqemu') if not self.display: launch_cmd += ' nographic' launch_cmd += ' %s %s' % (self.machine, self.rootfs) return self.launch(launch_cmd, qemuparams=qemuparams, get_ip=get_ip, extra_bootparams=extra_bootparams) def launch(self, launch_cmd, get_ip = True, qemuparams = None, extra_bootparams = None): try: threadsock, threadport = self.create_socket() self.server_socket, self.serverport = self.create_socket() except socket.error as msg: logger.error("Failed to create listening socket: %s" % msg[1]) return False bootparams = 'console=tty1 console=ttyS0,115200n8 printk.time=1' if extra_bootparams: bootparams = bootparams + ' ' + extra_bootparams self.qemuparams = 'bootparams="{0}" qemuparams="-serial tcp:127.0.0.1:{1}"'.format(bootparams, threadport) if qemuparams: self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"' launch_cmd += ' tcpserial=%s %s' % (self.serverport, self.qemuparams) self.origchldhandler = signal.getsignal(signal.SIGCHLD) signal.signal(signal.SIGCHLD, self.handleSIGCHLD) logger.info('launchcmd=%s'%(launch_cmd)) # FIXME: We pass in stdin=subprocess.PIPE here to work around stty # blocking at the end of the runqemu script when using this within # oe-selftest (this makes stty error out immediately). There ought # to be a proper fix but this will suffice for now. self.runqemu = subprocess.Popen(launch_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, preexec_fn=os.setpgrp) output = self.runqemu.stdout # # We need the preexec_fn above so that all runqemu processes can easily be killed # (by killing their process group). This presents a problem if this controlling # process itself is killed however since those processes don't notice the death # of the parent and merrily continue on. # # Rather than hack runqemu to deal with this, we add something here instead. # Basically we fork off another process which holds an open pipe to the parent # and also is setpgrp. If/when the pipe sees EOF from the parent dieing, it kills # the process group. This is like pctrl's PDEATHSIG but for a process group # rather than a single process. 
# r, w = os.pipe() self.monitorpid = os.fork() if self.monitorpid: os.close(r) self.monitorpipe = os.fdopen(w, "w") else: # child process os.setpgrp() os.close(w) r = os.fdopen(r) x = r.read() os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM) sys.exit(0) logger.info("runqemu started, pid is %s" % self.runqemu.pid) logger.info("waiting at most %s seconds for qemu pid" % self.runqemutime) endtime = time.time() + self.runqemutime while not self.is_alive() and time.time() < endtime: if self.runqemu.poll(): if self.runqemu.returncode: # No point waiting any longer logger.info('runqemu exited with code %d' % self.runqemu.returncode) self._dump_host() self.stop() logger.info("Output from runqemu:\n%s" % self.getOutput(output)) return False time.sleep(1) out = self.getOutput(output) netconf = False # network configuration is not required by default if self.is_alive(): logger.info("qemu started - qemu procces pid is %s" % self.qemupid) if get_ip: cmdline = '' with open('/proc/%s/cmdline' % self.qemupid) as p: cmdline = p.read() # It is needed to sanitize the data received # because is possible to have control characters cmdline = re_control_char.sub('', cmdline) try: ips = re.findall("((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1]) self.ip = ips[0] self.server_ip = ips[1] logger.info("qemu cmdline used:\n{}".format(cmdline)) except (IndexError, ValueError): # Try to get network configuration from runqemu output match = re.match('.*Network configuration: ([0-9.]+)::([0-9.]+):([0-9.]+)$.*', out, re.MULTILINE|re.DOTALL) if match: self.ip, self.server_ip, self.netmask = match.groups() # network configuration is required as we couldn't get it # from the runqemu command line, so qemu doesn't run kernel # and guest networking is not configured netconf = True else: logger.error("Couldn't get ip from qemu command line and runqemu output! " "Here is the qemu command line used:\n%s\n" "and output from runqemu:\n%s" % (cmdline, out)) self._dump_host() self.stop() return False logger.info("Target IP: %s" % self.ip) logger.info("Server IP: %s" % self.server_ip) self.thread = LoggingThread(self.log, threadsock, logger) self.thread.start() if not self.thread.connection_established.wait(self.boottime): logger.error("Didn't receive a console connection from qemu. 
" "Here is the qemu command line used:\n%s\nand " "output from runqemu:\n%s" % (cmdline, out)) self.stop_thread() return False logger.info("Output from runqemu:\n%s", out) logger.info("Waiting at most %d seconds for login banner" % self.boottime) endtime = time.time() + self.boottime socklist = [self.server_socket] reachedlogin = False stopread = False qemusock = None bootlog = '' data = b'' while time.time() < endtime and not stopread: try: sread, swrite, serror = select.select(socklist, [], [], 5) except InterruptedError: continue for sock in sread: if sock is self.server_socket: qemusock, addr = self.server_socket.accept() qemusock.setblocking(0) socklist.append(qemusock) socklist.remove(self.server_socket) logger.info("Connection from %s:%s" % addr) else: data = data + sock.recv(1024) if data: try: data = data.decode("utf-8", errors="surrogateescape") bootlog += data data = b'' if re.search(".* login:", bootlog): self.server_socket = qemusock stopread = True reachedlogin = True logger.info("Reached login banner") except UnicodeDecodeError: continue else: socklist.remove(sock) sock.close() stopread = True if not reachedlogin: logger.info("Target didn't reached login boot in %d seconds" % self.boottime) lines = "\n".join(bootlog.splitlines()[-25:]) logger.info("Last 25 lines of text:\n%s" % lines) logger.info("Check full boot log: %s" % self.logfile) self._dump_host() self.stop() return False # If we are not able to login the tests can continue try: (status, output) = self.run_serial("root\n", raw=True) if re.search("root@[a-zA-Z0-9\-]+:~#", output): self.logged = True logger.info("Logged as root in serial console") if netconf: # configure guest networking cmd = "ifconfig eth0 %s netmask %s up\n" % (self.ip, self.netmask) output = self.run_serial(cmd, raw=True)[1] if re.search("root@[a-zA-Z0-9\-]+:~#", output): logger.info("configured ip address %s", self.ip) else: logger.info("Couldn't configure guest networking") else: logger.info("Couldn't login into serial console" " as root using blank password") except: logger.info("Serial console failed while trying to login") else: logger.info("Qemu pid didn't appeared in %s seconds" % self.runqemutime) self._dump_host() self.stop() logger.info("Output from runqemu:\n%s" % self.getOutput(output)) return False return self.is_alive() def stop(self): self.stop_thread() self.stop_qemu_system() if hasattr(self, "origchldhandler"): signal.signal(signal.SIGCHLD, self.origchldhandler) if self.runqemu: if hasattr(self, "monitorpid"): os.kill(self.monitorpid, signal.SIGKILL) logger.info("Sending SIGTERM to runqemu") try: os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM) except OSError as e: if e.errno != errno.ESRCH: raise endtime = time.time() + self.runqemutime while self.runqemu.poll() is None and time.time() < endtime: time.sleep(1) if self.runqemu.poll() is None: logger.info("Sending SIGKILL to runqemu") os.killpg(os.getpgid(self.runqemu.pid), signal.SIGKILL) self.runqemu = None if hasattr(self, 'server_socket') and self.server_socket: self.server_socket.close() self.server_socket = None self.qemupid = None self.ip = None def stop_qemu_system(self): if self.qemupid: try: # qemu-system behaves well and a SIGTERM is enough os.kill(self.qemupid, signal.SIGTERM) except ProcessLookupError as e: logger.warn('qemu-system ended unexpectedly') def stop_thread(self): if self.thread and self.thread.is_alive(): self.thread.stop() self.thread.join() def restart(self, qemuparams = None): logger.info("Restarting qemu process") if self.runqemu.poll() is None: 
self.stop() if self.start(qemuparams): return True return False def is_alive(self): if not self.runqemu: return False qemu_child = self.find_child(str(self.runqemu.pid)) if qemu_child: self.qemupid = qemu_child[0] if os.path.exists("/proc/" + str(self.qemupid)): return True return False def find_child(self,parent_pid): # # Walk the process tree from the process specified looking for a qemu-system. Return its [pid'cmd] # ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command'], stdout=subprocess.PIPE).communicate()[0] processes = ps.decode("utf-8").split('\n') nfields = len(processes[0].split()) - 1 pids = {} commands = {} for row in processes[1:]: data = row.split(None, nfields) if len(data) != 3: continue if data[1] not in pids: pids[data[1]] = [] pids[data[1]].append(data[0]) commands[data[0]] = data[2] if parent_pid not in pids: return [] parents = [] newparents = pids[parent_pid] while newparents: next = [] for p in newparents: if p in pids: for n in pids[p]: if n not in parents and n not in next: next.append(n) if p not in parents: parents.append(p) newparents = next #print("Children matching %s:" % str(parents)) for p in parents: # Need to be careful here since runqemu runs "ldd qemu-system-xxxx" # Also, old versions of ldd (2.11) run "LD_XXXX qemu-system-xxxx" basecmd = commands[p].split()[0] basecmd = os.path.basename(basecmd) if "qemu-system" in basecmd and "-serial tcp" in commands[p]: return [int(p),commands[p]] def run_serial(self, command, raw=False, timeout=5): # We assume target system have echo to get command status if not raw: command = "%s; echo $?\n" % command data = '' status = 0 self.server_socket.sendall(command.encode('utf-8')) start = time.time() end = start + timeout while True: now = time.time() if now >= end: data += "<<< run_serial(): command timed out after %d seconds without output >>>\r\n\r\n" % timeout break try: sread, _, _ = select.select([self.server_socket],[],[], end - now) except InterruptedError: continue if sread: answer = self.server_socket.recv(1024) if answer: data += answer.decode('utf-8') # Search the prompt to stop if re.search("[a-zA-Z0-9]+@[a-zA-Z0-9\-]+:~#", data): break else: raise Exception("No data on serial console socket") if data: if raw: status = 1 else: # Remove first line (command line) and last line (prompt) data = data[data.find('$?\r\n')+4:data.rfind('\r\n')] index = data.rfind('\r\n') if index == -1: status_cmd = data data = "" else: status_cmd = data[index+2:] data = data[:index] if (status_cmd == "0"): status = 1 return (status, str(data)) def _dump_host(self): self.host_dumper.create_dir("qemu") logger.warn("Qemu ended unexpectedly, dump data from host" " is in %s" % self.host_dumper.dump_dir) self.host_dumper.dump_host() # This class is for reading data from a socket and passing it to logfunc # to be processed. It's completely event driven and has a straightforward # event loop. The mechanism for stopping the thread is a simple pipe which # will wake up the poll and allow for tearing everything down. 
class LoggingThread(threading.Thread): def __init__(self, logfunc, sock, logger): self.connection_established = threading.Event() self.serversock = sock self.logfunc = logfunc self.logger = logger self.readsock = None self.running = False self.errorevents = select.POLLERR | select.POLLHUP | select.POLLNVAL self.readevents = select.POLLIN | select.POLLPRI threading.Thread.__init__(self, target=self.threadtarget) def threadtarget(self): try: self.eventloop() finally: self.teardown() def run(self): self.logger.info("Starting logging thread") self.readpipe, self.writepipe = os.pipe() threading.Thread.run(self) def stop(self): self.logger.info("Stopping logging thread") if self.running: os.write(self.writepipe, bytes("stop", "utf-8")) def teardown(self): self.logger.info("Tearing down logging thread") self.close_socket(self.serversock) if self.readsock is not None: self.close_socket(self.readsock) self.close_ignore_error(self.readpipe) self.close_ignore_error(self.writepipe) self.running = False def eventloop(self): poll = select.poll() event_read_mask = self.errorevents | self.readevents poll.register(self.serversock.fileno()) poll.register(self.readpipe, event_read_mask) breakout = False self.running = True self.logger.info("Starting thread event loop") while not breakout: events = poll.poll() for event in events: # An error occurred, bail out if event[1] & self.errorevents: raise Exception(self.stringify_event(event[1])) # Event to stop the thread if self.readpipe == event[0]: self.logger.info("Stop event received") breakout = True break # A connection request was received elif self.serversock.fileno() == event[0]: self.logger.info("Connection request received") self.readsock, _ = self.serversock.accept() self.readsock.setblocking(0) poll.unregister(self.serversock.fileno()) poll.register(self.readsock.fileno(), event_read_mask) self.logger.info("Setting connection established event") self.connection_established.set() # Actual data to be logged elif self.readsock.fileno() == event[0]: data = self.recv(1024) self.logfunc(data) # Since the socket is non-blocking make sure to honor EAGAIN # and EWOULDBLOCK. def recv(self, count): try: data = self.readsock.recv(count) except socket.error as e: if e.errno == errno.EAGAIN or e.errno == errno.EWOULDBLOCK: return '' else: raise if data is None: raise Exception("No data on read ready socket") elif not data: # This actually means an orderly shutdown # happened. But for this code it counts as an # error since the connection shouldn't go away # until qemu exits. raise Exception("Console connection closed unexpectedly") return data def stringify_event(self, event): val = '' if select.POLLERR == event: val = 'POLLER' elif select.POLLHUP == event: val = 'POLLHUP' elif select.POLLNVAL == event: val = 'POLLNVAL' return val def close_socket(self, sock): sock.shutdown(socket.SHUT_RDWR) sock.close() def close_ignore_error(self, fd): try: os.close(fd) except OSError: pass
gpl-2.0
2,719,740,783,573,503,000
39.107616
159
0.533375
false
4.175284
false
false
false
Orpheus11/nile
nile/common/lockutils.py
1
3733
import threading import weakref import contextlib import logging import fasteners import os LOG = logging.getLogger(__name__) class Semaphores(object): def __init__(self): self._semaphores = weakref.WeakValueDictionary() self._lock = threading.Lock() def get(self, name): with self._lock: try: return self._semaphores[name] except KeyError: sem = threading.Semaphore() self._semaphores[name] = sem return sem def __len__(self): return len(self._semaphores) _semaphores = Semaphores() InterProcessLock = fasteners.InterProcessLock ReaderWriterLock = fasteners.ReaderWriterLock def internal_lock(name, semaphores=None): if semaphores is None: semaphores = _semaphores return semaphores.get(name) def external_lock(name, lock_file_prefix=None, lock_path=None): lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path) return InterProcessLock(lock_file_path) def _get_lock_path(name, lock_file_prefix, lock_path=None): name = name.replace(os.sep, '_') if lock_file_prefix: sep = '' if lock_file_prefix.endswith('-') else '-' name = '%s%s%s' % (lock_file_prefix, sep, name) local_lock_path = lock_path if not local_lock_path: # raise cfg.RequiredOptError('lock_path') raise return os.path.join(local_lock_path, name) @contextlib.contextmanager def lock(name, lock_file_prefix=None, external=False, lock_path=None, do_log=True, semaphores=None, delay=0.01): """Context based lock This function yields a `threading.Semaphore` instance (if we don't use eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is True, in which case, it'll yield an InterProcessLock instance. :param lock_file_prefix: The lock_file_prefix argument is used to provide lock files on disk with a meaningful prefix. :param external: The external keyword argument denotes whether this lock should work across multiple processes. This means that if two different workers both run a method decorated with @synchronized('mylock', external=True), only one of them will execute at a time. :param lock_path: The path in which to store external lock files. For external locking to work properly, this must be the same for all references to the lock. :param do_log: Whether to log acquire/release messages. This is primarily intended to reduce log message duplication when `lock` is used from the `synchronized` decorator. :param semaphores: Container that provides semaphores to use when locking. This ensures that threads inside the same application can not collide, due to the fact that external process locks are unaware of a processes active threads. :param delay: Delay between acquisition attempts (in seconds). .. versionchanged:: 0.2 Added *do_log* optional parameter. .. versionchanged:: 0.3 Added *delay* and *semaphores* optional parameters. """ int_lock = internal_lock(name, semaphores=semaphores) with int_lock: if do_log: LOG.debug('Acquired semaphore "%(lock)s"', {'lock': name}) try: if external: ext_lock = external_lock(name, lock_file_prefix, lock_path) ext_lock.acquire(delay=delay) try: yield ext_lock finally: ext_lock.release() else: yield int_lock finally: if do_log: LOG.debug('Releasing semaphore "%(lock)s"', {'lock': name})
apache-2.0
-4,795,306,328,737,834,000
33.247706
78
0.646397
false
4.013978
false
false
false
Fokko/incubator-airflow
tests/test_utils/mock_operators.py
1
1355
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import NamedTuple from airflow.models.baseoperator import BaseOperator from airflow.utils.decorators import apply_defaults # Namedtuple for testing purposes class MockNamedTuple(NamedTuple): var1: str var2: str class MockOperator(BaseOperator): """Operator for testing purposes.""" template_fields = ("arg1", "arg2") @apply_defaults def __init__(self, arg1: str = "", arg2: str = "", **kwargs): super().__init__(**kwargs) self.arg1 = arg1 self.arg2 = arg2 def execute(self, context): pass
apache-2.0
3,725,370,474,415,175,700
31.261905
65
0.723985
false
4.143731
false
false
false
Greymerk/python-rpg
src/world/terrain/chunkmanager.py
1
1344
from random import choice

from mapcache import MapCache
from chunk import Chunk


class ChunkManager:

    def __init__(self, world):
        self.world = world
        self.chunkCache = []
        self.mapCache = MapCache(self, self.world.seed)
        self.maxCacheSize = 64

    def getChunk(self, x, y):
        chunkX = int(x) >> 4
        chunkY = int(y) >> 4

        for c in self.chunkCache:
            if c.getPos() == (chunkX, chunkY):
                return c

        toLoad = Chunk((chunkX, chunkY), self.world.getSeed(), self.world.mobManager, self.mapCache)
        self.chunkCache.append(toLoad)

        # Evict the oldest cached chunk. chunkCache is a plain list, so use
        # pop(0) rather than the deque-only popleft().
        if len(self.chunkCache) > self.maxCacheSize:
            toUnload = self.chunkCache.pop(0)
            toUnload.unload()

        return toLoad

    def getMap(self, x, y):
        return self.mapCache.get(x, y)

    def getTile(self, pos):
        x = int(pos[0])
        y = int(pos[1])
        c = self.getChunk(x, y)
        return c.getTile(x % Chunk.size, y % Chunk.size)

    def isLoaded(self, x, y):
        for c in self.chunkCache:
            # Compare by value: 'is' against a freshly built tuple is always False.
            if c.pos == (x, y):
                return True
        return False

    # Take the position as a single tuple argument; tuple parameter
    # unpacking in the signature is Python 2-only syntax.
    def setTile(self, pos, id):
        x, y = pos
        c = self.getChunk(x, y)
        c.setTile((x, y), id)

    def saveChunks(self):
        for c in self.chunkCache:
            c.unload()

    def getRandomChunk(self):
        return choice(self.chunkCache)

    def cull(self, center, dist):
        # Iterate over a copy so removing chunks doesn't skip entries.
        for c in list(self.chunkCache):
            if c.getDistToChunk(center) > dist:
                c.unload()
                self.chunkCache.remove(c)
gpl-3.0
-1,728,710,524,475,288,800
19.676923
94
0.65253
false
2.688
false
false
false
WaveBlocks/WaveBlocks
src/WaveBlocks/MatrixPotential1S.py
1
13237
"""The WaveBlocks Project This file contains code for the representation of potentials for a single component. These potential are of course scalar ones. @author: R. Bourquin @copyright: Copyright (C) 2010, 2011 R. Bourquin @license: Modified BSD License """ import sympy import numpy from MatrixPotential import MatrixPotential class MatrixPotential1S(MatrixPotential): r""" This class represents a scalar potential :math:`V\left(x\right)`. The potential is given as an analytical :math:`1 \times 1` matrix expression. Some symbolic calculations with the potential are supported. For example calculation of eigenvalues and exponentials and numerical evaluation. Further, there are methods for splitting the potential into a Taylor expansion and for basis transformations between canonical and eigenbasis. """ def __init__(self, expression, variables): r""" Create a new ``MatrixPotential1S`` instance for a given potential matrix :math:`V\left(x\right)`. :param expression: An expression representing the potential. """ #: The variable :math:`x` that represents position space. self.x = variables[0] #: The matrix of the potential :math:`V\left(x\right)`. self.potential = expression # Unpack single matrix entry self.potential = self.potential[0,0] self.exponential = None self.number_components = 1 # prepare the function in every potential matrix cell for numerical evaluation self.potential_n = sympy.vectorize(0)(sympy.lambdify(self.x, self.potential, "numpy")) # Symbolic and numerical eigenvalues and eigenvectors self.eigenvalues_s = None self.eigenvalues_n = None self.eigenvectors_s = None self.eigenvectors_n = None self.taylor_eigen_s = None self.taylor_eigen_n = None self.remainder_eigen_s = None self.remainder_eigen_n = None def __str__(self): r""" Put the number of components and the analytical expression (the matrix) into a printable string. """ return """Scalar potential given by the expression: V(x) = \n""" + str(self.potential) def get_number_components(self): r""" :return: The number :math:`N` of components the potential supports. In the one dimensional case, it's just 1. """ return 1 def evaluate_at(self, nodes, component=0, as_matrix=False): r""" Evaluate the potential matrix elementwise at some given grid nodes :math:`\gamma`. :param nodes: The grid nodes :math:`\gamma` we want to evaluate the potential at. :param component: The component :math:`V_{i,j}` that gets evaluated or 'None' to evaluate all. :param as_matrix: Dummy parameter which has no effect here. :return: A list with the single entry evaluated at the nodes. """ return tuple([ numpy.array(self.potential_n(nodes), dtype=numpy.floating) ]) def calculate_eigenvalues(self): r""" Calculate the eigenvalue :math:`\lambda_0\left(x\right)` of the potential :math:`V\left(x\right)`. In the scalar case this is just the matrix entry :math:`V_{0,0}`. .. note:: This function is idempotent and the eigenvalues are memoized for later reuse. """ if self.eigenvalues_s is None: self.eigenvalues_s = self.potential self.eigenvalues_n = sympy.vectorize(0)(sympy.lambdify(self.x, self.potential, "numpy")) def evaluate_eigenvalues_at(self, nodes, component=None, as_matrix=False): r""" Evaluate the eigenvalue :math:`\lambda_0\left(x\right)` at some grid nodes :math:`\gamma`. :param nodes: The grid nodes :math:`\gamma` we want to evaluate the eigenvalue at. :param diagonal_component: Dummy parameter that has no effect here. :param as_matrix: Dummy parameter which has no effect here. :return: A list with the single eigenvalue evaluated at the nodes. 
""" self.calculate_eigenvalues() return tuple([ numpy.array(self.eigenvalues_n(nodes)) ]) def calculate_eigenvectors(self): r""" Calculate the eigenvector :math:`nu_0\left(x\right)` of the potential :math:`V\left(x\right)`. In the scalar case this is just the value :math:`1`. .. note:: This function is idempotent and the eigenvectors are memoized for later reuse. """ if self.eigenvectors_s is None: self.eigenvectors_s = sympy.Matrix([[1]]) self.eigenvectors_n = sympy.vectorize(0)(sympy.lambdify(self.x, 1, "numpy")) def evaluate_eigenvectors_at(self, nodes): r""" Evaluate the eigenvector :math:`nu_0\left(x\right)` at some grid nodes :math:`\gamma`. :param nodes: The grid nodes :math:`\gamma` we want to evaluate the eigenvector at. :return: A list with the eigenvector evaluated at the given nodes. """ self.calculate_eigenvectors() return tuple([ numpy.ones((1, len(nodes)), dtype=numpy.floating) ]) def project_to_eigen(self, nodes, values, basis=None): r""" Project a given vector from the canonical basis to the eigenbasis of the potential. :param nodes: The grid nodes :math:`\gamma` for the pointwise transformation. :param values: The list of vectors :math:`\varphi_i` containing the values we want to transform. :param basis: A list of basisvectors :math:`nu_i`. Allows to use this function for external data, similar to a static function. :return: This method does nothing and returns the values. """ return [ values[0].copy() ] def project_to_canonical(self, nodes, values, basis=None): r""" Project a given vector from the potential's eigenbasis to the canonical basis. :param nodes: The grid nodes :math:`\gamma` for the pointwise transformation. :param values: The list of vectors :math:`\varphi_i` containing the values we want to transform. :param basis: A list of basis vectors :math:`nu_i`. Allows to use this function for external data, similar to a static function. :return: This method does nothing and returns the values. """ return [ values[0].copy() ] def calculate_exponential(self, factor=1): r""" Calculate the matrix exponential :math:`E = \exp\left(\alpha M\right)`. In this case the matrix is of size :math:`1 \times 1` thus the exponential simplifies to the scalar exponential function. :param factor: A prefactor :math:`\alpha` in the exponential. .. note:: This function is idempotent. """ if self.exponential is None: self.exponential = sympy.exp(factor*self.potential) def evaluate_exponential_at(self, nodes): r""" Evaluate the exponential of the potential matrix :math:`V` at some grid nodes :math:`\gamma`. :param nodes: The grid nodes :math:`\gamma` we want to evaluate the exponential at. :return: The numerical approximation of the matrix exponential at the given grid nodes. """ # Hack for older sympy versions, see recent issue: # http://www.mail-archive.com/[email protected]/msg05137.html lookup = {"I" : 1j} # prepare the function of every potential matrix exponential cell for numerical evaluation self.expfunctions = sympy.vectorize(0)(sympy.lambdify(self.x, self.exponential, (lookup, "numpy"))) return tuple([ numpy.array(self.expfunctions(nodes)) ]) def calculate_jacobian(self): r""" Calculate the jacobian matrix for the component :math:`V_{0,0}` of the potential. For potentials which depend only one variable :math:`x`, this equals the first derivative. 
""" self.jacobian_s = sympy.diff(self.potential, self.x) self.jacobian_n = sympy.vectorize(0)(sympy.lambdify(self.x, self.jacobian_s, "numpy")) def evaluate_jacobian_at(self, nodes, component=None): r""" Evaluate the potential's jacobian at some grid nodes :math:`\gamma`. :param nodes: The grid nodes :math:`\gamma` the jacobian gets evaluated at. :param component: Dummy parameter that has no effect here. :return: The value of the potential's jacobian at the given nodes. """ return tuple([ self.jacobian_n(nodes) ]) def calculate_hessian(self): r""" Calculate the hessian matrix for component :math:`V_{0,0}` of the potential. For potentials which depend only one variable :math:`x`, this equals the second derivative. """ self.hessian_s = sympy.diff(self.potential, self.x, 2) self.hessian_n = sympy.vectorize(0)(sympy.lambdify(self.x, self.hessian_s, "numpy")) def evaluate_hessian_at(self, nodes, component=None): r""" Evaluate the potential's hessian at some grid nodes :math:`\gamma`. :param nodes: The grid nodes :math:`\gamma` the hessian gets evaluated at. :param component: Dummy parameter that has no effect here. :return: The value of the potential's hessian at the given nodes. """ return tuple([ self.hessian_n(nodes) ]) def calculate_local_quadratic(self, diagonal_component=None): r""" Calculate the local quadratic approximation :math:`U` of the potential's eigenvalue :math:`\lambda`. :param diagonal_component: Dummy parameter that has no effect here. .. note:: This function is idempotent. """ # Calculation already done at some earlier time? if self.taylor_eigen_s is not None: return self.calculate_eigenvalues() self.calculate_jacobian() self.calculate_hessian() self.taylor_eigen_s = [ (0, self.eigenvalues_s), (1, self.jacobian_s), (2, self.hessian_s) ] # Construct function to evaluate the approximation at point q at the given nodes assert(self.taylor_eigen_n is None) self.taylor_eigen_n = [ (order, sympy.vectorize(0)(sympy.lambdify([self.x], f, "numpy"))) for order, f in self.taylor_eigen_s ] def evaluate_local_quadratic_at(self, nodes, diagonal_component=None): r""" Numerically evaluate the local quadratic approximation :math:`U` of the potential's eigenvalue :math:`\lambda` at the given grid nodes :math:`\gamma`. This function is used for the homogeneous case. :param nodes: The grid nodes :math:`\gamma` we want to evaluate the quadratic approximation at. :return: An array containing the values of :math:`U` at the nodes :math:`\gamma`. """ return tuple([ numpy.array(f(nodes), dtype=numpy.floating) for order, f in self.taylor_eigen_n ]) def calculate_local_remainder(self, diagonal_component=None): r""" Calculate the non-quadratic remainder :math:`W` of the quadratic approximation :math:`U` of the potential's eigenvalue :math:`\lambda`. This function is used for the homogeneous case and takes into account the leading component :math:`\chi`. :param diagonal_component: Dummy parameter that has no effect here. .. note:: This function is idempotent. """ # Calculation already done at some earlier time? 
if self.remainder_eigen_s is not None: return self.calculate_eigenvalues() f = self.eigenvalues_s # point where the taylor series is computed q = sympy.Symbol("q") p = f.subs(self.x, q) j = sympy.diff(f, self.x) j = j.subs(self.x, q) h = sympy.diff(f, self.x, 2) h = h.subs(self.x, q) quadratic = p + j*(self.x-q) + sympy.Rational(1,2)*h*(self.x-q)**2 # Symbolic expression for the taylor expansion remainder term self.remainder_eigen_s = self.potential - quadratic # Construct functions to evaluate the approximation at point q at the given nodes assert(self.remainder_eigen_n is None) self.remainder_eigen_n = sympy.vectorize(1)(sympy.lambdify([q, self.x], self.remainder_eigen_s, "numpy")) def evaluate_local_remainder_at(self, position, nodes, diagonal_component=None, component=None): r""" Numerically evaluate the non-quadratic remainder :math:`W` of the quadratic approximation :math:`U` of the potential's eigenvalue :math:`\lambda` at the given nodes :math:`\gamma`. This function is used for the homogeneous and the inhomogeneous case and just evaluates the remainder :math:`W`. :param position: The point :math:`q` where the Taylor series is computed. :param nodes: The grid nodes :math:`\gamma` we want to evaluate the potential at. :param component: Dummy parameter that has no effect here. :return: A list with a single entry consisting of an array containing the values of :math:`W` at the nodes :math:`\gamma`. """ return tuple([ numpy.array(self.remainder_eigen_n(position, nodes), dtype=numpy.floating) ])
bsd-3-clause
-6,184,886,920,704,668,000
40.495298
136
0.652187
false
4.079199
false
false
false
tobijk/ecromedos
lib/net/ecromedos/ecmlprocessor.py
1
4602
# -*- coding: utf-8 -*- # # Desc: This file is part of the ecromedos Document Preparation System # Author: Tobias Koch <[email protected]> # License: MIT # URL: http://www.ecromedos.net # import os, sys import lxml.etree as etree from net.ecromedos.error import ECMDSError, ECMDSPluginError from net.ecromedos.configreader import ECMDSConfigReader from net.ecromedos.dtdresolver import ECMDSDTDResolver from net.ecromedos.preprocessor import ECMDSPreprocessor class ECMLProcessor(ECMDSConfigReader, ECMDSDTDResolver, ECMDSPreprocessor): def __init__(self, options={}): ECMDSConfigReader.__init__(self) ECMDSDTDResolver. __init__(self) ECMDSPreprocessor.__init__(self) self.readConfig(options) self.loadPlugins() self.loadStylesheet() #end function def loadXMLDocument(self, filename): """Try to load XML document from @filename.""" try: # create parser parser = etree.XMLParser( load_dtd=True, no_network=True, strip_cdata=True, remove_comments=True, resolve_entities=True ) # register custom resolver parser.resolvers.add(self) # parse the document tree = etree.parse(filename, parser=parser) except Exception as e: raise ECMDSError(str(e)) # return document tree return tree #end function def loadStylesheet(self): """Load matching stylesheet for desired output format.""" target_format = self.config['target_format'] try: style_dir = self.config['style_dir'] except KeyError: msg = "Please specify the location of the stylesheets." raise ECMDSError(msg) #end try filename = os.path.join(style_dir, target_format, "ecmds.xsl") try: tree = self.loadXMLDocument(filename) except ECMDSError as e: msg = "Could not load stylesheet:\n %s" % (e.msg(),) raise ECMDSError(msg) #end try try: self.stylesheet = etree.XSLT(tree) except Exception as e: raise ECMDSError(str(e)) #end if return self.stylesheet #end function def validateDocument(self, document): """Validate the given document.""" try: style_dir = self.config['style_dir'] except KeyError: msg = "Please specify the location of the stylesheets." raise ECMDSError(msg) #end try # load the DTD dtd_filename = os.path.join(style_dir, "DTD", "ecromedos.dtd") dtd = etree.DTD(dtd_filename) # validate the document result = dtd.validate(document) if result == False: raise ECMDSError(dtd.error_log.last_error) return result #end function def applyStylesheet(self, document): """Apply stylesheet to document.""" params = None try: params = self.config['xsl_params'] except KeyError: pass try: result = self.stylesheet(document, **params) except Exception as e: msg = "Error transforming document:\n %s." % (str(e),) raise ECMDSError(msg) #end try return result #end function def process(self, filename, verbose=True): """Convert the document stored under filename.""" def message(msg, verbose): if not verbose: return sys.stdout.write(" * " + msg) sys.stdout.write(" " * (40 - len(msg))) sys.stdout.flush() #end inline function def status(status, verbose): if not verbose: return sys.stdout.write(status + "\n") #end inline function # load document message("Reading document...", verbose) document = self.loadXMLDocument(filename) status("DONE", verbose) # validate document if self.config['do_validate']: message("Validating document...", verbose) self.validateDocument(document) status("VALID", verbose) #end if # prepare document message("Pre-processing document tree...", verbose) self.prepareDocument(document) status("DONE", verbose) # apply stylesheet message("Transforming document...", verbose) self.applyStylesheet(document) status("DONE", verbose) #end function #end class
mit
7,828,527,572,711,646,000
27.407407
76
0.58279
false
4.225895
true
false
false
danielquinn/spirithunter
src/spirits/api/resources.py
1
8242
import json import random from math import sin, cos from django.conf import settings from django.core.exceptions import ValidationError from django.shortcuts import get_object_or_404 from tastypie import fields from tastypie import http from tastypie.authentication import MultiAuthentication, Authentication, BasicAuthentication, SessionAuthentication from tastypie.resources import ModelResource, convert_post_to_patch from tastypie.exceptions import BadRequest from aspects.models import Element, Facet from geography.models import Country from spirithunter import logger from .authorization import SpiritAuthorization from ..forms import PatchForm from ..models.spirit import ElementalStrength, Spirit class ImageMixin(object): def dehydrate(self, bundle): bundle.data.update({ "images": {} }) for size in self.AVAILABLE_IMAGE_SIZES: bundle.data["images"][str(size)] = getattr( bundle.obj, 'image{size}'.format(size=size) ) return bundle class ElementResource(ImageMixin, ModelResource): AVAILABLE_IMAGE_SIZES = (16, 32) class Meta: queryset = Element.objects.all() include_resource_uri = False resource_name = "elements" class ElementalStrengthResource(ModelResource): AVAILABLE_IMAGE_SIZES = (16, 32) element = fields.ToOneField(ElementResource, "element", full=True) class Meta: queryset = ElementalStrength.objects.all() include_resource_uri = False resource_name = "elements" class FacetResource(ImageMixin, ModelResource): AVAILABLE_IMAGE_SIZES = (16, 32) class Meta: queryset = Facet.objects.all() include_resource_uri = False resource_name = "facets" class NationalityResource(ModelResource): class Meta: queryset = Country.objects.all() include_resource_uri = False resource_name = "nationalities" def dehydrate(self, bundle): return { "code": bundle.obj.country.code, "name": bundle.obj.country.name, } class SpiritResource(ImageMixin, ModelResource): AVAILABLE_IMAGE_SIZES = (16, 32, 64, 128, 256) SPIRITS_TO_GENERATE = 5 SPAWN_RADIUS = 50 owner = fields.ToOneField("users.api.UserResource", "owner", null=True) elementals = fields.ManyToManyField( ElementalStrengthResource, "elemental_strengths", full=True ) facets = fields.ManyToManyField( FacetResource, "facets", full=True ) nationalities = fields.ManyToManyField( NationalityResource, "nationalities", full=True ) class Meta: allowed_methods = ("get", "patch",) authentication = MultiAuthentication( SessionAuthentication(), BasicAuthentication(), Authentication() ) authorization = SpiritAuthorization() object_class = Spirit queryset = Spirit.objects.all() resource_name = "spirits" filtering = { "id": ("exact",), "owner": ("exact",), "activity": ("exact",), } def dehydrate(self, bundle): bundle = ModelResource.dehydrate(self, bundle) bundle = ImageMixin.dehydrate(self, bundle) if bundle.obj.activity == Spirit.ACTIVITY_WANDER: if bundle.obj.health_current == 0: bundle.data["experience_given"] = bundle.obj.get_ladder().xp_given return bundle @staticmethod def dehydrate_origin(bundle): if bundle.obj.origin: r = json.loads(bundle.obj.origin.geojson) r["coordinates"][0] = round(r["coordinates"][0], settings.COORDINATES_ROUNDING) r["coordinates"][1] = round(r["coordinates"][1], settings.COORDINATES_ROUNDING) return r return None @staticmethod def dehydrate_location(bundle): if bundle.obj.location: r = json.loads(bundle.obj.location.geojson) r["coordinates"][0] = round(r["coordinates"][0], settings.COORDINATES_ROUNDING) r["coordinates"][1] = round(r["coordinates"][1], settings.COORDINATES_ROUNDING) return r return None @staticmethod def dehydrate_activity(bundle): 
return { "id": bundle.obj.activity, "name": bundle.obj.get_activity_display() } def obj_get_list(self, bundle, **kwargs): if bundle.request.GET.get("finder"): if not bundle.request.location: raise BadRequest( "Finder cannot be invoked without a location header" ) if not bundle.request.user.is_authenticated(): raise BadRequest( "Finder is only available to authenticated users" ) try: return self._finder(bundle.request) except ValidationError as e: raise BadRequest(e.messages[0]) else: return ModelResource.obj_get_list(self, bundle, **kwargs) def patch_list(self, request, **kwargs): return http.HttpNotImplemented() def patch_detail(self, request, **kwargs): pk = kwargs.get("pk") request = convert_post_to_patch(request) self.authorized_update_detail( Spirit.objects.filter(pk=pk), self.build_bundle(request=request) ) form = PatchForm( request, get_object_or_404(Spirit, pk=pk), self.deserialize( request, request.body, format=request.META.get("CONTENT_TYPE", "application/json") ) ) if form.is_valid(): form.save() return self.create_response(request, "", status=202) raise BadRequest(form.errors.as_text()) def _finder(self, request): """ Open the app and show me what's here. If there's nothing here (common) make some spirits relevant to the environment to play with. """ lat, lng = (request.location.y, request.location.x) if lat > 80 or lat < -80: raise ValidationError("Invalid lat value: %s" % lat) if lng > 180 or lng < -180: raise ValidationError("Invalid lng value: %s" % lng) level_low, level_high = 1, 1 if request.user.is_authenticated(): spirit_levels = sorted( request.user.spirits.filter( activity=Spirit.ACTIVITY_JARRED ).values_list( "level", flat=True ) ) if spirit_levels: level_low, level_high = spirit_levels[0], spirit_levels[-1] spirits = list(Spirit.objects.filter( activity=Spirit.ACTIVITY_WANDER, health_current__gt=0, location__distance_lte=(request.location, self.SPAWN_RADIUS) )) while len(spirits) < self.SPIRITS_TO_GENERATE: # Magic centre_x = float(lat) centre_y = float(lng) r = random.uniform(0, self.SPAWN_RADIUS) a = random.uniform(0, 360) target_x = centre_x + ((r * cos(a)) / settings.M_LNG) target_y = centre_y + ((r * sin(a)) / settings.M_LAT) # /Magic logger.debug("Creating a spirit at {lat},{lng}".format( lat=target_x, lng=target_y )) spirit = Spirit.objects.create_for_environment( centre=(centre_x, centre_y), target=(target_x, target_y), level_low=level_low, level_high=level_high ) spirits.append(spirit) # Feel lucky? if random.randint(1, 10) == 5: # Start encounter immediately pass return SpiritResource.get_object_list(self, request).filter( activity=Spirit.ACTIVITY_WANDER, health_current__gt=0, location__distance_lte=(request.location, 5000) )
agpl-3.0
-7,481,127,565,425,262,000
26.565217
115
0.579592
false
4.050123
false
false
false
linuxrocks123/MailTask
mt_attache.py
1
3151
#! /usr/bin/env python # MailTask Alpha: The Email Manager # Copyright (C) 2015 Patrick Simmons # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import codecs import fltk from html2text import html2text import os import tempfile #Note: EVERY method here must correctly handle unicode by decoding it with utf-8/replace, #then ENCODING it with utf-8 #Note: FLTK 1.1 seems to use ISO-8859-1 as its native encoding. # FLTK 1.3 changes this to UTF-8. #FLTK_ENCODING="ISO-8859-1" FLTK_ENCODING="UTF-8" def text_plain(submsg,mime_encoding): return submsg.get_payload(decode=True).decode(encoding=mime_encoding,errors="replace").encode(encoding=FLTK_ENCODING,errors="replace") def text_html(submsg,mime_encoding): return html2text(submsg.get_payload(decode=True).decode(encoding=mime_encoding,errors="replace")).encode(encoding=FLTK_ENCODING,errors="replace") def application_pdf(submsg,mime_encoding): temptuple=tempfile.mkstemp() os.fdopen(temptuple[0],'w').write(submsg.get_payload(decode=True)) os.system("xpdf "+temptuple[1]+" & ( sleep 10; rm "+temptuple[1]+" ) &") return "PDF file opened" def application_octetstream(submsg,mime_encoding): fc = fltk.Fl_File_Chooser(".","*",fltk.Fl_File_Chooser.CREATE,"Select Save Location") fc.show() while fc.shown(): fltk.Fl_wait() if fc.value()==None: return submsg.get_payload(decode=True).decode(encoding=mime_encoding,errors="replace").encode(encoding=FLTK_ENCODING,errors="replace") open(fc.value(),'w').write(submsg.get_payload(decode=True)) return "Undisplayable file; saved to "+fc.value() def display_submessage(submsg): if submsg['Content-Transfer-Encoding']==None: del submsg['Content-Transfer-Encoding'] if submsg.get_payload(decode=True)==None: return "" ATTACHE = { "text/plain" : text_plain, "text/html" : text_html, "application/pdf" : application_pdf } mime_encoding = submsg.get_content_charset() if mime_encoding==None: mime_encoding="utf-8" else: try: codecs.lookup(mime_encoding) valid_encoding = True except LookupError: valid_encoding = False if not valid_encoding: mime_encoding="utf-8" mimetype = submsg.get_content_type() print mimetype if mimetype in ATTACHE: return ATTACHE[mimetype](submsg,mime_encoding) elif mimetype.find("text/")==0: return text_plain(submsg,mime_encoding) return application_octetstream(submsg,mime_encoding)
gpl-3.0
-9,170,419,339,616,157,000
35.218391
149
0.699778
false
3.56448
false
false
false
unapiedra/BBChop
tests/dumbdag.py
1
3423
# Copyright 2008 Ealdwulf Wuffinga # This file is part of BBChop. # # BBChop is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # BBChop is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with BBChop. If not, see <http://www.gnu.org/licenses/>. from . import dagAlg from BBChop.listUtils import listSub,prod # class for computing over directed acyclic graphs. # values are held outside the graph object, in lists # the dag is defined by a parents relation: for each index, which indexes are its parents. # it is required that < and > on indexes is consistent with the transitive closure of the parents # relation. That is, if parent*(a,b) then a<b and b>a. This is checked. # this version of the class has a simple O(N^2) implementation for test purposes class IllFormedDAGFile(Exception): pass class DAGWrongLength(Exception): pass # abstract dag class: defines sum,and type functions in terms of comb functions class absDag: def sumUpto(self,values): return self.combUpto(values,sum) def sumAfter(self,values): return self.combAfter(values,sum) def anyUpto(self,values): return self.combUpto(values,any) def anyAfter(self,values): return self.combAfter(values,any) def prodAfter(self,values): return self.combAfter(values,prod) class dag(absDag): def __init__(self,parents,N): self.parents=parents children=[[] for i in range(N)] for i in range(N): for p in parents[i]: children[p].append(i) self.children=children childRel=dagAlg.childLists2Rel(self.children) self.decendentRel=dagAlg.transitiveClosure(childRel,N) # these methods assume the consistentency defined above. # for each location, return the sum of lower locations from values def combUpto(self,values,comb): res=[comb([v for (i,v) in enumerate(values) if (i,j) in self.decendentRel]) for j in range(len(values))] return res # for each location, return the sum of higher locations from values def combAfter(self,values,comb): res=[comb([v for (i,v) in enumerate(values) if (j,i) in self.decendentRel]) for j in range(len(values))] return res # for each location, return the sum of locations neither lower or higher from values # we do this by taking the total and subtracting everything else. def sumOther(self,values,sumUpto=None,sumAfter=None): # save recalculating sumUpto/After if already known if sumUpto is None: sumUpto=self.sumUpto(values) if sumAfter is None: sumAfter=self.sumAfter(values) sums=[sum(values)]*len(values) # sums=listSub(sums,values,sumUpto,sumAfter) return sums def linearTestDag(N): parents=['%d %d' %(a+1,a) for a in range(N-1)] parents[:0]='0' return dag(parents,N)
gpl-2.0
-1,333,789,492,420,188,700
29.5625
112
0.674847
false
3.704545
false
false
false
jleete97/python-graphics
games/turns/reversi/reversi.py
1
3731
import random import sys import time from reversiboard import * from games.turns.reversi.reversimoves import * # Window parameters WINDOW_WIDTH = 800 WINDOW_HEIGHT = 700 # Colors DARK_GREEN = (0, 128, 0) DARK_GREY = (128, 128, 128) LIGHT_RED = (255, 192, 192) GREEN = (0, 255, 0) WHITE = (255, 255, 255) BLACK = (0, 0, 0) # Board size (number of squares on each side) BOARD_SIZE = 8 HUMAN = 'human' COMPUTER = 'computer' # Players: computer is 'W', human is 'B' # Pick random starting player sides = [ HUMAN, COMPUTER ] colors = { HUMAN : WHITE , COMPUTER : BLACK } pygame.init() surface = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT), 0, 32) another_game = True while another_game: playerIndex = random.randrange(2) board = ReversiBoard(BOARD_SIZE, sides) drawer = ReversiBoardDrawer(board, surface, WINDOW_WIDTH, WINDOW_HEIGHT, DARK_GREY, DARK_GREEN, GREEN, sides, colors) try: playing = True missedMoves = 0 winner = None while playing: opponentIndex = 1 - playerIndex player = sides[playerIndex] opponent = sides[opponentIndex] drawer.drawBoard() moveResult = [] if board.noLegalMoves(player, opponent): print(player + " has no legal move.") move = None time.sleep(3) else: print(player + " is moving...") if player == HUMAN: while moveResult == []: move = getPlayerMove(drawer) moveResult = board.resultOfMove(move, player, opponent) else: move = getComputerMove(board, COMPUTER, HUMAN) moveResult = board.resultOfMove(move, player, opponent) print(" move result: " + str(moveResult)) displayMove = None if (move is not None): displayMove = (move[0] + 1, move[1] + 1); print(player + " has moved: " + str(displayMove)) if move is None: missedMoves += 1 else: missedMoves = 0 if missedMoves == 2: winner = board.determineWinner() playing = False else: board.apply(move, moveResult, player) drawer.drawMove(move, player) if board.isFull(): winner = board.determineWinner() playing = False playerIndex = 1 - playerIndex except PlayerQuitException: pass if winner is None: outcome = "The game is a tie." else: outcome = "The " + winner + " wins!" fontObj = pygame.font.Font('freesansbold.ttf', 32) textSurface = fontObj.render(outcome, True, LIGHT_RED, DARK_GREY) textRect = textSurface.get_rect() textRect.center = (WINDOW_WIDTH // 2, WINDOW_HEIGHT // 2) surface.blit(textSurface, textRect) pygame.display.update() asking_about_another_game = True while asking_about_another_game: for event in pygame.event.get(): if event.type == QUIT: another_game = False asking_about_another_game = False break elif event.type == KEYUP and event.key in [K_ESCAPE, ord('r')]: asking_about_another_game = False break pygame.display.update() pygame.quit() sys.exit()
mit
-7,343,959,332,232,993,000
26.233577
79
0.5197
false
3.977612
false
false
false
googleads/google-ads-python
google/ads/googleads/v8/errors/types/keyword_plan_error.py
1
1758
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore __protobuf__ = proto.module( package="google.ads.googleads.v8.errors", marshal="google.ads.googleads.v8", manifest={"KeywordPlanErrorEnum",}, ) class KeywordPlanErrorEnum(proto.Message): r"""Container for enum describing possible errors from applying a keyword plan resource (keyword plan, keyword plan campaign, keyword plan ad group or keyword plan keyword) or KeywordPlanService RPC. """ class KeywordPlanError(proto.Enum): r"""Enum describing possible errors from applying a keyword plan.""" UNSPECIFIED = 0 UNKNOWN = 1 BID_MULTIPLIER_OUT_OF_RANGE = 2 BID_TOO_HIGH = 3 BID_TOO_LOW = 4 BID_TOO_MANY_FRACTIONAL_DIGITS = 5 DAILY_BUDGET_TOO_LOW = 6 DAILY_BUDGET_TOO_MANY_FRACTIONAL_DIGITS = 7 INVALID_VALUE = 8 KEYWORD_PLAN_HAS_NO_KEYWORDS = 9 KEYWORD_PLAN_NOT_ENABLED = 10 KEYWORD_PLAN_NOT_FOUND = 11 MISSING_BID = 13 MISSING_FORECAST_PERIOD = 14 INVALID_FORECAST_DATE_RANGE = 15 INVALID_NAME = 16 __all__ = tuple(sorted(__protobuf__.manifest))
apache-2.0
8,135,299,712,212,321,000
32.169811
76
0.677474
false
3.724576
false
false
false
TribeMedia/synapse
synapse/handlers/e2e_keys.py
2
12592
# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ujson as json import logging from canonicaljson import encode_canonical_json from twisted.internet import defer from synapse.api.errors import SynapseError, CodeMessageException from synapse.types import get_domain_from_id from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination logger = logging.getLogger(__name__) class E2eKeysHandler(object): def __init__(self, hs): self.store = hs.get_datastore() self.federation = hs.get_replication_layer() self.device_handler = hs.get_device_handler() self.is_mine_id = hs.is_mine_id self.clock = hs.get_clock() # doesn't really work as part of the generic query API, because the # query request requires an object POST, but we abuse the # "query handler" interface. self.federation.register_query_handler( "client_keys", self.on_federation_query_client_keys ) @defer.inlineCallbacks def query_devices(self, query_body, timeout): """ Handle a device key query from a client { "device_keys": { "<user_id>": ["<device_id>"] } } -> { "device_keys": { "<user_id>": { "<device_id>": { ... } } } } """ device_keys_query = query_body.get("device_keys", {}) # separate users by domain. # make a map from domain to user_id to device_ids local_query = {} remote_queries = {} for user_id, device_ids in device_keys_query.items(): if self.is_mine_id(user_id): local_query[user_id] = device_ids else: remote_queries[user_id] = device_ids # Firt get local devices. failures = {} results = {} if local_query: local_result = yield self.query_local_devices(local_query) for user_id, keys in local_result.items(): if user_id in local_query: results[user_id] = keys # Now attempt to get any remote devices from our local cache. 
remote_queries_not_in_cache = {} if remote_queries: query_list = [] for user_id, device_ids in remote_queries.iteritems(): if device_ids: query_list.extend((user_id, device_id) for device_id in device_ids) else: query_list.append((user_id, None)) user_ids_not_in_cache, remote_results = ( yield self.store.get_user_devices_from_cache( query_list ) ) for user_id, devices in remote_results.iteritems(): user_devices = results.setdefault(user_id, {}) for device_id, device in devices.iteritems(): keys = device.get("keys", None) device_display_name = device.get("device_display_name", None) if keys: result = dict(keys) unsigned = result.setdefault("unsigned", {}) if device_display_name: unsigned["device_display_name"] = device_display_name user_devices[device_id] = result for user_id in user_ids_not_in_cache: domain = get_domain_from_id(user_id) r = remote_queries_not_in_cache.setdefault(domain, {}) r[user_id] = remote_queries[user_id] # Now fetch any devices that we don't have in our cache @defer.inlineCallbacks def do_remote_query(destination): destination_query = remote_queries_not_in_cache[destination] try: limiter = yield get_retry_limiter( destination, self.clock, self.store ) with limiter: remote_result = yield self.federation.query_client_keys( destination, {"device_keys": destination_query}, timeout=timeout ) for user_id, keys in remote_result["device_keys"].items(): if user_id in destination_query: results[user_id] = keys except CodeMessageException as e: failures[destination] = { "status": e.code, "message": e.message } except NotRetryingDestination as e: failures[destination] = { "status": 503, "message": "Not ready for retry", } except Exception as e: # include ConnectionRefused and other errors failures[destination] = { "status": 503, "message": e.message } yield preserve_context_over_deferred(defer.gatherResults([ preserve_fn(do_remote_query)(destination) for destination in remote_queries_not_in_cache ])) defer.returnValue({ "device_keys": results, "failures": failures, }) @defer.inlineCallbacks def query_local_devices(self, query): """Get E2E device keys for local users Args: query (dict[string, list[string]|None): map from user_id to a list of devices to query (None for all devices) Returns: defer.Deferred: (resolves to dict[string, dict[string, dict]]): map from user_id -> device_id -> device details """ local_query = [] result_dict = {} for user_id, device_ids in query.items(): if not self.is_mine_id(user_id): logger.warning("Request for keys for non-local user %s", user_id) raise SynapseError(400, "Not a user here") if not device_ids: local_query.append((user_id, None)) else: for device_id in device_ids: local_query.append((user_id, device_id)) # make sure that each queried user appears in the result dict result_dict[user_id] = {} results = yield self.store.get_e2e_device_keys(local_query) # Build the result structure, un-jsonify the results, and add the # "unsigned" section for user_id, device_keys in results.items(): for device_id, device_info in device_keys.items(): r = dict(device_info["keys"]) r["unsigned"] = {} display_name = device_info["device_display_name"] if display_name is not None: r["unsigned"]["device_display_name"] = display_name result_dict[user_id][device_id] = r defer.returnValue(result_dict) @defer.inlineCallbacks def on_federation_query_client_keys(self, query_body): """ Handle a device key query from a federated server """ device_keys_query = query_body.get("device_keys", {}) res = yield self.query_local_devices(device_keys_query) 
defer.returnValue({"device_keys": res}) @defer.inlineCallbacks def claim_one_time_keys(self, query, timeout): local_query = [] remote_queries = {} for user_id, device_keys in query.get("one_time_keys", {}).items(): if self.is_mine_id(user_id): for device_id, algorithm in device_keys.items(): local_query.append((user_id, device_id, algorithm)) else: domain = get_domain_from_id(user_id) remote_queries.setdefault(domain, {})[user_id] = device_keys results = yield self.store.claim_e2e_one_time_keys(local_query) json_result = {} failures = {} for user_id, device_keys in results.items(): for device_id, keys in device_keys.items(): for key_id, json_bytes in keys.items(): json_result.setdefault(user_id, {})[device_id] = { key_id: json.loads(json_bytes) } @defer.inlineCallbacks def claim_client_keys(destination): device_keys = remote_queries[destination] try: limiter = yield get_retry_limiter( destination, self.clock, self.store ) with limiter: remote_result = yield self.federation.claim_client_keys( destination, {"one_time_keys": device_keys}, timeout=timeout ) for user_id, keys in remote_result["one_time_keys"].items(): if user_id in device_keys: json_result[user_id] = keys except CodeMessageException as e: failures[destination] = { "status": e.code, "message": e.message } except NotRetryingDestination as e: failures[destination] = { "status": 503, "message": "Not ready for retry", } except Exception as e: # include ConnectionRefused and other errors failures[destination] = { "status": 503, "message": e.message } yield preserve_context_over_deferred(defer.gatherResults([ preserve_fn(claim_client_keys)(destination) for destination in remote_queries ])) defer.returnValue({ "one_time_keys": json_result, "failures": failures }) @defer.inlineCallbacks def upload_keys_for_user(self, user_id, device_id, keys): time_now = self.clock.time_msec() # TODO: Validate the JSON to make sure it has the right keys. device_keys = keys.get("device_keys", None) if device_keys: logger.info( "Updating device_keys for device %r for user %s at %d", device_id, user_id, time_now ) # TODO: Sign the JSON with the server key changed = yield self.store.set_e2e_device_keys( user_id, device_id, time_now, device_keys, ) if changed: # Only notify about device updates *if* the keys actually changed yield self.device_handler.notify_device_update(user_id, [device_id]) one_time_keys = keys.get("one_time_keys", None) if one_time_keys: logger.info( "Adding %d one_time_keys for device %r for user %r at %d", len(one_time_keys), device_id, user_id, time_now ) key_list = [] for key_id, key_json in one_time_keys.items(): algorithm, key_id = key_id.split(":") key_list.append(( algorithm, key_id, encode_canonical_json(key_json) )) yield self.store.add_e2e_one_time_keys( user_id, device_id, time_now, key_list ) # the device should have been registered already, but it may have been # deleted due to a race with a DELETE request. Or we may be using an # old access_token without an associated device_id. Either way, we # need to double-check the device is registered to avoid ending up with # keys without a corresponding device. self.device_handler.check_device_registered(user_id, device_id) result = yield self.store.count_e2e_one_time_keys(user_id, device_id) defer.returnValue({"one_time_key_counts": result})
apache-2.0
1,883,954,235,118,884,400
37.98452
87
0.542011
false
4.343567
false
false
false
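
The `query_devices` handler in the synapse record above documents its wire format only in its docstring; as a rough illustration of the shapes involved (the user IDs, device IDs and server names below are invented for the example and are not from the original file):

# Request body a client would send, per the query_devices docstring above;
# an empty device list means "all devices for this user".
query_body = {
    "device_keys": {
        "@alice:local.example": [],
        "@bob:remote.example": ["BOBPHONE"],
    }
}

# Shape of the value the handler resolves with: local/cached keys under
# "device_keys", per-destination errors under "failures". Each device maps to
# the key object that device uploaded, plus an "unsigned" section added here.
expected_result = {
    "device_keys": {
        "@alice:local.example": {
            "ALICEDESKTOP": {"unsigned": {"device_display_name": "Desktop"}},
        },
    },
    "failures": {
        "remote.example": {"status": 503, "message": "Not ready for retry"},
    },
}
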
hoomanlogic/hoomancmd
hoomancmd/matchsuggestion.py
1
17803
### score matched, proximity, missing, or nomatch to find the best fit command ### # todo: Improve suggestion engine # >> plns # Did you mean 'logs'? : 97 : journal:90 # used by all versions proximity_mapping = { 'q': ['a', 's', 'w', '2', '1', '`'], 'w': ['q', 'a', 's', 'd', 'e', '3', '2', '1'], 'e': ['w', 's', 'd', 'f', 'r', '4', '3', '2'], 'r': ['e', 'd', 'f', 'g', 't', '5', '4', '3'], 't': ['r', 'f', 'g', 'h', 'y', '6', '5', '4'], 'y': ['t', 'g', 'h', 'j', 'u', '7', '6', '5'], 'u': ['y', 'h', 'j', 'k', 'i', '8', '7', '6'], 'i': ['u', 'j', 'k', 'l', 'o', '9', '8', '7'], 'o': ['i', 'k', 'l', ';', 'p', '0', '9', '8'], 'p': ['o', 'l', ';', '\'', '[', '-', '0', '9'], '[': ['p', ';', '\'', ']', '=', '-', '0'], ']': ['[', '\'', '\\', '='], 'a': ['z', 'x', 's', 'w', 'q'], 's': ['a', 'z', 'x', 'c', 'd', 'e', 'w', 'q'], 'd': ['s', 'x', 'c', 'v', 'f', 'r', 'e', 'w'], 'f': ['d', 'c', 'v', 'b', 'g', 't', 'r', 'e'], 'g': ['f', 'v', 'b', 'n', 'h', 'y', 't', 'r'], 'h': ['g', 'b', 'n', 'm', 'j', 'u', 'y', 't'], 'j': ['h', 'n', 'm', ',', 'k', 'i', 'u', 'y'], 'k': ['j', 'm', ',', '.', 'l', 'o', 'i', 'u'], 'l': ['k', ',', '.', '/', ';', 'p', 'o', 'i'], ';': ['l', '.', '/', '\'', '[', 'p'], '\'': [';', '/', ']', '[', 'p'], 'z': [ 'x', 's', 'a'], 'x': ['z', 'c', 'd', 's', 'a'], 'c': ['x', 'v', 'f', 'd', 's'], 'v': ['c', 'b', 'g', 'f', 'd'], 'b': ['v', 'n', 'h', 'g', 'f'], 'n': ['b', 'm', 'j', 'h', 'g'], 'm': ['n', ',', 'k', 'j', 'h'], '1': ['q', 'w', '2', '`'], '2': ['1', 'q', 'w', 'e', '3'], '3': ['2', 'w', 'e', 'r', '4'], '4': ['3', 'e', 'r', 't', '5'], '5': ['4', 'r', 't', 'y', '6'], '6': ['5', 't', 'y', 'u', '7'], '7': ['6', 'y', 'u', 'i', '8'], '8': ['7', 'u', 'i', 'o', '9'], '9': ['8', 'i', 'o', 'p', '0'], '0': ['9', 'o', 'p', '[', '-'], '-': ['0', 'p', '[', ']', '='], '+': ['-', '[', ']', '\\'] } # version 1 variables max_extra = 1 # input has extra characters max_missing = -1 # input has less characters class MatchStats(object): def __init__(self, item, disparity): self.match = 0 self.proximity = 0 self.disparity = disparity self.item = item self.too_disparate = False self.missing = 0 def increment_match(self): self.match += 1 def increment_proximity(self): self.proximity += 1 def increment_proximity(self): self.proximity += 1 def increment_missing(self): self.missing += 1 def compare(self, other_instance): if other_instance is None: return self if self.proximity > other_instance.proximity: return other_instance elif self.proximity < other_instance.proximity: return self else: if self.match > other_instance.match: return self elif self.match < other_instance.match: return other_instance else: if self.disparity > other_instance.disparity: return other_instance else: return self class BetterMatchStats(object): # version 2 & 3 variables max_sequential_disparity = 2 def __init__(self, matchterm): self.match = 0 self.proximity = 0 self.disparity = 0 self.sequential_disparity = 0 self.matchterm = matchterm self.too_disparate = False self.runner_up_score = 0 self.runner_up_matchterm = '' def increment_match(self): self.match += 1 self._reset_sequential_disparity() def increment_proximity(self): self.proximity += 1 self._reset_sequential_disparity() def increment_disparity(self): self.disparity += 1 self._increment_sequential_disparity() if self.disparity > len(self.matchterm): self.too_disparate = True def _increment_sequential_disparity(self): self.sequential_disparity += 1 if self.sequential_disparity > BetterMatchStats.max_sequential_disparity: self.too_disparate = True def 
_reset_sequential_disparity(self): self.sequential_disparity = 0 def get_score(self): if self.disparity == 0 and self.proximity == 0: return 100 else: return 100 - ((self.disparity * 2) + self.proximity) def compare(self, other_instance): if other_instance is None or other_instance.too_disparate: return self if self.too_disparate: other_instance.runner_up_score = self.get_score() other_instance.runner_up_matchterm = self.matchterm return other_instance if self.disparity > other_instance.disparity: other_instance.runner_up_score = self.get_score() other_instance.runner_up_matchterm = self.matchterm return other_instance elif self.disparity < other_instance.disparity: return self if self.match > other_instance.match: return self elif self.match < other_instance.match: other_instance.runner_up_score = self.get_score() other_instance.runner_up_matchterm = self.matchterm return other_instance if self.proximity > other_instance.proximity: other_instance.runner_up_score = self.get_score() other_instance.runner_up_matchterm = self.matchterm return other_instance else: return self def copy_attributes(self, other_instance): self.match = other_instance.match self.proximity = other_instance.proximity self.disparity = other_instance.disparity self.sequential_disparity = other_instance.sequential_disparity self.too_disparate = other_instance.too_disparate @classmethod def copy(cls, obj): instance = BetterMatchStats(obj.matchterm) instance.match = obj.match instance.proximity = obj.proximity instance.disparity = obj.disparity instance.sequential_disparity = obj.sequential_disparity instance.too_disparate = obj.too_disparate return instance def is_in_proximity(char1, char2): if char2 in proximity_mapping[char1]: return True else: return False # version 1 def getbestmatch_v1(input_, list_): input_ = input_.lower() matchstats_best = None for item in list_: item = item.lower() disparity = len(input_) - len(item) # ensure disparity isn't too great if disparity < max_missing or disparity > max_extra: continue inner = input_ outer = item if disparity < 0: inner = input_ outer = item elif disparity > 0: inner = item outer = input_ # now we put the smaller as the inner to move around # so we use the absolute val of disparity to # put the smaller through the scenarios for i in range(0, abs(disparity) + 1): outer_subset = outer[i:] matchstats = MatchStats(item, abs(disparity)) # loop through characters and compare them for j, inner_char in enumerate(inner): if inner_char == outer_subset[j]: matchstats.increment_match() continue elif is_in_proximity(inner_char, outer_subset[j]): matchstats.increment_proximity() continue else: matchstats.too_disparate = True break if not matchstats.too_disparate: matchstats_best = matchstats.compare(matchstats_best) if matchstats_best is None: return None else: return matchstats_best.item # version 2 def getbestmatch_v2(input_, list_): # case insenitive matching input_ = input_.lower() # stores best match so far current_matchstats_best = None # iterate through all the possible matchterms # to find the best match for matchterm in list_: # case insenitive matching matchterm = matchterm.lower() # ensure disparity isn't too great from the get go # by comparing overall length, if it is too disparate # then move on to the next matchterm # if abs(len(input_) - len(matchterm)) > max_sequential_disparity: # continue # create object to hold the match stats matchstats = BetterMatchStats(matchterm) # run the input_ and matchterm through # scenarios find a potential match matchup_v2(input_, 
matchterm, matchstats) # done with while because we hit the end of an index # now let's calculate the leftover disparity max_char_len = 0 if len(input_) > len(matchterm): max_char_len = len(input_) else: max_char_len = len(matchterm) for i in (range(0, abs(max_char_len - (matchstats.match + matchstats.proximity + matchstats.disparity)))): matchstats.increment_disparity() # compare the matchstats after matchup with the current best matchstats # and set the better of the two to the best match so far # -- may the best match win... current_matchstats_best = matchstats.compare(current_matchstats_best) return current_matchstats_best.matchterm def matchup_v2(input_, matchterm, matchstats, depth=0): input_index = 0 matchterm_index = 0 while matchterm_index < len(matchterm) and input_index < len(input_): if input_[input_index] == matchterm[matchterm_index]: matchstats.increment_match() input_index = input_index + 1 matchterm_index = matchterm_index + 1 continue elif is_in_proximity(input_[input_index], matchterm[matchterm_index]): matchstats.increment_proximity() input_index = input_index + 1 matchterm_index = matchterm_index + 1 else: # increment disparity and check if we are too disparate matchstats.increment_disparity() if matchstats.too_disparate: return # here we need to branch and try both the possibility that input_ has # missing or extra chars, then compare the two branches to pick the # best matchup # input_ may have bad chars, similar to the proximity solution, # but treats it as a disparity bad_char_scenario = None if input_index + 1 <= len(input_) and matchterm_index + 1 <= len(matchterm): bad_char_scenario = BetterMatchStats.copy(matchstats) matchup_v2(input_[input_index + 1:], matchterm[matchterm_index + 1:], bad_char_scenario, depth=depth+1) # input_ may have missing chars missing_char_scenario = None if matchterm_index + 1 <= len(matchterm): missing_char_scenario = BetterMatchStats.copy(matchstats) matchup_v2(input_[input_index:], matchterm[matchterm_index + 1:], missing_char_scenario, depth=depth+1) # input_ may have extra chars extra_char_scenario = None if input_index + 1 <= len(input_): extra_char_scenario = BetterMatchStats.copy(matchstats) matchup_v2(input_[input_index + 1:], matchterm[matchterm_index:], extra_char_scenario, depth=depth+1) # if both the input_ and matchterm have reached the end of their input_ # then return if input_index + 1 >= len(input_) and matchterm_index + 1 >= len(matchterm): return # grab either one that is not None and compare to the other # one, which may be None, but one of these scenarios is # guaranteed to not be None by this point best_scenario = None if missing_char_scenario is not None: best_scenario = missing_char_scenario.compare(extra_char_scenario) else: best_scenario = extra_char_scenario.compare(missing_char_scenario) # compare the winner of missing vs extra with the bad chars scenario best_scenario = best_scenario.compare(bad_char_scenario) # copy the attributes from the best scenario # because simply setting the object makes the # root caller lose the changes matchstats.copy_attributes(best_scenario) return # investigate this # >> veweerython # Did you mean "deleteprop"? 
# version 3 def getbestmatch_v3(input_, list_, set_max_sequential_disparity=None): # case insenitive matching input_ = input_.lower() # stores best match so far current_matchstats_best = None if set_max_sequential_disparity is not None: BetterMatchStats.max_sequential_disparity = set_max_sequential_disparity # iterate through all the possible matchterms # to find the best match for matchterm in list_: # case insenitive matching matchterm = matchterm.lower() # ensure disparity isn't too great from the get go # by comparing overall length, if it is too disparate # then move on to the next matchterm # if abs(len(input_) - len(matchterm)) > max_sequential_disparity: # continue # create object to hold the match stats matchstats = BetterMatchStats(matchterm) if len(input_) > len(matchterm): max_char_len = len(input_) inner = matchterm outer = input_ else: max_char_len = len(matchterm) inner = input_ outer = matchterm # run the input_ and matchterm through # scenarios find a potential match matchup_v3(inner, outer, matchstats) for i in (range(0, abs(max_char_len - (matchstats.match + matchstats.proximity + matchstats.disparity)))): matchstats.disparity = matchstats.disparity + 1 # compare the matchstats after matchup with the current best matchstats # and set the better of the two to the best match so far # -- may the best match win... current_matchstats_best = matchstats.compare(current_matchstats_best) # >> testmatch hooman human humous humid # humid 90 0 return current_matchstats_best def matchup_v3(input_, matchterm, matchstats, depth=0): input_index = 0 matchterm_index = 0 while matchterm_index < len(matchterm) and input_index < len(input_): if input_[input_index] == matchterm[matchterm_index]: matchstats.increment_match() input_index = input_index + 1 matchterm_index = matchterm_index + 1 continue elif is_in_proximity(input_[input_index], matchterm[matchterm_index]): matchstats.increment_proximity() input_index = input_index + 1 matchterm_index = matchterm_index + 1 else: # increment disparity and check if we are too disparate matchstats.increment_disparity() if matchstats.too_disparate: return # here we need to branch and try both the possibility that input_ has # missing or extra chars, then compare the two branches to pick the # best matchup # input_ may have bad chars, similar to the proximity solution, # but treats it as a disparity bad_char_scenario = None if input_index + 1 <= len(input_) and matchterm_index + 1 <= len(matchterm): bad_char_scenario = BetterMatchStats.copy(matchstats) matchup_v3(input_[input_index + 1:], matchterm[matchterm_index + 1:], bad_char_scenario, depth=depth+1) # input_ may have missing chars missing_char_scenario = None if matchterm_index + 1 <= len(matchterm): missing_char_scenario = BetterMatchStats.copy(matchstats) matchup_v3(input_[input_index:], matchterm[matchterm_index + 1:], missing_char_scenario, depth=depth+1) # input_ may have extra chars extra_char_scenario = None if input_index + 1 <= len(input_): extra_char_scenario = BetterMatchStats.copy(matchstats) matchup_v3(input_[input_index + 1:], matchterm[matchterm_index:], extra_char_scenario, depth=depth+1) # if both the input_ and matchterm have reached the end of their input_ # then return if input_index + 1 >= len(input_) and matchterm_index + 1 >= len(matchterm): return # grab either one that is not None and compare to the other # one, which may be None, but one of these scenarios is # guaranteed to not be None by this point best_scenario = None if missing_char_scenario is not None: best_scenario 
= missing_char_scenario.compare(extra_char_scenario) else: best_scenario = extra_char_scenario.compare(missing_char_scenario) # compare the winner of missing vs extra with the bad chars scenario best_scenario = best_scenario.compare(bad_char_scenario) # copy the attributes from the best scenario # because simply setting the object makes the # root caller lose the changes matchstats.copy_attributes(best_scenario) return
apache-2.0
-2,147,103,867,725,780,500
35.935685
119
0.559569
false
3.726036
false
false
false
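
The matchsuggestion record above only hints at how the scorer is driven (the `>> plns` comment near the top). A rough usage sketch, assuming the file is importable as `matchsuggestion` and using an invented command list:

from matchsuggestion import getbestmatch_v3

commands = ['logs', 'plans', 'journal', 'status', 'deleteprop']
best = getbestmatch_v3('plns', commands)

# BetterMatchStats carries the winning term, its score and the runner-up,
# mirroring the "Did you mean 'logs'? : 97 : journal:90" comment in the file.
if best is not None and not best.too_disparate:
    print("Did you mean '%s'? : %d : %s:%d" % (
        best.matchterm, best.get_score(),
        best.runner_up_matchterm, best.runner_up_score))
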
pernici/sympy
sympy/series/tests/test_order.py
1
6982
from sympy import Symbol, Rational, Order, C, exp, ln, log, O, var, nan, pi, S from sympy.utilities.pytest import XFAIL, raises from sympy.abc import w, x, y, z def test_caching_bug(): #needs to be a first test, so that all caches are clean #cache it e = O(w) #and test that this won't raise an exception f = O(w**(-1/x/log(3)*log(5)), w) def test_simple_1(): o = Rational(0) assert Order(2*x) == Order(x) assert Order(x)*3 == Order(x) assert -28*Order(x) == Order(x) assert Order(-23) == Order(1) assert Order(exp(x)) == Order(1,x) assert Order(exp(1/x)).expr == exp(1/x) assert Order(x*exp(1/x)).expr == x*exp(1/x) assert Order(x**(o/3)).expr == x**(o/3) assert Order(x**(5*o/3)).expr == x**(5*o/3) assert Order(x**2 + x + y, x) == \ Order(x**2 + x + y, y) == O(1) raises(NotImplementedError, 'Order(x, 2 - x)') def test_simple_2(): assert Order(2*x)*x == Order(x**2) assert Order(2*x)/x == Order(1,x) assert Order(2*x)*x*exp(1/x) == Order(x**2*exp(1/x)) assert (Order(2*x)*x*exp(1/x)/ln(x)**3).expr == x**2*exp(1/x)*ln(x)**-3 def test_simple_3(): assert Order(x)+x == Order(x) assert Order(x)+2 == 2+Order(x) assert Order(x)+x**2 == Order(x) assert Order(x)+1/x == 1/x+Order(x) assert Order(1/x)+1/x**2 == 1/x**2+Order(1/x) assert Order(x)+exp(1/x) == Order(x)+exp(1/x) def test_simple_4(): assert Order(x)**2 == Order(x**2) assert Order(x**3)**-2 == Order(x**-6) def test_simple_5(): assert Order(x)+Order(x**2) == Order(x) assert Order(x)+Order(x**-2) == Order(x**-2) assert Order(x)+Order(1/x) == Order(1/x) def test_simple_6(): assert Order(x)-Order(x) == Order(x) assert Order(x)+Order(1) == Order(1) assert Order(x)+Order(x**2) == Order(x) assert Order(1/x)+Order(1) == Order(1/x) assert Order(x)+Order(exp(1/x)) == Order(exp(1/x)) assert Order(x**3)+Order(exp(2/x)) == Order(exp(2/x)) assert Order(x**-3)+Order(exp(2/x)) == Order(exp(2/x)) def test_simple_7(): assert 1+O(1) == O(1) assert 2+O(1) == O(1) assert x+O(1) == O(1) assert 1/x+O(1) == 1/x+O(1) def test_contains_0(): assert Order(1,x).contains(Order(1,x)) assert Order(1,x).contains(Order(1)) assert Order(1).contains(Order(1,x)) def test_contains_1(): assert Order(x).contains(Order(x)) assert Order(x).contains(Order(x**2)) assert not Order(x**2).contains(Order(x)) assert not Order(x).contains(Order(1/x)) assert not Order(1/x).contains(Order(exp(1/x))) assert not Order(x).contains(Order(exp(1/x))) assert Order(1/x).contains(Order(x)) assert Order(exp(1/x)).contains(Order(x)) assert Order(exp(1/x)).contains(Order(1/x)) assert Order(exp(1/x)).contains(Order(exp(1/x))) assert Order(exp(2/x)).contains(Order(exp(1/x))) assert not Order(exp(1/x)).contains(Order(exp(2/x))) def test_contains_2(): assert Order(x).contains(Order(y)) is None assert Order(x).contains(Order(y*x)) assert Order(y*x).contains(Order(x)) assert Order(y).contains(Order(x*y)) assert Order(x).contains(Order(y**2*x)) def test_contains_3(): assert Order(x*y**2).contains(Order(x**2*y)) is None assert Order(x**2*y).contains(Order(x*y**2)) is None def test_add_1(): assert Order(x+x) == Order(x) assert Order(3*x-2*x**2) == Order(x) assert Order(1+x) == Order(1,x) assert Order(1+1/x) == Order(1/x) assert Order(ln(x)+1/ln(x)) == Order(ln(x)) assert Order(exp(1/x)+x) == Order(exp(1/x)) assert Order(exp(1/x)+1/x**20) == Order(exp(1/x)) def test_ln_args(): assert O(log(x)) + O(log(2*x)) == O(log(x)) assert O(log(x)) + O(log(x**3)) == O(log(x)) assert O(log(x*y)) + O(log(x)+log(y)) == O(log(x*y)) def test_multivar_0(): assert Order(x*y).expr == x*y assert Order(x*y**2).expr == x*y**2 assert 
Order(x*y,x).expr == x assert Order(x*y**2,y).expr == y**2 assert Order(x*y*z).expr == x*y*z assert Order(x/y).expr == x/y assert Order(x*exp(1/y)).expr == x*exp(1/y) assert Order(exp(x)*exp(1/y)).expr == exp(1/y) def test_multivar_0a(): assert Order(exp(1/x)*exp(1/y)).expr == exp(1/x + 1/y) def test_multivar_1(): assert Order(x+y).expr == x+y assert Order(x+2*y).expr == x+y assert (Order(x+y)+x).expr == (x+y) assert (Order(x+y)+x**2) == Order(x+y) assert (Order(x+y)+1/x) == 1/x+Order(x+y) assert Order(x**2+y*x).expr == x**2+y*x def test_multivar_2(): assert Order(x**2*y+y**2*x,x,y).expr == x**2*y+y**2*x def test_multivar_mul_1(): assert Order(x+y)*x == Order(x**2+y*x,x,y) def test_multivar_3(): assert (Order(x)+Order(y)).args in [ (Order(x), Order(y)), (Order(y), Order(x))] assert Order(x)+Order(y)+Order(x+y) == Order(x+y) assert (Order(x**2*y)+Order(y**2*x)).args in [ (Order(x*y**2), Order(y*x**2)), (Order(y*x**2), Order(x*y**2))] assert (Order(x**2*y)+Order(y*x)) == Order(x*y) def test_issue369(): x = Symbol('x') y = Symbol('y', negative=True) z = Symbol('z', complex=True) # check that Order does not modify assumptions about symbols Order(x) Order(y) Order(z) assert x.is_positive == None assert y.is_positive == False assert z.is_positive == None assert x.is_infinitesimal == None assert y.is_infinitesimal == None assert z.is_infinitesimal == None def test_leading_order(): assert (x+1+1/x**5).extract_leading_order(x) == ((1/x**5, O(1/x**5)),) assert (1+1/x).extract_leading_order(x) == ((1/x, O(1/x)),) assert (1+x).extract_leading_order(x) == ((1, O(1, x)),) assert (1+x**2).extract_leading_order(x) == ((1, O(1, x)),) assert (2+x**2).extract_leading_order(x) == ((2, O(1, x)),) assert (x+x**2).extract_leading_order(x) == ((x, O(x)),) def test_leading_order2(): assert set((2+pi+x**2).extract_leading_order(x)) == set(((pi, O(1, x)), (S(2), O(1, x)))) assert set((2*x+pi*x+x**2).extract_leading_order(x)) == set(((2*x, O(x)), (x*pi, O(x)))) def test_order_leadterm(): assert O(x**2)._eval_as_leading_term(x) == O(x**2) def test_nan(): assert not O(x).contains(nan) def test_O1(): assert O(1) == O(1, x) assert O(1) == O(1, y) assert hash(O(1)) == hash(O(1, x)) assert hash(O(1)) == hash(O(1, y)) def test_getn(): # other lines are tested incidentally by the suite assert O(x).getn() == 1 assert O(x/log(x)).getn() == 1 assert O(x**2/log(x)**2).getn() == 2 assert O(x*log(x)).getn() == 1 raises(NotImplementedError, '(O(x) + O(y)).getn()') def test_diff(): assert O(x**2).diff(x) == O(x) def test_getO(): assert (x).getO() is None assert (x).removeO() == x assert (O(x)).getO() == O(x) assert (O(x)).removeO() == 0 assert (z + O(x) + O(y)).getO() == O(x) + O(y) assert (z + O(x) + O(y)).removeO() == z raises(NotImplementedError, '(O(x)+O(y)).getn()')
bsd-3-clause
-6,458,170,842,665,350,000
32.567308
78
0.563879
false
2.496246
true
false
false
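
The sympy record above exercises `Order` arithmetic directly; in everyday use the same objects mostly appear as the trailing term of a series expansion. A small sketch (assumes a reasonably recent sympy):

from sympy import O, symbols

x = symbols('x')

print(O(x) + x**2)                  # O(x): the higher-order term is absorbed
print((1/(1 - x)).series(x, 0, 3))  # 1 + x + x**2 + O(x**3)
print((x + O(x**2)).removeO())      # x: removeO() strips the order term
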
nadgowdas/cargo
cli/cargo.py
1
2456
#!/usr/bin/env python
#Copyright IBM Corporation 2015.
#LICENSE: Apache License 2.0 http://opensource.org/licenses/Apache-2.0

import os
import sys
import optparse
import logging

from voyage import *


def main():
    usage = "usage: python %prog {--list | --migrate --source <source> --container <container> --target <target> (optional)--rootfs}"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-l", "--list", action="store_true", dest="listc",
                      default=False, help="list containers")
    parser.add_option("-m", "--migrate", action="store_true", dest="migrate",
                      default=False, help="migrate container")
    parser.add_option("-f", "--failover", action="store_true", dest="failover",
                      default=False, help="failover container")
    parser.add_option("--status", action="store_true", dest="status",
                      default=False, help="query lazy replication status")
    parser.add_option("--source", action="store", dest="source",
                      default=None, help="Source Host (agent name)")
    parser.add_option("--container", action="store", dest="container",
                      default=None, help="Container name to be migrated")
    parser.add_option("--target", action="store", dest="target",
                      default=None, help="Target Host (agent name)")
    parser.add_option("--rootfs", action="store_true", dest="rootfs",
                      default=False, help="migrate rootfs")
    parser.add_option("-s", "--server", action="store", dest="server",
                      default="127.0.0.1:5000", help="Cargo server and port")

    opts, args = parser.parse_args()

    listc = opts.listc
    migrate = opts.migrate
    failover = opts.failover
    server = opts.server
    source = opts.source
    target = opts.target
    rootfs = opts.rootfs
    container = opts.container
    status = opts.status

    if not listc and not migrate and not failover and not status:
        parser.print_help()
    if migrate and not source and not target and not container:
        parser.print_help()
    if failover and not target and not container and not server:
        parser.print_help()
    if status and not container:
        parser.print_help()

    voyage = Voyage(server)
    if listc:
        voyage.listcontainers()
        sys.exit(0)

    if migrate:
        voyage.migrate(source, container, target, rootfs)
        sys.exit(0)

    if failover:
        voyage.failover(container, target)
        sys.exit(0)

    if status:
        voyage.getStatus(container)

if __name__ == "__main__":
    main()
apache-2.0
8,801,125,674,314,020,000
35.117647
150
0.661645
false
3.732523
false
false
false
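
The cargo CLI record above expects its mode flag plus host and container names on the command line; a sketch of driving it from Python with invented agent and container names (the script path and a reachable cargo server are assumed):

import subprocess

subprocess.check_call([
    "python", "cargo.py",
    "--migrate",
    "--source", "agent-a",
    "--container", "web1",
    "--target", "agent-b",
    "--rootfs",
    "--server", "127.0.0.1:5000",
])
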
Ilphrin/TuxleTriad
Menu.py
1
16142
# coding: utf-8 import pygame import os import sys import gettext from functions import * from color import * from pygame.locals import * from game import Application from Sound import Sound from Text import Text from Buttons import Button from listOfCards import * from Card import Card pygame.init() class Menu(pygame.sprite.Sprite): def __init__(self, width, height): self.FONT = "Playball.ttf" # We create the window self.width = width self.height = height fullscreen = pygame.NOFRAME self.dimension = (self.width, self.height) self.screen = pygame.display.set_mode(self.dimension, fullscreen) pygame.display.set_caption("TuxleTriad") self._load_translation() self.bkgrnd, self.bkgrndRect = loadImage("background.jpg") self.bkgrndRect = self.bkgrnd.get_rect() # The Clock of the game, to manage the frame-rate self.clock = pygame.time.Clock() self.fps = 30 # We start the Sound object, playing music and sounds. self.sound = Sound() # Needed to keep track of the game if we do a pause during the game. self.app = None self.main() def main(self): elemText = [_("Play"), _("Options"), _("Rules"), _("About"), _("Quit Game")] self.menu = [] for elem in elemText: self.menu.append(Text(elem, self.FONT, white, 40)) posx = 400 posy = 400 - (60 * len(elemText)) for elem in self.menu: elem.rect.center = ((posx, posy)) posy += 100 pygame.event.clear() self.updateMenu() while 1: pygame.display.flip() deactivate() event = pygame.event.wait() if event.type == MOUSEBUTTONUP: self.clicked() elif event.type == QUIT: self.quitGame() self.clock.tick(self.fps) def updateMenu(self): self.screen.blit(self.bkgrnd, self.bkgrndRect) for i in range(len(self.menu)): self.screen.blit(self.menu[i].surface, self.menu[i].rect) self.clock.tick(self.fps) def quitGame(self): setConfig(self.sound.volume) pygame.quit() sys.exit() def oldMenu(self): while(1): for button in self.menu: button.rect.centerx -= 100 - self.fps if (button.rect.centerx <= - 500): return; self.updateMenu() pygame.display.flip() def clicked(self): for button in self.menu: if button.rect.collidepoint(pygame.mouse.get_pos()): self.sound.clicMenu.play() if button.text == _(u"Quit Game"): self.quitGame() self.oldMenu() if button.text == _(u"Play"): self.play() elif button.text == _(u"Options"): self.options() elif button.text == _(u"Rules"): self.rules() elif button.text == _(u"About"): self.about() self.main() def play(self): """User clicked on "Play" """ if self.app != None: texts = [_("Continue"),_("Adventure"), _("Solo"), _("Hot Seat"), _("Back")] else: texts = [_("Adventure"), _("Solo"), _("Hot Seat"), _("Back")] length = len(texts) if self.app != None: textPos = [(250, 100), (250,200), (250, 300), (250,400), (550, 500)] else: textPos = [(250, 100), (250,200), (250, 300), (550, 500)] self.menu = [] for i in range(length): self.menu.append(Text(texts[i], self.FONT, white, 45)) self.menu[i].rect.topleft = textPos[i] self.updateMenu() pygame.display.flip() self.clock.tick(self.fps) while 1: event = pygame.event.wait() if event.type == QUIT: pygame.quit() sys.exit() elif event.type == MOUSEBUTTONUP: coordinates = pygame.mouse.get_pos() for i in range(length): if self.menu[i].rect.collidepoint(coordinates): self.sound.clicMenu.play() self.oldMenu() if self.menu[i].text == _("Adventure"): return elif self.menu[i].text == _("Solo"): return elif self.menu[i].text == _("Hot Seat"): self.hotSeat() elif self.menu[i].text == _("Back"): return elif self.menu[i].text == _("Continue"): self.app.main() def options(self): texts = [_("Audio"), _("Sounds"), _("Music"), _("Back")] 
length = len(texts) textsPos = [(320, 100), (100, 200), (100, 300), (550, 500)] self.menu = [] for i in range(length): self.menu.append(Text(texts[i], self.FONT, white, 50)) self.menu[i].rect.topleft = textsPos[i] bar1, bar1Rect = loadImage("barSound.jpg") bar2, bar2Rect = loadImage("barSound.jpg") bar1Rect.topleft = (300, 220) bar2Rect.topleft = (300, 320) bars = [bar1Rect, bar2Rect] # X coordinates, relative to the bar's, of beginning and ending # of each volume cursor. MIN_VOLUME = 15 MAX_VOLUME = 240 # X absolute coordinates of the volume cursor. MIN = bars[0].x + MIN_VOLUME MAX = bars[0].x + MAX_VOLUME cursor1, cursor1Rect = loadImage("cursorSound.png") cursor2, cursor2Rect = loadImage("cursorSound.png") cursor1Rect.topleft = \ (bar1Rect.x + 225 * self.sound.soundVolume, bar1Rect.y - 23) cursor2Rect.topleft = \ (bar2Rect.x + 225 * self.sound.musicVolume, bar2Rect.y - 23) cursors = [cursor1Rect, cursor2Rect] self.screen.blit(self.bkgrnd, self.bkgrndRect) self.screen.blit(bar1, bar1Rect) self.screen.blit(bar2, bar2Rect) self.screen.blit(cursor1, cursors[0]) self.screen.blit(cursor2, cursors[1]) for i in range(length): self.screen.blit(self.menu[i].surface, self.menu[i].rect) pygame.display.update() move = 0 while 1: event = pygame.event.wait() mousex, mousey = pygame.mouse.get_pos() if event.type == QUIT: self.quitGame() elif event.type == MOUSEBUTTONDOWN: move = 1 reactivate() elif event.type == MOUSEBUTTONUP: move = 0 deactivate() for i in range(len(bars)): if move == 1 and bars[i].collidepoint((mousex, mousey)): if MIN <= mousex <= MAX: cursors[i].centerx = mousex elif mousex > bars[i].x + MAX_VOLUME: cursors[i].centerx = bars[i].x + MAX_VOLUME else: cursors[i].centerx = bars[i].x + MIN_VOLUME volume = cursors[i].centerx - MIN if volume != 0: volume = (volume / 2.25) / 100.0 assert (0.0 <= volume <= 1.0) if i == 0: self.sound.soundVolume = volume self.sound.playPutCard() self.sound.update() elif i == 1: self.sound.musicVolume = volume self.sound.update() self.screen.blit(self.bkgrnd, self.bkgrndRect) self.screen.blit(bar1, bar1Rect) self.screen.blit(bar2, bar2Rect) self.screen.blit(cursor1, cursors[0]) self.screen.blit(cursor2, cursors[1]) for j in range(4): self.screen.blit(self.menu[j].surface,\ self.menu[j].rect) pygame.display.update() self.clock.tick(self.fps) if move and self.menu[3].rect.collidepoint((mousex, mousey)): del bar1, bar2, bars, cursor1, cursor2, cursors self.oldMenu() self.sound.clicMenu.play() return def about(self): page = 1 allPage = [] pageList = [] index = 0 for number in range(len(allCards)): pageList.append(Card(number, 1)) index += 1 if index == 3 or number == (len(allCards) or len(allCards)-1): allPage.append(pageList) del pageList pageList = [] index = 0 maxPage = len(allPage) txtPage = str(page) + "/" + str(maxPage) navigation = [_("Back"), _("Next"), _("Quit"), "Programming:", "Kevin \"Ilphrin\" Pellet", "Graphics:", "Yunero Kisapsodos", txtPage] navigationPos = [(80,550), (650,550), (660,40), (630, 100), (640, 130), (630, 200), (640, 230), (350,550)] self.menu = [] for i in range(len(navigation)): if 2 < i < 7: size = 12 font = "rimouski sb.ttf" else: font = self.FONT size = 30 self.menu.append(Text(navigation[i], font, white, size)) self.menu[i].rect.topleft = navigationPos[i] cardPos = [(50,50), (50,200), (50, 350)] self.screen.blit(self.bkgrnd, self.bkgrndRect) for element in self.menu: self.screen.blit(element.surface,element.rect) for elem in range(len(allPage[page-1])): card = allPage[page-1][elem] card.rect.topleft = cardPos[elem] 
card.About.rect.topleft = card.rect.topright for elem in allPage[page-1]: self.screen.blit(elem.image, elem.rect) self.screen.blit(elem.About.surface, elem.About.rect) while 1: self.clock.tick(self.fps) pygame.display.flip() event = pygame.event.wait() if event.type == MOUSEBUTTONUP: coords = pygame.mouse.get_pos() for button in self.menu: if button.rect.collidepoint(coords): if button.text == _("Back"): if page > 1: page -= 1 self.sound.putcard.play() if button.text == _("Next"): if page < maxPage: page += 1 self.sound.putcard.play() if button.text == _("Quit"): self.oldMenu() return txtPage = str(page) + "/" + str(maxPage) self.menu[7] = Text(txtPage, self.FONT, white, 30) self.menu[7].rect.topleft = navigationPos[7] self.screen.blit(self.bkgrnd, self.bkgrndRect) for element in self.menu: self.screen.blit(element.surface,element.rect) for elem in range(len(allPage[page-1])): card = allPage[page-1][elem] card.rect.topleft = cardPos[elem] card.About.rect.topleft = card.rect.topright for elem in allPage[page-1]: self.screen.blit(elem.image, elem.rect) self.screen.blit(elem.About.surface, elem.About.rect) if event.type == QUIT: self.quitGame() def rules(self): tutorialButton = Button(_(u"Tutorial"), self.FONT, white) howtoButton = Button(_(u"How To"), self.FONT, white) backButton = Button(_(u"Back"), self.FONT, white) tutorialButton.rect.topleft = (250, 100) howtoButton.rect.topleft = (250, 200) backButton.rect.topleft = (550, 500) self.menu = [] self.menu.append(tutorialButton) self.menu.append(howtoButton) self.menu.append(backButton) self.updateMenu() while (1): self.clock.tick(self.fps) pygame.display.flip() event = pygame.event.wait() if event.type == MOUSEBUTTONUP: coords = pygame.mouse.get_pos() for i in range(len(self.menu)): if self.menu[i].rect.collidepoint(coords): self.oldMenu() if self.menu[i].text == _(u"Tutorial"): self.main() elif self.menu[i].text == _(u"How To"): self.HowTo() return elif self.menu[i].text == _(u"Back"): self.main() elif event.type == QUIT: self.quitGame() def HowTo(self): backButton = Button(_("Back"), self.FONT, white) prevButton = Button(_("Prev"), self.FONT, white) nextButton = Button(_("Next"), self.FONT, white) page = 1 maxPage = 2 pageList = [] for i in range(maxPage): pageList.append(pygame.image.load(getHowTo(i))) pageRect = pageList[i - 1].get_rect() pageRect.topleft = (-20, 0) backButton.rect.topleft = (600, 40) prevButton.rect.topleft = (80, 550) nextButton.rect.topleft = (660, 550) self.menu = [] self.menu.append(backButton) self.menu.append(prevButton) self.menu.append(nextButton) self.updateMenu() self.screen.blit(pageList[page - 1], pageRect) while (1): self.clock.tick(self.fps) pygame.display.flip() event = pygame.event.wait() if event.type == MOUSEBUTTONUP: coords = pygame.mouse.get_pos() if backButton.rect.collidepoint(coords): self.oldMenu() return elif prevButton.rect.collidepoint(coords) and page > 1: page -= 1 elif nextButton.rect.collidepoint(coords) and page < maxPage: page += 1 self.updateMenu() self.screen.blit(pageList[page - 1], pageRect) elif event.type == QUIT: self.quitGame() def _load_translation(self): base_path = os.getcwd() directory = os.path.join(base_path, 'translations') print "Loading translations at: ", directory params = { 'domain': 'tuxle-triad', 'fallback': True } if os.path.isdir(directory): params.update({'localedir': directory}) translation = gettext.translation(**params) translation.install("ngettext") def solo(self): """1vsIA mode""" print "Solo!" 
    def adventure(self):
        """Adventure mode against AI"""
        print "Adventure!"

    def hotSeat(self):
        """1vs1 mode"""
        if self.app is not None:
            del self.app
            Application(800, 600, self.screen, self.sound, self).main()
        else:
            Application(800, 600, self.screen, self.sound, self).main()

Menu(800, 600)
mit
1,228,710,710,715,892,200
36.714953
78
0.477016
false
4.137913
false
false
false
mattilyra/gensim
docs/src/conf.py
1
7457
# -*- coding: utf-8 -*- # # gensim documentation build configuration file, created by # sphinx-quickstart on Wed Mar 17 13:42:21 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- html_theme = 'gensim_theme' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon', 'sphinx.ext.imgmath', 'sphinxcontrib.programoutput'] autoclass_content = "both" napoleon_google_docstring = False # Disable support for google-style docstring # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8' # The master toctree document. master_doc = 'indextoc' # Additional templates that should be rendered to pages, maps page names to # template names. html_additional_pages = {'index': './_templates/indexcontent.html'} # General information about the project. project = u'gensim' copyright = u'2009-now, Radim Řehůřek <me(at)radimrehurek.com>' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '3.4' # The full version, including alpha/beta/rc tags. release = '3.4.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. # unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. 
# html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # main_colour = "#ffbbbb" html_theme_options = { # "rightsidebar": "false", # "stickysidebar": "true", # "bodyfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'", # "headfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'", # "sidebarbgcolor": "fuckyou", # "footerbgcolor": "#771111", # "relbarbgcolor": "#993333", # "sidebartextcolor": "#000000", # "sidebarlinkcolor": "#330000", # "codebgcolor": "#fffff0", # "headtextcolor": "#000080", # "headbgcolor": "#f0f0ff", # "bgcolor": "#ffffff", } # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['.'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = "gensim" # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = '' # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = '_static/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = {} # {'index': ['download.html', 'globaltoc.html', 'searchbox.html', 'indexsidebar.html']} # html_sidebars = {'index': ['globaltoc.html', 'searchbox.html']} # If false, no module index is generated. # html_use_modindex = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False html_domain_indices = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'gensimdoc' html_show_sphinx = False # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [('index', 'gensim.tex', u'gensim Documentation', u'Radim Řehůřek', 'manual')] # The name of an image file (relative to this directory) to place at the top of # the title page. 
# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_use_modindex = True suppress_warnings = ['image.nonlocal_uri', 'ref.citation', 'ref.footnote']
lgpl-2.1
-371,278,304,731,654,850
32.868182
114
0.707153
false
3.577052
true
false
false
matham/cutils
cutils/knspace.py
1
15278
'''Provides namespace functionality for Kivy objects. It allows kivy objects to be named and then accessed using the namespace. :class:`KNSpace` instances are the namespaces that store the named objects. Classes need to inherit from :class:`KNSpaceBehavior` so that the class, when named, will be stored in the namespace. :attr:`knspace` is the default namespace where objects are stored, unless the object is associated with a different namespace. Simple Example ----------------- Here, because no namespace is specified, the default :attr:`knspace` is used so we can access its widgets directly, as in `knspace.keyboard`, to get the keyboard widget:: #:import knspace cutils.knspace.knspace #:import Factory kivy.lang.Factory <NamedTextInput@KNSpaceBehavior+TextInput> <Keyboard@Popup>: BoxLayout: GridLayout: cols: 1 NamedTextInput: name: 'keyboard' hint_text: 'Type something' Label: text: 'My Keyboard' Button: text: 'Close Keyboard' on_press: root.dismiss() <RootWidget@BoxLayout>: Button: on_parent: self.popup = Factory.Keyboard() on_release: self.popup.open() text: 'Open keyboard' Label: text: 'Keyboard output:\\n' + knspace.keyboard.text if knspace.keyboard else '' To test, run a app with `RootWidget`. Multiple Namespaces ------------------- In the previous example, only the default namespace was used. However, sometimes we need to split namespaces so we can reuse the name across multiple widgets using the same name. When a :class:`KNSpaceBehavior` derived widget is given a name, first we find the associated namespace using the :attr:`KNSpaceBehavior.knspace` property. Then, we create a :class:`~kivy.properties.ObjectProperty` in that namespace, whose name is that name and assign the named widget as its value. See :attr:`KNSpaceBehavior.knspace` for details on how that namespace is found. In short, we check if the widget was assigned one, if not, we find the namespace by walking up its parent tree using :attr:`KNSpaceBehavior.knspace_key` and finding the first one with a namespace. Finally, if not found, we use :attr:`knspace`. Therefore, above, the default namespace was used since none was specified. :: #:import Factory kivy.lang.Factory <NamedTextInput@KNSpaceBehavior+TextInput> <Keyboard@KNSpaceBehavior+Popup>: knspace_key: 'knspace_parent' knspace_parent: None BoxLayout: GridLayout: cols: 1 NamedTextInput: name: 'keyboard' hint_text: 'Type something' Label: text: 'My Keyboard' Button: text: 'Close Keyboard' on_press: root.dismiss() <Typist@KNSpaceBehavior+BoxLayout>: knspace: getattr(self, 'knspace').clone() # So we don't create a rule binding Button: on_parent: self.popup = Factory.Keyboard() self.popup.knspace_parent = root on_release: self.popup.open() text: 'Open keyboard' Label: text: 'Keyboard output:\\n' + root.knspace.keyboard.text if root.knspace.keyboard else '' <RootWidget@BoxLayout>: Typist Typist In this example, we wanted two typists, rather than a single keyboard. But within a typist we wanted to be able to use names, even though typist share identical names. To do this, we have `knspace: getattr(self, 'knspace').clone()`. This forks the current namespace (which happens to be the default, :attr:`knspace`) and create a namespace shared by widgets that are offspring of that `Typist`. Now, each `Typist` gets its own namespace, while still sharing the default namespaces from which it was cloned for widgets not in its namespace. 
`knspace_key: 'knspace_parent'` is required, since a `Popup` is not a child the `Typist`, but they do have to share the namspace, so instead of using `parent` to find the next namespace up the tree, we use the specified `knspace_parent` attribute which points to the Typist and hence its namespace. Traditional namespace --------------------- In the above example, we accessed the namespace using e.g. `root.knspace.keyboard`. We can also access it without having access to e.g. `root` like in a traditional namespace access. We can change the above `RootWidget` into:: <RootWidget@KNSpaceBehavior+BoxLayout>: name: 'root' Typist Typist Now, we can do:: knspace.root.children[0].knspace.keyboard.hint_text = 'Type something else' And the second Typist's keyboard will have a different hint text. Of course we could also have done `root.children[0].knspace.keyboard.hint_text = 'Type something else'` if had access to the root widget. ''' __all__ = ('KNSpace', 'KNSpaceBehavior', 'knspace') from kivy.event import EventDispatcher from kivy.properties import StringProperty, ObjectProperty, AliasProperty from kivy.lang import Factory knspace = None '''The default :class:`KNSpace` namespace. If a :class:`KNSpace` namespace has not been assigned to a :class:`KNSpaceBehavior` instance, then this :class:`KNSpace` namespace serves as the default namespace. See the examples and :class:`KNSpaceBehavior` for more details. ''' class KNSpace(EventDispatcher): '''Each :class:`KNSpace` instance is a namespace that stores the named Kivy objects when they are associated with this namespace. Each named object is stored as the value of a Kivy :class:`~kivy.properties.ObjectProperty` of this instance whose property name is the object's given name. Both `rebind` and `allownone` are set to `True` for the property. See :attr:`KNSpaceBehavior` for details on how a namespace is associated with a named object. When storing an object in the namespace, the object's `proxy_ref` is stored if the object has such an attribute. :Parameters: `parent`: (internal) A :class:`KNSpace` instance or None. If specified, it's a parent namespace, in which case, the current namespace will have in its namespace all its named objects as well as the named objects of its parent and parent's parent etc. See :meth:`clone` for more details. ''' parent = None '''(internal) The parent namespace instance, :class:`KNSpace`, or None. See :meth:`clone`. 
''' __has_applied = None def __init__(self, parent=None, **kwargs): super(KNSpace, self).__init__(**kwargs) self.parent = parent self.__has_applied = set(self.properties().keys()) def __setattr__(self, name, value): prop = super(KNSpace, self).property(name, quiet=True) has_applied = self.__has_applied if prop is None: if hasattr(self, name): super(KNSpace, self).__setattr__(name, value) else: value = getattr(value, 'proxy_ref', value) self.apply_property( **{name: ObjectProperty(value, rebind=True, allownone=True)} ) has_applied.add(name) elif name not in has_applied: self.apply_property(**{name: prop}) has_applied.add(name) value = getattr(value, 'proxy_ref', value) super(KNSpace, self).__setattr__(name, value) else: value = getattr(value, 'proxy_ref', value) super(KNSpace, self).__setattr__(name, value) def __getattr__(self, name): parent = self.parent if parent is None: raise AttributeError(name) return getattr(parent, name) def property(self, name, quiet=False): # needs to overwrite EventDispatcher.property so kv lang will work prop = super(KNSpace, self).property(name, quiet=quiet) if prop is not None: return prop prop = ObjectProperty(None, rebind=True, allownone=True) self.apply_property(**{name: prop}) self.__has_applied.add(name) return prop def clone(self): '''Creates a new :class:`KNSpace` instance which will have access to all the named objects in the current namespace but will also have a namespace of its own that is unique to it. Any new names added to a :class:`KNSpaceBehavior` associated with this instance will be accesible only through this instance and not its parent(s). However, when looking for a named object using this namespace, if the object is not found in this namespace we search it's parent namespace and so on until we (don't) find it. ''' return KNSpace(parent=self) class KNSpaceBehavior(object): '''Inheriting from this class allows naming of the inherited object, which is then added to the associated namespace :attr:`knspace` and accessible through it. ''' _knspace = ObjectProperty(None, allownone=True) _name = StringProperty('') __last_knspace = None __callbacks = None def __init__(self, knspace=None, **kwargs): self.knspace = knspace super(KNSpaceBehavior, self).__init__(**kwargs) def __knspace_clear_callbacks(self, *largs): for obj, name, uid in self.__callbacks: obj.unbind_uid(name, uid) last = self.__last_knspace self.__last_knspace = self.__callbacks = None assert self._knspace is None assert last new = self.__set_parent_knspace() if new is last: return self.property('_knspace').dispatch(self) name = self.name if not name: return if getattr(last, name) == self: setattr(last, name, None) if new: setattr(new, name, self) else: raise ValueError('Object has name "{}", but no namespace'. 
format(name)) def __set_parent_knspace(self): callbacks = self.__callbacks = [] fbind = self.fbind append = callbacks.append parent_key = self.knspace_key clear = self.__knspace_clear_callbacks append((self, 'knspace_key', fbind('knspace_key', clear))) if not parent_key: self.__last_knspace = knspace return knspace append((self, parent_key, fbind(parent_key, clear))) parent = getattr(self, parent_key, None) while parent is not None: fbind = parent.fbind parent_knspace = getattr(parent, 'knspace', 0) if parent_knspace is not 0: append((parent, 'knspace', fbind('knspace', clear))) self.__last_knspace = parent_knspace return parent_knspace append((parent, parent_key, fbind(parent_key, clear))) new_parent = getattr(parent, parent_key, None) if new_parent is parent: break parent = new_parent self.__last_knspace = knspace return knspace def _get_knspace(self): _knspace = self._knspace if _knspace is not None: return _knspace if self.__callbacks is not None: return self.__last_knspace # we only get here if we never accessed our knspace return self.__set_parent_knspace() def _set_knspace(self, value): if value is self._knspace: return knspace = self._knspace or self.__last_knspace name = self.name if name and knspace: setattr(knspace, name, None) # reset old namespace if value == 'clone': if not knspace: knspace = self.knspace # get parents in case we haven't before if knspace: value = knspace.clone() else: raise ValueError('Cannot clone with no namesapce') for obj, prop_name, uid in self.__callbacks or []: obj.unbind_uid(prop_name, uid) self.__last_knspace = self.__callbacks = None if name: if value is None: # if None, first update the recursive knspace knspace = self.__set_parent_knspace() if knspace: setattr(knspace, name, self) self._knspace = None # cause a kv trigger else: setattr(value, name, self) knspace = self._knspace = value if not knspace: raise ValueError('Object has name "{}", but no namespace'. format(name)) else: if value is None: self.__set_parent_knspace() # update before trigger below self._knspace = value knspace = AliasProperty( _get_knspace, _set_knspace, bind=('_knspace', ), cache=False, rebind=True, allownone=True) '''The namespace instance, :class:`KNSpace`, associated with this widget. When this widget is named with :attr:`name` the name is added to the :attr:`knspace` namespace pointing to this widget. If the namespace has been set with a :class:`KNSpace` instance, e.g. with `self.knspace = ...`, then that instance is used. Otherwise, we look at the property named :attr:`knspace_key` of this obj. If that object has a knspace property we use that namespace. Otherwise, we look at its :attr:`knspace_key` object and walk up the parent tree until we find a parent who has a namespace instance. Finally, if there's no parent with a namespace, the default :attr:`~cutils.knspace.knspace` namespace is used. Both `rebind` and `allownone` are `True`. ''' knspace_key = StringProperty('parent', allownone=True) '''The name of the property of this instance, to use to find the namespace associated with this instance. Defaults to `'parent'` so that we'll look up the parent tree to find the namespace. See :attr:`knspace`. When `None`, we won't search the parent tree for the namespace. `allownone` is `True`. 
''' def _get_name(self): return self._name def _set_name(self, value): old_name = self._name knspace = self.knspace if old_name and knspace: setattr(knspace, old_name, None) self._name = value if value: if knspace: setattr(knspace, value, self) else: raise ValueError('Object has name "{}", but no namespace'. format(value)) name = AliasProperty(_get_name, _set_name, bind=('_name', ), cache=False) '''The name given to this object. If named, the name will be added to the associated :attr:`knspace` and will point to the `proxy_ref` of this object. When named, one can access this object by e.g. knspace.name, where `name` is the given name of this instance. See :attr:`knspace` and the module description for more details. ''' knspace = KNSpace() Factory.register('KNSpaceBehavior', cls=KNSpaceBehavior)
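A minimal usage sketch for the behavior defined above; the Label subclass, the name 'status', and the kivy Label import are assumptions for illustration only, not part of this module.

from kivy.uix.label import Label

class NamedLabel(KNSpaceBehavior, Label):
    # mixes the naming behavior into an ordinary widget class
    pass

label = NamedLabel(name='status', text='ready')
# The instance is now reachable (as a proxy_ref) through the shared namespace:
assert knspace.status.text == 'ready'

# A cloned namespace still sees the parent's names but keeps new names to itself:
child_space = knspace.clone()
assert child_space.status.text == 'ready'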
mit
1,435,365,778,809,909,800
35.63789
101
0.623969
false
4.024763
false
false
false
prashrock/Python
leetCode/largest_number/create_largest_number_from_array.py
1
1340
# Use a custom sort comparator to sort the integers # Converted the sorted integer array into a string def cmp_to_key(mycmp): 'Convert a cmp= function into a key= function' class K: def __init__(self, obj, *args): self.obj = obj def __lt__(self, other): return mycmp(self.obj, other.obj) < 0 def __gt__(self, other): return mycmp(self.obj, other.obj) > 0 def __eq__(self, other): return mycmp(self.obj, other.obj) == 0 def __le__(self, other): return mycmp(self.obj, other.obj) <= 0 def __ge__(self, other): return mycmp(self.obj, other.obj) >= 0 def __ne__(self, other): return mycmp(self.obj, other.obj) != 0 return K # @param x, first integer # @param y, second integer # @return (xy - yx) def cmp_aggregate(x, y): str_xy = ''.join((str(x), str(y))) str_yx = ''.join((str(y), str(x))) return int(str_xy) - int(str_yx) #Sort with a custom comparator and get descending order def largestNumber(num): sorted_num = sorted(num, key=cmp_to_key(cmp_aggregate), reverse=True) print sorted_num sorted_str = ''.join(map(str, sorted_num)) if(int(sorted_str) == 0): return '0' else: return sorted_str num = [3, 30, 34, 5, 9] print num print largestNumber(num)
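Two quick spot checks of the comparator-based ordering above (expected results noted in the comments; the second case exercises the all-zero guard):

print largestNumber([3, 30, 34, 5, 9])  # expected: 9534330
print largestNumber([0, 0])             # expected: 0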
gpl-2.0
4,418,024,888,891,023,400
31.682927
73
0.581343
false
3.252427
false
false
false
emulbreh/lymph
lymph/core/events.py
1
3099
import re import logging from lymph.core.interfaces import Component from lymph.core import trace logger = logging.getLogger(__name__) class Event(object): def __init__(self, evt_type, body, source=None, headers=None, event_id=None): self.event_id = event_id self.evt_type = evt_type self.body = body self.source = source self.headers = headers or {} def __getitem__(self, key): return self.body[key] def __iter__(self): return iter(self.body) def __repr__(self): return '<Event type=%r body=%r>' % (self.evt_type, self.body) def __str__(self): return '{type=%s id=%s}' % (self.evt_type, self.event_id) @classmethod def deserialize(cls, data): return cls(data.get('type'), data.get('body', {}), source=data.get('source'), headers=data.get('headers')) def serialize(self): return { 'type': self.evt_type, 'headers': self.headers, 'body': self.body, 'source': self.source, } class EventHandler(Component): def __init__(self, interface, func, event_types, sequential=False, queue_name=None, active=True): self.func = func self.event_types = event_types self.sequential = sequential self.active = active self.interface = interface self._queue_name = queue_name or func.__name__ @property def queue_name(self): return '%s-%s' % (self.interface.name, self._queue_name) @queue_name.setter def queue_name(self, value): self._queue_name = value def on_start(self): self.interface.container.subscribe(self, consume=self.active) def __call__(self, event, *args, **kwargs): trace.set_id(event.headers.get('trace_id')) logger.debug('<E %s', event) return self.func(self.interface, event, *args, **kwargs) class EventDispatcher(object): wildcards = { '#': r'[\w.]*(?=\.|$)', '*': r'\w+', } def __init__(self, patterns=()): self.patterns = [] self.update(patterns) def compile(self, key): words = (self.wildcards.get(word, re.escape(word)) for word in key.split('.')) return re.compile('^%s$' % r'\.'.join(words)) def register(self, pattern, handler): self.patterns.append(( self.compile(pattern), pattern, handler, )) def __iter__(self): for regex, pattern, handler in self.patterns: yield pattern, handler def update(self, other): for pattern, handler in other: self.register(pattern, handler) def dispatch(self, evt_type): for regex, pattern, handler in self.patterns: if regex.match(evt_type): yield pattern, handler def __call__(self, event): handlers = set() for pattern, handler in self.dispatch(event.evt_type): if handler not in handlers: handlers.add(handler) handler(event) return bool(handlers)
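A small standalone sketch of the wildcard dispatcher defined above; the event types and the handler are invented examples, not part of lymph itself.

received = []

def on_user_event(event):
    received.append(event.evt_type)

dispatcher = EventDispatcher()
dispatcher.register('user.#', on_user_event)           # '#' matches any dotted tail
dispatcher.register('user.*.deleted', on_user_event)   # '*' matches exactly one word

dispatcher(Event('user.account.deleted', {'id': 42}))
# Both patterns match, but the handler set is deduplicated, so it runs only once:
assert received == ['user.account.deleted']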
apache-2.0
1,420,763,787,660,582,100
26.918919
114
0.571475
false
3.878598
false
false
false
mesocentrefc/Janua-SMS
janua/actions/sms_usage.py
1
2426
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*- # # Copyright (c) 2016 Cédric Clerget - HPC Center of Franche-Comté University # # This file is part of Janua-SMS # # http://github.com/mesocentrefc/Janua-SMS # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from janua import jdb from janua.actions.action import Action from janua.utils.utilities import get_role from janua.ws.services import urlconfig, jsonify class SmsUsage(Action): """ Get SMS usage based on administrator quota * Sample request with administrator level: .. code-block:: javascript GET /sms-usage HTTP/1.1 Host: janua.mydomain.com Content-Type: application/json JanuaAuthToken: abcdef123456789 Sample response: .. code-block:: javascript HTTP/1.1 200 { "smsusage": { "global": 18, "quota": "100 M", "sent": 18 } } * Sample request with supervisor level: .. code-block:: javascript GET /sms-usage HTTP/1.1 Host: janua.mydomain.com Content-Type: application/json Sample response: .. code-block:: javascript HTTP/1.1 200 { "smsusage": { "quota": "200 D", "sent": 4 } } """ category = '__INTERNAL__' @urlconfig('/sms-usage') def web(self): admin = jdb.admin.get_by_phone(self.phone_number) data = { 'success': True, 'params': [], 'num_params': 0 } reached, numsms = jdb.sms.is_admin_quota_reached(admin) quota = admin.sms_quota data = {'sent': int(numsms), 'quota': quota} if get_role(admin) == 'admin': data.update({'global': int(jdb.sms.month_usage())}) return jsonify(smsusage=data)
gpl-2.0
3,349,284,974,751,728,000
25.347826
76
0.60066
false
3.805338
false
false
false
nathanbjenx/cairis
cairis/controllers/TemplateGoalController.py
1
3319
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sys if (sys.version_info > (3,)): import http.client from http.client import BAD_REQUEST, CONFLICT, NOT_FOUND, OK else: import httplib from httplib import BAD_REQUEST, CONFLICT, NOT_FOUND, OK from flask import session, request, make_response from flask_restful import Resource from cairis.data.TemplateGoalDAO import TemplateGoalDAO from cairis.tools.JsonConverter import json_serialize from cairis.tools.MessageDefinitions import TemplateGoalMessage from cairis.tools.ModelDefinitions import TemplateGoalModel from cairis.tools.SessionValidator import get_session_id __author__ = 'Shamal Faily' class TemplateGoalsAPI(Resource): def get(self): session_id = get_session_id(session, request) constraint_id = request.args.get('constraint_id', -1) dao = TemplateGoalDAO(session_id) tgs = dao.get_template_goals(constraint_id=constraint_id) dao.close() resp = make_response(json_serialize(tgs, session_id=session_id)) resp.headers['Content-Type'] = "application/json" return resp def post(self): session_id = get_session_id(session, request) dao = TemplateGoalDAO(session_id) new_tg = dao.from_json(request) dao.add_template_goal(new_tg) dao.close() resp_dict = {'message': 'Template Goal successfully added'} resp = make_response(json_serialize(resp_dict, session_id=session_id), OK) resp.contenttype = 'application/json' return resp class TemplateGoalByNameAPI(Resource): def get(self, name): session_id = get_session_id(session, request) dao = TemplateGoalDAO(session_id) found_tg = dao.get_template_goal(name) dao.close() resp = make_response(json_serialize(found_tg, session_id=session_id)) resp.headers['Content-Type'] = "application/json" return resp def put(self, name): session_id = get_session_id(session, request) dao = TemplateGoalDAO(session_id) upd_tg = dao.from_json(request) dao.update_template_goal(upd_tg, name) dao.close() resp_dict = {'message': 'Template Goal successfully updated'} resp = make_response(json_serialize(resp_dict), OK) resp.contenttype = 'application/json' return resp def delete(self, name): session_id = get_session_id(session, request) dao = TemplateGoalDAO(session_id) dao.delete_template_goal(name) dao.close() resp_dict = {'message': 'Template Goal successfully deleted'} resp = make_response(json_serialize(resp_dict), OK) resp.contenttype = 'application/json' return resp
apache-2.0
2,627,098,913,465,853,000
32.525253
78
0.726424
false
3.631291
false
false
false
andyr0id/PyGFNN
examples/gfnn/example1F.py
1
1657
#!/usr/bin/env python __author__ = 'Andrew J. Lambert, [email protected]' """ example1P A one layer network with fixed internal connections """ from pygfnn.tools.plotting.gfnn import * import pygfnn.tools.shortcuts as gfnn import numpy as np import timeit import matplotlib.pyplot as plt import scipy.io as sio if __name__ == '__main__': # Network parameters oscParams = { 'a': 1, 'b1': -1, 'b2': -1000, 'd1': 0, 'd2': 0, 'e': 1 } # Limit cycle learnParams = gfnn.NOLEARN_ALLFREQ freqDist = { 'fspac': 'log', 'min': 0.5, 'max': 8 } # Make network n = gfnn.buildGFNN(196, oscParams = oscParams, freqDist = freqDist, learnParams = learnParams) n.recurrentConns[0].c0[:] = gfnn.getInitC(n, n, [(1,1), (1,2), (1,3), (1,4), (1,6), (1,8), (2,3), (3,4), (3,8)], thresh=0.01) n.reset() # First plots, showing initial connection state ampFig1, phaseFig1 = plotConns(n.recurrentConns[0].c, freqDist['min'], freqDist['max']) # Stimulus - 50 seconds of 1Hz sin t = np.arange(0, 50, n['h'].dt) x = np.sin(2 * np.pi * 1 * t) * 0.1 # Run the network timer = timeit.default_timer start = timer() for i in range(len(t)): out = n.activate(x[i]) end = timer() print('Elapsed time is %f seconds' % (end - start)) if learnParams is not None: # Second plots, showing final connection state ampFig2, phaseFig2 = plotConns(n.recurrentConns[0].c, freqDist['min'], freqDist['max']) Z = n['h'].outputbuffer[:n.offset] fig1 = ampx(Z, n.dt, freqDist['min'], freqDist['max']) fig2 = phasex(Z, n.dt, freqDist['min'], freqDist['max']) plt.show()
gpl-2.0
3,656,434,342,488,919,600
29.685185
129
0.608328
false
2.780201
false
false
false
liberiun/cynin-intranet
src/ubify.viewlets/ubify/viewlets/browser/typetitle.py
1
3657
############################################################################### #cyn.in is an open source Collaborative Knowledge Management Appliance that #enables teams to seamlessly work together on files, documents and content in #a secure central environment. # #cyn.in v2 an open source appliance is distributed under the GPL v3 license #along with commercial support options. # #cyn.in is a Cynapse Invention. # #Copyright (C) 2008 Cynapse India Pvt. Ltd. # #This program is free software: you can redistribute it and/or modify it under #the terms of the GNU General Public License as published by the Free Software #Foundation, either version 3 of the License, or any later version and observe #the Additional Terms applicable to this program and must display appropriate #legal notices. In accordance with Section 7(b) of the GNU General Public #License version 3, these Appropriate Legal Notices must retain the display of #the "Powered by cyn.in" AND "A Cynapse Invention" logos. You should have #received a copy of the detailed Additional Terms License with this program. # #This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General #Public License for more details. # #You should have received a copy of the GNU General Public License along with #this program. If not, see <http://www.gnu.org/licenses/>. # #You can contact Cynapse at [email protected] with any problems with cyn.in. #For any queries regarding the licensing, please send your mails to # [email protected] # #You can also contact Cynapse at: #802, Building No. 1, #Dheeraj Sagar, Malad(W) #Mumbai-400064, India ############################################################################### from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile from plone.app.layout.viewlets.common import ViewletBase from zope.component import getMultiAdapter from Products.CMFCore.utils import getToolByName from ubify.viewlets.config import plone_site_type_title from ubify.policy import CyninMessageFactory as _ class TypetitleViewlet(ViewletBase): render = ViewPageTemplateFile('typetitle.pt') def update(self): portal_state = getMultiAdapter((self.context, self.request),name=u'plone_portal_state') context_state = getMultiAdapter((self.context, self.request),name=u'plone_context_state') tools = getMultiAdapter((self.context, self.request), name=u'plone_tools') typetool= getToolByName(self.context, 'portal_types') portal_title = portal_state.portal_title() object_title = context_state.object_title() self.object_icon = self.context.icon object_typename = self.context.portal_type object_typeobj = typetool[object_typename] self.typeiconname = object_typeobj.icon_expr if object_typeobj.title == '' and self.context.portal_type.lower() == 'plone site': self.typetitle = plone_site_type_title else: self.typetitle = _(object_typeobj.title,object_typeobj.title) self.app_name = object_title if self.context.portal_type.lower() == 'plone site': self.tdescription = 'cyn.in site|A cyn.in site allows instant collaboration among peers and provides a central social computer and network.' else: self.tdescription = self.typetitle + '|' + object_typeobj.description self.isaddscreen = False if hasattr(context_state.parent(),'portal_type') and context_state.parent().portal_type == 'TempFolder': self.isaddscreen = True
gpl-3.0
1,373,551,312,006,338,000
50.507042
152
0.710965
false
3.857595
false
false
false
rosenbrockc/fortpy
fortpy/stats/bp.py
1
5243
"""Methods for testing a code library against Fortran best practices to help uncover subtle bugs that took a while for us to track down. See especially http://www.cs.rpi.edu/~szymansk/OOF90/bugs.html""" def _exec_check_pointers(executable): """Checks the specified executable for the pointer condition that not all members of the derived type have had their values set. Returns (list of offending members, parameter name). """ oparams = [] pmembers = {} xassigns = map(lambda x: x.lower().strip(), executable.external_assignments()) def add_offense(pname, member): """Adds the specified member as an offender under the specified parameter.""" if pname not in oparams: oparams.append(pname) if pname not in pmembers: pmembers[pname] = [member] else: pmembers[pname].append(member) def check_buried(executable, pname, member): """Checks whether the member has its value changed by one of the dependency subroutines in the executable. """ for d in executable.dependencies: if pname in d.argnames: pindex = d.argnames.index(pname) dtarget = d.target if dtarget is not None: mparam = dtarget.ordered_parameters[pindex] for pname, param in executable.parameters.items(): if param.direction == "(out)" and param.is_custom: utype = param.customtype if utype is None: continue for mname, member in utype.members.items(): key = "{}%{}".format(pname, mname).lower().strip() if key not in xassigns: #We also need to check the dependency calls to other, buried subroutines. compname = "{}%{}".format(pname, mname).lower() if executable.changed(compname) is None: add_offense(pname, member) return (oparams, pmembers) def _type_check_pointers(utype): """Checks the user-derived type for non-nullified pointer array declarations in its base definition. Returns (list of offending members). """ result = [] for mname, member in utype.members.items(): if ("pointer" in member.modifiers and member.D > 0 and (member.default is None or "null" not in member.default)): result.append(member) return result def check_pointers(parser, codedir=None, mfilter=None, recursive=False): """Checks the modules in the specified code parser to see if they have common, but subtle, pointer bugs in: 1. subroutines with a parameter of intent(out) and user-derived type must* set *all* members of that parameter or they will have an *undefined* status. 2. pointer-type arrays that are not nullified are set to a valid target will return 'T' when passed to `associated`. Best practice is to nullify pointer arrays in user-derived types as the default value on those types. :arg parser: [fortpy.code.CodeParser] with the modules to search *already loaded*. :arg codedir: specify the full path to the library whose modules should be searched, just another way to filter which modules are generating the warnings. :arg mfilter: filter to apply to module names; can use the wildcard standard from bash. """ from fnmatch import fnmatch from fortpy.msg import std, set_verbosity, info set_verbosity(0) W1 = " {} '{}' does not set the value of members '{}' in parameter '{}'." W2 = " Type '{}' does not nullify members '{}' on creation." offenders = {} for (modname, module) in parser.modules.items(): if not recursive and codedir is not None and not codedir.lower() in module.filepath.lower(): continue if mfilter is not None and not fnmatch(module.name.lower(), mfilter.lower()): continue #Test the first condition above for all subroutines in the module; also handle #the recursively defined subroutines. 
hprinted = False
        for xname, xvalue in module.executables.items():
            oparams, pmembers = _exec_check_pointers(xvalue)
            if len(oparams) > 0:
                if not hprinted:
                    info("Best practice suggestions: {}".format(module.filepath))
                    hprinted = True

                for oparam in oparams:
                    plist = ', '.join([p.name for p in pmembers[oparam]])
                    std(W1.format(type(xvalue).__name__, xname, plist, oparam), 0)

                offenders[xvalue.full_name] = (oparams, pmembers)

        for tname, tvalue in module.types.items():
            result = _type_check_pointers(tvalue)
            if len(result) > 0:
                if not hprinted:
                    info("Best practice suggestions: {}".format(module.filepath))
                    hprinted = True

                plist = ', '.join([p.name for p in result])
                std(W2.format(tname, plist), 0)
                #Key the offending members by the type that owns them.
                offenders[tvalue.full_name] = result

    return offenders
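A hedged usage sketch: per the docstring above, `parser` must be a fortpy.code.CodeParser with the relevant modules already loaded; how the modules get parsed is elided here, the no-argument constructor is an assumption, and the module filter is a hypothetical value.

from fortpy.code import CodeParser

parser = CodeParser()
# ... parse/load the Fortran modules of interest into `parser` here ...
offenders = check_pointers(parser, mfilter="crystal_*")
for full_name in sorted(offenders):
    print(full_name)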
mit
-214,200,888,926,042,880
42.330579
100
0.60576
false
4.262602
false
false
false
mitoNGS/MToolBox
aux/filter_HF.py
1
2956
#!/usr/bin/env python

import fileinput
import sys, os

def usage():
    print '''
This script is compatible with MToolBox versions < 1.2 only
This script filters the MToolBox vcf file based on Heteroplasmy threshold

Usage:
filter_HF.py <sample_name> <vcf_file> <HF_threshold[float]> <DP_threshold[float]> <out_type[vcf|txt]> <outfilename> <convert_to_homoplasmy[Yes|No]> \n<vcf_file> can also be .gz file\n\n<convert_to_homoplasmy> is boolean and takes Yes or No values and converts HF >= 0.9 to GT=1/1. Useful for haplogroup prediction with other methods (e.g. haplogrep)\n\n'''

if __name__ == "__main__":
    if len(sys.argv[1:]) < 7:
        sys.stderr.write('ERROR: argument missing\n')
        usage()
        sys.exit(1)
    samplename,vcf,HFt,DPt,out_type,outfile,homo_convert= sys.argv[1:]
    HFt = float(HFt)
    DPt = float(DPt)
    out = open(outfile,'w')
    homo_convert = str(homo_convert)
    if homo_convert not in ['Yes','No']:
        sys.stderr.write('Values accepted for <convert_to_homoplasmy> are [Yes|No].\nExit!\n')
        sys.exit(1)
    if 'gz' in vcf or 'gzip' in vcf or 'bz2' in vcf:
        ifile = fileinput.input(vcf,openhook=fileinput.hook_compressed)
    else:
        ifile = fileinput.input(vcf)
    for line in ifile:
        if line.startswith('##'):
            if out_type == 'vcf':
                command_string = "##contig=<ID=chrMT,length=16569>\n##filter_VCF_command=filter_vcf.py {0} {1} {2} {3} {4} {5}\n".format(vcf,HFt,DPt,out_type,outfile,homo_convert)
                out.write(line)
            else:
                pass
        else:
            if line.startswith('#CHROM') and out_type == 'vcf':
                out.write(command_string)
                line = line.split('\t')
                line[-1] = samplename+'\n'
                line = '\t'.join(line)
                out.write(line)
            elif line.startswith('#CHROM') and out_type == 'txt':
                header='CHROM\tPOS\tID\tREF\tALT\tDP\tHF\tCIL\tCIU\t'+samplename
                out.write(header+'\n')
            else:
                line = line.split('\t')
                geno,DPv,HFv_l,CIL,CIU = line[-1].split(':')
                geno = geno.split('/')
                if '0' in geno:
                    geno.remove('0')
                HFv_l = HFv_l.split(',')
                CIL = CIL.split(',')
                CIU = CIU.split(',')
                ALT = line[4].split(',')
                c = 0
                while c < (len(geno)):
                    HFv = float(HFv_l[c])
                    CILv = float(CIL[c])
                    CIUv = float(CIU[c])
                    DPv = float(DPv)
                    ALTv = str(ALT[c])
                    if DPv >= float(DPt) and HFv >= float(HFt):
                        if out_type == 'txt':
                            res='\t'.join(map(lambda x:str(x),[line[0],line[1],line[2],line[3],ALTv,DPv,HFv,CILv,CIUv,samplename]))
                            out.write(res+'\n')
                        else:
                            if HFv == 1:
                                res='\t'.join(map(lambda x:str(x),[line[0],line[1],line[2],line[3],ALTv,'.','PASS','AC=2,AN=2','GT','1/1']))
                            elif HFv >= 0.9 and homo_convert == 'Yes':
                                res='\t'.join(map(lambda x:str(x),[line[0],line[1],line[2],line[3],ALTv,'.','PASS','AC=2,AN=2','GT','1/1']))
                            else:
                                res='\t'.join(map(lambda x:str(x),[line[0],line[1],line[2],line[3],ALTv,'.','PASS','AC=1,AN=2','GT','0/1']))
                            out.write(res+'\n')
                    else:
                        pass
                    c += 1
    out.close()
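An example invocation with hypothetical file names, keeping variants with HF >= 0.2 and DP >= 10, writing a VCF, and collapsing HF >= 0.9 calls to homoplasmy:

python filter_HF.py sample1 sample1.vcf.gz 0.2 10 vcf sample1_HF0.2_DP10.vcf Yes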
gpl-3.0
-3,761,606,398,086,951,400
33.776471
356
0.60115
false
2.436933
false
false
false
fogleman/DCPU-16
app/assembler.py
1
16148
import ply.lex as lex import ply.yacc as yacc # Constants SIZE = 0x10000 # Lookups BASIC_OPCODES = { 'SET': 0x01, 'ADD': 0x02, 'SUB': 0x03, 'MUL': 0x04, 'MLI': 0x05, 'DIV': 0x06, 'DVI': 0x07, 'MOD': 0x08, 'MDI': 0x09, 'AND': 0x0a, 'BOR': 0x0b, 'XOR': 0x0c, 'SHR': 0x0d, 'ASR': 0x0e, 'SHL': 0x0f, 'IFB': 0x10, 'IFC': 0x11, 'IFE': 0x12, 'IFN': 0x13, 'IFG': 0x14, 'IFA': 0x15, 'IFL': 0x16, 'IFU': 0x17, 'ADX': 0x1a, 'SUX': 0x1b, 'STI': 0x1e, 'STD': 0x1f, } SPECIAL_OPCODES = { 'JSR': 0x01, 'INT': 0x08, 'IAG': 0x09, 'IAS': 0x0a, 'RFI': 0x0b, 'IAQ': 0x0c, 'HWN': 0x10, 'HWQ': 0x11, 'HWI': 0x12, } COMMAND_OPCODES = { 'NOP': 0x0000, 'BRK': 0x0040, 'RFI': 0x0160, } REGISTERS = { 'A': 0x0, 'B': 0x1, 'C': 0x2, 'X': 0x3, 'Y': 0x4, 'Z': 0x5, 'I': 0x6, 'J': 0x7, } DST_CODES = { 'PUSH': 0x18, 'PEEK': 0x19, 'SP': 0x1b, 'PC': 0x1c, 'EX': 0x1d, } SRC_CODES = { 'POP': 0x18, 'PEEK': 0x19, 'SP': 0x1b, 'PC': 0x1c, 'EX': 0x1d, } # Reverse Lookups REV_BASIC_OPCODES = dict((v, k) for k, v in BASIC_OPCODES.items()) REV_SPECIAL_OPCODES = dict((v, k) for k, v in SPECIAL_OPCODES.items()) REV_COMMAND_OPCODES = dict((v, k) for k, v in COMMAND_OPCODES.items()) REV_REGISTERS = dict((v, k) for k, v in REGISTERS.items()) REV_DST_CODES = dict((v, k) for k, v in DST_CODES.items()) REV_SRC_CODES = dict((v, k) for k, v in SRC_CODES.items()) # Helper Functions def pretty_value(x): return '%d' % x if x <= 0xff else '0x%04x' % x def do_lookup(lookup, word): if isinstance(word, basestring): try: word = lookup[word] except KeyError: raise Exception('Undefined symbol: "%s"' % word) return word # Classes class Program(object): def __init__(self, instructions): self.instructions = instructions self.text = None self.lookup = {} self.size = 0 for instruction in instructions: if instruction.offset is None: instruction.offset = self.size self.size += instruction.size if isinstance(instruction, Label): self.lookup[instruction.name] = instruction.offset def assemble(self): result = [] for instruction in self.instructions: result.extend(instruction.assemble(self.lookup)) return result def pretty(self): lines = [] skip = False for instruction in self.instructions: line = instruction.pretty().strip() if isinstance(instruction, Label): pad = 0 else: pad = 4 if skip else 2 line = '%s%s' % (' ' * pad, line) data = instruction.assemble(self.lookup) if data and not isinstance(instruction, Data): pad = ' ' * (32 - len(line)) data = ' '.join('%04x' % x for x in data) line = '%s%s; %s' % (line, pad, data) lines.append(line) skip = instruction.conditional return '\n'.join(lines) class Data(object): def __init__(self, data): self.data = data self.size = len(data) self.offset = None self.conditional = False def assemble(self, lookup): return [do_lookup(lookup, word) for word in self.data] def pretty(self): data = ', '.join('"%s"' % x if isinstance(x, str) else pretty_value(x) for x in self.data) return 'DAT %s' % data class Reserve(object): def __init__(self, size): self.size = size self.offset = None self.conditional = False def assemble(self, lookup): return [0] * self.size def pretty(self): return 'RESERVE %s' % pretty_value(self.size) class Label(object): def __init__(self, name, offset=None): self.name = name self.size = 0 self.offset = offset self.conditional = False def assemble(self, lookup): return [] def pretty(self): return ':%s' % self.name class BasicInstruction(object): def __init__(self, op, dst, src): self.op = op self.dst = dst self.src = src value = self.op value |= (self.dst.value & 0x1f) << 5 value |= (self.src.value & 0x3f) << 10 
self.value = value self.size = 1 + dst.size + src.size self.offset = None self.conditional = 0x10 <= self.op <= 0x17 def assemble(self, lookup): result = [self.value] result.extend(self.src.assemble(lookup)) result.extend(self.dst.assemble(lookup)) return result def pretty(self): op = REV_BASIC_OPCODES[self.op] dst = self.dst.pretty() src = self.src.pretty() return '%s %s, %s' % (op, dst, src) class SpecialInstruction(object): def __init__(self, op, src): self.op = op self.src = src value = 0 value |= (self.op & 0x1f) << 5 value |= (self.src.value & 0x3f) << 10 self.value = value self.size = 1 + src.size self.offset = None self.conditional = False def assemble(self, lookup): result = [self.value] result.extend(self.src.assemble(lookup)) return result def pretty(self): op = REV_SPECIAL_OPCODES[self.op] src = self.src.pretty() return '%s %s' % (op, src) class CommandInstruction(object): def __init__(self, value): self.value = value self.size = 1 self.offset = None self.conditional = False def assemble(self, lookup): result = [self.value] return result def pretty(self): return REV_COMMAND_OPCODES[self.value] class Operand(object): def __init__(self, codes, value, word=None): self.codes = codes self.value = value self.word = word self.size = int(word is not None) def assemble(self, lookup): return [] if self.word is None else [do_lookup(lookup, self.word)] def pretty(self): x = self.value word = self.word if isinstance(word, int): word = pretty_value(word) if x in REV_REGISTERS: return REV_REGISTERS[x] elif x - 0x08 in REV_REGISTERS: return '[%s]' % REV_REGISTERS[x - 0x08] elif x - 0x10 in REV_REGISTERS: return '[%s + %s]' % (REV_REGISTERS[x - 0x10], word) elif x in self.codes: return self.codes[x] elif x == 0x1a: return 'PICK %s' % word elif x == 0x1e: return '[%s]' % word elif x == 0x1f: return '%s' % word elif x == 0x20: return pretty_value(0xffff) elif x >= 0x21: return pretty_value(x - 0x21) class DstOperand(Operand): def __init__(self, *args): super(DstOperand, self).__init__(REV_DST_CODES, *args) class SrcOperand(Operand): def __init__(self, *args): super(SrcOperand, self).__init__(REV_SRC_CODES, *args) # Lexer Rules reserved = set( BASIC_OPCODES.keys() + SPECIAL_OPCODES.keys() + COMMAND_OPCODES.keys() + REGISTERS.keys() + DST_CODES.keys() + SRC_CODES.keys() + ['PICK', 'DAT', 'RESERVE'] ) tokens = [ 'LBRACK', 'RBRACK', 'PLUS', 'LABEL', 'ID', 'DECIMAL', 'HEX', 'OCT', 'STRING', 'CHAR', 'INC', 'DEC', 'AT' ] + list(reserved) t_ignore = ' \t\r,' t_ignore_COMMENT = r';.*' t_INC = r'\+\+' t_DEC = r'\-\-' t_LBRACK = r'\[' t_RBRACK = r'\]' t_PLUS = r'\+' t_AT = r'\@' def t_newline(t): r'\n+' t.lexer.lineno += len(t.value) def t_STRING(t): r'"[^"]*"' t.value = tuple(ord(x) for x in t.value[1:-1]) return t def t_CHAR(t): r"'[^']'" t.value = ord(t.value[1]) return t def t_HEX(t): r'\-?0x[a-fA-F0-9]+' t.value = int(t.value, 16) % SIZE return t def t_OCT(t): r'\-?0\d+' t.value = int(t.value, 8) % SIZE return t def t_DECIMAL(t): r'\-?\d+' t.value = int(t.value) % SIZE return t def t_LABEL(t): r':\.?[a-zA-Z_][a-zA-Z_0-9]*' t.value = t.value[1:] if t.value[0] == '.': t.value = '%s%s' % (t.lexer.label_prefix, t.value) else: t.lexer.label_prefix = t.value return t def t_ID(t): r'\.?[a-zA-Z_][a-zA-Z_0-9]*' upper = t.value.upper() if upper in reserved: t.type = upper t.value = upper else: t.type = 'ID' if t.value[0] == '.': t.value = '%s%s' % (t.lexer.label_prefix, t.value) return t def t_error(t): raise Exception('Unrecognized token on line %d: %s' % (t.lineno, t.value)) # Parser Rules def p_program(t): 
'program : instructions' t[0] = Program(t[1]) def p_instructions1(t): 'instructions : instruction instructions' t[0] = (t[1],) + t[2] def p_instructions2(t): 'instructions : instruction' t[0] = (t[1],) def p_data1(t): 'data : literal data' arg = t[1] if isinstance(t[1], tuple) else (t[1],) t[0] = arg + t[2] def p_data2(t): 'data : literal' arg = t[1] if isinstance(t[1], tuple) else (t[1],) t[0] = arg def p_instruction_data(t): 'instruction : DAT data' t[0] = Data(t[2]) def p_instruction_reserve(t): 'instruction : RESERVE literal' t[0] = Reserve(t[2]) def p_instruction_label1(t): 'instruction : LABEL' t[0] = Label(t[1]) def p_instruction_label2(t): 'instruction : LABEL AT literal' t[0] = Label(t[1], t[3]) def p_instruction_basic(t): 'instruction : basic_opcode dst_operand src_operand' t[0] = BasicInstruction(t[1], t[2], t[3]) def p_instruction_special(t): 'instruction : special_opcode src_operand' t[0] = SpecialInstruction(t[1], t[2]) def p_instruction_command(t): 'instruction : command_opcode' t[0] = CommandInstruction(t[1]) def p_dst_operand_register(t): 'dst_operand : register' t[0] = DstOperand(REGISTERS[t[1]]) def p_dst_operand_register_dereference(t): 'dst_operand : LBRACK register RBRACK' t[0] = DstOperand(REGISTERS[t[2]] + 0x08) def p_dst_operand_register_literal_dereference1(t): 'dst_operand : LBRACK register PLUS literal RBRACK' t[0] = DstOperand(REGISTERS[t[2]] + 0x10, t[4]) def p_dst_operand_register_literal_dereference2(t): 'dst_operand : LBRACK literal PLUS register RBRACK' t[0] = DstOperand(REGISTERS[t[4]] + 0x10, t[2]) def p_dst_operand_pick1(t): 'dst_operand : LBRACK SP PLUS literal RBRACK' t[0] = DstOperand(0x1a, t[4]) def p_dst_operand_pick2(t): 'dst_operand : LBRACK literal PLUS SP RBRACK' t[0] = DstOperand(0x1a, t[2]) def p_dst_operand_pick3(t): 'dst_operand : PICK literal' t[0] = DstOperand(0x1a, t[2]) def p_dst_operand_code(t): 'dst_operand : dst_code' t[0] = DstOperand(DST_CODES[t[1]]) def p_dst_operand_push(t): 'dst_operand : LBRACK DEC SP RBRACK' t[0] = DstOperand(0x18) def p_dst_operand_peek(t): 'dst_operand : LBRACK SP RBRACK' t[0] = DstOperand(0x19) def p_dst_operand_literal_dereference(t): 'dst_operand : LBRACK literal RBRACK' t[0] = DstOperand(0x1e, t[2]) def p_dst_operand_literal(t): 'dst_operand : literal' t[0] = DstOperand(0x1f, t[1]) def p_src_operand_register(t): 'src_operand : register' t[0] = SrcOperand(REGISTERS[t[1]]) def p_src_operand_register_dereference(t): 'src_operand : LBRACK register RBRACK' t[0] = SrcOperand(REGISTERS[t[2]] + 0x08) def p_src_operand_register_literal_dereference1(t): 'src_operand : LBRACK register PLUS literal RBRACK' t[0] = SrcOperand(REGISTERS[t[2]] + 0x10, t[4]) def p_src_operand_register_literal_dereference2(t): 'src_operand : LBRACK literal PLUS register RBRACK' t[0] = SrcOperand(REGISTERS[t[4]] + 0x10, t[2]) def p_src_operand_pick1(t): 'src_operand : LBRACK SP PLUS literal RBRACK' t[0] = SrcOperand(0x1a, t[4]) def p_src_operand_pick2(t): 'src_operand : LBRACK literal PLUS SP RBRACK' t[0] = SrcOperand(0x1a, t[2]) def p_src_operand_pick3(t): 'src_operand : PICK literal' t[0] = SrcOperand(0x1a, t[2]) def p_src_operand_code(t): 'src_operand : src_code' t[0] = SrcOperand(SRC_CODES[t[1]]) def p_src_operand_pop(t): 'src_operand : LBRACK SP INC RBRACK' t[0] = SrcOperand(0x18) def p_src_operand_peek(t): 'src_operand : LBRACK SP RBRACK' t[0] = SrcOperand(0x19) def p_src_operand_literal_dereference(t): 'src_operand : LBRACK literal RBRACK' t[0] = SrcOperand(0x1e, t[2]) def p_src_operand_literal(t): 'src_operand : literal' if t[1] == 
0xffff: t[0] = SrcOperand(0x20) elif t[1] <= 0x1e: t[0] = SrcOperand(0x21 + t[1]) else: t[0] = SrcOperand(0x1f, t[1]) def p_literal(t): '''literal : DECIMAL | HEX | OCT | ID | STRING | CHAR''' t[0] = t[1] def p_basic_opcode(t): t[0] = BASIC_OPCODES[t[1]] p_basic_opcode.__doc__ = ('basic_opcode : %s' % '\n | '.join(sorted(BASIC_OPCODES))) def p_special_opcode(t): t[0] = SPECIAL_OPCODES[t[1]] p_special_opcode.__doc__ = ('special_opcode : %s' % '\n | '.join(sorted(SPECIAL_OPCODES))) def p_command_opcode(t): t[0] = COMMAND_OPCODES[t[1]] p_command_opcode.__doc__ = ('command_opcode : %s' % '\n | '.join(sorted(COMMAND_OPCODES))) def p_register(t): t[0] = t[1] p_register.__doc__ = ('register : %s' % '\n | '.join(sorted(REGISTERS))) def p_dst_code(t): t[0] = t[1] p_dst_code.__doc__ = ('dst_code : %s' % '\n | '.join(sorted(DST_CODES))) def p_src_code(t): t[0] = t[1] p_src_code.__doc__ = ('src_code : %s' % '\n | '.join(sorted(SRC_CODES))) def p_error(t): raise Exception('Invalid token on line %d: %s' % (t.lineno, t.value)) # Assembler Functions def create_lexer(): lexer = lex.lex() lexer.label_prefix = None return lexer def create_parser(): parser = yacc.yacc(debug=False, write_tables=False) return parser LEXER = create_lexer() PARSER = create_parser() def parse(text): LEXER.lineno = 1 program = PARSER.parse(text, lexer=LEXER) program.text = text return program def parse_file(path): with open(path) as fp: text = fp.read() return parse(text) def assemble(text): program = parse(text) return program.assemble() def assemble_file(path): with open(path) as fp: text = fp.read() return assemble(text) def pretty(text): program = parse(text) return program.pretty() def pretty_file(path): with open(path) as fp: text = fp.read() return pretty(text) # Disassembler Functions def disassemble(words): def next_word(): return words.pop() if words else 0 instructions = [] use_next_word = set(range(0x10, 0x18) + [0x1a, 0x1e, 0x1f]) words = list(reversed(words)) while words: word = next_word() op = word & 0x1f dst = (word >> 5) & 0x1f src = (word >> 10) & 0x3f if op != 0 and op in REV_BASIC_OPCODES: dst = DstOperand(dst, next_word() if dst in use_next_word else None) src = SrcOperand(src, next_word() if src in use_next_word else None) instruction = BasicInstruction(op, dst, src) instructions.append(instruction) elif op == 0 and dst in REV_SPECIAL_OPCODES: src = SrcOperand(src, next_word() if src in use_next_word else None) instruction = SpecialInstruction(dst, src) instructions.append(instruction) else: instruction = Data([word]) instructions.append(instruction) program = Program(instructions) program.text = program.pretty() return program def disassemble_file(path): with open(path, 'rb') as fp: data = fp.read() words = [(ord(a) << 8) | ord(b) for a, b in zip(data[::2], data[1::2])] return disassemble(words)
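A small round-trip sketch using the module's own entry points; the three-instruction program is an arbitrary example.

source = '''
:start  SET A, 0x30
        SET [0x1000], A
        JSR start
'''

words = assemble(source)            # encoded 16-bit words
print(pretty(source))               # annotated listing with the encoded words
print(disassemble(words).text)      # decoded back into assembly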
mit
4,979,945,226,758,335,000
24.631746
78
0.560503
false
2.906407
false
false
false
bnkr/selenit
selenibench/scripts.py
1
3871
from __future__ import print_function

import sys, argparse, selenium, contextlib, os, json, traceback

from datetime import datetime as DateTime
from datetime import timedelta as TimeDelta

from selenium.webdriver import Remote as WebDriverRemote
from selenium.webdriver.support.ui import WebDriverWait

class SelenibenchCli(object):
    """Downloads timings from the web performance api."""

    def __init__(self, argv):
        self.argv = argv

    def run(self):
        parser = self.get_parser()
        settings = self.get_settings(parser)

        if settings.log_json:
            io = open(settings.log_json, 'w')
        else:
            io = None

        runs = 0
        contiguous_failures = 0
        while runs < settings.number:
            runs += 1

            remote = WebDriverRemote(command_executor=settings.webdriver,
                                     desired_capabilities=settings.capabilities)

            with contextlib.closing(remote) as driver:
                try:
                    driver.get(settings.url[0])
                    self.find_load_times(driver, io)
                    contiguous_failures = 0
                except:
                    if contiguous_failures > 3:
                        print("Failure getting load times. Giving up.")
                        raise

                    contiguous_failures += 1
                    runs -= 1
                    print("Failure getting load times. Will try again.")
                    traceback.print_exc()

        return 0

    def find_load_times(self, driver, log):
        def is_loaded(driver):
            return driver.execute_script("return (document.readyState == 'complete')")

        WebDriverWait(driver, 15).until(is_loaded)

        timings = driver.execute_script("return window.performance.timing")

        times = {}
        for key, value in timings.iteritems():
            if not isinstance(value, int):
                continue

            if value in (True, False):
                continue

            value = str(value)
            unixey = int(value[0:10])
            if value[10:]:
                ms = int(value[10:])
            else:
                ms = 0

            converted = DateTime.fromtimestamp(unixey)
            converted += TimeDelta(milliseconds=ms)

            times[key] = converted

        # This kind of thing really needs unit tests.  The thing takes so long
        # to run it's just going to break horribly.
        if log:
            serialisable = dict(
                (key, value.isoformat())
                for key, value in times.iteritems())
            log.write(json.dumps(serialisable))
            log.write("\n")

        print(times)

    def get_parser(self):
        parser = argparse.ArgumentParser()
        parser.add_argument("url", nargs="+")
        parser.add_argument("-w", "--webdriver", required=True,
                            help="Location to hub or webdriver.")
        parser.add_argument("-c", "--capabilities", action="append", default=[],
                            help="Add a capability.")
        parser.add_argument("-n", "--number", type=int, default=1,
                            help="How many requests to run.")
        parser.add_argument("-j", "--log-json", default=None,
                            help="Log json per-line for each hit.")
        return parser

    def get_settings(self, parser):
        settings = parser.parse_args(self.argv[1:])

        capabilities = {'browserName': "firefox"}
        for capability in settings.capabilities:
            name, value = capability.split("=")
            capabilities[name.strip()] = value.strip()
        settings.capabilities = capabilities

        return settings

def selenibench_main():
    """Command-line entry point."""
    cli = SelenibenchCli(sys.argv)
    sys.exit(cli.run())
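An example run, assuming the package installs the entry point as a `selenibench` console script; the hub URL and target page are placeholders.

selenibench -w http://localhost:4444/wd/hub -c browserName=firefox -n 5 -j timings.jsonl http://example.com/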
mit
-2,705,399,600,824,886,300
32.08547
86
0.547145
false
4.720732
false
false
false
tobykurien/MakerDroid
assetsrc/public.mp3/fabmetheus_utilities/vector3index.py
1
13371
""" Vector3 is a three dimensional vector class. Below are examples of Vector3 use. >>> from vector3 import Vector3 >>> origin = Vector3() >>> origin 0.0, 0.0, 0.0 >>> pythagoras = Vector3( 3, 4, 0 ) >>> pythagoras 3.0, 4.0, 0.0 >>> pythagoras.magnitude() 5.0 >>> pythagoras.magnitudeSquared() 25 >>> triplePythagoras = pythagoras * 3.0 >>> triplePythagoras 9.0, 12.0, 0.0 >>> plane = pythagoras.dropAxis( 2 ) >>> plane (3+4j) """ from __future__ import absolute_import try: import psyco psyco.full() except: pass #Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module. import __init__ import math import operator __author__ = "Enrique Perez ([email protected])" __credits__ = 'Nophead <http://forums.reprap.org/profile.php?12,28>\nArt of Illusion <http://www.artofillusion.org/>' __date__ = "$Date: 2008/21/04 $" __license__ = "GPL 3.0" class Vector3Index: "A three dimensional vector index class." __slots__ = [ 'index', 'x', 'y', 'z' ] def __init__( self, index, x = 0.0, y = 0.0, z = 0.0 ): self.index = index self.x = x self.y = y self.z = z def __abs__( self ): "Get the magnitude of the Vector3." return math.sqrt( self.x * self.x + self.y * self.y + self.z * self.z ) magnitude = __abs__ def __add__( self, other ): "Get the sum of this Vector3 and other one." return Vector3( self.x + other.x, self.y + other.y, self.z + other.z ) def __copy__( self ): "Get the copy of this Vector3." return Vector3( self.x, self.y, self.z ) __pos__ = __copy__ copy = __copy__ def __div__( self, other ): "Get a new Vector3 by dividing each component of this one." return Vector3( self.x / other, self.y / other, self.z / other ) def __eq__( self, other ): "Determine whether this vector is identical to other one." if other == None: return False return self.x == other.x and self.y == other.y and self.z == other.z def __floordiv__( self, other ): "Get a new Vector3 by floor dividing each component of this one." return Vector3( self.x // other, self.y // other, self.z // other ) def __hash__( self ): "Determine whether this vector is identical to other one." return self.__repr__().__hash__() def __iadd__( self, other ): "Add other Vector3 to this one." self.x += other.x self.y += other.y self.z += other.z return self def __idiv__( self, other ): "Divide each component of this Vector3." self.x /= other self.y /= other self.z /= other return self def __ifloordiv__( self, other ): "Floor divide each component of this Vector3." self.x //= other self.y //= other self.z //= other return self def __imul__( self, other ): "Multiply each component of this Vector3." self.x *= other self.y *= other self.z *= other return self def __isub__( self, other ): "Subtract other Vector3 from this one." self.x -= other.x self.y -= other.y self.z -= other.z return self def __itruediv__( self, other ): "True divide each component of this Vector3." self.x = operator.truediv( self.x, other ) self.y = operator.truediv( self.y, other ) self.z = operator.truediv( self.z, other ) return self def __mul__( self, other ): "Get a new Vector3 by multiplying each component of this one." return Vector3( self.x * other, self.y * other, self.z * other ) def __ne__( self, other ): "Determine whether this vector is not identical to other one." 
return not self.__eq__( other ) def __neg__( self ): return Vector3( - self.x, - self.y, - self.z ) def __nonzero__( self ): return self.x != 0 or self.y != 0 or self.z != 0 def __repr__( self ): "Get the string representation of this Vector3." return '%s, %s, %s, %s' % ( self.index, self.x, self.y, self.z ) def __rdiv__( self, other ): "Get a new Vector3 by dividing each component of this one." return Vector3( other / self.x, other / self.y, other / self.z ) def __rfloordiv__( self, other ): "Get a new Vector3 by floor dividing each component of this one." return Vector3( other // self.x, other // self.y, other // self.z ) def __rmul__( self, other ): "Get a new Vector3 by multiplying each component of this one." return Vector3( self.x * other, self.y * other, self.z * other ) def __rtruediv__( self, other ): "Get a new Vector3 by true dividing each component of this one." return Vector3( operator.truediv( other , self.x ), operator.truediv( other, self.y ), operator.truediv( other, self.z ) ) def __sub__( self, other ): "Get the difference between the Vector3 and other one." return Vector3( self.x - other.x, self.y - other.y, self.z - other.z ) def __truediv__( self, other ): "Get a new Vector3 by true dividing each component of this one." return Vector3( operator.truediv( self.x, other ), operator.truediv( self.y, other ), operator.truediv( self.z, other ) ) def cross( self, other ): "Calculate the cross product of this vector with other one." return Vector3( self.y * other.z - self.z * other.y, - self.x * other.z + self.z * other.x, self.x * other.y - self.y * other.x ) def distance( self, other ): "Get the Euclidean distance between this vector and other one." return math.sqrt( self.distanceSquared( other ) ) def distanceSquared( self, other ): "Get the square of the Euclidean distance between this vector and other one." separationX = self.x - other.x separationY = self.y - other.y separationZ = self.z - other.z return separationX * separationX + separationY * separationY + separationZ * separationZ def dot( self, other ): "Calculate the dot product of this vector with other one." return self.x * other.x + self.y * other.y + self.z * other.z def dropAxis( self, which ): """Get a complex by removing one axis of this one. Keyword arguments: which -- the axis to drop (0=X, 1=Y, 2=Z)""" if which == 0: return complex( self.y, self.z ) if which == 1: return complex( self.x, self.z ) if which == 2: return complex( self.x, self.y ) def getNormalized( self, other ): "Get the normalized Vector3." magnitude = abs( self ) if magnitude == 0.0: return self.copy() return self / magnitude def magnitudeSquared( self ): "Get the square of the magnitude of the Vector3." return self.x * self.x + self.y * self.y + self.z * self.z def normalize( self ): "Scale each component of this Vector3 so that it has a magnitude of 1. If this Vector3 has a magnitude of 0, this method has no effect." magnitude = abs( self ) if magnitude != 0.0: self /= magnitude def reflect( self, normal ): "Reflect the Vector3 across the normal, which is assumed to be normalized." distance = 2 * ( self.x * normal.x + self.y * normal.y + self.z * normal.z ) return Vector3( self.x - distance * normal.x, self.y - distance * normal.y, self.z - distance * normal.z ) def setToVector3( self, other ): "Set this Vector3 to be identical to other one." self.x = other.x self.y = other.y self.z = other.z def setToXYZ( self, x, y, z ): "Set the x, y, and z components of this Vector3." 
self.x = x self.y = y self.z = z """ class Vector3: __slots__ = ['x', 'y', 'z'] def __init__(self, x, y, z): self.x = x self.y = y self.z = z def __copy__(self): return self.__class__(self.x, self.y, self.z) copy = __copy__ def __repr__(self): return 'Vector3(%.2f, %.2f, %.2f)' % (self.x, self.y, self.z) def __eq__(self, other): if isinstance(other, Vector3): return self.x == other.x and \ self.y == other.y and \ self.z == other.z else: assert hasattr(other, '__len__') and len(other) == 3 return self.x == other[0] and \ self.y == other[1] and \ self.z == other[2] def __ne__(self, other): return not self.__eq__(other) def __nonzero__(self): return self.x != 0 or self.y != 0 or self.z != 0 def __len__(self): return 3 def __getitem__(self, key): return (self.x, self.y, self.z)[key] def __setitem__(self, key, value): l = [self.x, self.y, self.z] l[key] = value self.x, self.y, self.z = l def __iter__(self): return iter((self.x, self.y, self.z)) def __getattr__(self, name): try: return tuple([(self.x, self.y, self.z)['xyz'.index(c)] \ for c in name]) except ValueError: raise AttributeError, name if _enable_swizzle_set: # This has detrimental performance on ordinary setattr as well # if enabled def __setattr__(self, name, value): if len(name) == 1: object.__setattr__(self, name, value) else: try: l = [self.x, self.y, self.z] for c, v in map(None, name, value): l['xyz'.index(c)] = v self.x, self.y, self.z = l except ValueError: raise AttributeError, name def __add__(self, other): if isinstance(other, Vector3): # Vector + Vector -> Vector # Vector + Point -> Point # Point + Point -> Vector if self.__class__ is other.__class__: _class = Vector3 else: _class = Point3 return _class(self.x + other.x, self.y + other.y, self.z + other.z) else: assert hasattr(other, '__len__') and len(other) == 3 return Vector3(self.x + other[0], self.y + other[1], self.z + other[2]) __radd__ = __add__ def __iadd__(self, other): if isinstance(other, Vector3): self.x += other.x self.y += other.y self.z += other.z else: self.x += other[0] self.y += other[1] self.z += other[2] return self def __sub__(self, other): if isinstance(other, Vector3): # Vector - Vector -> Vector # Vector - Point -> Point # Point - Point -> Vector if self.__class__ is other.__class__: _class = Vector3 else: _class = Point3 return Vector3(self.x - other.x, self.y - other.y, self.z - other.z) else: assert hasattr(other, '__len__') and len(other) == 3 return Vector3(self.x - other[0], self.y - other[1], self.z - other[2]) def __rsub__(self, other): if isinstance(other, Vector3): return Vector3(other.x - self.x, other.y - self.y, other.z - self.z) else: assert hasattr(other, '__len__') and len(other) == 3 return Vector3(other.x - self[0], other.y - self[1], other.z - self[2]) def __mul__(self, other): if isinstance(other, Vector3): # TODO component-wise mul/div in-place and on Vector2; docs. 
if self.__class__ is Point3 or other.__class__ is Point3: _class = Point3 else: _class = Vector3 return _class(self.x * other.x, self.y * other.y, self.z * other.z) else: assert type(other) in (int, long, float) return Vector3(self.x * other, self.y * other, self.z * other) __rmul__ = __mul__ def __imul__(self, other): assert type(other) in (int, long, float) self.x *= other self.y *= other self.z *= other return self def __div__(self, other): assert type(other) in (int, long, float) return Vector3(operator.div(self.x, other), operator.div(self.y, other), operator.div(self.z, other)) def __rdiv__(self, other): assert type(other) in (int, long, float) return Vector3(operator.div(other, self.x), operator.div(other, self.y), operator.div(other, self.z)) def __floordiv__(self, other): assert type(other) in (int, long, float) return Vector3(operator.floordiv(self.x, other), operator.floordiv(self.y, other), operator.floordiv(self.z, other)) def __rfloordiv__(self, other): assert type(other) in (int, long, float) return Vector3(operator.floordiv(other, self.x), operator.floordiv(other, self.y), operator.floordiv(other, self.z)) def __truediv__(self, other): assert type(other) in (int, long, float) return Vector3(operator.truediv(self.x, other), operator.truediv(self.y, other), operator.truediv(self.z, other)) def __rtruediv__(self, other): assert type(other) in (int, long, float) return Vector3(operator.truediv(other, self.x), operator.truediv(other, self.y), operator.truediv(other, self.z)) def __neg__(self): return Vector3(-self.x, -self.y, -self.z) __pos__ = __copy__ def __abs__(self): return math.sqrt(self.x ** 2 + \ self.y ** 2 + \ self.z ** 2) magnitude = __abs__ def magnitude_squared(self): return self.x ** 2 + \ self.y ** 2 + \ self.z ** 2 def normalize(self): d = self.magnitude() if d: self.x /= d self.y /= d self.z /= d return self def normalized(self): d = self.magnitude() if d: return Vector3(self.x / d, self.y / d, self.z / d) return self.copy() def dot(self, other): assert isinstance(other, Vector3) return self.x * other.x + \ self.y * other.y + \ self.z * other.z def cross(self, other): assert isinstance(other, Vector3) return Vector3(self.y * other.z - self.z * other.y, -self.x * other.z + self.z * other.x, self.x * other.y - self.y * other.x) def reflect(self, normal): # assume normal is normalized assert isinstance(normal, Vector3) d = 2 * (self.x * normal.x + self.y * normal.y + self.z * normal.z) return Vector3(self.x - d * normal.x, self.y - d * normal.y, self.z - d * normal.z) """
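A short worked example with the indexed vector class defined above (the values reproduce the 3-4-5 triangle from the module docstring):

a = Vector3Index(0, 3.0, 4.0, 0.0)
b = Vector3Index(1, 1.0, 0.0, 0.0)
print(abs(a))         # 5.0
print(a.dot(b))       # 3.0
print(a.dropAxis(2))  # (3+4j)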
gpl-3.0
-3,217,481,328,199,761,400
26.289796
157
0.614315
false
2.898548
false
false
false
IQSS/geoconnect
gc_apps/classification/layer_link_helper.py
1
5041
""" Used for development to create WorldMap-related links from a layer name """ from __future__ import print_function import logging import re import requests from django.conf import settings LOGGER = logging.getLogger(__name__) GEONODE_PREFIX = 'geonode:' class LayerLink(object): """Holds name, link, description""" def __init__(self, name, link, description=None): self.name = name self.link = link self.description = description def show(self): """print info""" info = ('name: {0}' 'link: {1}' 'description: {2}'\ ).format(self.name, self.link, self.description) print (info) class LayerLinkHelper(object): """ For development/debugging, given a WorldMap layer name, create links related to various geonode services including: - Listing geoserver attributes for the layer - Retrieving the current SLD in XML format - Showing the classify service url, etc. """ def __init__(self, layer_name, server_name='http://localhost:8000'): assert layer_name is not None, "layer_name cannot be None" self.layer_name = layer_name # geonode:boston_social_disorder self.server_name = server_name if self.server_name.endswith('/'): self.server_name = self.server_name[:-1] self.layer_name_no_prefix = None # boston_social_disorder self.links_dict = {} self.links_list = [] # Secondary processing involving requests self.sld_name = None self.format_layer_name() self.format_layer_links() def format_layer_name(self): """ Make sure the layer name has the GEONODE_PREFIX e.g. "geonode:boston_social_disorder" Set a variable w/o the prefix e.g. layer_name_no_prefix = "boston_social_disorder" """ if not self.layer_name.startswith(GEONODE_PREFIX): self.layer_name = '%s%s' % (GEONODE_PREFIX, self.layer_name) self.layer_name_no_prefix = self.layer_name[len(GEONODE_PREFIX):] def add_link(self, name, link, description=''): """ Add a LayerLink object to "links_list" """ layer_link_obj = LayerLink(name, link, description) # add to list self.links_list.append(layer_link_obj) # add to dict self.links_dict[name] = layer_link_obj LOGGER.debug('links count: %s', len(self.links_list)) def get_geoserver(self): """Retrieve the geoserver url""" return self.server_name.replace(':8000', ':8080') def format_layer_links(self): """Format/Create the layer links""" # View layer view_url = '%s/data/%s' % (self.server_name, self.layer_name) self.add_link('wm_layer', view_url, 'WorldMap layer view') # Geoserver attributes attr_url = ('%s/geoserver/rest/sldservice/%s/attributes.xml'\ % (self.get_geoserver(), self.layer_name)) self.add_link('attributes', attr_url, 'Geoserver Attributes') # SLD Name layer_url = '%s/geoserver/rest/layers/%s.html' %\ (self.get_geoserver(), self.layer_name_no_prefix) self.add_link('sld_name', layer_url, 'SLD name') if not self.get_sld_name(): return sld_url = '%s/geoserver/rest/styles/%s.sld' % \ (self.get_geoserver(), self.sld_name) self.add_link('sld_xml', sld_url, 'current SLD XML') sld_url2 = '%s%s%s%s' % (\ self.get_geoserver(), '/geoserver/web/?wicket:bookmarkablePage=', ':org.geoserver.wms.web.data.StyleEditPage&name=', self.sld_name) self.add_link('sld_xml2', sld_url2, 'Editable/Formatted SLD XML') def get_sld_name(self): """ Retrieve the layer's SLD name from the server """ if not 'sld_name' in self.links_dict: return False sld_url = self.links_dict['sld_name'].link #print ('Attempt to retrieve SLD sld_url: %s' % sld_url) resp = requests.get(sld_url, auth=settings.WORLDMAP_ACCOUNT_AUTH) if not resp.status_code == 200: LOGGER.error('Failed to retrieve SLD: %s', sld_url) return False # Parse out the SLD Name sld_search = 
re.search(r'<li>Default style: StyleInfoImpl\[(.*)\]',\ resp.text, re.IGNORECASE) if sld_search is None: LOGGER.error('Failed to retrieve SLD') return False sld_name = sld_search.group(1) self.sld_name = sld_name return True """ if title_search: title = title_search.group(1) content = r.text start_tag = idx = content.find('<li>Default style: StyleInfoImpl[') if idx == -1: print 'Failed to retrieve SLD' return end_idx = content.find(']', idx + print r.text """
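A hedged usage sketch; the layer name and server are example values, and the SLD lookup needs network access to the WorldMap instance plus WORLDMAP_ACCOUNT_AUTH configured in Django settings.

helper = LayerLinkHelper('boston_social_disorder', server_name='http://localhost:8000')
for layer_link in helper.links_list:
    layer_link.show()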
apache-2.0
4,475,897,529,455,973,400
29.551515
76
0.575878
false
3.647612
false
false
false
MetricsGrimoire/sortinghat
tests/test_cmd_log.py
1
8958
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2014-2017 Bitergia # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Authors: # Santiago Dueñas <[email protected]> # import datetime import sys import unittest if '..' not in sys.path: sys.path.insert(0, '..') from sortinghat import api from sortinghat.command import CMD_SUCCESS from sortinghat.cmd.log import Log from sortinghat.exceptions import CODE_INVALID_DATE_ERROR, CODE_VALUE_ERROR, CODE_NOT_FOUND_ERROR from tests.base import TestCommandCaseBase LOG_UUID_NOT_FOUND_ERROR = "Error: Jane Roe not found in the registry" LOG_ORG_NOT_FOUND_ERROR = "Error: LibreSoft not found in the registry" LOG_INVALID_PERIOD_ERROR = "Error: 'from_date' 2001-01-01 00:00:00 cannot be greater than 1999-01-01 00:00:00" LOG_INVALID_DATE_ERROR = "Error: 1999-13-01 is not a valid date" LOG_INVALID_FORMAT_DATE_ERROR = "Error: YYZYY is not a valid date" LOG_EMPTY_OUTPUT = "" LOG_OUTPUT = """John Doe\tExample\t1900-01-01 00:00:00\t2100-01-01 00:00:00 John Smith\tBitergia\t1900-01-01 00:00:00\t2100-01-01 00:00:00 John Smith\tBitergia\t1999-01-01 00:00:00\t2000-01-01 00:00:00 John Smith\tBitergia\t2006-01-01 00:00:00\t2008-01-01 00:00:00 John Smith\tExample\t1900-01-01 00:00:00\t2100-01-01 00:00:00""" LOG_UUID_OUTPUT = """John Doe\tExample\t1900-01-01 00:00:00\t2100-01-01 00:00:00""" LOG_ORG_OUTPUT = """John Smith\tBitergia\t1900-01-01 00:00:00\t2100-01-01 00:00:00 John Smith\tBitergia\t1999-01-01 00:00:00\t2000-01-01 00:00:00 John Smith\tBitergia\t2006-01-01 00:00:00\t2008-01-01 00:00:00""" LOG_TIME_PERIOD_OUTPUT = """John Smith\tBitergia\t1999-01-01 00:00:00\t2000-01-01 00:00:00""" class TestLogCaseBase(TestCommandCaseBase): """Defines common setup and teardown methods on log unit tests""" cmd_klass = Log def load_test_dataset(self): self.db.clear() api.add_unique_identity(self.db, 'John Smith') api.add_unique_identity(self.db, 'John Doe') api.add_organization(self.db, 'Example') api.add_organization(self.db, 'Bitergia') api.add_enrollment(self.db, 'John Smith', 'Example') api.add_enrollment(self.db, 'John Doe', 'Example') api.add_enrollment(self.db, 'John Smith', 'Bitergia') api.add_enrollment(self.db, 'John Smith', 'Bitergia', datetime.datetime(1999, 1, 1), datetime.datetime(2000, 1, 1)) api.add_enrollment(self.db, 'John Smith', 'Bitergia', datetime.datetime(2006, 1, 1), datetime.datetime(2008, 1, 1)) class TestLogCommand(TestLogCaseBase): """Unit tests for log command""" def test_log(self): """Check log output""" code = self.cmd.run() self.assertEqual(code, CMD_SUCCESS) output = sys.stdout.getvalue().strip() self.assertEqual(output, LOG_OUTPUT) def test_log_uuid(self): """Check log using a uuid""" code = self.cmd.run('--uuid', 'John Doe') self.assertEqual(code, CMD_SUCCESS) output = sys.stdout.getvalue().strip() self.assertEqual(output, LOG_UUID_OUTPUT) def test_log_organization(self): """Check log using a organization""" code = self.cmd.run('--organization', 'Bitergia') 
        self.assertEqual(code, CMD_SUCCESS)

        output = sys.stdout.getvalue().strip()
        self.assertEqual(output, LOG_ORG_OUTPUT)

    def test_log_period(self):
        """Check log using a time period"""

        code = self.cmd.run('--from', '1990-1-1 08:59:17',
                            '--to', '2005-1-1')
        self.assertEqual(code, CMD_SUCCESS)

        output = sys.stdout.getvalue().strip()
        self.assertEqual(output, LOG_TIME_PERIOD_OUTPUT)

    def test_log_mix_filter(self):
        """Check log using some filters"""

        code = self.cmd.run('--uuid', 'John Doe',
                            '--organization', 'Example',
                            '--from', '1990-1-1 08:59:17',
                            '--to', '2005-1-1')
        self.assertEqual(code, CMD_SUCCESS)

        output = sys.stdout.getvalue().strip()
        self.assertEqual(output, LOG_EMPTY_OUTPUT)

    def test_empty_registry(self):
        """Check output when the registry is empty"""

        # Delete the contents of the database
        self.db.clear()

        code = self.cmd.run()
        self.assertEqual(code, CMD_SUCCESS)

        output = sys.stdout.getvalue().strip()
        self.assertEqual(output, LOG_EMPTY_OUTPUT)

    def test_invalid_dates(self):
        """Check whether it fails when invalid dates are given"""

        code = self.cmd.run('--from', '1999-13-01')
        self.assertEqual(code, CODE_INVALID_DATE_ERROR)
        output = sys.stderr.getvalue().strip().split('\n')[0]
        self.assertEqual(output, LOG_INVALID_DATE_ERROR)

        code = self.cmd.run('--from', 'YYZYY')
        self.assertEqual(code, CODE_INVALID_DATE_ERROR)
        output = sys.stderr.getvalue().strip().split('\n')[-1]
        self.assertEqual(output, LOG_INVALID_FORMAT_DATE_ERROR)

        code = self.cmd.run('--to', '1999-13-01')
        self.assertEqual(code, CODE_INVALID_DATE_ERROR)
        output = sys.stderr.getvalue().strip().split('\n')[-1]
        self.assertEqual(output, LOG_INVALID_DATE_ERROR)

        code = self.cmd.run('--to', 'YYZYY')
        self.assertEqual(code, CODE_INVALID_DATE_ERROR)
        output = sys.stderr.getvalue().strip().split('\n')[-1]
        self.assertEqual(output, LOG_INVALID_FORMAT_DATE_ERROR)


class TestLog(TestLogCaseBase):
    """Unit tests for log"""

    def test_log(self):
        """Check log output"""

        code = self.cmd.log()
        self.assertEqual(code, CMD_SUCCESS)

        output = sys.stdout.getvalue().strip()
        self.assertEqual(output, LOG_OUTPUT)

    def test_log_uuid(self):
        """Check log using a uuid"""

        code = self.cmd.log('John Doe')
        self.assertEqual(code, CMD_SUCCESS)

        output = sys.stdout.getvalue().strip()
        self.assertEqual(output, LOG_UUID_OUTPUT)

    def test_log_organization(self):
        """Check log using an organization"""

        code = self.cmd.log(organization='Bitergia')
        self.assertEqual(code, CMD_SUCCESS)

        output = sys.stdout.getvalue().strip()
        self.assertEqual(output, LOG_ORG_OUTPUT)

    def test_log_period(self):
        """Check log using a time period"""

        code = self.cmd.log(from_date=datetime.datetime(1990, 1, 1),
                            to_date=datetime.datetime(2005, 1, 1))
        self.assertEqual(code, CMD_SUCCESS)

        output = sys.stdout.getvalue().strip()
        self.assertEqual(output, LOG_TIME_PERIOD_OUTPUT)

    def test_period_ranges(self):
        """Check whether enrollments cannot be listed giving invalid period ranges"""

        code = self.cmd.log('John Smith', 'Example',
                            datetime.datetime(2001, 1, 1),
                            datetime.datetime(1999, 1, 1))
        self.assertEqual(code, CODE_VALUE_ERROR)

        output = sys.stderr.getvalue().strip()
        self.assertEqual(output, LOG_INVALID_PERIOD_ERROR)

    def test_not_found_uuid(self):
        """Check whether it raises an error when the uuid is not available"""

        code = self.cmd.log(uuid='Jane Roe')
        self.assertEqual(code, CODE_NOT_FOUND_ERROR)

        output = sys.stderr.getvalue().strip()
        self.assertEqual(output, LOG_UUID_NOT_FOUND_ERROR)

    def test_not_found_organization(self):
        """Check whether it raises an error when
the organization is not available""" code = self.cmd.log(organization='LibreSoft') self.assertEqual(code, CODE_NOT_FOUND_ERROR) output = sys.stderr.getvalue().strip() self.assertEqual(output, LOG_ORG_NOT_FOUND_ERROR) def test_empty_registry(self): """Check output when the registry is empty""" # Delete the contents of the database self.db.clear() code = self.cmd.log() self.assertEqual(code, CMD_SUCCESS) output = sys.stderr.getvalue().strip('\n') self.assertEqual(output, LOG_EMPTY_OUTPUT) if __name__ == "__main__": unittest.main(buffer=True, exit=False)
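# A sketch of one more filter combination, following the pattern of the tests
# above; it would need to be defined before the __main__ block to be picked up
# by unittest, and it only checks the exit code, since the exact rows returned
# by a combined organization/period filter are not asserted here.
#
#     class TestLogExtraFilters(TestLogCaseBase):
#         """Sketch: combine the organization and period filters"""
#
#         def test_log_org_and_period(self):
#             code = self.cmd.run('--organization', 'Bitergia',
#                                 '--from', '1998-1-1', '--to', '2001-1-1')
#             self.assertEqual(code, CMD_SUCCESS)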
gpl-3.0
3,161,797,829,543,985,000
35.263158
110
0.631126
false
3.508421
true
false
false
nijx/hypertable
src/py/ThriftClient/client_test.py
1
4079
import sys import time from hypertable.thriftclient import * from hyperthrift.gen.ttypes import * try: client = ThriftClient("localhost", 38080) print "HQL examples" try: namespace = client.namespace_open("bad") except: print "Caught exception when tyring to open 'bad' namespace" namespace = client.namespace_open("test") res = client.hql_query(namespace, "show tables") print res res = client.hql_query(namespace, "select * from thrift_test") print res print "mutator examples"; mutator = client.mutator_open(namespace, "thrift_test", 0, 0); client.mutator_set_cell(mutator, Cell(Key("py-k1", "col", None), "py-v1")) client.mutator_flush(mutator); client.mutator_close(mutator); print "shared mutator examples"; mutate_spec = MutateSpec("test_py", 1000, 0); client.shared_mutator_set_cell(namespace, "thrift_test", mutate_spec, Cell(Key("py-put-k1", "col", None), "py-put-v1")) client.shared_mutator_refresh(namespace, "thrift_test", mutate_spec) client.shared_mutator_set_cell(namespace, "thrift_test", mutate_spec, Cell(Key("py-put-k2", "col", None), "py-put-v2")) time.sleep(2) print "scanner examples"; scanner = client.scanner_open(namespace, "thrift_test", ScanSpec(None, None, None, 1)); while True: cells = client.scanner_get_cells(scanner) if (len(cells) == 0): break print cells client.scanner_close(scanner) print "asynchronous api examples\n"; future = client.future_open(0); mutator_async_1 = client.async_mutator_open(namespace, "thrift_test", future, 0); mutator_async_2 = client.async_mutator_open(namespace, "thrift_test", future, 0); client.async_mutator_set_cell(mutator_async_1, Cell(Key("py-k1","col", None), "py-v1-async")); client.async_mutator_set_cell(mutator_async_2, Cell(Key("py-k1","col", None), "py-v2-async")); client.async_mutator_flush(mutator_async_1); client.async_mutator_flush(mutator_async_2); num_results=0; while True: result = client.future_get_result(future, 0); if(result.is_empty): break num_results+=1; print result; if (result.is_error or result.is_scan): print "Unexpected result\n" exit(1); if (num_results>2): print "Expected only 2 results\n" exit(1) if (num_results!=2): print "Expected only 2 results\n" exit(1) if (client.future_is_cancelled(future) or client.future_is_full(future) or not (client.future_is_empty(future)) or client.future_has_outstanding(future)): print "Future object in unexpected state" exit(1) client.async_mutator_close(mutator_async_1) client.async_mutator_close(mutator_async_2) color_scanner = client.async_scanner_open(namespace, "FruitColor", future, ScanSpec(None, None, None, 1)); location_scanner = client.async_scanner_open(namespace, "FruitLocation", future, ScanSpec(None, None, None, 1)); energy_scanner = client.async_scanner_open(namespace, "FruitEnergy", future, ScanSpec(None, None, None, 1)); expected_cells = 6; num_cells = 0; while True: result = client.future_get_result(future, 0); print result; if (result.is_empty or result.is_error or not(result.is_scan) ): print "Unexpected result\n" exit(1); for cell in result.cells: print cell; num_cells+=1; if(num_cells >= 6): client.future_cancel(future); break; if (not client.future_is_cancelled(future)): print "Expected future ops to be cancelled\n" exit(1) print "regexp scanner example"; scanner = client.scanner_open(namespace, "thrift_test", ScanSpec(None, None, None, 1, 0, None, None, ["col"], False,0, 0, "k", "v[24]")); while True: cells = client.scanner_get_cells(scanner) if (len(cells) == 0): break print cells client.scanner_close(scanner) client.async_scanner_close(color_scanner); 
client.async_scanner_close(location_scanner); client.async_scanner_close(energy_scanner); client.future_close(future); client.namespace_close(namespace) except: print sys.exc_info() raise
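# A small helper sketched from the calls already used above: it wraps the
# repeated scanner_open / scanner_get_cells / scanner_close loop in a
# generator. 'client' and 'namespace' are assumed to be set up exactly as in
# the script above; nothing here is part of the original test.
def iter_cells(client, namespace, table, scan_spec):
    scanner = client.scanner_open(namespace, table, scan_spec)
    try:
        # Keep fetching until the server returns an empty batch of cells.
        while True:
            cells = client.scanner_get_cells(scanner)
            if len(cells) == 0:
                break
            for cell in cells:
                yield cell
    finally:
        client.scanner_close(scanner)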
gpl-3.0
-526,401,585,103,323,140
32.162602
156
0.679333
false
3.214342
true
false
false
earlbellinger/asteroseismology
grid/calibrate.py
1
3590
#### Calibrate a solar model #### Author: Earl Bellinger ( [email protected] ) #### Stellar Ages & Galactic Evolution Group #### Max-Planck-Institut fur Sonnensystemforschung #### Department of Astronomy, Yale University import numpy as np import pandas as pd from scipy import optimize from os import path from subprocess import Popen from math import log10 Z_div_X_solar = 0.02293 # GS98 # 0.0245 # GN93 # log10_Z_div_X_solar = np.log10(Z_div_X_solar) constraint_names = ("log L", "log R", "Fe/H") param_names = ("Y", "alpha", "Z") param_init = [0.273449170177157, 1.83413390909832, 0.0197444964340224] directory = 'calibrate_py' print(directory) def objective(): ## minimize sum(log(model values / solar values)**2) # searches in LOGS_MS subdirectory of the global 'directory' variable hstry_file = path.join(directory, 'LOGS_MS', 'history.data') if (not path.exists(hstry_file)): return np.inf hstry = pd.read_table(hstry_file, header=0, skiprows=5, delimiter='\s+') #header=1, mdl = hstry.loc[hstry.shape[0]-1] #hstry[nrow(hstry),] # [Fe/H] = log10 ( Z / X / (Z/X)_Sun ) mdl_Fe_H = mdl['log_surf_cell_z']-np.log10(mdl['surface_h1'])-log10_Z_div_X_solar mdl_vals = [mdl['log_L'], mdl['log_R'], mdl_Fe_H] print("*** Model values") print(constraint_names, mdl_vals) print('L', 10**mdl['log_L'], 'R', 10**mdl['log_R']) result = sum([ii**2 for ii in mdl_vals]) if np.isfinite(result): return log10(result) return 10**10 ### SEARCH iteration = 0 best_val = np.inf best_param = param_init #run = function(params) { def run(params): global iteration global best_val global best_param iteration = iteration + 1 print("**** iter:", iteration) Y, alpha, Z = params print(param_names, (Y, alpha, Z)) if (Y < 0.2 or Y > 0.4 or Z < 0 or Z > 0.04 or alpha < 1 or alpha > 3): return 10**10 #if (Y < 0.23): # Y = 0.23 #if (Y > 0.33): # Y = 0.33 #if (Z < 0.01): # Z = 0.01 #if (Z > 0.04): # Z = 0.04 #if (alpha < 1): # alpha = 1 #if (alpha > 3): # alpha = 3 command = "./dispatch.sh" + \ ' -Y ' + str(Y) + \ ' -a ' + str(alpha) + \ ' -o ' + '0' + \ ' -Z ' + str(Z) + \ ' -D ' + '1' + \ ' -g ' + '1' + \ ' -e ' + '0' + \ ' -c ' + "4572000000" + \ ' -d ' + directory print(command) #system(command) process = Popen(command.split(), shell=False) process.wait() obj_val = objective() print("**** objective value =", obj_val) if (obj_val < best_val): best_val = obj_val print("*****", param_names, params) best_param = params print("***** New record!") return obj_val result = optimize.minimize(fun=run, x0=param_init, method='Nelder-Mead', options={'disp': True, 'maxiter': 10000}) #, #bounds=((0.25, 0.32), (1, 3), (0.012, 0.03))) print("Optimization terminated. Saving best result") Y, alpha, Z = result.x command = "./dispatch.sh" + \ ' -Y ' + str(Y) + \ ' -a ' + str(alpha) + \ ' -o ' + '0' + \ ' -Z ' + str(Z) + \ ' -D ' + '1' + \ ' -g ' + '1' + \ ' -e ' + '0' + \ ' -c ' + "4572000000" + \ ' -d ' + directory print(command) process = Popen(command.split(), shell=False) process.wait() print(result)
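# A quick sanity check of the metallicity expression used in objective(),
# [Fe/H] = log10(Z/X) - log10(Z/X)_Sun, with made-up surface values;
# Z_surf and X_surf below are illustrative only, not calibrated results.
def example_fe_h(Z_surf=0.018, X_surf=0.74):
    # Roughly solar composition, so the result should be close to 0 dex.
    return np.log10(Z_surf / X_surf) - log10_Z_div_X_solar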
gpl-2.0
-8,822,781,440,106,951,000
26.72
88
0.51532
false
2.849206
false
false
false
huajiahen/hotspot
backend/Busy/models.py
1
1154
# -*- coding:utf-8 -*- from django.db.models import * class Event(Model): content = CharField(u'内容',max_length = 200) starttime = IntegerField(u'开始时间') endtime = IntegerField(u'结束时间') #longitude = DecimalField(u'经度',max_digits = 18,decimal_places = 14) #latitude = DecimalField(u'纬度',max_digits = 18,decimal_places = 14) longitude = FloatField(u'经度') latitude = FloatField(u'纬度') address = CharField(u'地点',max_length = 100) hit = IntegerField(u'想去数',default = 0) class Emergency(Model): content = CharField(u'内容',max_length = 100) #longitude = DecimalField(u'经度',max_digits = 18,decimal_places = 14) #latitude = DecimalField(u'纬度',max_digits = 18,decimal_places = 14) longitude = FloatField(u'经度') latitude = FloatField(u'纬度') class Man(Model): user_id = CharField(u'用户ID',max_length = 200) longitude = DecimalField(u'经度',max_digits = 18,decimal_places = 14) latitude = DecimalField(u'纬度',max_digits = 18,decimal_places = 14) hadevent = BooleanField(u'是否参与事件',default = False)
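# A minimal query sketch against the models above (assumed usage, not part of
# the original app): events inside a rough bounding box around a point,
# ranked by how many people want to go. "delta" is an arbitrary half-width
# in degrees.
def nearby_events(lng, lat, delta=0.05):
    return (Event.objects
            .filter(longitude__gte=lng - delta, longitude__lte=lng + delta,
                    latitude__gte=lat - delta, latitude__lte=lat + delta)
            .order_by('-hit'))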
mit
-7,604,758,913,650,735,000
38.407407
72
0.662594
false
2.867925
false
false
false
cmaclell/py_plan
py_plan/problems/blocksworld.py
1
3681
from operator import ne from py_search.utils import compare_searches from py_search.uninformed import depth_first_search from py_search.uninformed import breadth_first_search from py_search.uninformed import iterative_deepening_search from py_plan.total_order import StateSpacePlanningProblem from py_plan.base import Operator move = Operator('move', [('on', '?b', '?x'), ('block', '?b'), ('block', '?x'), ('block', '?y'), ('block', '?other'), ('block', '?other2'), ('not', ('on', '?other', '?b')), ('not', ('on', '?other2', '?y')), # ('clear', '?b'), # ('clear', '?y'), (ne, '?b', '?x'), (ne, '?b', '?y'), (ne, '?x', '?y')], [('on', '?b', '?y'), # ('clear', '?x'), ('not', ('on', '?b', '?x')), # ('not', ('clear', '?y')) ]) move_from_table = Operator('move_from_table', [('on', '?b', 'Table'), ('block', '?other'), ('block', '?other2'), ('not', ('on', '?other', '?b')), ('not', ('on', '?other2', '?y')), # ('clear', '?b'), # ('clear', '?y'), ('block', '?b'), ('block', '?y'), (ne, '?b', '?y')], [('on', '?b', '?y'), ('not', ('on', '?b', 'Table')), # ('not', ('clear', '?y')) ]) move_to_table = Operator('move_to_table', [('on', '?b', '?x'), ('block', '?b'), ('block', '?x'), ('block', '?other'), ('not', ('on', '?other', '?b')), # ('clear', '?b'), (ne, '?b', '?x')], [('on', '?b', 'Table'), # ('clear', '?x'), ('not', ('on', '?b', '?x'))]) if __name__ == "__main__": start = [('on', 'A', 'Table'), ('on', 'B', 'Table'), ('on', 'C', 'A'), ('block', 'A'), ('block', 'B'), ('block', 'C'), # ('clear', 'B'), # ('clear', 'C') ] goal = [('on', 'A', 'B'), ('on', 'B', 'C'), ('on', 'C', 'Table')] # start = [('on', 'A', 'Table'), # ('on', 'B', 'Table'), # ('on', 'C', 'Table'), # ('block', 'A'), # ('block', 'B'), # ('block', 'C'), # ('clear', 'A'), # ('clear', 'B'), # ('clear', 'C')] def progression(x): return breadth_first_search(x, forward=True, backward=False) def regression(x): return breadth_first_search(x, forward=False, backward=True) def bidirectional(x): return breadth_first_search(x, forward=True, backward=True) p = StateSpacePlanningProblem(start, goal, [move_from_table, move_to_table]) # print(next(best_first_search(p)).state) compare_searches([p], [progression, regression, bidirectional, # iterative_deepening_search ]) print(next(progression(p)).path()) print(next(regression(p)).path())
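# A second configuration, sketched with the same operators defined above:
# unstack the A-on-B-on-C tower back onto the table. Only the start and goal
# facts are new; every class, operator and search function comes from this
# module.
def unstack_example():
    start2 = [('on', 'C', 'Table'), ('on', 'B', 'C'), ('on', 'A', 'B'),
              ('block', 'A'), ('block', 'B'), ('block', 'C')]
    goal2 = [('on', 'A', 'Table'), ('on', 'B', 'Table'), ('on', 'C', 'Table')]
    p2 = StateSpacePlanningProblem(start2, goal2,
                                   [move_from_table, move_to_table])
    # Forward (progression) breadth-first search; returns the plan steps.
    return next(breadth_first_search(p2, forward=True)).path()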
mit
1,218,365,807,427,348,500
33.401869
68
0.32627
false
4.099109
false
false
false
citrix-openstack-build/horizon
horizon/tables/base.py
1
53167
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import logging from operator import attrgetter # noqa import sys from django.conf import settings # noqa from django.core import urlresolvers from django import forms from django.http import HttpResponse # noqa from django import template from django.template.defaultfilters import truncatechars # noqa from django.template.loader import render_to_string # noqa from django.utils.datastructures import SortedDict # noqa from django.utils.html import escape # noqa from django.utils import http from django.utils.http import urlencode # noqa from django.utils.safestring import mark_safe # noqa from django.utils import termcolors from django.utils.translation import ugettext_lazy as _ # noqa from horizon import conf from horizon import exceptions from horizon import messages from horizon.tables.actions import FilterAction # noqa from horizon.tables.actions import LinkAction # noqa from horizon.utils import html LOG = logging.getLogger(__name__) PALETTE = termcolors.PALETTES[termcolors.DEFAULT_PALETTE] STRING_SEPARATOR = "__" class Column(html.HTMLElement): """ A class which represents a single column in a :class:`.DataTable`. .. attribute:: transform A string or callable. If ``transform`` is a string, it should be the name of the attribute on the underlying data class which should be displayed in this column. If it is a callable, it will be passed the current row's data at render-time and should return the contents of the cell. Required. .. attribute:: verbose_name The name for this column which should be used for display purposes. Defaults to the value of ``transform`` with the first letter of each word capitalized. .. attribute:: sortable Boolean to determine whether this column should be sortable or not. Defaults to ``True``. .. attribute:: hidden Boolean to determine whether or not this column should be displayed when rendering the table. Default: ``False``. .. attribute:: link A string or callable which returns a URL which will be wrapped around this column's text as a link. .. attribute:: allowed_data_types A list of data types for which the link should be created. Default is an empty list (``[]``). When the list is empty and the ``link`` attribute is not None, all the rows under this column will be links. .. attribute:: status Boolean designating whether or not this column represents a status (i.e. "enabled/disabled", "up/down", "active/inactive"). Default: ``False``. .. attribute:: status_choices A tuple of tuples representing the possible data values for the status column and their associated boolean equivalent. Positive states should equate to ``True``, negative states should equate to ``False``, and indeterminate states should be ``None``. Values are compared in a case-insensitive manner. 
Example (these are also the default values):: status_choices = ( ('enabled', True), ('true', True) ('up', True), ('active', True), ('yes', True), ('on', True), ('none', None), ('unknown', None), ('', None), ('disabled', False), ('down', False), ('false', False), ('inactive', False), ('no', False), ('off', False), ) .. attribute:: display_choices A tuple of tuples representing the possible values to substitute the data when displayed in the column cell. .. attribute:: empty_value A string or callable to be used for cells which have no data. Defaults to the string ``"-"``. .. attribute:: summation A string containing the name of a summation method to be used in the generation of a summary row for this column. By default the options are ``"sum"`` or ``"average"``, which behave as expected. Optional. .. attribute:: filters A list of functions (often template filters) to be applied to the value of the data for this column prior to output. This is effectively a shortcut for writing a custom ``transform`` function in simple cases. .. attribute:: classes An iterable of CSS classes which should be added to this column. Example: ``classes=('foo', 'bar')``. .. attribute:: attrs A dict of HTML attribute strings which should be added to this column. Example: ``attrs={"data-foo": "bar"}``. .. attribute:: truncate An integer for the maximum length of the string in this column. If the data in this column is larger than the supplied number, the data for this column will be truncated and an ellipsis will be appended to the truncated data. Defaults to ``None``. .. attribute:: link_classes An iterable of CSS classes which will be added when the column's text is displayed as a link. Example: ``classes=('link-foo', 'link-bar')``. Defaults to ``None``. .. attribute:: wrap_list Boolean value indicating whether the contents of this cell should be wrapped in a ``<ul></ul>`` tag. Useful in conjunction with Django's ``unordered_list`` template filter. Defaults to ``False``. 
""" summation_methods = { "sum": sum, "average": lambda data: sum(data, 0.0) / len(data) } # Used to retain order when instantiating columns on a table creation_counter = 0 transform = None name = None verbose_name = None status_choices = ( ('enabled', True), ('true', True), ('up', True), ('yes', True), ('active', True), ('on', True), ('none', None), ('unknown', None), ('', None), ('disabled', False), ('down', False), ('false', False), ('inactive', False), ('no', False), ('off', False), ) def __init__(self, transform, verbose_name=None, sortable=True, link=None, allowed_data_types=[], hidden=False, attrs=None, status=False, status_choices=None, display_choices=None, empty_value=None, filters=None, classes=None, summation=None, auto=None, truncate=None, link_classes=None, wrap_list=False): self.classes = list(classes or getattr(self, "classes", [])) super(Column, self).__init__() self.attrs.update(attrs or {}) if callable(transform): self.transform = transform self.name = transform.__name__ else: self.transform = unicode(transform) self.name = self.transform # Empty string is a valid value for verbose_name if verbose_name is None: verbose_name = self.transform.title() else: verbose_name = verbose_name self.auto = auto self.sortable = sortable self.verbose_name = verbose_name self.link = link self.allowed_data_types = allowed_data_types self.hidden = hidden self.status = status self.empty_value = empty_value or '-' self.filters = filters or [] self.truncate = truncate self.link_classes = link_classes or [] self.wrap_list = wrap_list if status_choices: self.status_choices = status_choices self.display_choices = display_choices if summation is not None and summation not in self.summation_methods: raise ValueError("Summation method %s must be one of %s." % (summation, ", ".join(self.summation_methods.keys()))) self.summation = summation self.creation_counter = Column.creation_counter Column.creation_counter += 1 if self.sortable and not self.auto: self.classes.append("sortable") if self.hidden: self.classes.append("hide") if self.link is not None: self.classes.append('anchor') def __unicode__(self): return unicode(self.verbose_name) def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.name) def get_raw_data(self, datum): """ Returns the raw data for this column, before any filters or formatting are applied to it. This is useful when doing calculations on data in the table. """ # Callable transformations if callable(self.transform): data = self.transform(datum) # Basic object lookups elif hasattr(datum, self.transform): data = getattr(datum, self.transform, None) # Dict lookups elif isinstance(datum, collections.Iterable) and \ self.transform in datum: data = datum.get(self.transform) else: if settings.DEBUG: msg = _("The attribute %(attr)s doesn't exist on " "%(obj)s.") % {'attr': self.transform, 'obj': datum} msg = termcolors.colorize(msg, **PALETTE['ERROR']) LOG.warning(msg) data = None return data def get_data(self, datum): """ Returns the final display data for this column from the given inputs. The return value will be either the attribute specified for this column or the return value of the attr:`~horizon.tables.Column.transform` method for this column. 
""" datum_id = self.table.get_object_id(datum) if datum_id in self.table._data_cache[self]: return self.table._data_cache[self][datum_id] data = self.get_raw_data(datum) display_value = None if self.display_choices: display_value = [display for (value, display) in self.display_choices if value.lower() == (data or '').lower()] if display_value: data = display_value[0] else: for filter_func in self.filters: data = filter_func(data) if data and self.truncate: data = truncatechars(data, self.truncate) self.table._data_cache[self][datum_id] = data return self.table._data_cache[self][datum_id] def get_link_url(self, datum): """ Returns the final value for the column's ``link`` property. If ``allowed_data_types`` of this column is not empty and the datum has an assigned type, check if the datum's type is in the ``allowed_data_types`` list. If not, the datum won't be displayed as a link. If ``link`` is a callable, it will be passed the current data object and should return a URL. Otherwise ``get_link_url`` will attempt to call ``reverse`` on ``link`` with the object's id as a parameter. Failing that, it will simply return the value of ``link``. """ if self.allowed_data_types: data_type_name = self.table._meta.data_type_name data_type = getattr(datum, data_type_name, None) if data_type and (data_type not in self.allowed_data_types): return None obj_id = self.table.get_object_id(datum) if callable(self.link): return self.link(datum) try: return urlresolvers.reverse(self.link, args=(obj_id,)) except urlresolvers.NoReverseMatch: return self.link def get_summation(self): """ Returns the summary value for the data in this column if a valid summation method is specified for it. Otherwise returns ``None``. """ if self.summation not in self.summation_methods: return None summation_function = self.summation_methods[self.summation] data = [self.get_raw_data(datum) for datum in self.table.data] data = filter(lambda datum: datum is not None, data) if len(data): summation = summation_function(data) for filter_func in self.filters: summation = filter_func(summation) return summation else: return None class Row(html.HTMLElement): """ Represents a row in the table. When iterated, the ``Row`` instance will yield each of its cells. Rows are capable of AJAX updating, with a little added work: The ``ajax`` property needs to be set to ``True``, and subclasses need to define a ``get_data`` method which returns a data object appropriate for consumption by the table (effectively the "get" lookup versus the table's "list" lookup). The automatic update interval is configurable by setting the key ``ajax_poll_interval`` in the ``HORIZON_CONFIG`` dictionary. Default: ``2500`` (measured in milliseconds). .. attribute:: table The table which this row belongs to. .. attribute:: datum The data object which this row represents. .. attribute:: id A string uniquely representing this row composed of the table name and the row data object's identifier. .. attribute:: cells The cells belonging to this row stored in a ``SortedDict`` object. This attribute is populated during instantiation. .. attribute:: status Boolean value representing the status of this row calculated from the values of the table's ``status_columns`` if they are set. .. attribute:: status_class Returns a css class for the status of the row based on ``status``. .. attribute:: ajax Boolean value to determine whether ajax updating for this row is enabled. .. attribute:: ajax_action_name String that is used for the query parameter key to request AJAX updates. 
Generally you won't need to change this value. Default: ``"row_update"``. """ ajax = False ajax_action_name = "row_update" def __init__(self, table, datum=None): super(Row, self).__init__() self.table = table self.datum = datum self.selected = False if self.datum: self.load_cells() else: self.id = None self.cells = [] def load_cells(self, datum=None): """ Load the row's data (either provided at initialization or as an argument to this function), initiailize all the cells contained by this row, and set the appropriate row properties which require the row's data to be determined. This function is called automatically by :meth:`~horizon.tables.Row.__init__` if the ``datum`` argument is provided. However, by not providing the data during initialization this function allows for the possibility of a two-step loading pattern when you need a row instance but don't yet have the data available. """ # Compile all the cells on instantiation. table = self.table if datum: self.datum = datum else: datum = self.datum cells = [] for column in table.columns.values(): if column.auto == "multi_select": widget = forms.CheckboxInput(check_test=lambda value: False) # Convert value to string to avoid accidental type conversion data = widget.render('object_ids', unicode(table.get_object_id(datum))) table._data_cache[column][table.get_object_id(datum)] = data elif column.auto == "actions": data = table.render_row_actions(datum) table._data_cache[column][table.get_object_id(datum)] = data else: data = column.get_data(datum) cell = Cell(datum, data, column, self) cells.append((column.name or column.auto, cell)) self.cells = SortedDict(cells) if self.ajax: interval = conf.HORIZON_CONFIG['ajax_poll_interval'] self.attrs['data-update-interval'] = interval self.attrs['data-update-url'] = self.get_ajax_update_url() self.classes.append("ajax-update") # Add the row's status class and id to the attributes to be rendered. self.classes.append(self.status_class) id_vals = {"table": self.table.name, "sep": STRING_SEPARATOR, "id": table.get_object_id(datum)} self.id = "%(table)s%(sep)srow%(sep)s%(id)s" % id_vals self.attrs['id'] = self.id # Add the row's display name if available display_name = table.get_object_display(datum) if display_name: self.attrs['data-display'] = escape(display_name) def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.id) def __iter__(self): return iter(self.cells.values()) @property def status(self): column_names = self.table._meta.status_columns if column_names: statuses = dict([(column_name, self.cells[column_name].status) for column_name in column_names]) return self.table.calculate_row_status(statuses) @property def status_class(self): column_names = self.table._meta.status_columns if column_names: return self.table.get_row_status_class(self.status) else: return '' def render(self): return render_to_string("horizon/common/_data_table_row.html", {"row": self}) def get_cells(self): """ Returns the bound cells for this row in order. """ return self.cells.values() def get_ajax_update_url(self): table_url = self.table.get_absolute_url() params = urlencode({"table": self.table.name, "action": self.ajax_action_name, "obj_id": self.table.get_object_id(self.datum)}) return "%s?%s" % (table_url, params) def get_data(self, request, obj_id): """ Fetches the updated data for the row based on the object id passed in. Must be implemented by a subclass to allow AJAX updating. 
""" raise NotImplementedError("You must define a get_data method on %s" % self.__class__.__name__) class Cell(html.HTMLElement): """ Represents a single cell in the table. """ def __init__(self, datum, data, column, row, attrs=None, classes=None): self.classes = classes or getattr(self, "classes", []) super(Cell, self).__init__() self.attrs.update(attrs or {}) self.datum = datum self.data = data self.column = column self.row = row self.wrap_list = column.wrap_list def __repr__(self): return '<%s: %s, %s>' % (self.__class__.__name__, self.column.name, self.row.id) @property def value(self): """ Returns a formatted version of the data for final output. This takes into consideration the :attr:`~horizon.tables.Column.link`` and :attr:`~horizon.tables.Column.empty_value` attributes. """ try: data = self.column.get_data(self.datum) if data is None: if callable(self.column.empty_value): data = self.column.empty_value(self.datum) else: data = self.column.empty_value except Exception: data = None exc_info = sys.exc_info() raise template.TemplateSyntaxError, exc_info[1], exc_info[2] if self.url: link_classes = ' '.join(self.column.link_classes) # Escape the data inside while allowing our HTML to render data = mark_safe('<a href="%s" class="%s">%s</a>' % (self.url, link_classes, escape(data))) return data @property def url(self): if self.column.link: url = self.column.get_link_url(self.datum) if url: return url else: return None @property def status(self): """ Gets the status for the column based on the cell's data. """ # Deal with status column mechanics based in this cell's data if hasattr(self, '_status'): return self._status if self.column.status or \ self.column.name in self.column.table._meta.status_columns: #returns the first matching status found data_value_lower = unicode(self.data).lower() for status_name, status_value in self.column.status_choices: if unicode(status_name).lower() == data_value_lower: self._status = status_value return self._status self._status = None return self._status def get_status_class(self, status): """ Returns a css class name determined by the status value. """ if status is True: return "status_up" elif status is False: return "status_down" else: return "status_unknown" def get_default_classes(self): """ Returns a flattened string of the cell's CSS classes. """ if not self.url: self.column.classes = [cls for cls in self.column.classes if cls != "anchor"] column_class_string = self.column.get_final_attrs().get('class', "") classes = set(column_class_string.split(" ")) if self.column.status: classes.add(self.get_status_class(self.status)) return list(classes) class DataTableOptions(object): """ Contains options for :class:`.DataTable` objects. .. attribute:: name A short name or slug for the table. .. attribute:: verbose_name A more verbose name for the table meant for display purposes. .. attribute:: columns A list of column objects or column names. Controls ordering/display of the columns in the table. .. attribute:: table_actions A list of action classes derived from the :class:`~horizon.tables.Action` class. These actions will handle tasks such as bulk deletion, etc. for multiple objects at once. .. attribute:: row_actions A list similar to ``table_actions`` except tailored to appear for each row. These actions act on a single object at a time. .. attribute:: actions_column Boolean value to control rendering of an additional column containing the various actions for each row. Defaults to ``True`` if any actions are specified in the ``row_actions`` option. .. 
attribute:: multi_select

        Boolean value to control rendering of an extra column with checkboxes
        for selecting multiple objects in the table. Defaults to ``True`` if
        any actions are specified in the ``table_actions`` option.

    .. attribute:: filter

        Boolean value to control the display of the "filter" search box
        in the table actions. By default it checks whether or not an
        instance of :class:`.FilterAction` is in :attr:`.table_actions`.

    .. attribute:: template

        String containing the template which should be used to render
        the table. Defaults to ``"horizon/common/_data_table.html"``.

    .. attribute:: context_var_name

        The name of the context variable which will contain the table when
        it is rendered. Defaults to ``"table"``.

    .. attribute:: pagination_param

        The name of the query string parameter which will be used when
        paginating this table. When using multiple tables in a single view
        this will need to be changed to differentiate between the tables.
        Default: ``"marker"``.

    .. attribute:: status_columns

        A list or tuple of column names which represents the "state"
        of the data object being represented.

        If ``status_columns`` is set, when the rows are rendered the value
        of this column will be used to add an extra class to the row in
        the form of ``"status_up"`` or ``"status_down"`` for that row's
        data.

        The row status is used by other Horizon components to trigger tasks
        such as dynamic AJAX updating.

    .. attribute:: row_class

        The class which should be used for rendering the rows of this table.
        Optional. Default: :class:`~horizon.tables.Row`.

    .. attribute:: column_class

        The class which should be used for handling the columns of this
        table. Optional. Default: :class:`~horizon.tables.Column`.

    .. attribute:: mixed_data_type

        A toggle to indicate if the table accepts two or more types of data.
        Optional. Default: ``False``

    .. attribute:: data_types

        A list of data types that this table would accept. Defaults to an
        empty list, but if the attribute ``mixed_data_type`` is set to
        ``True``, then this list must have at least one element.

    .. attribute:: data_type_name

        The name of an attribute to assign to data passed to the table when
        it accepts mixed data. Default: ``"_table_data_type"``

    .. attribute:: footer

        Boolean to control whether or not to show the table's footer.
        Default: ``True``.

    .. attribute:: permissions

        A list of permission names which this table requires in order to be
        displayed. Defaults to an empty list (``[]``).
    """
    def __init__(self, options):
        self.name = getattr(options, 'name', self.__class__.__name__)
        verbose_name = getattr(options, 'verbose_name', None) \
            or self.name.title()
        self.verbose_name = verbose_name
        self.columns = getattr(options, 'columns', None)
        self.status_columns = getattr(options, 'status_columns', [])
        self.table_actions = getattr(options, 'table_actions', [])
        self.row_actions = getattr(options, 'row_actions', [])
        self.row_class = getattr(options, 'row_class', Row)
        self.column_class = getattr(options, 'column_class', Column)
        self.pagination_param = getattr(options, 'pagination_param', 'marker')
        self.browser_table = getattr(options, 'browser_table', None)
        self.footer = getattr(options, 'footer', True)
        self.no_data_message = getattr(options,
                                       "no_data_message",
                                       _("No items to display."))
        self.permissions = getattr(options, 'permissions', [])

        # Set self.filter if we have any FilterActions
        filter_actions = [action for action in self.table_actions if
                          issubclass(action, FilterAction)]
        if len(filter_actions) > 1:
            raise NotImplementedError("Multiple filter actions is not "
                                      "currently supported.")
        self.filter = getattr(options, 'filter', len(filter_actions) > 0)
        if len(filter_actions) == 1:
            self._filter_action = filter_actions.pop()
        else:
            self._filter_action = None

        self.template = getattr(options,
                                'template',
                                'horizon/common/_data_table.html')
        self.row_actions_template = \
            'horizon/common/_data_table_row_actions.html'
        self.table_actions_template = \
            'horizon/common/_data_table_table_actions.html'
        self.context_var_name = unicode(getattr(options,
                                                'context_var_name',
                                                'table'))
        self.actions_column = getattr(options,
                                      'actions_column',
                                      len(self.row_actions) > 0)
        self.multi_select = getattr(options,
                                    'multi_select',
                                    len(self.table_actions) > 0)

        # Set runtime table defaults; not configurable.
        self.has_more_data = False

        # Set mixed data type table attr
        self.mixed_data_type = getattr(options, 'mixed_data_type', False)
        self.data_types = getattr(options, 'data_types', [])

        # If data_types has more than one element, set mixed_data_type
        # to True automatically.
        if len(self.data_types) > 1:
            self.mixed_data_type = True

        # However, if mixed_data_type is set to True manually and
        # data_types is empty, raise an error.
        if self.mixed_data_type and len(self.data_types) <= 1:
            raise ValueError("If mixed_data_type is set to True in class %s, "
                             "data_types should have more than one type" %
                             self.name)

        self.data_type_name = getattr(options,
                                      'data_type_name',
                                      "_table_data_type")


class DataTableMetaclass(type):
    """ Metaclass to add options to DataTable class and collect columns. """
    def __new__(mcs, name, bases, attrs):
        # Process options from Meta
        class_name = name
        attrs["_meta"] = opts = DataTableOptions(attrs.get("Meta", None))

        # Gather columns; this prevents the column from being an attribute
        # on the DataTable class and avoids naming conflicts.
        columns = []
        for attr_name, obj in attrs.items():
            if issubclass(type(obj), (opts.column_class, Column)):
                column_instance = attrs.pop(attr_name)
                column_instance.name = attr_name
                column_instance.classes.append('normal_column')
                columns.append((attr_name, column_instance))
        columns.sort(key=lambda x: x[1].creation_counter)

        # Iterate in reverse to preserve final order
        for base in bases[::-1]:
            if hasattr(base, 'base_columns'):
                columns = base.base_columns.items() + columns
        attrs['base_columns'] = SortedDict(columns)

        # If the table is in a ResourceBrowser, the column number must meet
        # these limits because of the width of the browser.
if opts.browser_table == "navigation" and len(columns) > 1: raise ValueError("You can only assign one column to %s." % class_name) if opts.browser_table == "content" and len(columns) > 2: raise ValueError("You can only assign two columns to %s." % class_name) if opts.columns: # Remove any columns that weren't declared if we're being explicit # NOTE: we're iterating a COPY of the list here! for column_data in columns[:]: if column_data[0] not in opts.columns: columns.pop(columns.index(column_data)) # Re-order based on declared columns columns.sort(key=lambda x: attrs['_meta'].columns.index(x[0])) # Add in our auto-generated columns if opts.multi_select and opts.browser_table != "navigation": multi_select = opts.column_class("multi_select", verbose_name="", auto="multi_select") multi_select.classes.append('multi_select_column') columns.insert(0, ("multi_select", multi_select)) if opts.actions_column: actions_column = opts.column_class("actions", verbose_name=_("Actions"), auto="actions") actions_column.classes.append('actions_column') columns.append(("actions", actions_column)) # Store this set of columns internally so we can copy them per-instance attrs['_columns'] = SortedDict(columns) # Gather and register actions for later access since we only want # to instantiate them once. # (list() call gives deterministic sort order, which sets don't have.) actions = list(set(opts.row_actions) | set(opts.table_actions)) actions.sort(key=attrgetter('name')) actions_dict = SortedDict([(action.name, action()) for action in actions]) attrs['base_actions'] = actions_dict if opts._filter_action: # Replace our filter action with the instantiated version opts._filter_action = actions_dict[opts._filter_action.name] # Create our new class! return type.__new__(mcs, name, bases, attrs) class DataTable(object): """ A class which defines a table with all data and associated actions. .. attribute:: name String. Read-only access to the name specified in the table's Meta options. .. attribute:: multi_select Boolean. Read-only access to whether or not this table should display a column for multi-select checkboxes. .. attribute:: data Read-only access to the data this table represents. .. attribute:: filtered_data Read-only access to the data this table represents, filtered by the :meth:`~horizon.tables.FilterAction.filter` method of the table's :class:`~horizon.tables.FilterAction` class (if one is provided) using the current request's query parameters. 
""" __metaclass__ = DataTableMetaclass def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs): self.request = request self.data = data self.kwargs = kwargs self._needs_form_wrapper = needs_form_wrapper self._no_data_message = self._meta.no_data_message self.breadcrumb = None self.current_item_id = None self.permissions = self._meta.permissions # Create a new set columns = [] for key, _column in self._columns.items(): column = copy.copy(_column) column.table = self columns.append((key, column)) self.columns = SortedDict(columns) self._populate_data_cache() # Associate these actions with this table for action in self.base_actions.values(): action.table = self self.needs_summary_row = any([col.summation for col in self.columns.values()]) def __unicode__(self): return unicode(self._meta.verbose_name) def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self._meta.name) @property def name(self): return self._meta.name @property def footer(self): return self._meta.footer @property def multi_select(self): return self._meta.multi_select @property def filtered_data(self): if not hasattr(self, '_filtered_data'): self._filtered_data = self.data if self._meta.filter and self._meta._filter_action: action = self._meta._filter_action filter_string = self.get_filter_string() request_method = self.request.method needs_preloading = (not filter_string and request_method == 'GET' and action.needs_preloading) valid_method = (request_method == action.method) if (filter_string and valid_method) or needs_preloading: if self._meta.mixed_data_type: self._filtered_data = action.data_type_filter(self, self.data, filter_string) else: self._filtered_data = action.filter(self, self.data, filter_string) return self._filtered_data def get_filter_string(self): filter_action = self._meta._filter_action param_name = filter_action.get_param_name() filter_string = self.request.POST.get(param_name, '') return filter_string def _populate_data_cache(self): self._data_cache = {} # Set up hash tables to store data points for each column for column in self.get_columns(): self._data_cache[column] = {} def _filter_action(self, action, request, datum=None): try: # Catch user errors in permission functions here row_matched = True if self._meta.mixed_data_type: row_matched = action.data_type_matched(datum) return action._allowed(request, datum) and row_matched except Exception: LOG.exception("Error while checking action permissions.") return None def is_browser_table(self): if self._meta.browser_table: return True return False def render(self): """ Renders the table using the template from the table options. """ table_template = template.loader.get_template(self._meta.template) extra_context = {self._meta.context_var_name: self} context = template.RequestContext(self.request, extra_context) return table_template.render(context) def get_absolute_url(self): """ Returns the canonical URL for this table. This is used for the POST action attribute on the form element wrapping the table. In many cases it is also useful for redirecting after a successful action on the table. For convenience it defaults to the value of ``request.get_full_path()`` with any query string stripped off, e.g. the path at which the table was requested. """ return self.request.get_full_path().partition('?')[0] def get_empty_message(self): """ Returns the message to be displayed when there is no data. 
""" return self._no_data_message def get_object_by_id(self, lookup): """ Returns the data object from the table's dataset which matches the ``lookup`` parameter specified. An error will be raised if the match is not a single data object. We will convert the object id and ``lookup`` to unicode before comparison. Uses :meth:`~horizon.tables.DataTable.get_object_id` internally. """ if not isinstance(lookup, unicode): lookup = unicode(str(lookup), 'utf-8') matches = [] for datum in self.data: obj_id = self.get_object_id(datum) if not isinstance(obj_id, unicode): obj_id = unicode(str(obj_id), 'utf-8') if obj_id == lookup: matches.append(datum) if len(matches) > 1: raise ValueError("Multiple matches were returned for that id: %s." % matches) if not matches: raise exceptions.Http302(self.get_absolute_url(), _('No match returned for the id "%s".') % lookup) return matches[0] @property def has_actions(self): """ Boolean. Indicates whether there are any available actions on this table. """ if not self.base_actions: return False return any(self.get_table_actions()) or any(self._meta.row_actions) @property def needs_form_wrapper(self): """ Boolean. Indicates whather this table should be rendered wrapped in a ``<form>`` tag or not. """ # If needs_form_wrapper is explicitly set, defer to that. if self._needs_form_wrapper is not None: return self._needs_form_wrapper # Otherwise calculate whether or not we need a form element. return self.has_actions def get_table_actions(self): """ Returns a list of the action instances for this table. """ bound_actions = [self.base_actions[action.name] for action in self._meta.table_actions] return [action for action in bound_actions if self._filter_action(action, self.request)] def get_row_actions(self, datum): """ Returns a list of the action instances for a specific row. """ bound_actions = [] for action in self._meta.row_actions: # Copy to allow modifying properties per row bound_action = copy.copy(self.base_actions[action.name]) bound_action.attrs = copy.copy(bound_action.attrs) bound_action.datum = datum # Remove disallowed actions. if not self._filter_action(bound_action, self.request, datum): continue # Hook for modifying actions based on data. No-op by default. bound_action.update(self.request, datum) # Pre-create the URL for this link with appropriate parameters if issubclass(bound_action.__class__, LinkAction): bound_action.bound_url = bound_action.get_link_url(datum) bound_actions.append(bound_action) return bound_actions def render_table_actions(self): """ Renders the actions specified in ``Meta.table_actions``. """ template_path = self._meta.table_actions_template table_actions_template = template.loader.get_template(template_path) bound_actions = self.get_table_actions() extra_context = {"table_actions": bound_actions} if self._meta.filter and \ self._filter_action(self._meta._filter_action, self.request): extra_context["filter"] = self._meta._filter_action context = template.RequestContext(self.request, extra_context) return table_actions_template.render(context) def render_row_actions(self, datum): """ Renders the actions specified in ``Meta.row_actions`` using the current row data. 
""" template_path = self._meta.row_actions_template row_actions_template = template.loader.get_template(template_path) bound_actions = self.get_row_actions(datum) extra_context = {"row_actions": bound_actions, "row_id": self.get_object_id(datum)} context = template.RequestContext(self.request, extra_context) return row_actions_template.render(context) @staticmethod def parse_action(action_string): """ Parses the ``action`` parameter (a string) sent back with the POST data. By default this parses a string formatted as ``{{ table_name }}__{{ action_name }}__{{ row_id }}`` and returns each of the pieces. The ``row_id`` is optional. """ if action_string: bits = action_string.split(STRING_SEPARATOR) bits.reverse() table = bits.pop() action = bits.pop() try: object_id = bits.pop() except IndexError: object_id = None return table, action, object_id def take_action(self, action_name, obj_id=None, obj_ids=None): """ Locates the appropriate action and routes the object data to it. The action should return an HTTP redirect if successful, or a value which evaluates to ``False`` if unsuccessful. """ # See if we have a list of ids obj_ids = obj_ids or self.request.POST.getlist('object_ids') action = self.base_actions.get(action_name, None) if not action or action.method != self.request.method: # We either didn't get an action or we're being hacked. Goodbye. return None # Meanhile, back in Gotham... if not action.requires_input or obj_id or obj_ids: if obj_id: obj_id = self.sanitize_id(obj_id) if obj_ids: obj_ids = [self.sanitize_id(i) for i in obj_ids] # Single handling is easy if not action.handles_multiple: response = action.single(self, self.request, obj_id) # Otherwise figure out what to pass along else: # Preference given to a specific id, since that implies # the user selected an action for just one row. if obj_id: obj_ids = [obj_id] response = action.multiple(self, self.request, obj_ids) return response elif action and action.requires_input and not (obj_id or obj_ids): messages.info(self.request, _("Please select a row before taking that action.")) return None @classmethod def check_handler(cls, request): """ Determine whether the request should be handled by this table. """ if request.method == "POST" and "action" in request.POST: table, action, obj_id = cls.parse_action(request.POST["action"]) elif "table" in request.GET and "action" in request.GET: table = request.GET["table"] action = request.GET["action"] obj_id = request.GET.get("obj_id", None) else: table = action = obj_id = None return table, action, obj_id def maybe_preempt(self): """ Determine whether the request should be handled by a preemptive action on this table or by an AJAX row update before loading any data. """ request = self.request table_name, action_name, obj_id = self.check_handler(request) if table_name == self.name: # Handle AJAX row updating. 
new_row = self._meta.row_class(self) if new_row.ajax and new_row.ajax_action_name == action_name: try: datum = new_row.get_data(request, obj_id) new_row.load_cells(datum) error = False except Exception: datum = None error = exceptions.handle(request, ignore=True) if request.is_ajax(): if not error: return HttpResponse(new_row.render()) else: return HttpResponse(status=error.status_code) preemptive_actions = [action for action in self.base_actions.values() if action.preempt] if action_name: for action in preemptive_actions: if action.name == action_name: handled = self.take_action(action_name, obj_id) if handled: return handled return None def maybe_handle(self): """ Determine whether the request should be handled by any action on this table after data has been loaded. """ request = self.request table_name, action_name, obj_id = self.check_handler(request) if table_name == self.name and action_name: action_names = [action.name for action in self.base_actions.values() if not action.preempt] # do not run preemptive actions here if action_name in action_names: return self.take_action(action_name, obj_id) return None def sanitize_id(self, obj_id): """ Override to modify an incoming obj_id to match existing API data types or modify the format. """ return obj_id def get_object_id(self, datum): """ Returns the identifier for the object this row will represent. By default this returns an ``id`` attribute on the given object, but this can be overridden to return other values. .. warning:: Make sure that the value returned is a unique value for the id otherwise rendering issues can occur. """ return datum.id def get_object_display(self, datum): """ Returns a display name that identifies this object. By default, this returns a ``name`` attribute from the given object, but this can be overriden to return other values. """ if hasattr(datum, 'name'): return datum.name return None def has_more_data(self): """ Returns a boolean value indicating whether there is more data available to this table from the source (generally an API). The method is largely meant for internal use, but if you want to override it to provide custom behavior you can do so at your own risk. """ return self._meta.has_more_data def get_marker(self): """ Returns the identifier for the last object in the current data set for APIs that use marker/limit-based paging. """ return http.urlquote_plus(self.get_object_id(self.data[-1])) def get_pagination_string(self): """ Returns the query parameter string to paginate this table. """ return "=".join([self._meta.pagination_param, self.get_marker()]) def calculate_row_status(self, statuses): """ Returns a boolean value determining the overall row status based on the dictionary of column name to status mappings passed in. By default, it uses the following logic: #. If any statuses are ``False``, return ``False``. #. If no statuses are ``False`` but any or ``None``, return ``None``. #. If all statuses are ``True``, return ``True``. This provides the greatest protection against false positives without weighting any particular columns. The ``statuses`` parameter is passed in as a dictionary mapping column names to their statuses in order to allow this function to be overridden in such a way as to weight one column's status over another should that behavior be desired. 
""" values = statuses.values() if any([status is False for status in values]): return False elif any([status is None for status in values]): return None else: return True def get_row_status_class(self, status): """ Returns a css class name determined by the status value. This class name is used to indicate the status of the rows in the table if any ``status_columns`` have been specified. """ if status is True: return "status_up" elif status is False: return "status_down" else: return "status_unknown" def get_columns(self): """ Returns this table's columns including auto-generated ones.""" return self.columns.values() def get_rows(self): """ Return the row data for this table broken out by columns. """ rows = [] try: for datum in self.filtered_data: row = self._meta.row_class(self, datum) if self.get_object_id(datum) == self.current_item_id: self.selected = True row.classes.append('current_selected') rows.append(row) except Exception: # Exceptions can be swallowed at the template level here, # re-raising as a TemplateSyntaxError makes them visible. LOG.exception("Error while rendering table rows.") exc_info = sys.exc_info() raise template.TemplateSyntaxError, exc_info[1], exc_info[2] return rows
apache-2.0
670,515,343,822,811,100
37.779723
79
0.584479
false
4.554309
false
false
false
jromang/retina-old
distinclude/spyderlib/interpreter.py
1
11927
# -*- coding: utf-8 -*- # # Copyright © 2009-2010 Pierre Raybaut # Licensed under the terms of the MIT License # (see spyderlib/__init__.py for details) """Shell Interpreter""" import sys import atexit import threading import ctypes import os import re import os.path as osp import pydoc from subprocess import Popen, PIPE from code import InteractiveConsole # Local imports: from spyderlib.utils.dochelpers import isdefined from spyderlib.utils import encoding # Force Python to search modules in the current directory first: sys.path.insert(0, '') def guess_filename(filename): """Guess filename""" if osp.isfile(filename): return filename if not filename.endswith('.py'): filename += '.py' for path in [os.getcwdu()]+sys.path: fname = osp.join(path, filename) if osp.isfile(fname): return fname elif osp.isfile(fname+'.py'): return fname+'.py' elif osp.isfile(fname+'.pyw'): return fname+'.pyw' return filename class Interpreter(InteractiveConsole, threading.Thread): """Interpreter, executed in a separate thread""" p1 = ">>> " p2 = "... " def __init__(self, namespace=None, exitfunc=None, Output=None, WidgetProxy=None, debug=False): """ namespace: locals send to InteractiveConsole object commands: list of commands executed at startup """ InteractiveConsole.__init__(self, namespace) threading.Thread.__init__(self) self._id = None self.exit_flag = False self.debug = debug # Execution Status self.more = False if exitfunc is not None: atexit.register(exitfunc) self.namespace = self.locals self.namespace['__name__'] = '__main__' self.namespace['execfile'] = self.execfile self.namespace['runfile'] = self.runfile self.namespace['help'] = self.help_replacement # Capture all interactive input/output self.initial_stdout = sys.stdout self.initial_stderr = sys.stderr self.initial_stdin = sys.stdin # Create communication pipes pr, pw = os.pipe() self.stdin_read = os.fdopen(pr, "r") self.stdin_write = os.fdopen(pw, "w", 0) self.stdout_write = Output() self.stderr_write = Output() self.widget_proxy = WidgetProxy() self.redirect_stds() #------ Standard input/output def redirect_stds(self): """Redirects stds""" if not self.debug: sys.stdout = self.stdout_write sys.stderr = self.stderr_write sys.stdin = self.stdin_read def restore_stds(self): """Restore stds""" if not self.debug: sys.stdout = self.initial_stdout sys.stderr = self.initial_stderr sys.stdin = self.initial_stdin def help_replacement(self, text=None, interactive=False): """For help() support""" if text is not None and not interactive: return pydoc.help(text) elif text is None: pyver = "%d.%d" % (sys.version_info[0], sys.version_info[1]) self.write(""" Welcome to Python %s! This is the online help utility. If this is your first time using Python, you should definitely check out the tutorial on the Internet at http://www.python.org/doc/tut/. Enter the name of any module, keyword, or topic to get help on writing Python programs and using Python modules. To quit this help utility and return to the interpreter, just type "quit". To get a list of available modules, keywords, or topics, type "modules", "keywords", or "topics". Each module also comes with a one-line summary of what it does; to list the modules whose summaries contain a given word such as "spam", type "modules spam". 
""" % pyver) else: text = text.strip() try: eval("pydoc.help(%s)" % text) except (NameError, SyntaxError): print "no Python documentation found for '%r'" % text self.write(os.linesep) self.widget_proxy.new_prompt("help> ") inp = self.raw_input() if inp.strip(): self.help_replacement(inp, interactive=True) else: self.write(""" You are now leaving help and returning to the Python interpreter. If you want to ask for help on a particular object directly from the interpreter, you can type "help(object)". Executing "help('string')" has the same effect as typing a particular string at the help> prompt. """) def run_command(self, cmd, new_prompt=True): """Run command in interpreter""" if cmd == 'exit()': self.exit_flag = True self.write('\n') return # -- Special commands type I # (transformed into commands executed in the interpreter) # ? command special_pattern = r"^%s (?:r\')?(?:u\')?\"?\'?([a-zA-Z0-9_\.]+)" run_match = re.match(special_pattern % 'run', cmd) help_match = re.match(r'^([a-zA-Z0-9_\.]+)\?$', cmd) cd_match = re.match(r"^\!cd \"?\'?([a-zA-Z0-9_ \.]+)", cmd) if help_match: cmd = 'help(%s)' % help_match.group(1) # run command elif run_match: filename = guess_filename(run_match.groups()[0]) cmd = 'runfile(r"%s", args=None)' % filename # !cd system command elif cd_match: cmd = 'import os; os.chdir(r"%s")' % cd_match.groups()[0].strip() # -- End of Special commands type I # -- Special commands type II # (don't need code execution in interpreter) xedit_match = re.match(special_pattern % 'xedit', cmd) edit_match = re.match(special_pattern % 'edit', cmd) clear_match = re.match(r"^clear ([a-zA-Z0-9_, ]+)", cmd) # (external) edit command if xedit_match: filename = guess_filename(xedit_match.groups()[0]) self.widget_proxy.edit(filename, external_editor=True) # local edit command elif edit_match: filename = guess_filename(edit_match.groups()[0]) if osp.isfile(filename): self.widget_proxy.edit(filename) else: self.stderr_write.write( "No such file or directory: %s\n" % filename) # remove reference (equivalent to MATLAB's clear command) elif clear_match: varnames = clear_match.groups()[0].replace(' ', '').split(',') for varname in varnames: try: self.namespace.pop(varname) except KeyError: pass # Execute command elif cmd.startswith('!'): # System ! 
command pipe = Popen(cmd[1:], shell=True, stdin=PIPE, stderr=PIPE, stdout=PIPE) txt_out = encoding.transcode( pipe.stdout.read() ) txt_err = encoding.transcode( pipe.stderr.read().rstrip() ) if txt_err: self.stderr_write.write(txt_err) if txt_out: self.stdout_write.write(txt_out) self.stdout_write.write('\n') self.more = False # -- End of Special commands type II else: # Command executed in the interpreter # self.widget_proxy.set_readonly(True) self.more = self.push(cmd) # self.widget_proxy.set_readonly(False) if new_prompt: self.widget_proxy.new_prompt(self.p2 if self.more else self.p1) if not self.more: self.resetbuffer() def run(self): """Wait for input and run it""" while not self.exit_flag: self.run_line() def run_line(self): line = self.stdin_read.readline() if self.exit_flag: return # Remove last character which is always '\n': self.run_command(line[:-1]) def get_thread_id(self): """Return thread id""" if self._id is None: for thread_id, obj in threading._active.items(): if obj is self: self._id = thread_id return self._id def raise_keyboard_interrupt(self): if self.isAlive(): ctypes.pythonapi.PyThreadState_SetAsyncExc(self.get_thread_id(), ctypes.py_object(KeyboardInterrupt)) return True else: return False def closing(self): """Actions to be done before restarting this interpreter""" pass def execfile(self, filename): """Exec filename""" source = open(filename, 'r').read() try: try: name = filename.encode('ascii') except UnicodeEncodeError: name = '<executed_script>' code = compile(source, name, "exec") except (OverflowError, SyntaxError): InteractiveConsole.showsyntaxerror(self, filename) else: self.runcode(code) def runfile(self, filename, args=None): """ Run filename args: command line arguments (string) """ if args is not None and not isinstance(args, basestring): raise TypeError("expected a character buffer object") self.namespace['__file__'] = filename sys.argv = [filename] if args is not None: for arg in args.split(): sys.argv.append(arg) self.execfile(filename) sys.argv = [''] self.namespace.pop('__file__') def eval(self, text): """ Evaluate text and return (obj, valid) where *obj* is the object represented by *text* and *valid* is True if object evaluation did not raise any exception """ assert isinstance(text, (str, unicode)) try: return eval(text, self.locals), True except: return None, False def is_defined(self, objtxt, force_import=False): """Return True if object is defined""" return isdefined(objtxt, force_import=force_import, namespace=self.locals) #=========================================================================== # InteractiveConsole API #=========================================================================== def push(self, line): """ Push a line of source text to the interpreter The line should not have a trailing newline; it may have internal newlines. The line is appended to a buffer and the interpreter’s runsource() method is called with the concatenated contents of the buffer as source. If this indicates that the command was executed or invalid, the buffer is reset; otherwise, the command is incomplete, and the buffer is left as it was after the line was appended. The return value is True if more input is required, False if the line was dealt with in some way (this is the same as runsource()). """ return InteractiveConsole.push(self, line) def resetbuffer(self): """Remove any unhandled source text from the input buffer""" InteractiveConsole.resetbuffer(self)
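# --- Illustrative usage (a minimal, hedged sketch; the dummy _Output and
# --- _Proxy classes below are assumptions introduced here for illustration
# --- only, they are not part of spyderlib) ---
#
#     class _Output(object):
#         def write(self, text):
#             sys.stdout.write(text)
#
#     class _Proxy(object):
#         def new_prompt(self, prompt):
#             pass
#         def edit(self, filename, external_editor=False):
#             pass
#
#     interp = Interpreter(namespace={}, Output=_Output, WidgetProxy=_Proxy,
#                          debug=True)
#     interp.run_command("x = 21 * 2")
#     print interp.eval("x")    # -> (42, True)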
gpl-3.0
547,717,842,301,860,860
35.26875
80
0.545035
false
4.353414
false
false
false
spectralpython/spectral
spectral/database/aster.py
1
15620
''' Code for reading and managing ASTER spectral library data. ''' from __future__ import absolute_import, division, print_function, unicode_literals from spectral.utilities.python23 import IS_PYTHON3, tobytes, frombytes from .spectral_database import SpectralDatabase if IS_PYTHON3: readline = lambda fin: fin.readline() open_file = lambda filename: open(filename, encoding='iso-8859-1') else: readline = lambda fin: fin.readline().decode('iso-8859-1') open_file = lambda filename: open(filename) table_schemas = [ 'CREATE TABLE Samples (SampleID INTEGER PRIMARY KEY, Name TEXT, Type TEXT, Class TEXT, SubClass TEXT, ' 'ParticleSize TEXT, SampleNum TEXT, Owner TEXT, Origin TEXT, Phase TEXT, Description TEXT)', 'CREATE TABLE Spectra (SpectrumID INTEGER PRIMARY KEY, SampleID INTEGER, SensorCalibrationID INTEGER, ' 'Instrument TEXT, Environment TEXT, Measurement TEXT, ' 'XUnit TEXT, YUnit TEXT, MinWavelength FLOAT, MaxWavelength FLOAT, ' 'NumValues INTEGER, XData BLOB, YData BLOB)', ] arraytypecode = chr(ord('f')) # These files contained malformed signature data and will be ignored. bad_files = [ 'jhu.nicolet.mineral.silicate.tectosilicate.fine.albite1.spectrum.txt', 'usgs.perknic.rock.igneous.mafic.colid.me3.spectrum.txt' ] def read_pair(fin, num_lines=1): '''Reads a colon-delimited attribute-value pair from the file stream.''' s = '' for i in range(num_lines): s += " " + readline(fin).strip() return [x.strip().lower() for x in s.split(':')] class Signature: '''Object to store sample/measurement metadata, as well as wavelength-signatrure vectors.''' def __init__(self): self.sample = {} self.measurement = {} def read_aster_file(filename): '''Reads an ASTER 2.x spectrum file.''' fin = open_file(filename) s = Signature() # Number of lines per metadata attribute value lpv = [1] * 8 + [2] + [6] # A few files have an additional "Colleted by" sample metadata field, which # sometimes affects the number of header lines haveCollectedBy = False for i in range(30): line = readline(fin).strip() if line.find('Collected by:') >= 0: haveCollectedBy = True collectedByLineNum = i if line.startswith('Description:'): descriptionLineNum = i if line.startswith('Measurement:'): measurementLineNum = i if haveCollectedBy: lpv = [1] * 10 + [measurementLineNum - descriptionLineNum] # Read sample metadata fin.seek(0) for i in range(len(lpv)): pair = read_pair(fin, lpv[i]) s.sample[pair[0].lower()] = pair[1] # Read measurement metadata lpv = [1] * 8 + [2] for i in range(len(lpv)): pair = read_pair(fin, lpv[i]) if len(pair) < 2: print(pair) s.measurement[pair[0].lower()] = pair[1] # Read signature spectrum pairs = [] for line in fin.readlines(): line = line.strip() if len(line) == 0: continue pair = line.split() nItems = len(pair) # Try to handle invalid values on signature lines if nItems == 1: # print 'single item (%s) on signature line, %s' \ # % (pair[0], filename) continue elif nItems > 2: print('more than 2 values on signature line,', filename) continue try: x = float(pair[0]) except: print('corrupt signature line,', filename) if x == 0: # print 'Zero wavelength value', filename continue elif x < 0: print('Negative wavelength value,', filename) continue pairs.append(pair) [x, y] = [list(v) for v in zip(*pairs)] # Make sure wavelengths are ascending if float(x[0]) > float(x[-1]): x.reverse() y.reverse() s.x = [float(val) for val in x] s.y = [float(val) for val in y] s.measurement['first x value'] = x[0] s.measurement['last x value'] = x[-1] s.measurement['number of x values'] = len(x) fin.close() return s class 
AsterDatabase(SpectralDatabase): '''A relational database to manage ASTER spectral library data.''' schemas = table_schemas def _add_sample(self, name, sampleType, sampleClass, subClass, particleSize, sampleNumber, owner, origin, phase, description): sql = '''INSERT INTO Samples (Name, Type, Class, SubClass, ParticleSize, SampleNum, Owner, Origin, Phase, Description) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''' self.cursor.execute(sql, (name, sampleType, sampleClass, subClass, particleSize, sampleNumber, owner, origin, phase, description)) rowId = self.cursor.lastrowid self.db.commit() return rowId def _add_signature( self, sampleID, calibrationID, instrument, environment, measurement, xUnit, yUnit, minWavelength, maxWavelength, xData, yData): import sqlite3 import array sql = '''INSERT INTO Spectra (SampleID, SensorCalibrationID, Instrument, Environment, Measurement, XUnit, YUnit, MinWavelength, MaxWavelength, NumValues, XData, YData) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''' xBlob = sqlite3.Binary(tobytes(array.array(arraytypecode, xData))) yBlob = sqlite3.Binary(tobytes(array.array(arraytypecode, yData))) numValues = len(xData) self.cursor.execute( sql, ( sampleID, calibrationID, instrument, environment, measurement, xUnit, yUnit, minWavelength, maxWavelength, numValues, xBlob, yBlob)) rowId = self.cursor.lastrowid self.db.commit() return rowId @classmethod def create(cls, filename, aster_data_dir=None): '''Creates an ASTER relational database by parsing ASTER data files. Arguments: `filename` (str): Name of the new sqlite database file to create. `aster_data_dir` (str): Path to the directory containing ASTER library data files. If this argument is not provided, no data will be imported. Returns: An :class:`~spectral.database.AsterDatabase` object. Example:: >>> AsterDatabase.create("aster_lib.db", "/CDROM/ASTER2.0/data") This is a class method (it does not require instantiating an AsterDatabase object) that creates a new database by parsing all of the files in the ASTER library data directory. Normally, this should only need to be called once. Subsequently, a corresponding database object can be created by instantiating a new AsterDatabase object with the path the database file as its argument. For example:: >>> from spectral.database.aster import AsterDatabase >>> db = AsterDatabase("aster_lib.db") ''' import os if os.path.isfile(filename): raise Exception('Error: Specified file already exists.') db = cls() db._connect(filename) for schema in cls.schemas: db.cursor.execute(schema) if aster_data_dir: db._import_files(aster_data_dir) return db def __init__(self, sqlite_filename=None): '''Creates a database object to interface an existing database. Arguments: `sqlite_filename` (str): Name of the database file. If this argument is not provided, an interface to a database file will not be established. Returns: An :class:`~spectral.AsterDatabase` connected to the database. 
''' from spectral.io.spyfile import find_file_path if sqlite_filename: self._connect(find_file_path(sqlite_filename)) else: self.db = None self.cursor = None def read_file(self, filename): return read_aster_file(filename) def _import_files(self, data_dir, ignore=bad_files): '''Read each file in the ASTER library and convert to AVIRIS bands.''' from glob import glob import numpy import os if not os.path.isdir(data_dir): raise Exception('Error: Invalid directory name specified.') if ignore is not None: filesToIgnore = [data_dir + '/' + f for f in ignore] else: filesToIgnore = [] numFiles = 0 numIgnored = 0 sigID = 1 class Sig: pass sigs = [] for f in glob(data_dir + '/*spectrum.txt'): if f in filesToIgnore: numIgnored += 1 continue print('Importing %s.' % f) numFiles += 1 sig = self.read_file(f) s = sig.sample if s['particle size'].lower == 'liquid': phase = 'liquid' else: phase = 'solid' if 'sample no.' in s: sampleNum = s['sample no.'] else: sampleNum = '' id = self._add_sample( s['name'], s['type'], s['class'], s[ 'subclass'], s['particle size'], sampleNum, s['owner'], s['origin'], phase, s['description']) instrument = os.path.basename(f).split('.')[1] environment = 'lab' m = sig.measurement # Correct numerous mispellings of "reflectance" and "transmittance" yUnit = m['y units'] if yUnit.find('reflectence') > -1: yUnit = 'reflectance (percent)' elif yUnit.find('trans') == 0: yUnit = 'transmittance (percent)' measurement = m['measurement'] if measurement[0] == 't': measurement = 'transmittance' self._add_signature(id, -1, instrument, environment, measurement, m['x units'], yUnit, m['first x value'], m['last x value'], sig.x, sig.y) if numFiles == 0: print('No data files were found in directory "%s".' \ % data_dir) else: print('Processed %d files.' % numFiles) if numIgnored > 0: print('Ignored the following %d bad files:' % (numIgnored)) for f in filesToIgnore: print('\t' + f) return sigs def get_spectrum(self, spectrumID): '''Returns a spectrum from the database. Usage: (x, y) = aster.get_spectrum(spectrumID) Arguments: `spectrumID` (int): The **SpectrumID** value for the desired spectrum from the **Spectra** table in the database. Returns: `x` (list): Band centers for the spectrum. `y` (list): Spectrum data values for each band. Returns a pair of vectors containing the wavelengths and measured values values of a measurment. For additional metadata, call "get_signature" instead. ''' import array query = '''SELECT XData, YData FROM Spectra WHERE SpectrumID = ?''' result = self.cursor.execute(query, (spectrumID,)) rows = result.fetchall() if len(rows) < 1: raise 'Measurement record not found' x = array.array(arraytypecode) frombytes(x, rows[0][0]) y = array.array(arraytypecode) frombytes(y, rows[0][1]) return (list(x), list(y)) def get_signature(self, spectrumID): '''Returns a spectrum with some additional metadata. Usage:: sig = aster.get_signature(spectrumID) Arguments: `spectrumID` (int): The **SpectrumID** value for the desired spectrum from the **Spectra** table in the database. 
Returns: `sig` (:class:`~spectral.database.aster.Signature`): An object with the following attributes: ============== ===== ======================================== Attribute Type Description ============== ===== ======================================== measurement_id int SpectrumID value from Spectra table sample_name str **Sample** from the **Samples** table sample_id int **SampleID** from the **Samples** table x list list of band center wavelengths y list list of spectrum values for each band ============== ===== ======================================== ''' import array # Retrieve spectrum from Spectra table query = '''SELECT Samples.Name, Samples.SampleID, XData, YData FROM Samples, Spectra WHERE Samples.SampleID = Spectra.SampleID AND Spectra.SpectrumID = ?''' result = self.cursor.execute(query, (spectrumID,)) results = result.fetchall() if len(results) < 1: raise "Measurement record not found" sig = Signature() sig.measurement_id = spectrumID sig.sample_name = results[0][0] sig.sample_id = results[0][1] x = array.array(arraytypecode) frombytes(x, results[0][2]) sig.x = list(x) y = array.array(arraytypecode) frombytes(y, results[0][3]) sig.y = list(y) return sig def create_envi_spectral_library(self, spectrumIDs, bandInfo): '''Creates an ENVI-formatted spectral library for a list of spectra. Arguments: `spectrumIDs` (list of ints): List of **SpectrumID** values for of spectra in the "Spectra" table of the ASTER database. `bandInfo` (:class:`~spectral.BandInfo`): The spectral bands to which the original ASTER library spectra will be resampled. Returns: A :class:`~spectral.io.envi.SpectralLibrary` object. The IDs passed to the method should correspond to the SpectrumID field of the ASTER database "Spectra" table. All specified spectra will be resampled to the same discretization specified by the bandInfo parameter. See :class:`spectral.BandResampler` for details on the resampling method used. ''' from spectral.algorithms.resampling import BandResampler from spectral.io.envi import SpectralLibrary import numpy import unicodedata spectra = numpy.empty((len(spectrumIDs), len(bandInfo.centers))) names = [] for i in range(len(spectrumIDs)): sig = self.get_signature(spectrumIDs[i]) resample = BandResampler( sig.x, bandInfo.centers, None, bandInfo.bandwidths) spectra[i] = resample(sig.y) names.append(unicodedata.normalize('NFKD', sig.sample_name). encode('ascii', 'ignore')) header = {} header['wavelength units'] = 'um' header['spectra names'] = names header['wavelength'] = bandInfo.centers header['fwhm'] = bandInfo.bandwidths return SpectralLibrary(spectra, header, {})
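# --- Illustrative usage (a minimal, hedged sketch; the database file name and
# --- data directory below are placeholders, not shipped with the library) ---
#
#     >>> from spectral.database.aster import AsterDatabase
#     >>> db = AsterDatabase.create('aster_lib.db', '/path/to/ASTER2.0/data')
#     >>> x, y = db.get_spectrum(1)      # band centers and measured values
#     >>> sig = db.get_signature(1)      # same spectrum plus sample metadata
#     >>> print(sig.sample_name, len(sig.x))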
gpl-2.0
-1,652,097,644,647,048,200
33.866071
126
0.564277
false
4.237656
false
false
false
ActiveState/code
recipes/Python/436229_RecordJar_Parser/recipe-436229.py
1
2025
#!/usr/bin/env python

# recordjar.py - Parse a Record-Jar into a list of dictionaries.
# Copyright 2005 Lutz Horn <[email protected]>
# Licensed under the same terms as Python.

def parse_jar(flo):
    """Parse a Record-Jar from a file like object into a list of dictionaries.

    This method parses a file like object as described in "The Art of Unix
    Programming" <http://www.faqs.org/docs/artu/ch05s02.html#id2906931>.

    The records are divided by lines containing '%%'. Each record consists of
    one or more lines, each containing a key, a colon, and a value. Whitespace
    around both key and value are ignored.

    >>> import StringIO
    >>> flo = StringIO.StringIO("a:b\\nc:d\\n%%\\nx:y\\n")
    >>> out = parse_jar(flo)
    >>> print out
    [{'a': 'b', 'c': 'd'}, {'x': 'y'}]

    If a record contains a key more than once, the value for this key is a
    list containing the values in their order of occurrence.

    >>> flo = StringIO.StringIO("a:b\\nc:d\\n%%\\nx:y\\nx:z\\n")
    >>> out = parse_jar(flo)
    >>> print out
    [{'a': 'b', 'c': 'd'}, {'x': ['y', 'z']}]

    Leading or trailing separator lines ('%%') and lines containing only
    whitespace are ignored.

    >>> flo = StringIO.StringIO("%%\\na:b\\nc:d\\n%%\\n\\nx:y\\nx:z\\n")
    >>> out = parse_jar(flo)
    >>> print out
    [{'a': 'b', 'c': 'd'}, {'x': ['y', 'z']}]
    """
    records = []
    for record in flo.read().split("%%"):
        dict = {}
        for line in [line for line in record.split("\n") if line.strip() != ""]:
            key, value = line.split(":", 1)
            key, value = key.strip(), value.strip()
            try:
                dict[key].append(value)
            except AttributeError:
                dict[key] = [dict[key], value]
            except KeyError:
                dict[key] = value
        if len(dict) > 0:
            records.append(dict)
    return records

def _test():
    import doctest, recordjar
    return doctest.testmod(recordjar)

if __name__ == "__main__":
    _test()
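# --- Illustrative usage (a minimal, hedged sketch; 'records.txt' is a
# --- placeholder file name) ---
#
#     with open('records.txt') as flo:
#         for record in parse_jar(flo):
#             print record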
mit
-8,003,185,806,157,289,000
32.75
80
0.565432
false
3.420608
false
false
false
mbourqui/django-publications-bootstrap
publications_bootstrap/admin_views/import_bibtex.py
1
7261
# -*- coding: utf-8 -*- import re from django.contrib import messages from django.contrib.admin.views.decorators import staff_member_required from django.http import HttpResponseRedirect from django.shortcuts import render from django_countries import countries from ..bibtex import parse from ..models import Publication, Type # mapping of months MONTHS = { 'jan': 1, 'january': 1, 'feb': 2, 'february': 2, 'mar': 3, 'march': 3, 'apr': 4, 'april': 4, 'may': 5, 'jun': 6, 'june': 6, 'jul': 7, 'july': 7, 'aug': 8, 'august': 8, 'sep': 9, 'september': 9, 'oct': 10, 'october': 10, 'nov': 11, 'november': 11, 'dec': 12, 'december': 12} COUNTRIES_BY_CODE = dict(countries) # Reversed dict try: # Python 2.7.x COUNTRIES_BY_NAME = {v: k for k, v in COUNTRIES_BY_CODE.iteritems()} except: # Python 3+ COUNTRIES_BY_NAME = {v: k for k, v in COUNTRIES_BY_CODE.items()} def import_bibtex(request): if request.method == 'POST': # try to parse BibTex bib = parse(request.POST['bibliography']) # container for error messages errors = {} # publication types types = Type.objects.all() # check for errors if not bib: if not request.POST['bibliography']: errors['bibliography'] = 'This field is required.' if not errors: publications = [] # try adding publications for entry in bib: if 'title' in entry and 'author' in entry and 'year' in entry: # parse authors authors = entry['author'].split(' and ') for i in range(len(authors)): author = authors[i].split(',') author = [author[-1]] + author[:-1] authors[i] = ' '.join(author) authors = ', '.join(authors) # add missing keys keys = [ 'journal', 'booktitle', 'address', 'publisher', 'editor', 'edition', 'institution', 'school', 'organization', 'series', 'url', 'doi', 'isbn', 'tags', 'note', 'abstract', 'month'] for key in keys: if key not in entry: entry[key] = '' # map integer fields to integers entry['month'] = Publication.EMonths.get(MONTHS.get(entry['month'].lower(), 0), None) for field in ['volume', 'number', 'chapter', 'section']: entry[field] = entry.get(field, None) # remove whitespace characters (likely due to line breaks) entry['url'] = re.sub(r'\s', '', entry['url']) if 'country' not in entry: entry['country'] = '' else: if entry['country'].strip() in COUNTRIES_BY_NAME: entry['country'] = COUNTRIES_BY_NAME[entry['country'].strip()] elif entry['country'].upper() in COUNTRIES_BY_CODE: entry['country'] = entry['country'].upper() else: entry['country'] = '' # determine type type_id = None for t in types: if entry['type'] in t.bibtex_type_list: type_id = t.id break if type_id is None: errors['bibliography'] = 'Type "{}" unknown.'.format(entry['type']) break # add publication publications.append(Publication( type_id=type_id, citekey=entry['key'], title=entry['title'], authors=authors, year=entry['year'], month=entry['month'], journal=entry['journal'], book_title=entry['booktitle'], publisher=entry['publisher'], location=entry['address'], country=entry['country'], editor=entry['editor'], edition=entry['edition'], institution=entry['institution'], school=entry['school'], organization=entry['organization'], series=entry['series'], volume=entry['volume'], number=entry['number'], chapter=entry['chapter'], section=entry['section'], note=entry['note'], url=entry['url'], doi=entry['doi'], isbn=entry['isbn'], external=False, abstract=entry['abstract'], tags=entry['tags'], status=Publication.EStatuses.PUBLISHED)) else: errors['bibliography'] = 'Make sure that the keys <title>, <author> and <year> are present.' 
break if not publications: errors['bibliography'] = 'No valid BibTex entries found.' if errors: # some error occurred return render( request, 'admin/publications_bootstrap/import_bibtex.html', { 'errors': errors, 'title': 'Import BibTex', 'types': Type.objects.all(), 'request': request}) else: try: # save publications for publication in publications: publication.save() except: msg = 'Some error occurred during saving of publications.' else: if len(publications) > 1: msg = 'Successfully added {} publications.'.format(len(publications)) else: msg = 'Successfully added {} publication.'.format(len(publications)) # show message messages.info(request, msg) # redirect to publication listing return HttpResponseRedirect('../') else: return render(request, 'admin/publications_bootstrap/import_bibtex.html', {'title': 'Import BibTex', 'types': Type.objects.all(), 'request': request}) import_bibtex = staff_member_required(import_bibtex)
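# --- Illustrative input (a hedged sketch of a BibTeX entry that satisfies the
# --- required <title>, <author> and <year> keys checked above; every field
# --- value is made up for illustration) ---
#
#     @article{doe2017example,
#         title   = {An Example Title},
#         author  = {Doe, Jane and Smith, John},
#         year    = {2017},
#         month   = {jan},
#         journal = {Journal of Examples},
#     }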
mit
-2,223,934,309,947,411,700
36.235897
112
0.428729
false
5.081176
false
false
false
hbeatty/dell-wsman-client-api-python
setup.py
1
1520
""" Setup file for egg builds @copyright: 2010-2012 @author: Joseph Tallieu <[email protected]> @author: Vijay Halaharvi <[email protected]> @organization: Dell Inc. - PG Validation @license: GNU LGLP v2.1 """ # This file is part of WSManAPI. # # WSManAPI is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 2.1 of the License, or # (at your option) any later version. # # WSManAPI is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with WSManAPI. If not, see <http://www.gnu.org/licenses/>. from setuptools import setup, find_packages # setup meta data and entry points setup( name='wsman', version="0.9.27", description="Web Services Management", author="Vijay Halaharvi, Joseph Tallieu", author_email="[email protected], [email protected]", license="Dell Software License", packages=find_packages(), package_data={'wsman':['transport/dummy/responses/winrm/*', 'transport/dummy/responses/wsmancli/*', 'loghandlers/templates/*']}, include_package_data=True )
lgpl-3.0
-7,334,710,254,634,723,000
36.073171
80
0.694079
false
3.486239
false
false
false
dchenaux/Yoda
yoda/flask_debugtoolbar_mongo/panel.py
1
2746
from flask_debugtoolbar.panels import DebugPanel import jinja2 from . import operation_tracker from . import jinja_filters class MongoDebugPanel(DebugPanel): """Panel that shows information about MongoDB operations. """ name = 'Mongo' has_content = True def __init__(self, *args, **kwargs): super(MongoDebugPanel, self).__init__(*args, **kwargs) self.jinja_env.loader = jinja2.ChoiceLoader([ self.jinja_env.loader, jinja2.PrefixLoader({ 'debug_tb_mongo': jinja2.PackageLoader(__name__, 'templates') }) ]) filters = ('format_stack_trace', 'embolden_file', 'format_dict', 'highlight', 'pluralize') for jfilter in filters: self.jinja_env.filters[jfilter] = getattr(jinja_filters, jfilter) operation_tracker.install_tracker() def process_request(self, request): operation_tracker.reset() def nav_title(self): return 'MongoDB' def nav_subtitle(self): fun = lambda x, y: (x, len(y), '%.2f' % sum(z['time'] for z in y)) ctx = {'operations': [], 'count': 0, 'time': 0} if operation_tracker.queries: ctx['operations'].append(fun('read', operation_tracker.queries)) ctx['count'] += len(operation_tracker.queries) ctx['time'] += sum(x['time'] for x in operation_tracker.queries) if operation_tracker.inserts: ctx['operations'].append(fun('insert', operation_tracker.inserts)) ctx['count'] += len(operation_tracker.inserts) ctx['time'] += sum(x['time'] for x in operation_tracker.inserts) if operation_tracker.updates: ctx['operations'].append(fun('update', operation_tracker.updates)) ctx['count'] += len(operation_tracker.updates) ctx['time'] += sum(x['time'] for x in operation_tracker.updates) if operation_tracker.removes: ctx['operations'].append(fun('delete', operation_tracker.removes)) ctx['count'] += len(operation_tracker.removes) ctx['time'] += sum(x['time'] for x in operation_tracker.removes) ctx['time'] = '%.2f' % ctx['time'] return self.render('debug_tb_mongo/mongo-panes-subtitle.html', ctx) def title(self): return 'MongoDB Operations' def url(self): return '' def content(self): context = self.context.copy() context['queries'] = operation_tracker.queries context['inserts'] = operation_tracker.inserts context['updates'] = operation_tracker.updates context['removes'] = operation_tracker.removes return self.render('debug_tb_mongo/mongo-panel.html', context)
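# --- Illustrative configuration (a hedged sketch; the dotted path below
# --- assumes this module's location inside the project, alongside the stock
# --- flask-debugtoolbar timer panel) ---
#
#     DEBUG_TB_PANELS = (
#         'flask_debugtoolbar.panels.timer.TimerDebugPanel',
#         'yoda.flask_debugtoolbar_mongo.panel.MongoDebugPanel',
#     )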
bsd-3-clause
3,575,773,966,745,471,000
36.616438
78
0.60488
false
3.968208
false
false
false
goniz/plexmyxbmc
plexmyxbmc/__init__.py
1
1359
#!/usr/bin/python2 __version__ = "1.0.0" import plexapi from plexmyxbmc.config import get_config from plexmyxbmc.log import get_logger plexapi.X_PLEX_PROVIDES = 'player,controller,sync-target' plexapi.X_PLEX_PRODUCT = "PlexMyXBMC" plexapi.X_PLEX_VERSION = __version__ plexapi.X_PLEX_IDENTIFIER = get_config().get('uuid', 'randomuuid') plexapi.X_PLEX_PLATFORM_VERSION = plexapi.X_PLEX_PLATFORM + plexapi.X_PLEX_PLATFORM_VERSION plexapi.X_PLEX_PLATFORM = 'Generic' BASE_HEADERS = { 'X-Plex-Provides': plexapi.X_PLEX_PROVIDES, 'X-Plex-Product': plexapi.X_PLEX_PRODUCT, 'X-Plex-Version': plexapi.X_PLEX_VERSION, 'X-Plex-Client-Identifier': plexapi.X_PLEX_IDENTIFIER, 'X-Plex-Device-Name': get_config().get('name', 'randomname'), 'X-Plex-Platform': plexapi.X_PLEX_PLATFORM, 'X-Plex-Platform-Version': plexapi.X_PLEX_PLATFORM_VERSION, } plexapi.BASE_HEADERS.update(BASE_HEADERS) logger = get_logger('plexapi', _force=True) plexapi.log = logger def time_to_millis(time): return (time['hours']*3600 + time['minutes']*60 + time['seconds'])*1000 + time['milliseconds'] def millis_to_time(t): millis = int(t) seconds = millis / 1000 minutes = seconds / 60 hours = minutes / 60 seconds %= 60 minutes %= 60 millis %= 1000 return dict(hours=hours, minutes=minutes, seconds=seconds, milliseconds=millis)
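# --- Illustrative round trip (a minimal, hedged sketch) ---
#
#     time_to_millis({'hours': 1, 'minutes': 2, 'seconds': 3, 'milliseconds': 4})  # -> 3723004
#     millis_to_time(3723004)  # -> hours 1, minutes 2, seconds 3, milliseconds 4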
gpl-2.0
2,932,022,274,638,289,400
32.975
98
0.701987
false
2.796296
false
false
false
ghorn/debian-casadi
experimental/joris/scp.py
1
2985
# # This file is part of CasADi. # # CasADi -- A symbolic framework for dynamic optimization. # Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl, # K.U. Leuven. All rights reserved. # Copyright (C) 2011-2014 Greg Horn # # CasADi is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 3 of the License, or (at your option) any later version. # # CasADi is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with CasADi; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # from numpy import * from casadi import * DMatrix.setPrecision(16) solver = None def sdqp_sol(h=None,c=None,a=None,uba=None,f=None,g=None): global solver if solver is None: solver = SDPSdqpSolver(sdqpStruct(h=h.sparsity(),f=f.sparsity(),g=g.sparsity(),a=a.sparsity())) solver.setOption("sdp_solver",DSdpSolver) solver.setOption("sdp_solver_options",{"_printlevel": 0}) solver.init() solver.setInput(h,"h") solver.setInput(f,"f") solver.setInput(g,"g") solver.setInput(a,"a") solver.setInput(c,"c") solver.setInput(-Inf,"lba") solver.setInput(uba,"uba") solver.evaluate() return solver.output("x"),solver.output("lam_a"), solver.output("dual") x = ssym("x",2) f = (1-x[0])**2+100*(x[1]-x[0]**2)**2 nsd = blockcat([[-x[0],2],[2,-x[1]**2]]) # <=0 g = eig_symbolic(nsd) nlp = SXFunction(nlpIn(x=x),nlpOut(f=f,g=g)) nlp.init() # Find a refence solution with another ipopt = IpoptSolver(nlp) ipopt.init() ipopt.setInput(-Inf,"lbg") ipopt.setInput(0,"ubg") ipopt.solve() print "reference sol= ", ipopt.output("x") g = DMatrix(0,1) lambd = ssym("lambda",g.shape) Lambd = ssym("lambda",nsd.sparsity()) lag = f+mul(lambd.T,g)+trace(mul(Lambd,nsd)) oracle = SXFunction(customIO(x=x,lambd=lambd,Lambd=Lambd),customIO(f=f,g=g,nsd=nsd,hess=hessian(lag,x), gradF=gradient(f,x), jacG= jacobian(g,x),jac_nsd=jacobian(vec(nsd),x))) oracle.init() lambda_k = DMatrix([0]) Lambda_k = DMatrix([0]) x_k = DMatrix([2,3]) for i in range(25): print i, x_k oracle.setInput(x_k,"x") oracle.setInput(lambda_k,"lambd") oracle.setInput(Lambda_k,"Lambd") oracle.evaluate() step, lambda_k, Lambda_k = sdqp_sol(h=oracle.output("hess"),c=oracle.output("gradF"),a=oracle.output("jacG"),uba=-oracle.output("g"),f=vertcat([ oracle.output("jac_nsd")[:,i].reshape(oracle.output("nsd").shape) for i in range(x_k.size())]),g=-oracle.output("nsd")) x_k+= step print linalg.eig(oracle.output("nsd"))[0]
lgpl-3.0
-7,634,495,534,286,407,000
30.421053
266
0.670352
false
2.733516
false
false
false
qutebrowser/qutebrowser
scripts/dev/ua_fetch.py
1
2287
#!/usr/bin/env python3 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: """Fetch and print the most common user agents. This script fetches the most common user agents according to https://github.com/Kikobeats/top-user-agents, and prints the most recent Chrome user agent for Windows, macOS and Linux. """ import math import sys import textwrap import requests import qutebrowser.config.websettings def version(ua): """Comparable version of a user agent.""" return tuple(int(v) for v in ua.upstream_browser_version.split('.')[:2]) def wrap(ini, sub, string): return textwrap.wrap(string, width=80, initial_indent=ini, subsequent_indent=sub) response = requests.get('https://raw.githubusercontent.com/Kikobeats/top-user-agents/master/index.json') if response.status_code != 200: print('Unable to fetch the user agent index', file=sys.stderr) sys.exit(1) ua_checks = { 'Win10': lambda ua: ua.os_info.startswith('Windows NT'), 'macOS': lambda ua: ua.os_info.startswith('Macintosh'), 'Linux': lambda ua: ua.os_info.startswith('X11'), } ua_strings = {} ua_versions = {} ua_names = {} for ua_string in reversed(response.json()): # reversed to prefer more common versions # Filter out browsers that are not Chrome-based parts = ua_string.split() if not any(part.startswith("Chrome/") for part in parts): continue if any(part.startswith("OPR/") or part.startswith("Edg/") for part in parts): continue if 'Chrome/99.0.7113.93' in parts: # Fake or false-positive entry continue user_agent = qutebrowser.config.websettings.UserAgent.parse(ua_string) # check which os_string conditions are met and select the most recent version for key, check in ua_checks.items(): if check(user_agent): v = version(user_agent) if v >= ua_versions.get(key, (-math.inf,)): ua_versions[key] = v ua_strings[key] = ua_string ua_names[key] = f'Chrome {v[0]} {key}' for key, ua_string in ua_strings.items(): quoted_ua_string = f'"{ua_string}"' for line in wrap(" - - ", " ", quoted_ua_string): print(line) for line in wrap(" - ", " ", ua_names[key]): print(line)
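# --- Illustrative note (a hedged sketch; the version string is made up) ---
# version() keeps only the first two numeric components of the parsed Chrome
# version, so agents compare by (major, minor):
#
#     tuple(int(v) for v in "119.0.6045.105".split('.')[:2])  # -> (119, 0)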
gpl-3.0
1,706,314,109,449,327,400
30.328767
104
0.647136
false
3.475684
false
false
false
vauxoo-dev/stoqdrivers
stoqdrivers/printers/bematech/MP20.py
1
4982
# -*- Mode: Python; coding: iso-8859-1 -*- # vi:si:et:sw=4:sts=4:ts=4 ## ## Stoqdrivers ## Copyright (C) 2009 Async Open Source <http://www.async.com.br> ## All rights reserved ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, ## USA. ## ## Author(s): Ronaldo Maia <[email protected]> ## """ Bematech MP20 driver The MP20 is compatible with the MP25 command set (actually its the other way around ;) until a certain command (85, I think). Commands above that are just not executed. There are some differences on the Registers numbering as well. Also, some commands have different parameter sizes. These are: CMD MP 20 MP 25 00 29 28+30+80 (abertura de cupom) 14 - 28+30+80 (cancelamento de cupom) 32 (inicia fechamento cupom) 73 Algumas diferencas no funcionamento. Ver manual. """ from kiwi.log import Logger from decimal import Decimal from stoqdrivers.printers.bematech.MP25 import (MP25, MP25Status, CMD_STATUS, CMD_COUPON_OPEN) log = Logger('stoqdrivers.bematech.MP20') CMD_ADD_ITEM_SIMPLE = 9 class MP20Registers(object): TOTAL = 3 TOTAL_CANCELATIONS = 4 TOTAL_DISCOUNT = 5 COO = 6 GNF = 7 NUMBER_REDUCTIONS_Z = 9 CRO = 10 LAST_ITEM_ID = 12 NUMBER_TILL = 14 FISCAL_FLAGS = 17 EMISSION_DATE = 23 TOTALIZERS = 29 PAYMENT_METHODS = 32 SERIAL = 0 FIRMWARE = 1 # (size, bcd) formats = { TOTAL: ('9s', True), TOTAL_CANCELATIONS: ('7s', True), TOTAL_DISCOUNT: ('7s', True), COO: ('3s', True), GNF: ('3s', True), NUMBER_REDUCTIONS_Z: ('2s', True), CRO: ('2s', True), LAST_ITEM_ID: ('2s', True), NUMBER_TILL: ('2s', True), FISCAL_FLAGS: ('1s', False), EMISSION_DATE: ('6s', False), TOTALIZERS: ('2s', False), # 1 + (52 * 16) + (52 * 10) + (52 * 10) + (52 * 1) # 1 + 832 + 520 + 520 + 52: 1925 PAYMENT_METHODS: ('b832s520s520s52s', False), SERIAL: ('15s', False), FIRMWARE: ('3s', True), } class MP20Status(MP25Status): def __init__(self, reply): self.st1, self.st2 = reply[-2:] self.st3 = 0 class MP20(MP25): model_name = "Bematech MP20 TH FI" CMD_PROTO = 0x1b registers = MP20Registers supports_duplicate_receipt = False reply_format = '<b%sbb' status_size = 2 # # MP25 implementation # def coupon_open(self): """ This needs to be called before anything else. """ self._send_command(CMD_COUPON_OPEN, "%-29s" % (self._customer_document)) def coupon_add_item(self, code, description, price, taxcode, quantity=Decimal("1.0"), unit=None, discount=Decimal("0.0"), markup=Decimal("0.0"), unit_desc=""): # We are using a simpler command for adding items with the MP20 # because its not working with the MP25 command (ESC 63). 
This # simpler command does not support markup and unit data = ( "%-13s" # code "%29s" # description "%02s" # taxcode "%07d" # quantity "%08d" # value "%08d" # discount ) % (code, description, taxcode, quantity * Decimal("1e3"), price * Decimal("1e2"), discount * Decimal("1e2")) self._send_command(CMD_ADD_ITEM_SIMPLE, data) return self._get_last_item_id() def get_status(self, val=None): if val is None: val = self._send_command(CMD_STATUS, raw=True) return MP20Status(val) def cancel_last_coupon(self): """Cancel the last non fiscal coupon or the last sale.""" #XXX MP20 does not support this self.coupon_cancel() def get_ccf(self): # MP20 does not support this. We should just return the coo # http://www.forumweb.com.br/foruns/lofiversion/index.php/t64417.html return self.get_coo() def status_reply_complete(self, reply): log.debug('status_reply_complete "%s" (size=%s)' % (reply, len(reply))) return len(reply) == 18
lgpl-2.1
2,580,845,285,714,813,400
30.732484
79
0.5833
false
3.459722
false
false
false
KDE/tellico
src/fetch/scripts/dark_horse_comics.py
1
14898
#!/usr/bin/env python # -*- coding: utf-8 -*- # *************************************************************************** # Copyright (C) 2006-2009 Mathias Monnerville <[email protected]> # *************************************************************************** # # *************************************************************************** # * * # * This program is free software; you can redistribute it and/or * # * modify it under the terms of the GNU General Public License as * # * published by the Free Software Foundation; either version 2 of * # * the License or (at your option) version 3 or any later version * # * accepted by the membership of KDE e.V. (or its successor approved * # * by the membership of KDE e.V.), which shall act as a proxy * # * defined in Section 14 of version 3 of the license. * # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU General Public License for more details. * # * * # * You should have received a copy of the GNU General Public License * # * along with this program. If not, see <http://www.gnu.org/licenses/>. * # * * # *************************************************************************** # $Id: comics_darkhorsecomics.py 123 2006-03-24 08:47:48Z mathias $ """ This script has to be used with tellico (http://periapsis.org/tellico) as an external data source program. It allows searching through the Dark Horse Comics web database. Related info and cover are fetched automatically. It takes only one argument (comic title). Tellico data source setup: - source name: Dark Horse Comics (US) (or whatever you want :) - Collection type: comics collection - Result type: tellico - Path: /path/to/script/comics_darkhorsecomics.py - Arguments: Title (checked) = %1 Update (checked) = %{title} """ import sys, os, re, hashlib, random, string import urllib, time, base64 import xml.dom.minidom try: # For Python 3.0 and later from urllib.request import urlopen except ImportError: # Fall back to Python 2's urllib2 from urllib2 import urlopen XML_HEADER = """<?xml version="1.0" encoding="UTF-8"?>""" DOCTYPE = """<!DOCTYPE tellico PUBLIC "-//Robby Stephenson/DTD Tellico V9.0//EN" "http://periapsis.org/tellico/dtd/v9/tellico.dtd">""" NULLSTRING = '' VERSION = "0.2" def genMD5(): """ Generates and returns a random md5 string. Its main purpose is to allow random image file name generation. """ float = random.random() return hashlib.md5(str(float)).hexdigest() class BasicTellicoDOM: """ This class manages tellico's XML data model (DOM) """ def __init__(self): self.__doc = xml.dom.minidom.Document() self.__root = self.__doc.createElement('tellico') self.__root.setAttribute('xmlns', 'http://periapsis.org/tellico/') self.__root.setAttribute('syntaxVersion', '9') self.__collection = self.__doc.createElement('collection') self.__collection.setAttribute('title', 'My Comics') self.__collection.setAttribute('type', '6') self.__fields = self.__doc.createElement('fields') # Add all default (standard) fields self.__dfltField = self.__doc.createElement('field') self.__dfltField.setAttribute('name', '_default') self.__fields.appendChild(self.__dfltField) self.__collection.appendChild(self.__fields) self.__images = self.__doc.createElement('images') self.__root.appendChild(self.__collection) self.__doc.appendChild(self.__root) # Current movie id. 
See entry's id attribute in self.addEntry() self.__currentId = 0 def addEntry(self, movieData): """ Add a comic entry. Returns an entry node instance """ d = movieData entryNode = self.__doc.createElement('entry') entryNode.setAttribute('id', str(self.__currentId)) titleNode = self.__doc.createElement('title') titleNode.appendChild(self.__doc.createTextNode(unicode(d['title'], 'latin-1').encode('utf-8'))) entryNode.appendChild(titleNode) yearNode = self.__doc.createElement('pub_year') yearNode.appendChild(self.__doc.createTextNode(d['pub_year'])) entryNode.appendChild(yearNode) countryNode = self.__doc.createElement('country') countryNode.appendChild(self.__doc.createTextNode(d['country'])) entryNode.appendChild(countryNode) pubNode = self.__doc.createElement('publisher') pubNode.appendChild(self.__doc.createTextNode(d['publisher'])) entryNode.appendChild(pubNode) langNode = self.__doc.createElement('language') langNode.appendChild(self.__doc.createTextNode(d['language'])) entryNode.appendChild(langNode) writersNode = self.__doc.createElement('writers') for g in d['writer']: writerNode = self.__doc.createElement('writer') writerNode.appendChild(self.__doc.createTextNode(unicode(g, 'latin-1').encode('utf-8'))) writersNode.appendChild(writerNode) entryNode.appendChild(writersNode) genresNode = self.__doc.createElement('genres') for g in d['genre']: genreNode = self.__doc.createElement('genre') genreNode.appendChild(self.__doc.createTextNode(unicode(g, 'latin-1').encode('utf-8'))) genresNode.appendChild(genreNode) entryNode.appendChild(genresNode) commentsNode = self.__doc.createElement('comments') #for g in d['comments']: # commentsNode.appendChild(self.__doc.createTextNode(unicode("%s\n\n" % g, 'latin-1').encode('utf-8'))) commentsData = string.join(d['comments'], '\n\n') commentsNode.appendChild(self.__doc.createTextNode(unicode(commentsData, 'latin-1').encode('utf-8'))) entryNode.appendChild(commentsNode) artistsNode = self.__doc.createElement('artists') for k, v in d['artist'].iteritems(): if v == 'various': continue artistNode = self.__doc.createElement('artist') artistNode.appendChild(self.__doc.createTextNode(unicode(v, 'latin-1').encode('utf-8'))) artistsNode.appendChild(artistNode) entryNode.appendChild(artistsNode) if 'pages' in d: pagesNode = self.__doc.createElement('pages') pagesNode.appendChild(self.__doc.createTextNode(d['pages'])) entryNode.appendChild(pagesNode) if 'isbn' in d: isbnNode = self.__doc.createElement('isbn') isbnNode.appendChild(self.__doc.createTextNode(d['isbn'])) entryNode.appendChild(isbnNode) if 'issue' in d: issueNode = self.__doc.createElement('issue') issueNode.appendChild(self.__doc.createTextNode(d['issue'])) entryNode.appendChild(issueNode) if 'image' in d: imageNode = self.__doc.createElement('image') imageNode.setAttribute('format', 'JPEG') imageNode.setAttribute('id', d['image'][0]) imageNode.appendChild(self.__doc.createTextNode(unicode(d['image'][1], 'latin-1').encode('utf-8'))) coverNode = self.__doc.createElement('cover') coverNode.appendChild(self.__doc.createTextNode(d['image'][0])) entryNode.appendChild(coverNode) if 'image' in d: entryNode.appendChild(coverNode) self.__images.appendChild(imageNode) self.__collection.appendChild(entryNode) self.__currentId += 1 return entryNode def printEntry(self, nEntry): """ Prints entry's XML content to stdout """ try: print(nEntry.toxml()) except: print(sys.stderr, "Error while outputting XML content from entry to Tellico") def printXMLTree(self): """ Outputs XML content to stdout """ 
self.__collection.appendChild(self.__images) print(XML_HEADER); print(DOCTYPE) print(self.__root.toxml()) class DarkHorseParser: def __init__(self): self.__baseURL = 'https://www.darkhorse.com' self.__basePath = '/Comics/' self.__searchURL = '/Search/%s' self.__coverPath = '/covers/' self.__movieURL = self.__baseURL + self.__basePath # Define some regexps self.__regExps = { 'title' : '<h2 class="title">(?P<title>.*?)</h2>', 'pub_date' : '<dt>Pub.* Date:</dt>.*?<dd>(?P<pub_date>.*?)</dd>', 'isbn' : '<dt>ISBN-10:</dt><dd>(?P<isbn>.*?)</dd>', 'desc' : '<div class="product-description">(?P<desc>.*?)</div>', 'writer' : '<dt>Writer: *</dt> *<dd><a.*?>(?P<writer>.*?)</a> *</dd>', 'cover_artist' : '<dt>Artist: *</dt> *<dd><a.*>(?P<cover_artist>.*?)</a> *</dd>', 'penciller' : '<dt>Penciller: *</dt> *<dd><a.*>(?P<penciller>.*?)</a> *</dd>', 'inker' : '<dt>Inker: *</dt> *<dd><a.*>(?P<inker>.*?)</a> *</dd>', 'letterer' : '<dt>Letterer: *</dt> *<dd><a.*>(?P<letterer>.*?)</a> *</dd>', 'colorist' : '<dt>Colorist: *</dt> *<dd><a.*>(?P<colorist>.*?)</a> *</dd>', 'genre' : '<strong>Genre: *</strong> *<a.*?>(?P<genre>.*?)</a> *</div>', 'format' : '<dt>Format: *</dt> *(?P<format>.*?)<dt>', } # Compile patterns objects self.__regExpsPO = {} for k, pattern in self.__regExps.iteritems(): self.__regExpsPO[k] = re.compile(pattern, re.DOTALL) self.__domTree = BasicTellicoDOM() def run(self, title): """ Runs the parser: fetch movie related links, then fills and prints the DOM tree to stdout (in tellico format) so that tellico can use it. """ self.__getMovie(title) # Print results to stdout self.__domTree.printXMLTree() def __getHTMLContent(self, url): """ Fetch HTML data from url """ u = urlopen(url) self.__data = u.read() u.close() def __fetchMovieLinks(self): """ Retrieve all links related to the search. self.__data contains HTML content fetched by self.__getHTMLContent() that need to be parsed. """ matchList = re.findall("""<a *href="%s(?P<page>.*?)" class="product_link">.*?</a>""" % self.__basePath.replace('?', '\?'), self.__data) if not matchList: return None return list(set(matchList)) def __fetchCover(self, path, delete = True): """ Fetch cover to /tmp. Returns base64 encoding of data. The image is deleted if delete is True """ md5 = genMD5() imObj = urlopen(path.strip()) img = imObj.read() imObj.close() imgPath = "/tmp/%s.jpeg" % md5 try: f = open(imgPath, 'w') f.write(img) f.close() except: print(sys.stderr, "Error: could not write image into /tmp") b64data = (md5 + '.jpeg', base64.encodestring(img)) # Delete temporary image if delete: try: os.remove(imgPath) except: print(sys.stderr, "Error: could not delete temporary image /tmp/%s.jpeg" % md5) return b64data def __fetchMovieInfo(self, url): """ Looks for movie information """ self.__getHTMLContent(url) # First grab picture data imgMatch = re.search("""<img src="(?P<imgpath>.*%s.*?)".*>""" % self.__coverPath, self.__data) if imgMatch: imgPath = "http:" + imgMatch.group('imgpath') # Fetch cover and gets its base64 encoded data b64img = self.__fetchCover(imgPath) else: b64img = None # Now isolate data between <div class="bodytext">...</div> elements # re.DOTALL makes the "." 
special character match any character at all, including a newline m = re.search("""<div id="inner_content">(?P<part>.*)<div id="right_bar">""", self.__data, re.DOTALL) try: self.__data = m.group('part') except AttributeError: self.__data = "" matches = {} data = {} data['comments'] = [] data['artist'] = {} # Default values data['publisher'] = 'Dark Horse Comics' data['language'] = 'English' data['country'] = 'USA' if b64img is not None: data['image'] = b64img data['pub_year'] = NULLSTRING for name, po in self.__regExpsPO.iteritems(): data[name] = NULLSTRING if name == 'desc': matches[name] = re.findall(self.__regExps[name], self.__data, re.S | re.I) else: matches[name] = po.search(self.__data) if matches[name]: if name == 'title': title = matches[name].group('title').strip() data[name] = title # Look for issue information m = re.search("#(?P<issue>[0-9]+)", title) if m: data['issue'] = m.group('issue') else: data['issue'] = '' elif name == 'pub_date': pub_date = matches[name].group('pub_date').strip() data['pub_year'] = pub_date[-4:] # Add this to comments field data['comments'].insert(0, "Pub. Date: %s" % pub_date) elif name == 'isbn': isbn = matches[name].group('isbn').strip() data[name] = isbn elif name == 'desc': # Find biggest size max = 0 for i in range(len(matches[name])): if len(matches[name][i]) > len(matches[name][max]): max = i data['comments'].append(matches[name][max].strip()) elif name == 'writer': # We may find several writers data[name] = [] writersList = re.sub('</?a.*?>', '', matches[name].group('writer')).split(',') for d in writersList: data[name].append(d.strip()) elif name == 'cover_artist': data['artist']['Cover Artist'] = matches[name].group('cover_artist').strip() elif name == 'penciller': data['artist']['Penciller'] = matches[name].group('penciller').strip() elif name == 'inker': data['artist']['Inker'] = matches[name].group('inker').strip() elif name == 'colorist': data['artist']['Colorist'] = matches[name].group('colorist').strip() elif name == 'letterer': data['artist']['Letterer'] = matches[name].group('letterer').strip() elif name == 'genre': # We may find several genres data[name] = [] genresList = re.sub('</?a.*?>', '', matches[name].group('genre')).split(',') for d in genresList: data[name].append(d.strip()) elif name == 'format': format = matches[name].group('format').strip() data['comments'].insert(1, format) m = re.search("(?P<pages>[0-9]+)", format) if m: data['pages'] = m.group('pages') else: data['pages'] = '' return data def __getMovie(self, title): if not len(title): return self.__title = title self.__getHTMLContent("%s%s" % (self.__baseURL, self.__searchURL % urllib.quote(self.__title))) # Get all links links = self.__fetchMovieLinks() # Now retrieve info if links: for entry in links: data = self.__fetchMovieInfo( url = self.__movieURL + entry ) # Add DC link (custom field) data['darkhorse'] = "%s%s" % (self.__movieURL, entry) node = self.__domTree.addEntry(data) # Print entries on-the-fly #self.__domTree.printEntry(node) else: return None def halt(): print("HALT.") sys.exit(0) def showUsage(): print("Usage: %s comic" % sys.argv[0]) sys.exit(1) def main(): if len(sys.argv) < 2: showUsage() parser = DarkHorseParser() parser.run(sys.argv[1]) if __name__ == '__main__': main()
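# --- Illustrative invocation (a minimal, hedged sketch; the title is a
# --- placeholder) ---
#
#     $ ./dark_horse_comics.py "Hellboy"
#
# prints a Tellico XML collection (cover images base64-encoded) on stdout,
# as described in the module docstring above.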
gpl-2.0
-8,448,121,832,135,674,000
32.254464
137
0.6127
false
3.217711
false
false
false
jonaprieto/agda-pkg
apkg/commands/list.py
1
3039
''' apkg ~~~~ A package manager for Agda. ''' # ---------------------------------------------------------------------------- import click import logging import click_log as clog from operator import attrgetter, itemgetter from pony.orm import db_session, select from natsort import natsorted from ..service.database import db from ..service.database import ( Library , LibraryVersion ) from ..service.logging import logger, clog # ---------------------------------------------------------------------------- # -- Command def. @click.group() def list(): pass listFields = ["name", "version", "url"] @list.command() @clog.simple_verbosity_option(logger) @click.option('--full' , type=bool , is_flag=True , help='Show name, version and description per package.' ) @click.option('--field' , type=str , default="" , help='Show a specific field e.g.: name, version, url') @db_session def list(full, field): """List all installed packages.""" short = not full libraries = select(l for l in Library if l)[:] libraries = natsorted(libraries, key=lambda x : attrgetter('name')(x).lower()) if len(libraries) == 0: logger.info("[!] No libraries available to list.") logger.info(" Consider run the following command:") logger.info(" $ apkg init") return orderFields = [ #, "library" #, "sha" "description" # , "license" # , "include" # , "depend" # , "testedWith" , "keywords" # , "installed" # , "cached" # , "fromIndex" # , "fromUrl" # , "fromGit" , "origin" # , "default" ] i = 0 if short and field == "": logger.info("{:<20.20} {:<15.20} {:.72}" .format("Library name", "Latest version", "URL")) logger.info("-"*105) for library in libraries: v = library.getLatestVersion() if v is not None: if not short: logger.info(v.library.name) logger.info("="*len(v.library.name)) info = v.info for k in orderFields: val = info.get(k, None) if val is not None or val != "" or len(val) > 0: click.echo("{0}: {1}".format(k,val)) vs = ','.join(str(ver) for ver in v.library.versions) if len(vs) > 0: print("Versions:", vs) else: if field in listFields: if field == "name": print(v.library.name) elif field == "version": print(v.name) else: print(v.library.url) else: print("{:<20.20} {:<15.20} {:.72}" .format(v.library.name,v.name,v.library.url)) i += 1 if not short and i < len(libraries): logger.info("")
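# --- Illustrative invocations (a hedged sketch; assumes the apkg entry point
# --- exposes this group as `apkg list`) ---
#
#     $ apkg list                 # table of name / latest version / URL
#     $ apkg list --full          # adds description, keywords, origin, versions
#     $ apkg list --field name    # print a single field (name, version or url)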
mit
-6,497,745,852,845,697,000
24.546218
80
0.471866
false
4.025166
false
false
false
raccoongang/xblock-video
video_xblock/backends/wistia.py
1
11501
# -*- coding: utf-8 -*- """ Wistia Video player plugin. """ import HTMLParser import json import httplib import logging import re import requests import babelfish from video_xblock import BaseVideoPlayer from video_xblock.constants import TranscriptSource from video_xblock.utils import ugettext as _ log = logging.getLogger(__name__) class WistiaPlayer(BaseVideoPlayer): """ WistiaPlayer is used for videos hosted on the Wistia Video Cloud. """ # From official Wistia documentation. May change in the future # https://wistia.com/doc/construct-an-embed-code#the_regex url_re = re.compile( r'https?:\/\/(.+)?(wistia.com|wi.st)\/(medias|embed)\/(?P<media_id>.*)' ) # Token field is stored in metadata only if authentication was successful metadata_fields = ['token', ] # Current api (v1) for requesting transcripts. # For example: https://api.wistia.com/v1/medias/jzmku8z83i/captions.json # Docs on captions: https://wistia.com/doc/data-api#captions # Docs on auth: https://wistia.com/doc/data-api#authentication, https://wistia.com/doc/oauth2 captions_api = { # To check on authentication status; reference: https://wistia.com/doc/data-api#authentication 'auth_sample_url': 'api.wistia.com/v1/medias.json?api_password={token}', # To fetch a specific transcript; reference: https://wistia.com/doc/data-api#captions_show 'download_url': 'http://api.wistia.com/v1/medias/{media_id}/captions/' '{lang_code}.json?api_password={token}', # To get list of captions; reference: https://wistia.com/doc/data-api#captions_index 'url': 'api.wistia.com/v1/medias/{media_id}/captions.json?api_password={token}', 'response': { 'language_code': 'language', 'language_label': 'english_name', 'subs': 'text' } } # Stores default transcripts fetched from the captions API default_transcripts = [] fields_help = { 'token': 'You can get a master token following the guide of ' '<a href="https://wistia.com/doc/data-api" target="_blank">Wistia</a>. ' 'Please ensure appropriate operations scope has been set on the video platform.' } @property def advanced_fields(self): """ Tuple of VideoXBlock fields to display in Basic tab of edit modal window. Brightcove videos require Brightcove Account id. """ return super(WistiaPlayer, self).advanced_fields @property def trans_fields(self): """ List of VideoXBlock fields to display on `Manual & default transcripts` panel. """ fields_list = super(WistiaPlayer, self).trans_fields # Add `token` after `default_transcripts` fields_list.append('token') return fields_list def media_id(self, href): """ Extract Platform's media id from the video url. E.g. https://example.wistia.com/medias/12345abcde -> 12345abcde """ return self.url_re.search(href).group('media_id') def get_frag(self, **context): """ Compose an XBlock fragment with video player to be rendered in student view. Extend general player fragment with Wistia specific context and JavaScript. """ context['data_setup'] = json.dumps(WistiaPlayer.player_data_setup(context)) frag = super(WistiaPlayer, self).get_frag(**context) frag.add_content( self.render_resource('static/html/wistiavideo.html', **context) ) js_files = [ 'static/vendor/js/vjs.wistia.js', 'static/vendor/js/videojs-offset.min.js', 'static/js/videojs/player-context-menu.js' ] for js_file in js_files: frag.add_javascript(self.resource_string(js_file)) return frag @staticmethod def player_data_setup(context): """ Wistia Player data setup. 
""" result = BaseVideoPlayer.player_data_setup(context) result.update({ "techOrder": ["wistia"], "sources": [{ "type": "video/wistia", "src": context['url'] + "?controlsVisibleOnLoad=false" }], }) return result def authenticate_api(self, **kwargs): """ Call a sample Wistia API url to check on authentication success. Reference: https://wistia.com/doc/data-api#authentication Arguments: kwargs (dict): Wistia master token key-value pair. Returns: auth_data (dict): Master token, provided by a user, which is to be stored in Wistia's player metadata. error_status_message (str): Message with authentication outcomes for the sake of verbosity. """ token, media_id = kwargs.get('token'), kwargs.get('video_id') # pylint: disable=unused-variable auth_data, error_message = {}, '' auth_data['token'] = token url = self.captions_api.get('auth_sample_url').format(token=str(token)) response = requests.get('https://' + url) if response.status_code == httplib.UNAUTHORIZED: error_message = "Authentication failed. " \ "Please ensure you have provided a valid master token, using Video API Token field." return auth_data, error_message def get_default_transcripts(self, **kwargs): """ Fetch transcripts list from Wistia API. Urls of transcripts are to be fetched later on with separate API calls. References: https://wistia.com/doc/data-api#captions_index https://wistia.com/doc/data-api#captions_show Arguments: kwargs (dict): Key-value pairs with video_id, fetched from video xblock, and token, fetched from Wistia API. Returns: list: List of dicts of transcripts. Example: [ { 'lang': 'en', 'label': 'English', 'url': 'default_url_to_be_replaced', 'source': 'default' }, # ... ] """ video_id = kwargs.get('video_id') token = kwargs.get('token') url = self.captions_api['url'].format(token=token, media_id=video_id) message = _('Success.') self.default_transcripts = [] # Fetch available transcripts' languages (codes and English labels), and assign its' urls. try: # get all languages caps data: response = requests.get('https://{}'.format(url)) except IOError as exc: # Probably, current API has changed message = _('No timed transcript may be fetched from a video platform.\nError details: {}').format( exc.message ) log.exception("Transcripts INDEX request failure.") return self.default_transcripts, message # If a video does not exist, the response will be an empty HTTP 404 Not Found. # Reference: https://wistia.com/doc/data-api#captions_index if response.status_code == httplib.NOT_FOUND: message = _("Wistia video {} doesn't exist.").format(video_id) return self.default_transcripts, message # Fetch other failure cases: if not response.ok: message = _("Invalid request.") return self.default_transcripts, message try: wistia_data = response.json() except ValueError: wistia_data = '' # No transcripts case, see: wistia.com/doc/data-api#captions_index if not wistia_data: message = _("For now, video platform doesn't have any timed transcript for this video.") return self.default_transcripts, message transcripts_data = [ [el.get('language'), el.get('english_name')] for el in wistia_data ] # Populate default_transcripts for lang_code, lang_label in transcripts_data: download_url = self.captions_api['download_url'].format( media_id=video_id, lang_code=lang_code, token=token ) # Wistia's API uses ISO-639-2, so "lang_code" is a 3-character code, e.g. "eng". 
# Reference: https://wistia.com/doc/data-api#captions_show # Convert from ISO-639-2 to ISO-639-1; reference: https://pythonhosted.org/babelfish/ try: lang_code = babelfish.Language(lang_code).alpha2 except ValueError: # In case of B or T codes, e.g. 'fre'. # Reference: https://en.wikipedia.org/wiki/List_of_ISO_639-2_codes lang_code = babelfish.Language.fromalpha3b(lang_code).alpha2 # pylint: disable=no-member lang_label = self.get_transcript_language_parameters(lang_code)[1] self.default_transcripts.append({ 'lang': lang_code, 'label': lang_label, 'url': download_url, 'source': TranscriptSource.DEFAULT, }) return self.default_transcripts, message @staticmethod def format_transcript_text_line(line): """ Replace comma with dot in timings, e.g. 00:00:10,500 should be 00:00:10.500. """ new_line = u"" for token in line.split(): decoded_token = token.encode('utf8', 'ignore') formatted_token = re.sub(r'(\d{2}:\d{2}:\d{2}),(\d{3})', r'\1.\2', decoded_token) new_line += unicode(formatted_token.decode('utf8')) + u" " return new_line def format_transcript_text(self, text): """ Prepare unicode transcripts to be converted to WebVTT format. """ new_text = [ self.format_transcript_text_line(line) for line in text[0].splitlines() ] new_text = '\n'.join(new_text) html_parser = HTMLParser.HTMLParser() unescaped_text = html_parser.unescape(new_text) if u"WEBVTT" not in text: text = u"WEBVTT\n\n" + unicode(unescaped_text) else: text = unicode(unescaped_text) return text def download_default_transcript(self, url, language_code): """ Get default transcript fetched from a video platform API and format it to WebVTT-like unicode. References: https://wistia.com/doc/data-api#captions_index https://wistia.com/doc/data-api#captions_show Arguments: url (str): API url to fetch a default transcript from. language_code (str): Language ISO-639-2 code of a default transcript to be downloaded. Returns: text (unicode): Text of transcripts. """ try: response = requests.get(url) json_data = response.json() return json_data[u'text'] except IOError: log.exception("Transcript fetching failure: language [{}]".format(language_code)) return u'' except (ValueError, KeyError, TypeError, AttributeError): log.exception("Can't parse fetched transcript: language [{}]".format(language_code)) return u'' def dispatch(self, request, suffix): """ Wistia dispatch method. """ pass
gpl-3.0
-7,618,465,501,326,326,000
36.584967
120
0.589166
false
4.003133
false
false
false
tbttfox/TwistyTools
ttLib/ttRegion.py
1
6597
#!/usr/bin/python from __future__ import division from ttPoint import TINYNUM,Point,cross,matMul,angle,distance2,cart2uv from ttPlane import pointDistPlane from ttTriangle import Triangle from ttSphere import Sphere from ttBase import TTBase from Draw.DrawRegion import DrawRegion class Region(TTBase): def __init__(self,arcList,pnList): """ Regions are made in the Graph class >> Region(arcList,pnList,graph) Where: arcList is an ordered list of arcs pnList is an ordered list of arc "directions" (0 means the arc is convex, 1 means the arc is concave) baseSide is the list of circles that overlap the current region """ super(Region,self).__init__() self.enum = TTBase.enumType.Region #these will be reordered self.arcs = None self.posNeg = None self.corners = None self._center = None self.allNeg = False if sum(pnList) == len(pnList): self.allNeg = True #here we order the arcs/posNeg/corners #if there's only two arcs, everything is already in order if len(arcList) > 2: arcs = list(arcList[:]) pn = list(pnList[:]) if self.allNeg: idx = 0 corn = [arcs[idx].end, arcs[idx].start] else: #we have at least one positive arc idx = pn.index(0) #grab the first positive arc corn = [arcs[idx].start, arcs[idx].end] pnOrd = [pn.pop(idx)] #PosNeg Ordered arcsOrd = [arcs.pop(idx)] #Arcs Ordered #print "arcStart ",arcsOrd[0].start #print "arcEnd ",arcsOrd[0].end #loop through the list to find if anything begins/ends with the last item on the list #while corn[0] != corn[-1]: #go 'till last arc connects to the first for _ in range(len(pn)): # looping the variable "_" just means don't use found = 0 for i in range(len(arcs)): if arcs[i].start == corn[-1]: corn.append(arcs[i].end) arcsOrd.append(arcs.pop(i)) pnOrd.append(pn.pop(i)) found = 1 break elif arcs[i].end == corn[-1]: corn.append(arcs[i].start) arcsOrd.append(arcs.pop(i)) pnOrd.append(pn.pop(i)) found = 1 break if found == 1: continue else: print "problem finding a ccycle in region.__init__" self.posNeg = pnOrd self.corners = corn[:-1] self.arcs = arcsOrd self.parents = [] self.setParents() @property def center(self): if self._center == None: if len(self.corners) > 0: vecChain = Point(0,0,0) for p in self.corners: vecChain = vecChain + p #everything is done on the unit sphere self._center = vecChain.n return self._center def __eq__(self,other): if not isinstance(other,Region): return False if len(self.arcs) != len(other.arcs): return False for i in range(len(self.arcs)): if self.arcs == other.arcs[i:] + other.arcs[:i]: return True return False def fanTriangle(self,offset=0): corn = self.corners #already sorted if len(corn) < 3: print "Trying to make a triangle out of < 3 corners" return None corn = corn[offset:] + corn[:offset] tris = [] for i in range(2,len(corn)): tris.append(Triangle(corn[0], corn[i-1], corn[i])) return tris def similar(self,other): if not isinstance(other,Region): return False if len(self.arcs) != len(other.arcs): return False if len(self.arcs) == 2: myd = distance2(self.arcs[0].start, self.arcs[0].end) yourd = distance2(other.arcs[0].start, other.arcs[0].end) if myd != yourd: return False myx = distance2(self.arcs[1].c, self.arcs[0].c) yourx = distance2(other.arcs[1].c, other.arcs[0].c) if myx != yourx: return False myrads = sorted([self.arcs[0].rad, self.arcs[1].rad]) yourrads = sorted([other.arcs[0].rad, other.arcs[1].rad]) if -TINYNUM < myrads[0] - yourrads[0] < TINYNUM: if -TINYNUM < myrads[1] - yourrads[1] < TINYNUM: return True return False myTris = self.fanTriangle() for i in range(len(self.arcs)): yourTris = other.fanTriangle(i) if 
myTris == yourTris:
                return True

        return False

    def contains(self, pt):
        for i, a in enumerate(self.arcs):
            d = pointDistPlane(pt, a.circle.toPlane())
            if self.posNeg[i] == 0 and d < -TINYNUM:
                return False
            elif self.posNeg[i] == 1 and d > TINYNUM:
                return False
        return True

    @property
    def drawObject(self):
        if self._drawObject == None:
            self._drawObject = DrawRegion(self)
        return self._drawObject

def regionCoreRegion(A, B):
    ##TODO##
    # This function relies on a graph object
    # pull this functionality out of the new combined shell object
    # so we can use it here without duplicating code
    ##TODO##
    allCircles = [x.circle for x in A.arcs] + [x.circle for x in B.arcs]
    allPN = A.posNeg + B.posNeg
    tempGraph = Graph(allCircles)
    keep = []
    ccls = []
    mypn = []
    # for all the arcs in the overlapping regions
    for arc in tempGraph.arcs:
        mp = arc.midpoint
        if A.contains(mp) and B.contains(mp):
            try:
                idx = allCircles.index(arc.circle)
            except AttributeError:
                continue
            except:
                raise
            keep.append(arc)
            # if the circle is positive in the region
            # it'll be positive in the core
            if allPN[idx] == 0:
                mypn.append(0)
                ccls.append(arc.circle)
            else:
                mypn.append(1)

    # Region.__init__ takes only an arc list and a posNeg list (see above)
    return Region(keep, mypn)
gpl-3.0
-1,122,696,833,930,972,200
32.658163
97
0.51766
false
3.908175
false
false
false
Fat-Zer/FreeCAD_sf_master
src/Mod/Fem/femexamples/thermomech_spine.py
9
6411
# *************************************************************************** # * Copyright (c) 2019 Bernd Hahnebach <[email protected]> * # * Copyright (c) 2020 Sudhanshu Dubey <[email protected] * # * * # * This file is part of the FreeCAD CAx development system. * # * * # * This program is free software; you can redistribute it and/or modify * # * it under the terms of the GNU Lesser General Public License (LGPL) * # * as published by the Free Software Foundation; either version 2 of * # * the License, or (at your option) any later version. * # * for detail see the LICENCE text file. * # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU Library General Public License for more details. * # * * # * You should have received a copy of the GNU Library General Public * # * License along with this program; if not, write to the Free Software * # * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * # * USA * # * * # *************************************************************************** # to run the example use: """ from femexamples.thermomech_spine import setup setup() """ import FreeCAD import Fem import ObjectsFem mesh_name = "Mesh" # needs to be Mesh to work with unit tests def init_doc(doc=None): if doc is None: doc = FreeCAD.newDocument() return doc def get_information(): info = {"name": "Thermomech Spine", "meshtype": "solid", "meshelement": "Tet10", "constraints": ["fixed", "initial temperature", "temperature", "heatflux"], "solvers": ["calculix"], "material": "solid", "equation": "thermomechanical" } return info def setup(doc=None, solvertype="ccxtools"): # setup model if doc is None: doc = init_doc() # geometry object geom_obj = doc.addObject("Part::Box", "Box") geom_obj.Height = 25.4 geom_obj.Width = 25.4 geom_obj.Length = 203.2 doc.recompute() if FreeCAD.GuiUp: geom_obj.ViewObject.Document.activeView().viewAxonometric() geom_obj.ViewObject.Document.activeView().fitAll() # analysis analysis = ObjectsFem.makeAnalysis(doc, "Analysis") # solver if solvertype == "calculix": solver_object = analysis.addObject( ObjectsFem.makeSolverCalculix(doc, "SolverCalculiX") )[0] elif solvertype == "ccxtools": solver_object = analysis.addObject( ObjectsFem.makeSolverCalculixCcxTools(doc, "CalculiXccxTools") )[0] solver_object.WorkingDir = u"" # should be possible with elmer too # elif solvertype == "elmer": # analysis.addObject(ObjectsFem.makeSolverElmer(doc, "SolverElmer")) else: FreeCAD.Console.PrintWarning( "Not known or not supported solver type: {}. 
" "No solver object was created.\n".format(solvertype) ) if solvertype == "calculix" or solvertype == "ccxtools": solver_object.SplitInputWriter = False solver_object.AnalysisType = "thermomech" solver_object.GeometricalNonlinearity = "linear" solver_object.ThermoMechSteadyState = True solver_object.MatrixSolverType = "default" solver_object.IterationsThermoMechMaximum = 2000 solver_object.IterationsControlParameterTimeUse = True # material material_object = analysis.addObject( ObjectsFem.makeMaterialSolid(doc, "MechanicalMaterial") )[0] mat = material_object.Material mat["Name"] = "Steel-Generic" mat["YoungsModulus"] = "200000 MPa" mat["PoissonRatio"] = "0.30" mat["Density"] = "7900 kg/m^3" mat["ThermalConductivity"] = "43.27 W/m/K" # SvdW: Change to Ansys model values mat["ThermalExpansionCoefficient"] = "12 um/m/K" mat["SpecificHeat"] = "500 J/kg/K" # SvdW: Change to Ansys model values material_object.Material = mat # fixed_constraint fixed_constraint = analysis.addObject( ObjectsFem.makeConstraintFixed(doc, "FemConstraintFixed") )[0] fixed_constraint.References = [(geom_obj, "Face1")] # initialtemperature_constraint initialtemperature_constraint = analysis.addObject( ObjectsFem.makeConstraintInitialTemperature(doc, "FemConstraintInitialTemperature") )[0] initialtemperature_constraint.initialTemperature = 300.0 # temperature_constraint temperature_constraint = analysis.addObject( ObjectsFem.makeConstraintTemperature(doc, "FemConstraintTemperature") )[0] temperature_constraint.References = [(geom_obj, "Face1")] temperature_constraint.Temperature = 310.93 # heatflux_constraint heatflux_constraint = analysis.addObject( ObjectsFem.makeConstraintHeatflux(doc, "FemConstraintHeatflux") )[0] heatflux_constraint.References = [ (geom_obj, "Face3"), (geom_obj, "Face4"), (geom_obj, "Face5"), (geom_obj, "Face6") ] heatflux_constraint.AmbientTemp = 255.3722 heatflux_constraint.FilmCoef = 5.678 # mesh from .meshes.mesh_thermomech_spine_tetra10 import create_nodes, create_elements fem_mesh = Fem.FemMesh() control = create_nodes(fem_mesh) if not control: FreeCAD.Console.PrintError("Error on creating nodes.\n") control = create_elements(fem_mesh) if not control: FreeCAD.Console.PrintError("Error on creating elements.\n") femmesh_obj = analysis.addObject( ObjectsFem.makeMeshGmsh(doc, mesh_name) )[0] femmesh_obj.FemMesh = fem_mesh femmesh_obj.Part = geom_obj femmesh_obj.SecondOrderLinear = False doc.recompute() return doc
lgpl-2.1
5,691,031,743,129,874,000
37.160714
91
0.58696
false
3.921101
false
false
false
roscopecoltran/scraper
.staging/meta-engines/xlinkBook/update/update_mergers_and_acquisitions.py
1
10303
#!/usr/bin/env python # -*- coding: utf-8 -*- from spider import * sys.path.append("..") from utils import Utils class MergersAndAcquisitionsSpider(Spider): def __init__(self): Spider.__init__(self) self.school = 'wiki' def processMergers(self): utils = Utils() wiki_dict = {'google' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_Google',\ 'facebook' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_Facebook',\ 'microsoft' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_Microsoft',\ 'apple' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_Apple',\ 'ibm' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_IBM',\ 'yahoo' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_Yahoo!',\ 'twitter' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_Twitter'} wiki_dict = {'google' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_Google'} for key, url in wiki_dict.items(): r = requests.get(url) soup = BeautifulSoup(r.text) table = soup.find('table', class_='wikitable') #print table soup = BeautifulSoup(table.prettify()) count = 0 title = '' desc = 'description:' file_name = self.get_file_name('economics/mergers-and-acquisitions/' + key, self.school) file_lines = self.countFileLineNum(file_name) f = self.open_db(file_name + ".tmp") self.count = 0 item_id = key + '-merger-' rows = soup.find_all('tr') print len(rows) for td in soup.find_all('td'): count += 1 if key == 'google': #if count > 8 or (count == 8 and self.count == len(rows) - 2): if count == 7: print title count = 0 self.count += 1 self.write_db(f, item_id + str(self.count), title, '', utils.removeDoubleSpace(desc)) title = '' desc = '' print '----------------------------------' if count != 7: #if count == 3: # desc += td.text.strip()[td.text.strip().find(' ') :].strip() + ' ' if count == 1: desc += 'date:' + td.text.strip()[td.text.strip().find(' ') :].strip() + ' ' elif count == 2: title = utils.removeDoubleSpace(td.text.strip()) elif count == 3: desc += 'business:' + td.text.strip().replace(' and ', ', ') + ' ' elif count == 4: desc += 'country:' + td.text.strip() + ' ' elif count == 5: desc += 'price:$' + td.text.strip()[td.text.strip().find('♠') + 1 :].strip() + ' ' elif count == 6: desc += 'description:' + td.text.strip() + ' ' if key == 'facebook': if count > 10 or (count == 10 and self.count == len(rows) - 2): count = 1 print title self.count += 1 self.write_db(f, item_id + str(self.count), title, '', utils.removeDoubleSpace(desc)) title = '' desc = 'description:' print '----------------------------------' if count != 1 and count != 9 and count != 10: if count == 2: desc += td.text.strip()[td.text.strip().find(' ') :].strip() + ' ' elif count == 3: title = utils.removeDoubleSpace(td.text.strip()) elif count == 5 and td.a != None: desc += td.a.text.strip() + ' ' else: desc += td.text.strip() + ' ' if key == 'microsoft': if count > 7 or (count == 7 and self.count == len(rows) - 2): count = 1 print title self.count += 1 self.write_db(f, item_id + str(self.count), title, '', utils.removeDoubleSpace(desc)) title = '' desc = 'description:' print '----------------------------------' if count != 1 and count != 7: if count == 2: desc += td.text.strip()[td.text.strip().find(' ') :].strip() + ' ' elif count == 3: title = utils.removeDoubleSpace(td.text.strip()) else: desc += td.text.strip() + ' ' if key == 'apple': if count > 8 or (count == 8 and self.count == len(rows) - 2): print title count = 1 
self.count += 1 self.write_db(f, item_id + str(self.count), title, '', utils.removeDoubleSpace(desc)) title = '' desc = 'description:' print '----------------------------------' if count != 1 and count != 7 and count != 8: if count == 2: desc += td.text.strip()[td.text.strip().find(' ') :].strip() + ' ' elif count == 3: title = utils.removeDoubleSpace(td.text.strip()) else: desc += td.text.strip() + ' ' if key == 'ibm': if count > 6 or (count == 6 and self.count == len(rows) - 2): print title count = 1 self.count += 1 self.write_db(f, item_id + str(self.count), title, '', utils.removeDoubleSpace(desc)) title = '' desc = 'description:' print '----------------------------------' if count != 6: if count == 1: desc += td.text.strip()[td.text.strip().find(' ') :].strip() + ' ' elif count == 2: title = utils.removeDoubleSpace(td.text.strip()) else: desc += td.text.strip().replace('\n','') + ' ' if key == 'yahoo': if count > 8 or (count == 8 and self.count == len(rows) - 2): count = 1 print title self.count += 1 self.write_db(f, item_id + str(self.count), title, '', utils.removeDoubleSpace(desc)) title = '' desc = 'description:' print '----------------------------------' if count != 1 and count != 8: if count == 2: desc += td.text.strip()[td.text.strip().find(' ') :].strip() + ' ' elif count == 3: title = utils.removeDoubleSpace(td.text.strip()) else: desc += td.text.strip() + ' ' if key == 'twitter': if count > 8 or (count == 8 and self.count == len(rows) - 2): count = 1 print title self.count += 1 self.write_db(f, item_id + str(self.count), title, '', utils.removeDoubleSpace(desc)) title = '' desc = 'description:' print '----------------------------------' if count != 1 and count != 8: if count == 2: desc += td.text.strip()[td.text.strip().find(' ') :].strip() + ' ' elif count == 3: title = utils.removeDoubleSpace(td.text.strip()) else: desc += td.text.strip() + ' ' self.close_db(f) #if file_lines != self.count and self.count > 0: if True: self.do_upgrade_db(file_name) print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n" else: self.cancel_upgrade(file_name) print "no need upgrade\n" def processAll(self): file_name = self.get_file_name('economics/mergers-and-acquisitions/all', self.school) file_lines = self.countFileLineNum(file_name) f = self.open_db(file_name + ".tmp") self.count = 0 r = requests.get('https://en.wikipedia.org/wiki/Category:Lists_of_corporate_acquisitions') soup = BeautifulSoup(r.text) div = soup.find('div', class_='mw-category') soup = BeautifulSoup(div.prettify()) for a in soup.find_all('a'): print a.text.strip() self.count += 1 self.write_db(f, 'all-mergers-' + str(self.count), a.text.strip(), 'https://en.wikipedia.org' + a['href']) self.close_db(f) if file_lines != self.count and self.count > 0: self.do_upgrade_db(file_name) print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n" else: self.cancel_upgrade(file_name) print "no need upgrade\n" def doWork(self): self.processAll() self.processMergers() start = MergersAndAcquisitionsSpider() start.doWork()
mit
4,579,076,035,847,220,000
49.743842
118
0.404038
false
4.443917
false
false
false
RobinQuetin/CAIRIS-web
cairis/cairis/AssetsDialog.py
1
2963
#  Licensed to the Apache Software Foundation (ASF) under one
#  or more contributor license agreements.  See the NOTICE file
#  distributed with this work for additional information
#  regarding copyright ownership.  The ASF licenses this file
#  to you under the Apache License, Version 2.0 (the
#  "License"); you may not use this file except in compliance
#  with the License.  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing,
#  software distributed under the License is distributed on an
#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#  KIND, either express or implied.  See the License for the
#  specific language governing permissions and limitations
#  under the License.

import wx
import armid
import Asset
from AssetDialog import AssetDialog
from DialogClassParameters import DialogClassParameters
import ARM
import os
import xml.sax
from DimensionBaseDialog import DimensionBaseDialog

class AssetsDialog(DimensionBaseDialog):
  def __init__(self, parent):
    DimensionBaseDialog.__init__(self, parent, armid.ASSETS_ID, 'Assets', (930, 300), 'asset.png')
    self.rmFrame = parent
    idList = [armid.ASSETS_ASSETLIST_ID, armid.ASSETS_BUTTONADD_ID, armid.ASSETS_BUTTONDELETE_ID]
    columnList = ['Name', 'Type']
    self.buildControls(idList, columnList, self.dbProxy.getAssets, 'asset')
    listCtrl = self.FindWindowById(armid.ASSETS_ASSETLIST_ID)
    listCtrl.SetColumnWidth(0, 200)
    listCtrl.SetColumnWidth(1, 200)

  def addObjectRow(self, assetListCtrl, listRow, asset):
    assetListCtrl.InsertStringItem(listRow, asset.name())
    assetListCtrl.SetStringItem(listRow, 1, asset.type())

  def onAdd(self, evt):
    try:
      addParameters = DialogClassParameters(armid.ASSET_ID, 'Add asset', AssetDialog, armid.ASSET_BUTTONCOMMIT_ID, self.dbProxy.addAsset, True)
      self.addObject(addParameters)
      self.rmFrame.updateObjectSelection(self.selectedLabel)
    except ARM.ARMException, errorText:
      dlg = wx.MessageDialog(self, str(errorText), 'Add asset', wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
      return

  def onUpdate(self, evt):
    selectedObjt = self.objts[self.selectedLabel]
    assetId = selectedObjt.id()
    try:
      updateParameters = DialogClassParameters(armid.ASSET_ID, 'Edit asset', AssetDialog, armid.ASSET_BUTTONCOMMIT_ID, self.dbProxy.updateAsset, False)
      self.updateObject(selectedObjt, updateParameters)
    except ARM.ARMException, errorText:
      dlg = wx.MessageDialog(self, str(errorText), 'Edit asset', wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()

  def onDelete(self, evt):
    try:
      self.deleteObject('No asset', 'Delete asset', self.dbProxy.deleteAsset)
      self.rmFrame.updateObjectSelection()
    except ARM.ARMException, errorText:
      dlg = wx.MessageDialog(self, str(errorText), 'Delete asset', wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
apache-2.0
2,933,360,615,195,132,400
39.589041
146
0.746541
false
3.552758
false
false
false
lhirschfeld/JargonBot
custombot.py
1
3061
import pickle
import praw
import random

from textblob import TextBlob
from datetime import datetime
from sklearn import linear_model


class RedditBot:
    """A class that performs basic operations, working with Reddit's
    PRAW API."""

    def __init__(self, botName):
        # Setup the bot and primary variables.
        self.r = praw.Reddit(botName)
        self.responses = []
        with open('ids.pickle', 'rb') as handle:
            try:
                self.ids = pickle.load(handle)
            except EOFError:
                self.ids = []

        with open('models.pickle', 'rb') as handle:
            try:
                self.models = pickle.load(handle)
            except EOFError:
                self.models = {}

    def updateIds(self):
        # Save the new ids of comments that have been responded to.
        with open('ids.pickle', 'wb') as handle:
            pickle.dump(self.ids, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def createModel(self, sub, init_fit):
        new_model = linear_model.LinearRegression()
        new_model.fit(init_fit[0], init_fit[1])

        # TODO: Create sub class that stores this data.
        # Stored as a list (not a tuple) so the randomness rate and fit
        # data can be updated in place by updateModels().
        self.models[sub] = [new_model, 1, init_fit[0], init_fit[1]]

        with open('models.pickle', 'wb') as handle:
            pickle.dump(self.models, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def updateModels(self, modelParams):
        # Model params is a list of strings which contains the keys in
        # each result which should be used to update the model.

        # Models is a dictionary with a list at each key containing:
        # (linear regression, randomness rate, x fits, y fits)
        currentTime = datetime.now()
        oldResponses = [r for r in self.responses
                        if (currentTime - r["time"]).total_seconds() > 3600]

        self.responses = [r for r in self.responses
                          if (currentTime - r["time"]).total_seconds() < 3600]

        for r in oldResponses:
            result = 0
            url = "https://reddit.com/" + r["sID"] + "?comment=" + r["cID"]
            submission = self.r.get_submission(url=url)
            comment_queue = submission.comments[:]
            if comment_queue:
                com = comment_queue.pop(0)
                result += com.score
                comment_queue.extend(com.replies)

                while comment_queue:
                    com = comment_queue.pop(0)
                    text = TextBlob(com.body)
                    result += text.sentiment.polarity * com.score

            x = []
            for key in modelParams:
                x.append(r[key])

            # Get old fits and extend them with the new observation
            x_fits = self.models[r["sub"]][2]
            y_fits = self.models[r["sub"]][3]
            x_fits.append(x)
            y_fits.append(result)

            self.models[r["sub"]][0].fit(x_fits, y_fits)

            # Update odds of random choice
            self.models[r["sub"]][1] *= 0.96

        with open('models.pickle', 'wb') as handle:
            pickle.dump(self.models, handle, protocol=pickle.HIGHEST_PROTOCOL)
mit
3,835,738,549,379,795,000
35.440476
78
0.560928
false
4.065073
false
false
false
toomore/grs
tools/make_twse_list.py
1
3616
# -*- coding: utf-8 -*- import csv import re import urllib2 from datetime import datetime NOW = datetime(2013, 12, 17) SAVEPATH = '../grs/twse_list.csv' INDUSTRYCODE = '../grs/industry_code.csv' TWSEURL = 'http://www.twse.com.tw/ch/trading/exchange/MI_INDEX/MI_INDEX2_print.php?genpage=genpage/Report%(year)s%(mon)02d/A112%(year)s%(mon)02d%(day)02d%%s.php&type=csv' % {'year': NOW.year, 'mon': NOW.month, 'day': NOW.day} TWSECLS = {'0049': u'封閉式基金', '0099P': u'ETF', '019919T': u'受益證券', '0999': u'認購權證', #(不含牛證) '0999P': u'認售權證', #(不含熊證) '0999C': u'牛證', '0999B': u'熊證', '0999GA': u'附認股權特別股', '0999GD': u'附認股權公司債', '0999G9': u'認股權憑證', '01': u'水泥工業', '02': u'食品工業', '03': u'塑膠工業', '04': u'紡織纖維', '05': u'電機機械', '06': u'電器電纜', '07': u'化學生技醫療', '21': u'化學工業', '22': u'生技醫療業', '08': u'玻璃陶瓷', '09': u'造紙工業', '10': u'鋼鐵工業', '11': u'橡膠工業', '12': u'汽車工業', '13': u'電子工業', '24': u'半導體業', '25': u'電腦及週邊設備業', '26': u'光電業', '27': u'通信網路業', '28': u'電子零組件業', '29': u'電子通路業', '30': u'資訊服務業', '31': u'其他電子業', '14': u'建材營造', '15': u'航運業', '16': u'觀光事業', '17': u'金融保險', '18': u'貿易百貨', '9299': u'存託憑證', '23': u'油電燃氣業', '19': u'綜合', '20': u'其他', 'CB': u'可轉換公司債',} #'ALL_1': u'全部'} def fetch_twse_list(): with open(SAVEPATH, 'w') as files: csv_file = csv.writer(files) re_pattern = re.compile(r'(=")?[\d\w]{4,6}(=)?') re_sub = re.compile(r'[^\w\d]') for no in TWSECLS: for i in csv.reader(urllib2.urlopen(TWSEURL % no).readlines()): if len(i) >= 3 and re_pattern.match(i[0]): pass else: i.pop(0) if len(i) >= 2 and re_pattern.match(i[0]): csv_file.writerow([re_sub.sub('', i[0]), i[1].decode('cp950').encode('utf-8'), no, TWSECLS[no].encode('utf-8')]) with open(SAVEPATH, 'r') as files: csv_file = csv.reader(files) all_items = {} for i in csv_file: all_items.update({i[0]: i}) with open(SAVEPATH, 'w') as files: csv_file = csv.writer(files) #csv_file.writerow(['文件更新', datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'x', 'x']) csv_file.writerow(['UPDATE', datetime.now().strftime('%Y/%m/%d'), 'x', 'x']) csv_file.writerow(['證期會代碼', '公司簡稱', '分類代碼', '分類名稱']) for i in sorted(all_items): csv_file.writerow(all_items[i]) def output_industry_code(): with open(INDUSTRYCODE, 'w') as files: csv_file = csv.writer(files) for i in sorted(TWSECLS): csv_file.writerow([i, TWSECLS[i].encode('utf-8')]) if __name__ == '__main__': fetch_twse_list() output_industry_code()
mit
-7,749,647,010,060,471,000
31.55102
225
0.445141
false
2.175989
false
false
false
snakeztc/NeuralDialog-CVAE
config_utils.py
1
1880
# Copyright (C) 2017 Tiancheng Zhao, Carnegie Mellon University class KgCVAEConfig(object): description= None use_hcf = True # use dialog act in training (if turn off kgCVAE -> CVAE) update_limit = 3000 # the number of mini-batch before evaluating the model # how to encode utterance. # bow: add word embedding together # rnn: RNN utterance encoder # bi_rnn: bi_directional RNN utterance encoder sent_type = "bi_rnn" # latent variable (gaussian variable) latent_size = 200 # the dimension of latent variable full_kl_step = 10000 # how many batch before KL cost weight reaches 1.0 dec_keep_prob = 1.0 # do we use word drop decoder [Bowman el al 2015] # Network general cell_type = "gru" # gru or lstm embed_size = 200 # word embedding size topic_embed_size = 30 # topic embedding size da_embed_size = 30 # dialog act embedding size cxt_cell_size = 600 # context encoder hidden size sent_cell_size = 300 # utterance encoder hidden size dec_cell_size = 400 # response decoder hidden size backward_size = 10 # how many utterance kept in the context window step_size = 1 # internal usage max_utt_len = 40 # max number of words in an utterance num_layer = 1 # number of context RNN layers # Optimization parameters op = "adam" grad_clip = 5.0 # gradient abs max cut init_w = 0.08 # uniform random from [-init_w, init_w] batch_size = 30 # mini-batch size init_lr = 0.001 # initial learning rate lr_hold = 1 # only used by SGD lr_decay = 0.6 # only used by SGD keep_prob = 1.0 # drop out rate improve_threshold = 0.996 # for early stopping patient_increase = 2.0 # for early stopping early_stop = True max_epoch = 60 # max number of epoch of training grad_noise = 0.0 # inject gradient noise?
apache-2.0
5,411,400,849,462,010,000
34.471698
79
0.66117
false
3.462247
false
false
false
LionelDupuy/ARCHI_PHEN
ImageJ/DatabaseInput_deprecated.py
1
5558
import time from datetime import date import numpy from PIL import Image import zbar import os,sys import wx # GUI # Handle time lapse! scanner = zbar.ImageScanner() # configure the reader scanner.parse_config('enable') #scanner.set_config(0, zbar.Config.ENABLE, 0) #scanner.set_config(zbar.Symbol.QRCODE, zbar.Config.ENABLE, 1) label = "" # TODO # Read label better (crop enhance contrast etc...) # copy files # record previous file def readQRCODE(ImageFile): label = "" pilImage = Image.open(ImageFile) width, height = pilImage.size pilImage = pilImage.crop((int(0.18*width), int(0.2*height),int(0.97*width), int(0.95*height))) pilImage = pilImage.convert('L') width, height = pilImage.size raw = pilImage.tostring() # wrap image data image = zbar.Image(width, height, 'Y800', raw) # scan the image for barcodes scanner.scan(image) # extract results for symbol in image: label = symbol.data # clean up del(image) return label class MainWindow(wx.Frame): def __init__(self, parent, title): wx.Frame.__init__(self, parent, title=title, size=(400,300)) self.control = wx.TextCtrl(self, style=wx.TE_MULTILINE | wx.TE_READONLY) self.CreateStatusBar() # A Statusbar in the bottom of the window # FOlders self.dest_folder = os.path.dirname(sys.argv[0]) self.root_folder = os.path.dirname(sys.argv[0]) # Setting up the menu. filemenu= wx.Menu() # wx.ID_ABOUT and wx.ID_EXIT are standard IDs provided by wxWidgets. filemenu.Append(1, "&Base Folders"," Set folders") filemenu.Append(2, "&Run"," scan for files") filemenu.Append(wx.ID_ABOUT, "&About"," Information about this program") filemenu.AppendSeparator() filemenu.Append(wx.ID_EXIT,"E&xit"," Terminate the program") # xxxxxxx self.Bind(wx.EVT_MENU, self.get_folder, id=1) self.Bind(wx.EVT_MENU, self.scan_data, id=2) # Creating the menubar. menuBar = wx.MenuBar() menuBar.Append(filemenu,"&File") # Adding the "filemenu" to the MenuBar self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content. self.Show(True) def get_folder(self, id): dlg = wx.DirDialog(self, "Choose Root Folder:") if dlg.ShowModal() == wx.ID_OK: self.root_folder = dlg.GetPath() dlg.Destroy() def scan_data(self, id): ################################################################# # Find all suitable files in the current folder ################################################################# dir = self.root_folder#os.path.dirname(sys.argv[0]) sys.path.append(dir) for f in os.listdir(dir): file, ext = os.path.splitext(f) # Handles no-extension files, etc. if ext == '.JPG': base_row = file.split("-") base = base_row[0] if len(base_row) == 1: if os.path.isfile(dir+ "\\"+ file + "-QR-.jpg"): genotype = readQRCODE(dir+ "\\"+ file + "-QR-.jpg") # image properties file_tmp1 = file.split('_') file_id = file_tmp1[1] #os.path.getmtime(dir+ "\\"+ file +ext) # Image identifiers identifyer = [None,None,None] if len(genotype) > 5: text = "Root directory: " + dir + "\n" text += "File: " + file + "\n" text += "Genotype: " + genotype self.control.SetValue(text) wx.Yield() identifyer = genotype.split('_') else: pilImage = Image.open(dir+ "\\"+ file + "-QR-.jpg") width, height = pilImage.size pilImage = pilImage.crop((int(0.18*width), int(0.3*height),int(0.97*width), int(0.92*height))) width, height = pilImage.size sc = 0.6 pilImage = pilImage.resize((int(width*sc),int(height*sc)), Image.ANTIALIAS) img = wx.EmptyImage( *pilImage.size ) pilImageCopy = pilImage.copy() pilImageCopyRGB = pilImageCopy.convert( 'RGB' ) # Discard any alpha from the PIL image. 
pilImageRgbData =pilImageCopyRGB.tostring() img.SetData( pilImageRgbData ) identifyer_length = 0 while identifyer_length>-1:# !=3: dlg = wx.TextEntryDialog(self, 'Type "Species Population Id" with space as separation', 'Could not read bar code', '') dlg.SetValue("") self.pnl = MyFrame(dlg, -1, "Label not read", size=(int(width*sc),int(height*sc)), pos = (800,100), style = wx.DEFAULT_FRAME_STYLE, pic = img) self.pnl.Show(True) if dlg.ShowModal() == wx.ID_OK: txtvalue = dlg.GetValue() #genotype.split('_') identifyer = txtvalue.split(' ') identifyer_length = len(identifyer) dlg.Destroy() else: text = "!!! Could not recover barcode for !!! :\n\n" text += "Root directory: " + dir + "\n" text += "File: " + file + "\n" self.control.SetValue(text) wx.Yield() class MyFrame(wx.Frame): def __init__( self, parent, ID, title, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE, pic = None ): wx.Frame.__init__(self, parent, ID, title, pos, size, style) panel = wx.Panel(self, -1) wx.StaticBitmap(panel, -1, pic.ConvertToBitmap(), (0, 0)) def OnCloseMe(self, event): self.Close(True) def OnCloseWindow(self, event): self.Destroy() app = wx.App(False) frame = MainWindow(None, "Sample editor") app.MainLoop()
mit
4,614,048,488,324,428,000
29.412429
150
0.585103
false
3.092933
true
false
false
tato69/myfirstrepo
day3/makeasentece.py
1
1633
import random

s_nouns = ["A dude", "My mom", "The king", "Some guy", "A cat with rabies", "A sloth", "Your homie", "This cool guy my gardener met yesterday", "Superman"]
p_nouns = ["These dudes", "Both of my moms", "All the kings of the world", "Some guys", "All of a cattery's cats", "The multitude of sloths living under your bed", "Your homies", "Like, these, like, all these people", "Supermen"]
s_verbs = ["eats", "kicks", "gives", "treats", "meets with", "creates", "hacks", "configures", "spies on", "retards", "meows on", "flees from", "tries to automate", "explodes"]
p_verbs = ["eat", "kick", "give", "treat", "meet with", "create", "hack", "configure", "spy on", "retard", "meow on", "flee from", "try to automate", "explode"]
infinitives = ["to make a pie.", "for no apparent reason.", "because the sky is green.", "for a disease.", "to be able to make toast explode.", "to know more about archeology."]

def sing_sen_maker():
    '''Makes a random sentence from the different parts of speech. Uses a SINGULAR subject'''
    a = raw_input("Would you like to add a new word?\n")
    if a.lower() == "yes":
        new_word = raw_input("Please enter a singular noun.\n")
        # s_nouns.append(new_word)
        while new_word == '':
            new_word = raw_input("The string cannot be empty! Please enter a singular noun.\n")
        print new_word, random.choice(s_verbs), random.choice(s_nouns).lower(), random.choice(infinitives)
    elif a.lower() != "no":
        print "The only answers accepted are 'yes' or 'no'."
        sing_sen_maker()
    else:
        print random.choice(s_nouns), random.choice(s_verbs), random.choice(s_nouns).lower(), random.choice(infinitives)

sing_sen_maker()
apache-2.0
-213,020,807,480,288,740
67.041667
229
0.670545
false
2.781942
false
false
false
catapult-project/catapult
common/py_utils/py_utils/refactor/annotated_symbol/class_definition.py
3
1291
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from __future__ import absolute_import import symbol from py_utils.refactor.annotated_symbol import base_symbol __all__ = [ 'Class', ] class Class(base_symbol.AnnotatedSymbol): @classmethod def Annotate(cls, symbol_type, children): if symbol_type != symbol.stmt: return None compound_statement = children[0] if compound_statement.type != symbol.compound_stmt: return None statement = compound_statement.children[0] if statement.type == symbol.classdef: return cls(statement.type, statement.children) elif (statement.type == symbol.decorated and statement.children[-1].type == symbol.classdef): return cls(statement.type, statement.children) else: return None @property def suite(self): # TODO: Complete. raise NotImplementedError() def FindChild(self, snippet_type, **kwargs): return self.suite.FindChild(snippet_type, **kwargs) def FindChildren(self, snippet_type): return self.suite.FindChildren(snippet_type) def Cut(self, child): self.suite.Cut(child) def Paste(self, child): self.suite.Paste(child)
bsd-3-clause
-7,862,515,505,693,531,000
24.82
72
0.701007
false
3.819527
false
false
false
mozilla/kuma
kuma/attachments/tests/test_templates.py
1
1910
import pytest from pyquery import PyQuery as pq from kuma.core.urlresolvers import reverse from kuma.core.utils import to_html from kuma.wiki.models import Revision from . import make_test_file from ..models import Attachment @pytest.mark.security def test_xss_file_attachment_title(admin_client, constance_config, root_doc, wiki_user, editor_client, settings): constance_config.WIKI_ATTACHMENT_ALLOWED_TYPES = 'text/plain' # use view to create new attachment file_for_upload = make_test_file() files_url = reverse('attachments.edit_attachment', kwargs={'document_path': root_doc.slug}) title = '"><img src=x onerror=prompt(navigator.userAgent);>' post_data = { 'title': title, 'description': 'xss', 'comment': 'xss', 'file': file_for_upload, } response = admin_client.post(files_url, data=post_data, HTTP_HOST=settings.WIKI_HOST) assert response.status_code == 302 # now stick it in/on a document attachment = Attachment.objects.get(title=title) content = '<img src="%s" />' % attachment.get_file_url() root_doc.current_revision = Revision.objects.create( document=root_doc, creator=wiki_user, content=content) # view it and verify markup is escaped response = editor_client.get(root_doc.get_edit_url(), HTTP_HOST=settings.WIKI_HOST) assert response.status_code == 200 doc = pq(response.content) text = doc('.page-attachments-table .attachment-name-cell').text() assert text == ('%s\nxss' % title) html = to_html(doc('.page-attachments-table .attachment-name-cell')) assert '&gt;&lt;img src=x onerror=prompt(navigator.userAgent);&gt;' in html # security bug 1272791 for script in doc('script'): assert title not in script.text_content()
mpl-2.0
6,381,598,535,668,310,000
37.979592
79
0.646073
false
3.694391
false
false
false
aomelchenko/python_koans
python2/koans/about_classes.py
1
4830
#!/usr/bin/env python # -*- coding: utf-8 -*- from runner.koan import * class AboutClasses(Koan): class Dog(object): "Dogs need regular walkies. Never, ever let them drive." def test_instances_of_classes_can_be_created_adding_parentheses(self): fido = self.Dog() self.assertEqual('Dog', type(fido).__name__) def test_classes_have_docstrings(self): self.assertMatch("Dogs need regular walkies. Never, ever let them drive.", self.Dog.__doc__) # ------------------------------------------------------------------ class Dog2(object): def __init__(self): self._name = 'Paul' def set_name(self, a_name): self._name = a_name def test_init_method_is_the_constructor(self): dog = self.Dog2() self.assertEqual('Paul', dog._name) def test_private_attributes_are_not_really_private(self): dog = self.Dog2() dog.set_name("Fido") self.assertEqual("Fido", dog._name) # The _ prefix in _name implies private ownership, but nothing is truly # private in Python. def test_you_can_also_access_the_value_out_using_getattr_and_dict(self): fido = self.Dog2() fido.set_name("Fido") self.assertEqual("Fido", getattr(fido, "_name")) # getattr(), setattr() and delattr() are a way of accessing attributes # by method rather than through assignment operators self.assertEqual("Fido", fido.__dict__["_name"]) # Yes, this works here, but don't rely on the __dict__ object! Some # class implementations use optimization which result in __dict__ not # showing everything. # ------------------------------------------------------------------ class Dog3(object): def __init__(self): self._name = None def set_name(self, a_name): self._name = a_name def get_name(self): return self._name name = property(get_name, set_name) def test_that_name_can_be_read_as_a_property(self): fido = self.Dog3() fido.set_name("Fido") self.assertEqual("Fido", fido.get_name()) # access as method self.assertEqual("Fido", fido.name) # access as property # ------------------------------------------------------------------ class Dog4(object): def __init__(self): self._name = None @property def name(self): return self._name @name.setter def name(self, a_name): self._name = a_name def test_creating_properties_with_decorators_is_slightly_easier(self): fido = self.Dog4() fido.name = "Fido" self.assertEqual("Fido", fido.name) # ------------------------------------------------------------------ class Dog5(object): def __init__(self, initial_name): self._name = initial_name @property def name(self): return self._name def test_init_provides_initial_values_for_instance_variables(self): fido = self.Dog5("Fido") self.assertEqual("Fido", fido.name) def test_args_must_match_init(self): self.assertRaises(TypeError, self.Dog5) # Evaluates self.Dog5() # THINK ABOUT IT: # Why is this so? def test_different_objects_have_difference_instance_variables(self): fido = self.Dog5("Fido") rover = self.Dog5("Rover") self.assertEqual(False, rover.name == fido.name) # ------------------------------------------------------------------ class Dog6(object): def __init__(self, initial_name): self._name = initial_name def get_self(self): return self def __str__(self): return self._name def __repr__(self): return "<Dog named '" + self._name + "'>" def test_inside_a_method_self_refers_to_the_containing_object(self): fido = self.Dog6("Fido") self.assertEqual(fido, fido.get_self()) # Not a string! 
def test_str_provides_a_string_version_of_the_object(self): fido = self.Dog6("Fido") self.assertEqual('Fido', str(fido)) def test_str_is_used_explicitly_in_string_interpolation(self): fido = self.Dog6("Fido") self.assertEqual("My dog is Fido", "My dog is " + str(fido)) def test_repr_provides_a_more_complete_string_version(self): fido = self.Dog6("Fido") self.assertEqual("<Dog named 'Fido'>", repr(fido)) def test_all_objects_support_str_and_repr(self): seq = [1, 2, 3] self.assertEqual('[1, 2, 3]', str(seq)) self.assertEqual("[1, 2, 3]", repr(seq)) self.assertEqual("STRING", str("STRING")) self.assertEqual("'STRING'", repr("STRING"))
mit
4,466,670,656,665,157,600
29.764331
100
0.540373
false
3.744186
true
false
false
mouthwateringmedia/python-docx
example-makedocument.py
1
3666
#!/usr/bin/env python2.6 ''' This file makes an docx (Office 2007) file from scratch, showing off most of python-docx's features. If you need to make documents from scratch, use this file as a basis for your work. Part of Python's docx module - http://github.com/mikemaccana/python-docx See LICENSE for licensing information. ''' from docx import * if __name__ == '__main__': # Default set of relationshipships - these are the minimum components of a document relationships = relationshiplist() # Make a new document tree - this is the main part of a Word document document = newdocument() # This xpath location is where most interesting content lives docbody = document.xpath('/w:document/w:body', namespaces=nsprefixes)[0] # Append two headings and a paragraph docbody.append(heading('''Welcome to Python's docx module''',1) ) docbody.append(heading('Make and edit docx in 200 lines of pure Python',2)) docbody.append(paragraph('The module was created when I was looking for a Python support for MS Word .doc files on PyPI and Stackoverflow. Unfortunately, the only solutions I could find used:')) # Add a numbered list for point in ['''COM automation''','''.net or Java''','''Automating OpenOffice or MS Office''']: docbody.append(paragraph(point,style='ListNumber')) docbody.append(paragraph('''For those of us who prefer something simpler, I made docx.''')) docbody.append(heading('Making documents',2)) docbody.append(paragraph('''The docx module has the following features:''')) # Add some bullets for point in ['Paragraphs','Bullets','Numbered lists','Multiple levels of headings','Tables','Document Properties']: docbody.append(paragraph(point,style='ListBullet')) docbody.append(paragraph('Tables are just lists of lists, like this:')) # Append a table docbody.append(table([['A1','A2','A3'],['B1','B2','B3'],['C1','C2','C3']])) docbody.append(heading('Editing documents',2)) docbody.append(paragraph('Thanks to the awesomeness of the lxml module, we can:')) for point in ['Search and replace','Extract plain text of document','Add and delete items anywhere within the document']: docbody.append(paragraph(point,style='ListBullet')) # Add an image relationships,picpara = picture(relationships,'image1.png','This is a test description') docbody.append(picpara) # Search and replace print 'Searching for something in a paragraph ...', if search(docbody, 'the awesomeness'): print 'found it!' else: print 'nope.' print 'Searching for something in a heading ...', if search(docbody, '200 lines'): print 'found it!' else: print 'nope.' print 'Replacing ...', docbody = replace(docbody,'the awesomeness','the goshdarned awesomeness') print 'done.' # Add a pagebreak docbody.append(pagebreak(type='page', orient='portrait')) docbody.append(heading('Ideas? Questions? Want to contribute?',2)) docbody.append(paragraph('''Email <[email protected]>''')) # Create our properties, contenttypes, and other support files coreprops = coreproperties(title='Python docx demo',subject='A practical example of making docx from Python',creator='Mike MacCana',keywords=['python','Office Open XML','Word']) appprops = appproperties() contenttypes = contenttypes() websettings = websettings() wordrelationships = wordrelationships(relationships) # Save our document savedocx(document,coreprops,appprops,contenttypes,websettings,wordrelationships,'Welcome to the Python docx module.docx')
mit
3,625,672,895,550,942,700
45.417722
198
0.698036
false
3.958963
false
false
false
juju/python-libjuju
juju/provisioner.py
1
11307
import os import re import shlex import tempfile import uuid from subprocess import CalledProcessError import paramiko from .client import client arches = [ [re.compile(r"amd64|x86_64"), "amd64"], [re.compile(r"i?[3-9]86"), "i386"], [re.compile(r"(arm$)|(armv.*)"), "armhf"], [re.compile(r"aarch64"), "arm64"], [re.compile(r"ppc64|ppc64el|ppc64le"), "ppc64el"], [re.compile(r"s390x?"), "s390x"], ] def normalize_arch(rawArch): """Normalize the architecture string.""" for arch in arches: if arch[0].match(rawArch): return arch[1] DETECTION_SCRIPT = """#!/bin/bash set -e os_id=$(grep '^ID=' /etc/os-release | tr -d '"' | cut -d= -f2) if [ "$os_id" = 'centos' ]; then os_version=$(grep '^VERSION_ID=' /etc/os-release | tr -d '"' | cut -d= -f2) echo "centos$os_version" else lsb_release -cs fi uname -m grep MemTotal /proc/meminfo cat /proc/cpuinfo """ INITIALIZE_UBUNTU_SCRIPT = """set -e (id ubuntu &> /dev/null) || useradd -m ubuntu -s /bin/bash umask 0077 temp=$(mktemp) echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' > $temp install -m 0440 $temp /etc/sudoers.d/90-juju-ubuntu rm $temp su ubuntu -c 'install -D -m 0600 /dev/null ~/.ssh/authorized_keys' export authorized_keys="{}" if [ ! -z "$authorized_keys" ]; then su ubuntu -c 'echo $authorized_keys >> ~/.ssh/authorized_keys' fi """ class SSHProvisioner: """Provision a manually created machine via SSH.""" user = "" host = "" private_key_path = "" def __init__(self, user, host, private_key_path): self.host = host self.user = user self.private_key_path = private_key_path def _get_ssh_client(self, host, user, key): """Return a connected Paramiko ssh object. :param str host: The host to connect to. :param str user: The user to connect as. :param str key: The private key to authenticate with. :return: object: A paramiko.SSHClient :raises: :class:`paramiko.ssh_exception.SSHException` if the connection failed """ ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) pkey = None # Read the private key into a paramiko.RSAKey if os.path.exists(key): with open(key, 'r') as f: pkey = paramiko.RSAKey.from_private_key(f) ####################################################################### # There is a bug in some versions of OpenSSH 4.3 (CentOS/RHEL5) where # # the server may not send the SSH_MSG_USERAUTH_BANNER message except # # when responding to an auth_none request. For example, paramiko will # # attempt to use password authentication when a password is set, but # # the server could deny that, instead requesting keyboard-interactive.# # The hack to workaround this is to attempt a reconnect, which will # # receive the right banner, and authentication can proceed. See the # # following for more info: # # https://github.com/paramiko/paramiko/issues/432 # # https://github.com/paramiko/paramiko/pull/438 # ####################################################################### try: ssh.connect(host, port=22, username=user, pkey=pkey) except paramiko.ssh_exception.SSHException as e: if 'Error reading SSH protocol banner' == str(e): # Once more, with feeling ssh.connect(host, port=22, username=user, pkey=pkey) else: # Reraise the original exception raise e return ssh def _run_command(self, ssh, cmd, pty=True): """Run a command remotely via SSH. 
:param object ssh: The SSHClient :param str cmd: The command to execute :param list cmd: The `shlex.split` command to execute :param bool pty: Whether to allocate a pty :return: tuple: The stdout and stderr of the command execution :raises: :class:`CalledProcessError` if the command fails """ if isinstance(cmd, str): cmd = shlex.split(cmd) if type(cmd) is not list: cmd = [cmd] cmds = ' '.join(cmd) stdin, stdout, stderr = ssh.exec_command(cmds, get_pty=pty) retcode = stdout.channel.recv_exit_status() if retcode > 0: output = stderr.read().strip() raise CalledProcessError(returncode=retcode, cmd=cmd, output=output) return ( stdout.read().decode('utf-8').strip(), stderr.read().decode('utf-8').strip() ) def _init_ubuntu_user(self): """Initialize the ubuntu user. :return: bool: If the initialization was successful :raises: :class:`paramiko.ssh_exception.AuthenticationException` if the authentication fails """ ssh = None try: # Run w/o allocating a pty, so we fail if sudo prompts for a passwd ssh = self._get_ssh_client( self.host, self.user, self.private_key_path, ) stdout, stderr = self._run_command(ssh, "sudo -n true", pty=False) except paramiko.ssh_exception.AuthenticationException as e: raise e finally: if ssh: ssh.close() # Infer the public key public_key = None public_key_path = "{}.pub".format(self.private_key_path) if not os.path.exists(public_key_path): raise FileNotFoundError( "Public key '{}' doesn't exist.".format(public_key_path) ) with open(public_key_path, "r") as f: public_key = f.readline() script = INITIALIZE_UBUNTU_SCRIPT.format(public_key) try: ssh = self._get_ssh_client( self.host, self.user, self.private_key_path, ) self._run_command( ssh, ["sudo", "/bin/bash -c " + shlex.quote(script)], pty=True ) except paramiko.ssh_exception.AuthenticationException as e: raise e finally: ssh.close() return True def _detect_hardware_and_os(self, ssh): """Detect the target hardware capabilities and OS series. :param object ssh: The SSHClient :return: str: A raw string containing OS and hardware information. """ info = { 'series': '', 'arch': '', 'cpu-cores': '', 'mem': '', } stdout, stderr = self._run_command( ssh, ["sudo", "/bin/bash -c " + shlex.quote(DETECTION_SCRIPT)], pty=True, ) lines = stdout.split("\n") info['series'] = lines[0].strip() info['arch'] = normalize_arch(lines[1].strip()) memKb = re.split(r'\s+', lines[2])[1] # Convert megabytes -> kilobytes info['mem'] = round(int(memKb) / 1024) # Detect available CPUs recorded = {} for line in lines[3:]: physical_id = "" print(line) if line.find("physical id") == 0: physical_id = line.split(":")[1].strip() elif line.find("cpu cores") == 0: cores = line.split(":")[1].strip() if physical_id not in recorded.keys(): info['cpu-cores'] += cores recorded[physical_id] = True return info def provision_machine(self): """Perform the initial provisioning of the target machine. 
:return: bool: The client.AddMachineParams :raises: :class:`paramiko.ssh_exception.AuthenticationException` if the upload fails """ params = client.AddMachineParams() if self._init_ubuntu_user(): try: ssh = self._get_ssh_client( self.host, self.user, self.private_key_path ) hw = self._detect_hardware_and_os(ssh) params.series = hw['series'] params.instance_id = "manual:{}".format(self.host) params.nonce = "manual:{}:{}".format( self.host, str(uuid.uuid4()), # a nop for Juju w/manual machines ) params.hardware_characteristics = { 'arch': hw['arch'], 'mem': int(hw['mem']), 'cpu-cores': int(hw['cpu-cores']), } params.addresses = [{ 'value': self.host, 'type': 'ipv4', 'scope': 'public', }] except paramiko.ssh_exception.AuthenticationException as e: raise e finally: ssh.close() return params async def install_agent(self, connection, nonce, machine_id): """ :param object connection: Connection to Juju API :param str nonce: The nonce machine specification :param str machine_id: The id assigned to the machine :return: bool: If the initialization was successful """ # The path where the Juju agent should be installed. data_dir = "/var/lib/juju" # Disabling this prevents `apt-get update` from running initially, so # charms will fail to deploy disable_package_commands = False client_facade = client.ClientFacade.from_connection(connection) results = await client_facade.ProvisioningScript( data_dir=data_dir, disable_package_commands=disable_package_commands, machine_id=machine_id, nonce=nonce, ) self._run_configure_script(results.script) def _run_configure_script(self, script): """Run the script to install the Juju agent on the target machine. :param str script: The script returned by the ProvisioningScript API :raises: :class:`paramiko.ssh_exception.AuthenticationException` if the upload fails """ _, tmpFile = tempfile.mkstemp() with open(tmpFile, 'w') as f: f.write(script) try: # get ssh client ssh = self._get_ssh_client( self.host, "ubuntu", self.private_key_path, ) # copy the local copy of the script to the remote machine sftp = paramiko.SFTPClient.from_transport(ssh.get_transport()) sftp.put( tmpFile, tmpFile, ) # run the provisioning script stdout, stderr = self._run_command( ssh, "sudo /bin/bash {}".format(tmpFile), ) except paramiko.ssh_exception.AuthenticationException as e: raise e finally: os.remove(tmpFile) ssh.close()
apache-2.0
-8,809,007,503,847,249,000
30.761236
79
0.533386
false
4.150881
false
false
false
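A minimal sketch of the remote-execution pattern the provisioning record above relies on (paramiko exec_command plus an exit-status check); host, user and key path are placeholder assumptions, not values from the record.

import shlex
import paramiko
from subprocess import CalledProcessError

def run_remote(host, user, key_path, cmd, pty=False):
    """Run cmd on host over SSH; return (stdout, stderr) or raise CalledProcessError."""
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host, username=user, key_filename=key_path)
    try:
        if isinstance(cmd, str):
            cmd = shlex.split(cmd)
        stdin, stdout, stderr = client.exec_command(' '.join(cmd), get_pty=pty)
        retcode = stdout.channel.recv_exit_status()  # blocks until the command exits
        if retcode > 0:
            raise CalledProcessError(returncode=retcode, cmd=cmd,
                                     output=stderr.read().strip())
        return (stdout.read().decode('utf-8').strip(),
                stderr.read().decode('utf-8').strip())
    finally:
        client.close()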
sanguinariojoe/FreeCAD
src/Mod/Arch/importWebGL.py
9
44100
#*************************************************************************** #* Copyright (c) 2013 Yorik van Havre <[email protected]> * #* Copyright (c) 2020 Travis Apple <[email protected]> * #* * #* This program is free software; you can redistribute it and/or modify * #* it under the terms of the GNU Lesser General Public License (LGPL) * #* as published by the Free Software Foundation; either version 2 of * #* the License, or (at your option) any later version. * #* for detail see the LICENCE text file. * #* * #* This program is distributed in the hope that it will be useful, * #* but WITHOUT ANY WARRANTY; without even the implied warranty of * #* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * #* GNU Library General Public License for more details. * #* * #* You should have received a copy of the GNU Library General Public * #* License along with this program; if not, write to the Free Software * #* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * #* USA * #* * #*************************************************************************** # # REFS: # https://github.com/mrdoob/three.js/blob/master/examples/webgl_interactive_buffergeometry.html # https://threejs.org/examples/#webgl_buffergeometry_lines # https://forum.freecadweb.org/viewtopic.php?t=51245 # https://forum.freecadweb.org/viewtopic.php?t=29487 # https://threejs.org/examples/#webgl_raycast_sprite # # Params for export() # 'colors' is of the form: {'Body': [1,0,0], 'Body001': [1,1,0], 'Body002': [1,0,1] } # 'camera' is of the form: "PerspectiveCamera {\n viewportMapping ADJUST_CAMERA\n position 30.242626 -51.772324 85.63475\n orientation -0.4146691 0.088459305 -0.90566254 4.7065201\nnearDistance 53.126431\n farDistance 123.09125\n aspectRatio 1\n focalDistance 104.53851\n heightAngle 0.78539819\n\n}" # The 'camera' string for the active document may be generated from: import OfflineRenderingUtils; OfflineRenderingUtils.getCamera(FreeCAD.ActiveDocument.FileName); # # Development reload oneliner: # def re(): from importlib import reload;import importWebGL;reload(importWebGL);o=FreeCAD.getDocument("YourDocName");importWebGL.export([o.getObject("YourBodyName")],u"C:/path/to/your/file.htm"); """FreeCAD WebGL Exporter""" import FreeCAD,Mesh,Draft,Part,OfflineRenderingUtils,json,six import textwrap if FreeCAD.GuiUp: import FreeCADGui from DraftTools import translate else: FreeCADGui = None def translate(ctxt, txt): return txt if open.__module__ in ['__builtin__','io']: pythonopen = open ## @package importWebGL # \ingroup ARCH # \brief FreeCAD WebGL Exporter # # This module provides tools to export HTML files containing the # exported objects in WebGL format and a simple three.js-based viewer. 
disableCompression = False # Compress object data before sending to JS base = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!#$%&()*+-:;/=>?@[]^_,.{|}~`' # safe str chars for js in all cases baseFloat = ',.-0123456789' def getHTMLTemplate(): return textwrap.dedent("""\ <!DOCTYPE html> <html lang="en"> <head> <title>$pagetitle</title> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0"> <meta name="generator" content="FreeCAD $version"> <style> * { margin: 0; padding: 0; } body { background: #ffffff; /* Old browsers */ background: -moz-linear-gradient(top, #e3e9fc 0%, #ffffff 70%, #e2dab3 100%); /* FF3.6-15 */ background: -webkit-linear-gradient(top, #e3e9fc 0%,#ffffff 70%,#e2dab3 100%); /* Chrome10-25, Safari5.1-6 */ background: linear-gradient(to bottom, #e3e9fc 0%,#ffffff 70%,#e2dab3 100%); /* W3C, IE10+, FF16+, Chrome26+, Opera12+, Safari7+ */ width: 100vw; height: 100vh; } canvas { display: block; } #mainCanvas { width: 100%; height: 100%; } #arrowCanvas { position: absolute; left: 0px; bottom: 0px; width: 150px; height: 150px; z-index: 100; } select { width: 170px; } </style> </head> <body> <canvas id="mainCanvas"></canvas> <canvas id="arrowCanvas"></canvas> <script type="module"> // Direct from mrdoob: https://www.jsdelivr.com/package/npm/three import * as THREE from 'https://cdn.jsdelivr.net/npm/[email protected]/build/three.module.js'; import { OrbitControls } from 'https://cdn.jsdelivr.net/npm/[email protected]/examples/jsm/controls/OrbitControls.js'; import { GUI } from 'https://cdn.jsdelivr.net/npm/[email protected]/examples/jsm/libs/dat.gui.module.js'; import { Line2 } from 'https://cdn.jsdelivr.net/npm/[email protected]/examples/jsm/lines/Line2.js'; import { LineMaterial } from 'https://cdn.jsdelivr.net/npm/[email protected]/examples/jsm/lines/LineMaterial.js'; import { LineGeometry } from 'https://cdn.jsdelivr.net/npm/[email protected]/examples/jsm/lines/LineGeometry.js'; import { EdgeSplitModifier } from 'https://cdn.jsdelivr.net/npm/[email protected]/examples/jsm/modifiers/EdgeSplitModifier.js'; const data = $data; // Z is up for FreeCAD THREE.Object3D.DefaultUp = new THREE.Vector3(0, 0, 1); const defaultWireColor = new THREE.Color('rgb(0,0,0)'); const defaultWireLineWidth = 2; // in pixels const raycasterObj = []; // list of obj that can mouseover highlight const canvas = document.querySelector('#mainCanvas'); const scene = new THREE.Scene(); const renderer = new THREE.WebGLRenderer({ alpha: true, antialias: true, canvas: canvas }); // Clear bg so we can set it with css renderer.setClearColor(0x000000, 0); let renderRequested = false; // HemisphereLight gives different colors of light from the top // and bottom simulating reflected light from the 'ground' and // 'sky' scene.add(new THREE.HemisphereLight(0xC7E8FF, 0xFFE3B3, 0.4)); const dLight1 = new THREE.DirectionalLight(0xffffff, 0.4); dLight1.position.set(5, -2, 3); scene.add(dLight1); const dLight2 = new THREE.DirectionalLight(0xffffff, 0.4); dLight2.position.set(-5, 2, 3); scene.add(dLight2); if (data.compressed) { const base = data.base; const baseFloat = data.baseFloat; function baseDecode(input) { const baseCt = base.length; const output = []; const len = parseInt(input[0]); // num chars of each element for (let i = 1; i < input.length; i += len) { const str = input.substring(i, i + len).trim(); let val = 0; for (let s = 0; s < str.length; s++) { const ind = base.indexOf(str[s]); val += ind * Math.pow(baseCt, s); } 
output.push(val); } return output; } function floatDecode(input) { const baseCt = base.length; const baseFloatCt = baseFloat.length; let numString = ''; for (let i = 0; i < input.length; i += 4) { const b90chunk = input.substring(i, i + 4).trim(); let quotient = 0; for (let s = 0; s < b90chunk.length; s++) { const ind = base.indexOf(b90chunk[s]); quotient += ind * Math.pow(baseCt, s); } let buffer = ''; for (let s = 0; s < 7; s++) { buffer = baseFloat[quotient % baseFloatCt] + buffer; quotient = parseInt(quotient / baseFloatCt); } numString += buffer; } let trailingCommas = 0; for (let s = 1; s < 7; s++) { if (numString[numString.length - s] == baseFloat[0]) { trailingCommas++; } } numString = numString.substring(0, numString.length - trailingCommas); return numString; } // Decode from base90 and distribute the floats for (const obj of data.objects) { obj.floats = JSON.parse('[' + floatDecode(obj.floats) + ']'); obj.verts = baseDecode(obj.verts).map(x => obj.floats[x]); obj.facets = baseDecode(obj.facets); obj.wires = obj.wires.map(w => baseDecode(w).map(x => obj.floats[x])); obj.facesToFacets = obj.facesToFacets.map(x => baseDecode(x)); } } // Get bounds for global clipping const globalMaxMin = [{min: null, max: null}, {min: null, max: null}, {min: null, max: null}]; for (const obj of data.objects) { for (let v = 0; v < obj.verts.length; v++) { if (globalMaxMin[v % 3] === null || obj.verts[v] < globalMaxMin[v % 3].min) { globalMaxMin[v % 3].min = obj.verts[v]; } if (globalMaxMin[v % 3] === null || obj.verts[v] > globalMaxMin[v % 3].max) { globalMaxMin[v % 3].max = obj.verts[v]; } } } let bigrange = 0; // add a little extra for (const i of globalMaxMin) { const range = i.max - i.min; if (range > bigrange) { bigrange = range; } i.min -= range * 0.01; i.max += range * 0.01; } const camCenter = new THREE.Vector3( 0.5 * (globalMaxMin[0].max - globalMaxMin[0].min) + globalMaxMin[0].min, 0.5 * (globalMaxMin[1].max - globalMaxMin[1].min) + globalMaxMin[1].min, 0.5 * (globalMaxMin[2].max - globalMaxMin[2].min) + globalMaxMin[2].min ); const viewSize = 1.5 * bigrange; // make the view area a little bigger than the object const aspectRatio = canvas.clientWidth / canvas.clientHeight; const originalAspect = aspectRatio; function initCam(camera) { // XXX this needs to treat the perspective and orthographic // cameras differently camera.position.set( data.camera.position_x, data.camera.position_y, data.camera.position_z); camera.lookAt(camCenter); camera.updateMatrixWorld(); } let cameraType = data.camera.type; const persCamera = new THREE.PerspectiveCamera( 50, aspectRatio, 1, 100000); initCam(persCamera); const orthCamera = new THREE.OrthographicCamera( -aspectRatio * viewSize / 2, aspectRatio * viewSize / 2, viewSize / 2, -viewSize / 2, -100000, 100000); initCam(orthCamera); function assignMesh(positions, color, opacity, faces) { const baseGeometry = new THREE.BufferGeometry(); baseGeometry.setAttribute('position', new THREE.BufferAttribute( positions, 3)); // EdgeSplitModifier is used to combine verts so that smoothing normals can be generated WITHOUT removing the hard edges of the design // REF: https://threejs.org/examples/?q=edge#webgl_modifier_edgesplit - https://github.com/mrdoob/three.js/pull/20535 const edgeSplit = new EdgeSplitModifier(); const cutOffAngle = 20; const geometry = edgeSplit.modify( baseGeometry, cutOffAngle * Math.PI / 180); geometry.computeVertexNormals(); geometry.computeBoundingSphere(); const material = new THREE.MeshLambertMaterial({ color: color, side: THREE.DoubleSide, 
vertexColors: false, flatShading: false, opacity: opacity, transparent: opacity != 1.0, fog: false }); const meshobj = new THREE.Mesh(geometry, material); meshobj.name = meshobj.uuid; faces.push(meshobj.uuid); scene.add(meshobj); raycasterObj.push(meshobj); } const objects = []; for (const obj of data.objects) { // Each face gets its own material because they each can // have different colors const faces = []; if (obj.facesToFacets.length > 0) { for (let f=0; f < obj.facesToFacets.length; f++) { const facecolor = obj.faceColors.length > 0 ? obj.faceColors[f] : obj.color; const positions = new Float32Array(obj.facesToFacets[f].length * 9); for (let a=0; a < obj.facesToFacets[f].length; a++) { for (let b=0; b < 3; b++) { for (let c=0; c < 3; c++) { positions[9 * a + 3 * b + c] = obj.verts[3 * obj.facets[3 * obj.facesToFacets[f][a] + b ] + c ]; } } } assignMesh(positions, facecolor, obj.opacity, faces); } } else { // No facesToFacets means that there was a tessellate() // mismatch inside FreeCAD. Use all facets in object to // create this mesh const positions = new Float32Array(obj.facets.length * 3); for (let a=0; a < obj.facets.length; a++) { for (let b=0; b < 3; b++) { positions[3 * a + b] = obj.verts[3 * obj.facets[a] + b]; } } assignMesh(positions, obj.color, obj.opacity, faces); } // Wires // cannot have lines in WebGL that are wider than 1px due to browser limitations so Line2 workaround lib is used // REF: https://threejs.org/examples/?q=fat#webgl_lines_fat - https://jsfiddle.net/brLk6aud/1/ // This material is shared by all wires in this object const wirematerial = new LineMaterial( { color: defaultWireColor, linewidth: defaultWireLineWidth, dashed: false, dashSize: 1, gapSize: 1, dashScale: 3 } ); wirematerial.resolution.set( canvas.clientWidth * window.devicePixelRatio, canvas.clientHeight * window.devicePixelRatio); const wires = []; for (const w of obj.wires) { const wiregeometry = new LineGeometry(); wiregeometry.setPositions(w); const wire = new Line2(wiregeometry, wirematerial); wire.computeLineDistances(); wire.scale.set(1, 1, 1); wire.name = wire.uuid; scene.add(wire); wires.push(wire.name); } objects.push({ data: obj, faces: faces, wires: wires, wirematerial: wirematerial }); } // ---- GUI Init ---- const gui = new GUI({ width: 300 }); const guiparams = { wiretype: 'Normal', wirewidth: defaultWireLineWidth, wirecolor: '#' + defaultWireColor.getHexString(), clippingx: 100, clippingy: 100, clippingz: 100, cameraType: cameraType, navright: function() { navChange([1, 0, 0]); }, navtop: function() { navChange([0, 0, 1]); }, navfront: function() { navChange([0, -1, 0]); } }; // ---- Wires ---- const wiretypes = { Normal: 'Normal', Dashed: 'Dashed', None: 'None' }; const wireFolder = gui.addFolder('Wire'); wireFolder.add(guiparams, 'wiretype', wiretypes).name('Wire Display').onChange(wireChange); wireFolder.add(guiparams, 'wirewidth').min(1).max(5).step(1).name('Wire Width').onChange(wireChange); wireFolder.addColor(guiparams, 'wirecolor').name('Wire Color').onChange(wireChange); function wireChange() { for (const obj of objects) { const m = obj.wirematerial; if (m.dashed) { if (guiparams.wiretype != 'Dashed') { m.dashed = false; delete m.defines.USE_DASH; } } else { if (guiparams.wiretype == 'Dashed') { m.dashed = true; // Dashed lines require this as of r122. 
delete if not dashed m.defines.USE_DASH = ""; // https://discourse.threejs.org/t/dashed-line2-material/10825 } } if (guiparams.wiretype == 'None') { m.visible = false; } else { m.visible = true; } m.linewidth = guiparams.wirewidth; m.color = new THREE.Color(guiparams.wirecolor); m.needsUpdate = true; } requestRender(); } wireChange(); // ---- Clipping ---- const clippingFolder = gui.addFolder('Clipping'); clippingFolder.add(guiparams, 'clippingx').min(0).max(100).step(1).name('X-Axis Clipping').onChange(clippingChange); clippingFolder.add(guiparams, 'clippingy').min(0).max(100).step(1).name('Y-Axis Clipping').onChange(clippingChange); clippingFolder.add(guiparams, 'clippingz').min(0).max(100).step(1).name('Z-Axis Clipping').onChange(clippingChange); const clipPlaneX = new THREE.Plane(new THREE.Vector3( -1, 0, 0 ), 0); const clipPlaneY = new THREE.Plane(new THREE.Vector3( 0, -1, 0 ), 0); const clipPlaneZ = new THREE.Plane(new THREE.Vector3( 0, 0, -1 ), 0); function clippingChange() { if (guiparams.clippingx < 100 || guiparams.clippingy < 100 || guiparams.clippingz < 100) { if (renderer.clippingPlanes.length == 0) { renderer.clippingPlanes.push(clipPlaneX, clipPlaneY, clipPlaneZ); } } clipPlaneX.constant = (globalMaxMin[0].max - globalMaxMin[0].min) * guiparams.clippingx / 100.0 + globalMaxMin[0].min; clipPlaneY.constant = (globalMaxMin[1].max - globalMaxMin[1].min) * guiparams.clippingy / 100.0 + globalMaxMin[1].min; clipPlaneZ.constant = (globalMaxMin[2].max - globalMaxMin[2].min) * guiparams.clippingz / 100.0 + globalMaxMin[2].min; requestRender(); } // ---- Camera & Navigation ---- const camFolder = gui.addFolder('Camera'); const cameraTypes = { Perspective: 'Perspective', Orthographic: 'Orthographic' }; camFolder.add(guiparams, 'cameraType', cameraTypes).name('Camera type').onChange(cameraChange); camFolder.add(guiparams, 'navright').name('View Right'); camFolder.add(guiparams, 'navtop').name('View Top'); camFolder.add(guiparams, 'navfront').name('View Front'); function navChange(v) { const t = new THREE.Vector3(); new THREE.Box3().setFromObject(scene).getSize(t); persControls.object.position.set( v[0] * t.x * 2 + camCenter.x, v[1] * t.y * 2 + camCenter.y, v[2] * t.z * 2 + camCenter.z); persControls.target = camCenter; persControls.update(); orthControls.object.position.set( v[0] * t.x + camCenter.x, v[1] * t.y + camCenter.y, v[2] * t.z + camCenter.z); orthControls.target = camCenter; orthControls.update(); // controls.update() implicitly calls requestRender() } function cameraChange(v) { cameraType = v; requestRender(); } const guiObjects = gui.addFolder('Objects'); for (const obj of objects) { // Ignore objects with no vertices if (obj.data.verts.length > 0) { const guiObjData = { obj: obj, color: obj.data.color, opacity: obj.data.opacity }; const guiObject = guiObjects.addFolder(obj.data.name); guiObject.addColor(guiObjData, 'color').name('Color').onChange(GUIObjectChange); guiObject.add(guiObjData, 'opacity').min(0.0).max(1.0).step(0.05).name('Opacity').onChange(GUIObjectChange); } } function GUIObjectChange(v) { for (const f of this.object.obj.faces) { const m = scene.getObjectByName(f).material; if (this.property == 'color') { m.color.setStyle(v); } if (this.property == 'opacity') { m.opacity = v; m.transparent = (v != 1.0); } } if (this.property == 'opacity') { const m = this.object.obj.wirematerial; m.opacity = v; m.transparent = (v != 1.0); } requestRender(); } // Make simple orientation arrows and box - REF: http://jsfiddle.net/b97zd1a3/16/ const arrowCanvas = 
document.querySelector('#arrowCanvas'); const arrowRenderer = new THREE.WebGLRenderer({ alpha: true, canvas: arrowCanvas }); // clear arrowRenderer.setClearColor(0x000000, 0); arrowRenderer.setSize(arrowCanvas.clientWidth * window.devicePixelRatio, arrowCanvas.clientHeight * window.devicePixelRatio, false); const arrowScene = new THREE.Scene(); const arrowCamera = new THREE.PerspectiveCamera( 50, arrowCanvas.clientWidth / arrowCanvas.clientHeight, 1, 500 ); arrowCamera.up = persCamera.up; // important! const arrowPos = new THREE.Vector3(0, 0, 0); arrowScene.add(new THREE.ArrowHelper( new THREE.Vector3(1, 0, 0), arrowPos, 60, 0x7F2020, 20, 10)); arrowScene.add(new THREE.ArrowHelper( new THREE.Vector3(0, 1, 0), arrowPos, 60, 0x207F20, 20, 10)); arrowScene.add(new THREE.ArrowHelper( new THREE.Vector3(0, 0, 1), arrowPos, 60, 0x20207F, 20, 10)); arrowScene.add(new THREE.Mesh( new THREE.BoxGeometry(40, 40, 40), new THREE.MeshLambertMaterial( { color: 0xaaaaaa, flatShading: false }) )); arrowScene.add(new THREE.HemisphereLight(0xC7E8FF, 0xFFE3B3, 1.2)); // Controls const persControls = new OrbitControls(persCamera, renderer.domElement); persControls.target = camCenter; // rotate around center of parts // persControls.enablePan = false; // persControls.enableDamping = true; persControls.update(); const orthControls = new OrbitControls(orthCamera, renderer.domElement); orthControls.target = camCenter; // rotate around center of parts // orthControls.enablePan = false; // orthControls.enableDamping = true; orthControls.update(); function render() { renderRequested = false; persControls.update(); if (cameraType == 'Perspective') { arrowCamera.position.copy(persCamera.position); arrowCamera.position.sub(persControls.target); } orthControls.update(); if (cameraType == 'Orthographic') { arrowCamera.position.copy(orthCamera.position); arrowCamera.position.sub(orthControls.target); } arrowCamera.lookAt(arrowScene.position); arrowCamera.position.setLength(200); if (cameraType == 'Perspective') { renderer.render(scene, persCamera); } if (cameraType == 'Orthographic') { renderer.render(scene, orthCamera); } arrowRenderer.render(arrowScene, arrowCamera); }; function requestRender() { if (!renderRequested) { renderRequested = true; requestAnimationFrame(render); } } persControls.addEventListener('change', requestRender); orthControls.addEventListener('change', requestRender); renderer.domElement.addEventListener('mousemove', onMouseMove); window.addEventListener('resize', onMainCanvasResize, false); onMainCanvasResize(); requestRender(); function onMainCanvasResize() { const pixelRatio = window.devicePixelRatio; const width = canvas.clientWidth * pixelRatio | 0; const height = canvas.clientHeight * pixelRatio | 0; const needResize = canvas.width !== width || canvas.height !== height; const aspect = canvas.clientWidth / canvas.clientHeight; if (needResize) { renderer.setSize(width, height, false); // See https://stackoverflow.com/questions/39373113/three-js-resize-window-not-scaling-properly const change = originalAspect / aspect; const newSize = viewSize * change; orthCamera.left = -aspect * newSize / 2; orthCamera.right = aspect * newSize / 2; orthCamera.top = newSize / 2; orthCamera.bottom = -newSize / 2; orthCamera.updateProjectionMatrix(); persCamera.aspect = canvas.clientWidth / canvas.clientHeight; persCamera.updateProjectionMatrix(); } for (const obj of objects) { obj.wirematerial.resolution.set(width, height); } requestRender(); } // XXX use mouse click to toggle the gui for the selected object? 
function onMouseMove(e) { let c = false; if (cameraType == 'Orthographic') { c = orthCamera; } if (cameraType == 'Perspective') { c = persCamera; } if (!c) { return; } const raycaster = new THREE.Raycaster(); raycaster.setFromCamera(new THREE.Vector2( (e.clientX / canvas.clientWidth) * 2 - 1, -(e.clientY / canvas.clientHeight) * 2 + 1), c); const intersects = raycaster.intersectObjects(raycasterObj); let chosen = ''; for (const i of intersects) { const m = i.object.material; if (m.opacity > 0) { if (m.emissive.getHex() == 0x000000) { m.emissive.setHex( 0x777777 ); m.needsUpdate = true; requestRender(); } chosen = i.object.name; break; } } for (const r of raycasterObj) { if (r.name == chosen) { continue; } if (r.material.emissive.getHex() != 0x000000) { r.material.emissive.setHex(0x000000); r.material.needsUpdate = true; requestRender(); } } } </script> </body> </html> """) def export( exportList, filename, colors = None, camera = None ): """Exports objects to an html file""" global disableCompression, base, baseFloat data = { 'camera':{}, 'file':{}, 'objects':[] } if not FreeCADGui and not camera: camera = OfflineRenderingUtils.getCamera(FreeCAD.ActiveDocument.FileName) if camera: # REF: https://github.com/FreeCAD/FreeCAD/blob/master/src/Mod/Arch/OfflineRenderingUtils.py camnode = OfflineRenderingUtils.getCoinCamera(camera) cameraPosition = camnode.position.getValue().getValue() data['camera']['type'] = 'Orthographic' if 'PerspectiveCamera' in camera: data['camera']['type'] = 'Perspective' data['camera']['focalDistance'] = camnode.focalDistance.getValue() data['camera']['position_x'] = cameraPosition[0] data['camera']['position_y'] = cameraPosition[1] data['camera']['position_z'] = cameraPosition[2] else: v = FreeCADGui.ActiveDocument.ActiveView data['camera']['type'] = v.getCameraType() data['camera']['focalDistance'] = v.getCameraNode().focalDistance.getValue() data['camera']['position_x'] = v.viewPosition().Base.x data['camera']['position_y'] = v.viewPosition().Base.y data['camera']['position_z'] = v.viewPosition().Base.z # Take the objects out of groups objectslist = Draft.get_group_contents(exportList, walls=True, addgroups=False) # objectslist = Arch.pruneIncluded(objectslist) for obj in objectslist: # Pull all obj data before we dig down the links label = obj.Label color = '#cccccc'; opacity = 1.0 if FreeCADGui: color = Draft.getrgb(obj.ViewObject.ShapeColor, testbw = False) opacity = int((100 - obj.ViewObject.Transparency)/5) / 20 # 0>>1 with step of 0.05 elif colors: if label in colors: color = Draft.getrgb(colors[label], testbw = False) validObject = False if obj.isDerivedFrom('Mesh::Feature'): mesh = obj.Mesh validObject = True if obj.isDerivedFrom('Part::Feature'): objShape = obj.Shape validObject = True if obj.isDerivedFrom('App::Link'): linkPlacement = obj.LinkPlacement while True: # drill down to get to the actual obj if obj.isDerivedFrom("App::Link"): if obj.ViewObject.OverrideMaterial: color = Draft.getrgb(obj.ViewObject.ShapeMaterial.DiffuseColor, testbw = False) obj = obj.LinkedObject if hasattr(obj, "__len__"): FreeCAD.Console.PrintMessage(label + ": Sub-Links are Unsupported.\n") break elif obj.isDerivedFrom('Part::Feature'): objShape = obj.Shape.copy(False) objShape.Placement = linkPlacement validObject = True break elif obj.isDerivedFrom("Mesh::Feature"): mesh = obj.Mesh.copy() mesh.Placement = linkPlacement validObject = True break if not validObject: continue objdata = { 'name': label, 'color': color, 'opacity': opacity, 'verts':'', 'facets':'', 'wires':[], 
'faceColors':[], 'facesToFacets':[], 'floats':[] } if obj.isDerivedFrom('Part::Feature'): deviation = 0.5 if FreeCADGui: deviation = obj.ViewObject.Deviation # obj.ViewObject.DiffuseColor is length=1 when all faces are the same color, length=len(faces) for when they're not if len(obj.ViewObject.DiffuseColor) == len(objShape.Faces): for fc in obj.ViewObject.DiffuseColor: objdata['faceColors'].append( Draft.getrgb(fc, testbw = False) ) # get verts and facets for ENTIRE object shapeData = objShape.tessellate( deviation ) mesh = Mesh.Mesh(shapeData) if len(objShape.Faces) > 1: # Map each Facet created by tessellate() to a Face so that it can be colored correctly using faceColors # This is done by matching the results of a tessellate() on EACH FACE to the overall tessellate stored in shapeData # if there is any error in matching these two then we display the whole object as one face and forgo the face colors for f in objShape.Faces: faceData = f.tessellate( deviation ) found = True for fv in range( len(faceData[0]) ): # face verts. List of type Vector() found = False for sv in range( len(shapeData[0]) ): #shape verts if faceData[0][fv] == shapeData[0][sv]: # do not use isEqual() here faceData[0][fv] = sv # replace with the index of shapeData[0] found = True break if not found: break if not found: FreeCAD.Console.PrintMessage("Facet to Face Mismatch.\n") objdata['facesToFacets'] = [] break # map each of the face facets to the shape facets and make a list of shape facet indices that belong to this face facetList = [] for ff in faceData[1]: # face facets found = False for sf in range( len(shapeData[1]) ): #shape facets if faceData[0][ff[0]] in shapeData[1][sf] and faceData[0][ff[1]] in shapeData[1][sf] and faceData[0][ff[2]] in shapeData[1][sf]: facetList.append(sf) found = True break if not found: break if not found: FreeCAD.Console.PrintMessage("Facet List Mismatch.\n") objdata['facesToFacets'] = [] break objdata['facesToFacets'].append( baseEncode(facetList) ) wires = [] # Add wires for f in objShape.Faces: for w in f.Wires: wo = Part.Wire(Part.__sortEdges__(w.Edges)) wire = [] for v in wo.discretize(QuasiDeflection = 0.005): wire.append( '{:.5f}'.format(v.x) ) # use strings to avoid 0.00001 written as 1e-05 wire.append( '{:.5f}'.format(v.y) ) wire.append( '{:.5f}'.format(v.z) ) wires.append( wire ) if not disableCompression: for w in range( len(wires) ): for wv in range( len(wires[w]) ): found = False for f in range( len(objdata['floats']) ): if objdata['floats'][f] == wires[w][wv]: wires[w][wv] = f found = True break if not found: objdata['floats'].append( wires[w][wv] ) wires[w][wv] = len(objdata['floats'])-1 wires[w] = baseEncode(wires[w]) objdata['wires'] = wires vIndex = {} verts = [] for p in range( len(mesh.Points) ): vIndex[ mesh.Points[p].Index ] = p verts.append( '{:.5f}'.format(mesh.Points[p].Vector.x) ) verts.append( '{:.5f}'.format(mesh.Points[p].Vector.y) ) verts.append( '{:.5f}'.format(mesh.Points[p].Vector.z) ) # create floats list to compress verts and wires being written into the JS if not disableCompression: for v in range( len(verts) ): found = False for f in range( len(objdata['floats']) ): if objdata['floats'][f] == verts[v]: verts[v] = f found = True break if not found: objdata['floats'].append( verts[v] ) verts[v] = len(objdata['floats'])-1 objdata['verts'] = baseEncode(verts) facets = [] for f in mesh.Facets: for i in f.PointIndices: facets.append( vIndex[i] ) objdata['facets'] = baseEncode(facets) # compress floats if not disableCompression: # use ratio of 7x base13 
to 4x base90 because 13^7 ~ 90^4 fullstr = json.dumps(objdata['floats'], separators=(',', ':')) fullstr = fullstr.replace('[', '').replace(']', '').replace('"', '') floatStr = '' baseFloatCt = len(baseFloat) baseCt = len(base) for fs in range( 0, len(fullstr), 7 ): # chunks of 7 chars, skip the first one str7 = fullstr[fs:(fs+7)] quotient = 0 for s in range( len(str7) ): quotient += baseFloat.find(str7[s]) * pow(baseFloatCt, (6-s)) for v in range(4): floatStr += base[ quotient % baseCt ] quotient = int(quotient / baseCt) objdata['floats'] = floatStr data['objects'].append( objdata ) html = getHTMLTemplate() html = html.replace('$pagetitle',FreeCAD.ActiveDocument.Label) version = FreeCAD.Version() html = html.replace('$version',version[0] + '.' + version[1] + '.' + version[2]) # Remove data compression in JS data['compressed'] = not disableCompression data['base'] = base data['baseFloat'] = baseFloat html = html.replace('$data', json.dumps(data, separators=(',', ':')) ) # Shape Data if six.PY2: outfile = pythonopen(filename, "wb") else: outfile = pythonopen(filename, "w") outfile.write( html ) outfile.close() FreeCAD.Console.PrintMessage( translate("Arch", "Successfully written") + ' ' + filename + "\n" ) def baseEncode( arr ): """Compresses an array of ints into a base90 string""" global disableCompression, base if disableCompression: return arr if len(arr) == 0: return '' longest = 0 output = [] baseCt = len(base) for v in range( len(arr) ): buffer = '' quotient = arr[v] while True: buffer += base[ quotient % baseCt ] quotient = int(quotient / baseCt) if quotient == 0: break output.append( buffer ) if len(buffer) > longest: longest = len(buffer) output = [('{:>'+str(longest)+'}').format(x) for x in output] # pad each element return str(longest) + ('').join(output)
lgpl-2.1
5,400,546,240,878,188,000
48.273743
311
0.474444
false
4.505977
false
false
false
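A small, self-contained sketch of the base-90 round trip behind the exporter's baseEncode() and the JavaScript baseDecode() in the record above: each integer is written little-endian in the 90-character alphabet, padded to a fixed width, and the width is prepended as the first character. The alphabet is copied from the record; everything else is an illustrative assumption.

BASE = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!#$%&()*+-:;/=>?@[]^_,.{|}~`'

def base_encode(values):
    """Compress a list of non-negative ints into a width-prefixed base-90 string."""
    if not values:
        return ''
    chunks = []
    for v in values:
        buf = ''
        while True:
            buf += BASE[v % len(BASE)]   # least-significant digit first
            v //= len(BASE)
            if v == 0:
                break
        chunks.append(buf)
    width = max(len(c) for c in chunks)
    return str(width) + ''.join(c.rjust(width) for c in chunks)

def base_decode(text):
    """Invert base_encode(): read the width, then decode fixed-size chunks."""
    width = int(text[0])
    out = []
    for i in range(1, len(text), width):
        chunk = text[i:i + width].strip()
        out.append(sum(BASE.index(ch) * len(BASE) ** p for p, ch in enumerate(chunk)))
    return out

assert base_decode(base_encode([0, 1, 90, 12345])) == [0, 1, 90, 12345]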
glemaitre/UnbalancedDataset
examples/over-sampling/plot_smote.py
2
2231
""" ===== SMOTE ===== An illustration of the SMOTE method and its variant. """ # Authors: Fernando Nogueira # Christos Aridas # Guillaume Lemaitre <[email protected]> # License: MIT import matplotlib.pyplot as plt from sklearn.datasets import make_classification from sklearn.decomposition import PCA from imblearn.over_sampling import SMOTE print(__doc__) def plot_resampling(ax, X, y, title): c0 = ax.scatter(X[y == 0, 0], X[y == 0, 1], label="Class #0", alpha=0.5) c1 = ax.scatter(X[y == 1, 0], X[y == 1, 1], label="Class #1", alpha=0.5) ax.set_title(title) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() ax.spines['left'].set_position(('outward', 10)) ax.spines['bottom'].set_position(('outward', 10)) ax.set_xlim([-6, 8]) ax.set_ylim([-6, 6]) return c0, c1 # Generate the dataset X, y = make_classification(n_classes=2, class_sep=2, weights=[0.3, 0.7], n_informative=3, n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1, n_samples=80, random_state=10) # Instanciate a PCA object for the sake of easy visualisation pca = PCA(n_components=2) # Fit and transform x to visualise inside a 2D feature space X_vis = pca.fit_transform(X) # Apply regular SMOTE kind = ['regular', 'borderline1', 'borderline2', 'svm'] sm = [SMOTE(kind=k) for k in kind] X_resampled = [] y_resampled = [] X_res_vis = [] for method in sm: X_res, y_res = method.fit_sample(X, y) X_resampled.append(X_res) y_resampled.append(y_res) X_res_vis.append(pca.transform(X_res)) # Two subplots, unpack the axes array immediately f, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2) # Remove axis for second plot ax2.axis('off') ax_res = [ax3, ax4, ax5, ax6] c0, c1 = plot_resampling(ax1, X_vis, y, 'Original set') for i in range(len(kind)): plot_resampling(ax_res[i], X_res_vis[i], y_resampled[i], 'SMOTE {}'.format(kind[i])) ax2.legend((c0, c1), ('Class #0', 'Class #1'), loc='center', ncol=1, labelspacing=0.) plt.tight_layout() plt.show()
mit
2,950,088,382,133,793,000
27.974026
76
0.619005
false
2.771429
false
false
false
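A minimal sketch of the resampling step used in the example above, following the same (older) imbalanced-learn API shown there (SMOTE(kind=...) and fit_sample()); later releases renamed these, so treat the exact names as an assumption tied to the version the example targets.

from collections import Counter
from sklearn.datasets import make_classification
from imblearn.over_sampling import SMOTE

X, y = make_classification(n_classes=2, weights=[0.3, 0.7],
                           n_features=20, n_samples=80, random_state=10)
sm = SMOTE(kind='regular')
X_res, y_res = sm.fit_sample(X, y)  # oversamples the minority class
print(sorted(Counter(y).items()), '->', sorted(Counter(y_res).items()))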
bewantbe/eogRichExif
eogRichExif.py
1
12212
''' eogRichExif A eog (Eye of GNOME Image Viewer) plugin which shows many Exif info in side pane. Thanks to the eogMetaEdit plugin. ''' ''' eogRichExif is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. eogRichExif is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with eogRichExif. If not, see <http://www.gnu.org/licenses/>. ''' from gi.repository import GObject, Gtk, Eog from os.path import join, basename from urllib.parse import urlparse import xml.sax.saxutils import pyexiv2 import math class eogRichExif(GObject.Object, Eog.WindowActivatable): # Override EogWindowActivatable's window property # This is the EogWindow this plugin instance has been activated for window = GObject.property(type=Eog.Window) Debug = False def __init__(self): # will be execulted when activating GObject.Object.__init__(self) def do_activate(self): if self.Debug: print('The answer landed on my rooftop, whoa') # get sidebar self.sidebar = self.window.get_sidebar() # need to track file changes in the EoG thumbview (any better idea?) self.thumbview = self.window.get_thumb_view() # the EogImage selected in the thumbview self.thumbImage = None self.cb_ids = {} self.plugin_window = None # Python and GTK # https://python-gtk-3-tutorial.readthedocs.org/en/latest/introduction.html # http://www.pygtk.org/pygtk2tutorial/sec-Notebooks.html # http://gnipsel.com/glade/ builder = Gtk.Builder() builder.add_from_file(join(self.plugin_info.get_data_dir(),\ "eogRichExif.glade")) self.plugin_window = builder.get_object('eogRichExif') self.label_exif = builder.get_object('label_exif') # add dialog to the sidebar Eog.Sidebar.add_page(self.sidebar, "RichExif", self.plugin_window) self.cb_ids['selection-changed'] = {} self.cb_ids['selection-changed'][self.thumbview] = \ self.thumbview.connect('selection-changed', \ self.selection_changed_cb, self) def do_deactivate(self): '''remove all the callbacks stored in dict self.cb_ids ''' if self.Debug: print('The answer fell off my rooftop, woot') for S in self.cb_ids: for W, id in self.cb_ids[S].items(): W.disconnect(id) # Load metadata @staticmethod def selection_changed_cb(thumb, self): if self.Debug: print("--- dbg: in selection_changed_cb ---") # Get file path self.thumbImage = self.thumbview.get_first_selected_image() Event = Gtk.get_current_event() self.filePath = None self.fileURL = None if self.thumbImage != None: self.fileURL = self.thumbImage.get_uri_for_display() # https://docs.python.org/2/library/urlparse.html self.filePath = urlparse(self.fileURL).path if self.Debug: print('loading thumb meta: \n ', self.filePath, '\n URL: ', self.fileURL) else: if self.Debug: print('Fail to load metadata!') return False # Read metadata # http://python3-exiv2.readthedocs.org/en/latest/tutorial.html self.metadata = pyexiv2.ImageMetadata(self.filePath) try: self.metadata.read() except: self.metadata = None self.label_exif.set_markup("Cannot read metadata.\n self.filePath=%s" % self.filePath) return # try: self.set_info() # except KeyError as e: # self.label_exif.set_markup("Metadata incomplete?\n Error: {0}\n".format(e)) # return False to let any other callbacks execute as well return 
False def set_info(self): def is_integer(a): if math.fabs(a-math.floor(a+0.5)) < 1e-5: return True else: return False st_markup = '%s\n' % self.filePath; if 'Exif.Image.Model' in self.metadata: image_make = '' if 'Exif.Image.Make' in self.metadata: image_make = xml.sax.saxutils.escape(self.metadata['Exif.Image.Make'].value) + '\n ' image_model = xml.sax.saxutils.escape(self.metadata['Exif.Image.Model'].value) st_markup += '<b>Camera:</b>\n %s%s\n' % (image_make, image_model) # Time NO_TIME = '0000:00:00 00:00:00' s_time_tag = [ [NO_TIME, 'Exif.Image.DateTime', 'DateTime'], [NO_TIME, 'Exif.Image.DateTimeOriginal', 'DateTimeOriginal'], [NO_TIME, 'Exif.Photo.DateTimeOriginal', 'DateTimeOriginal'], [NO_TIME, 'Exif.Image.DateTimeDigitized', 'DateTimeDigitized'], [NO_TIME, 'Exif.Photo.DateTimeDigitized', 'DateTimeDigitized']] for idx, ttag in enumerate(s_time_tag): if ttag[1] in self.metadata: s_time_tag[idx][0] = self.metadata[ttag[1]].value # remove nonsence data s_time_tag = list(filter(lambda x: x[0]!=NO_TIME, s_time_tag)) if len(set([r[0] for r in s_time_tag])) > 1: # time are different for ttag in s_time_tag: st_markup += '<b>%s:</b>\n<tt> %s</tt>\n' % (ttag[2], ttag[0].strftime('%Y-%m-%d %H:%M:%S')) elif len(s_time_tag) == 0: st_markup += '<b>DateTime:</b>\n<tt> ??</tt>\n' else: # unique time st_markup += '<b>DateTime:</b>\n<tt> %s</tt>\n' % (s_time_tag[0][0].strftime('%Y-%m-%d %H:%M:%S')) # ExposureTime if 'Exif.Photo.ExposureTime' in self.metadata: st_exposure_time = self.metadata['Exif.Photo.ExposureTime'].human_value else: st_exposure_time = '?? s' # FNumber if 'Exif.Photo.FNumber' in self.metadata: f_number = self.metadata['Exif.Photo.FNumber'].human_value elif 'Exif.Photo.ApertureValue' in self.metadata: f_number = self.metadata['Exif.Photo.ApertureValue'].human_value else: f_number = 'F??' # ISO iso = '' if 'Exif.Photo.ISOSpeedRatings' in self.metadata: iso = self.metadata['Exif.Photo.ISOSpeedRatings'].human_value else: if 'Exif.Nikon3.ISOSettings' in self.metadata: iso = self.metadata['Exif.Nikon3.ISOSettings'].human_value if 'Exif.NikonIi.ISO' in self.metadata: iso = self.metadata['Exif.NikonIi.ISO'].human_value # extra ISO if 'Exif.NikonIi.ISOExpansion' in self.metadata: iso_ext = self.metadata['Exif.NikonIi.ISOExpansion'].human_value if 'off' in iso_ext.lower(): iso += '' # do nothing else: iso += '(%s)' % iso_ext st_markup += '<b>Exposure:</b>\n' st_markup += '<tt> %s, %s</tt>\n' % (st_exposure_time, f_number) st_markup += '<tt> ISO %s</tt>\n' % (iso) # Focal Length if 'Exif.Photo.FocalLength' in self.metadata: st_focal_length = "%.1f mm" % self.metadata['Exif.Photo.FocalLength'].value.__float__() else: st_focal_length = "?? mm" if 'Exif.Photo.FocalLengthIn35mmFilm' in self.metadata: st_focal_length_35mm = "%.1f mm (35mm)" % self.metadata['Exif.Photo.FocalLengthIn35mmFilm'].value.__float__() else: st_focal_length_35mm = '?? 
mm (35mm)' st_markup += '<tt> %s</tt>\n' % (st_focal_length) st_markup += '<tt> %s</tt>\n' % (st_focal_length_35mm) if 'Exif.Photo.Flash' in self.metadata: st_markup += '<b>Flash:</b>\n' st_markup += ' %s\n' % self.metadata['Exif.Photo.Flash'].human_value def sign(a): return (a > 0) - (a < 0) # White Balance st_markup += '<b>WhiteBalance:</b>\n' if 'Exif.Nikon3.WhiteBalance' in self.metadata: wb_extra = self.metadata['Exif.Nikon3.WhiteBalance'].human_value.strip() if 'Exif.Nikon3.WhiteBalanceBias' in self.metadata: v = self.metadata['Exif.Nikon3.WhiteBalanceBias'].value wb_extra += ', Bias: %s:%d, %s:%d' % (('A','_','B')[sign(v[0])+1], abs(v[0]), ('M','_','G')[sign(v[1])+1], abs(v[1])) st_markup += ' %s\n' % wb_extra elif 'Exif.CanonPr.WhiteBalanceRed' in self.metadata: wb_extra = self.metadata['Exif.Photo.WhiteBalance'].human_value.strip() v_r = self.metadata['Exif.CanonPr.WhiteBalanceRed'].value v_b = self.metadata['Exif.CanonPr.WhiteBalanceBlue'].value wb_extra += ', Bias: R:%d, B:%d' % (v_r, v_b) # not sure the logic if 'Manual' in wb_extra: v_t = self.metadata['Exif.CanonPr.ColorTemperature'].value wb_extra += ', %dK' % v_t st_markup += ' %s\n' % wb_extra else: if 'Exif.Photo.WhiteBalance' in self.metadata: wb = self.metadata['Exif.Photo.WhiteBalance'].human_value else: wb = '' st_markup += ' %s\n' % wb # Focus Mode if 'Exif.Nikon3.Focus' in self.metadata: st_markup += '<b>Focus Mode:</b>\n' st_markup += ' %s\n' % self.metadata['Exif.Nikon3.Focus'].value.strip() if 'Exif.NikonAf2.ContrastDetectAF' in self.metadata: st_cdaf = self.metadata['Exif.NikonAf2.ContrastDetectAF'].human_value if 'on' in st_cdaf.lower(): st_markup += ' ContrastDetectAF:\n %s\n' % st_cdaf if 'Exif.NikonAf2.PhaseDetectAF' in self.metadata: st_pdaf = self.metadata['Exif.NikonAf2.PhaseDetectAF'].human_value if 'on' in st_pdaf.lower(): st_markup += ' PhaseDetectAF:\n %s\n' % st_pdaf if 'Exif.Sony1.FocusMode' in self.metadata: st_markup += '<b>Focus Mode:</b>\n' st_markup += ' %s\n' % self.metadata['Exif.Sony1.FocusMode'].human_value.strip() st_markup += ' %s\n' % self.metadata['Exif.Sony1.AFMode'].human_value.strip() if 'Exif.CanonCs.FocusMode' in self.metadata: st_markup += '<b>Focus Mode:</b>\n' st_markup += ' %s\n' % self.metadata['Exif.CanonCs.FocusMode'].human_value.strip() st_markup += ' FocusType: %s\n' % self.metadata['Exif.CanonCs.FocusType'].human_value.strip() st_markup += '<b>Extra settings:</b>\n' s_tag_name_extra = [ ('Exif.Photo.ExposureBiasValue', 'Exposure Bias Value'), ('Exif.Photo.ExposureProgram', 'Exposure Program'), ('Exif.Photo.MeteringMode', 'Metering Mode'), ('Exif.Photo.SceneCaptureType', 'Scene Capture Type'), ('Exif.Photo.ColorSpace', 'Color Space'), # Nikon ('Exif.Nikon3.ActiveDLighting', 'DLighting'), ('Exif.NikonVr.VibrationReduction', 'Vibration Reduction'), ('Exif.Nikon3.NoiseReduction', 'Noise Reduction'), ('Exif.Nikon3.HighISONoiseReduction', 'High ISO Noise Reduction'), ('Exif.Nikon3.ShootingMode', 'Shooting Mode'), # Canon ('Exif.CanonFi.NoiseReduction', 'Noise Reduction'), # Sony ('Exif.Sony1.AutoHDR', 'Auto HDR'), ('Exif.Sony1.LongExposureNoiseReduction', 'LongExposureNoiseReduction') ] for tag_name in s_tag_name_extra: if tag_name[0] in self.metadata: st_markup += ' <i>%s:</i>\n %s\n' % \ (tag_name[1], self.metadata[tag_name[0]].human_value) st_markup += '<b>Lens:</b>\n' s_tag_name_lens = [ ('Exif.NikonLd3.FocalLength', 'Focal Length'), ('Exif.NikonLd3.AFAperture', 'AFAperture'), ('Exif.NikonLd3.FocusDistance', 'Focus Distance'), ] for tag_name in s_tag_name_lens: if 
tag_name[0] in self.metadata: st_markup += ' <i>%s:</i> %s\n' % \ (tag_name[1], self.metadata[tag_name[0]].human_value) st_markup += '<b>Lens Model:</b>\n' if 'Exif.Nikon3.Lens' in self.metadata: st_markup += ' %s\n' % self.metadata['Exif.Nikon3.Lens'].human_value if 'Exif.Canon.LensModel' in self.metadata: st_markup += ' %s\n' % self.metadata['Exif.Canon.LensModel'].human_value if 'Exif.Photo.LensModel' in self.metadata: st_markup += ' %s\n' % self.metadata['Exif.Photo.LensModel'].human_value if 'Exif.GPSInfo.GPSLatitudeRef' in self.metadata: lr = self.metadata['Exif.GPSInfo.GPSLatitudeRef'].value lv = self.metadata['Exif.GPSInfo.GPSLatitude'].value ar = self.metadata['Exif.GPSInfo.GPSLongitudeRef'].value av = self.metadata['Exif.GPSInfo.GPSLongitude'].value st_markup += '<b>GPS:</b>\n %.0f° %.0f\' %.2f" %s,\n %.0f° %.0f\' %.2f" %s,\n' % \ (float(lv[0]), float(lv[1]), float(lv[2]), lr, \ float(av[0]), float(av[1]), float(av[2]), ar) st_markup += ' %s %s.\n' % (self.metadata['Exif.GPSInfo.GPSAltitude'].human_value,\ self.metadata['Exif.GPSInfo.GPSAltitudeRef'].human_value) previews = self.metadata.previews st_markup += '<b>Number of thumbnails:</b>\n <tt>%d</tt>\n' % len(previews) # if 'NIKON' in image_make: # if ('Exif.Photo.UserComment' in self.metadata): # st_markup += '<b>UserComment:</b>\n <tt>%s</tt>\n' % self.metadata['Exif.Photo.UserComment'].human_value self.label_exif.set_markup(st_markup)
gpl-3.0
-9,212,282,907,351,276,000
36.453988
121
0.661507
false
2.663613
false
false
false
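A short sketch of the metadata access pattern the plugin above relies on, using the same pyexiv2 calls that appear in the record (ImageMetadata, read(), key membership, .value / .human_value); the image path is a placeholder assumption.

import pyexiv2

metadata = pyexiv2.ImageMetadata('/path/to/photo.jpg')  # hypothetical path
metadata.read()
if 'Exif.Image.Model' in metadata:
    print('Camera:', metadata['Exif.Image.Model'].value)
if 'Exif.Photo.ExposureTime' in metadata:
    print('Exposure:', metadata['Exif.Photo.ExposureTime'].human_value)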
razz0/DataMining
apriori_sequential.py
1
8210
"""Implementation of the Apriori algorithm for sequential patterns, F(k-1) x F(k-1) variant. Model sequences like ((1, 2, 3), (4, 5), (4, 6)). To get course sequences with empty elements as (0,): course_seqs = [x.course_sequence for x in s.students] course_seqs2 = [tuple([seq or (0,) for seq in x.course_sequence]) for x in s.students] """ from collections import defaultdict from pprint import pprint import copy def flatten(sequence): """Flatten events in sequence elements to list of events""" return [event for element in sequence for event in element] def is_subsequence(seq1, seq2): """Check if seq1 is a subsequence of seq2 >>> is_subsequence(((2,), (3, 5)), ((2, 4), (3, 5, 6), (8,))) True >>> is_subsequence(((1,), (2,)), ((1, 2), (3, 4))) False >>> is_subsequence(((2,), (4,)), ((2, 4), (2, 4), (2, 5))) True """ seq = copy.deepcopy(seq1) for element in seq2: if seq and set(seq[0]) <= set(element): seq = seq[1:] return True if not seq else False def support_count(sequence, seq_list): """ Count support count for sequence :param itemset: items to measure support count for :param transactions: list of sets (all transactions) >>> simple_seqs = [((1,), (2, 3)), ((2,), (3,)), ((2, 4,),), ((4,),)] >>> [support_count(((item,),), simple_seqs) for item in range(1, 5)] [1, 3, 2, 2] """ return len([seq for seq in seq_list if is_subsequence(sequence, seq)]) def _sequential_candidate_generation(sequences, k): """ Generate candidate sequences of length k. :param sequences: list of sequences containing elements containing events :param k: > 1 >>> pprint(_sequential_candidate_generation([(('A',),), (('B',),), (('C',),)], 2)) [(('A',), ('A',)), (('A',), ('B',)), (('A', 'B'),), (('A',), ('C',)), (('A', 'C'),), (('B',), ('A',)), (('B',), ('B',)), (('B',), ('C',)), (('B', 'C'),), (('C',), ('A',)), (('C',), ('B',)), (('C',), ('C',))] >>> _sequential_candidate_generation([(('A', 'B'),), (('A', 'C'),), (('B',), ('C',))], 3) [(('A', 'B'), ('C',))] >>> _sequential_candidate_generation([(('A',), ('B',)), (('A', 'C'),), (('B', 'C'),), (('C', 'C'),)], 3) [(('A',), ('B', 'C')), (('A', 'C', 'C'),), (('B', 'C', 'C'),)] >>> pprint(_sequential_candidate_generation([((1,),), ((2,),), ((3,),)], 2)) [((1,), (1,)), ((1,), (2,)), ((1, 2),), ((1,), (3,)), ((1, 3),), ((2,), (1,)), ((2,), (2,)), ((2,), (3,)), ((2, 3),), ((3,), (1,)), ((3,), (2,)), ((3,), (3,))] >>> _sequential_candidate_generation([((1,), (2,)), ((2,), (3,))], 3) [((1,), (2,), (3,))] """ new_candidates = [] for index1, seq1 in enumerate(sequences): for index2, seq2 in enumerate(sequences): if k == 2: # Assume we get 1-sequences like we should new_candidates.append((seq1[0], seq2[0],)) if seq1[0] < seq2[0]: new_candidates.append(((seq1[0] + seq2[0]),)) elif k > 2: seq1_flattened = flatten(seq1) seq2_flattened = flatten(seq2) if index1 == index2: continue if seq1_flattened[1:] == seq2_flattened[:-1]: new_sequence = copy.deepcopy(seq1) if len(seq2[-1]) > 1: new_sequence = new_sequence[:-1] + (new_sequence[-1] + (seq2_flattened[-1],),) else: new_sequence += (seq2[-1],) new_candidates.append(new_sequence) return new_candidates def get_subsequences(sequence): """ Get length k-1 subsequences of length k sequence >>> get_subsequences((('A', 'B'), ('C',))) [(('A', 'B'),), (('A',), ('C',)), (('B',), ('C',))] >>> get_subsequences((('A', 'B'), ('C',), ('D', 'E'))) [(('A', 'B'), ('C',), ('D',)), (('A', 'B'), ('C',), ('E',)), (('A', 'B'), ('D', 'E')), (('A',), ('C',), ('D', 'E')), (('B',), ('C',), ('D', 'E'))] :rtype : tuple :return: """ subseqs = [] for i in reversed(range(0, 
len(sequence))): element = sequence[i] for j in reversed(range(0, len(element))): event = element[j] if len(element) == 1: subseq = sequence[:i] + sequence[(i + 1):] else: subseq = list(sequence) subseq[i] = subseq[i][:j] + subseq[i][(j + 1):] subseqs.append(tuple(subseq)) return subseqs def apriori_sequential(sequences, minsup, fixed_k=None, verbose=False): """ Apriori method for sequential patterns :param transactions: list of iterables (list of transactions containing items) :param all_items: list distinct items :param minsup: minimum support >>> seqs = [((1, 2, 4), (2, 3), (5,)), \ ((1, 2), (2, 3, 4)), \ ((1, 2), (2, 3, 4), (2, 4, 5)), \ ((2,), (3, 4), (4, 5)), \ ((1, 3), (2, 4, 5))] >>> pprint(apriori_sequential(seqs, 0.8)) [{((1,),): 0.80000000000000004}, {((2,),): 1.0}, {((3,),): 1.0}, {((4,),): 1.0}, {((5,),): 0.80000000000000004}, {((1,), (2,)): 0.80000000000000004}, {((2,), (3,)): 0.80000000000000004}, {((2, 4),): 0.80000000000000004}, {((3,), (5,)): 0.80000000000000004}] >>> seqs = [((1,), (), (), (2,), (), (), (3,)), \ ((1, 2,), (), (2,3 ), (2,), (), (3,), ()), \ ((1,), (2,), (), (2,), (3,), (3,), (2, 3, 4))] """ k = 1 N = len(sequences) frequent_sequences = [[], []] # k index, zero always empty support = defaultdict(int) if verbose: print 'Initializing length 1 frequent sequences...' for seq in sequences: events = sorted(set(flatten(seq))) for event in events: event_seq = ((event,),) if event_seq not in support: support[event_seq] = support_count(event_seq, sequences) #print "k==1, event seq: %s - support: %s" % (event_seq, support[event_seq]) if support[event_seq] >= N * minsup and event_seq not in frequent_sequences[1]: frequent_sequences[1].append(event_seq) if verbose: print 'Initialized %s 1-sequences' % len(frequent_sequences[1]) print 'Generating longer frequent sequences...' pruned_candidates = ['dummy', 'dummy'] while pruned_candidates and len(pruned_candidates) > 1 and (not fixed_k or k < fixed_k): k += 1 candidate_seqs = _sequential_candidate_generation(frequent_sequences[k - 1], k) if verbose: print 'k=%s - candidate sequence count %s' % (k, len(candidate_seqs),) if not candidate_seqs: break pruned_candidates = [] for can_seq in candidate_seqs: subseqs = get_subsequences(can_seq) if all([subseq in frequent_sequences[k - 1] for subseq in subseqs]) and can_seq not in pruned_candidates: pruned_candidates.append(can_seq) for pruned_index, pruned_seq in enumerate(pruned_candidates): if verbose and k > 3 and len(pruned_candidates) > 50 \ and pruned_index % (1 + len(pruned_candidates) / 5) == 0: print 'Candidate %s / %s' % (pruned_index, len(pruned_candidates)) for seq in sequences: if is_subsequence(pruned_seq, seq): support[pruned_seq] += 1 frequent_sequences.append([seq for seq in pruned_candidates if support[seq] >= N * minsup]) if fixed_k: try: freq_items = [{freqseq: support[freqseq] / float(N)} for freqseq in frequent_sequences[fixed_k]] except IndexError: return [] else: freq_items = [{freqseq: support[freqseq] / float(N)} for freq_k in frequent_sequences for freqseq in freq_k] return freq_items if __name__ == "__main__": print 'Running doctests' import doctest res = doctest.testmod() if not res[0]: print 'OK!'
gpl-2.0
133,243,926,120,616,290
32.92562
150
0.487576
false
3.44958
false
false
false
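A minimal usage sketch built from the doctests in the module above: sequences are tuples of elements (tuples of events), and apriori_sequential() returns the frequent subsequences with their relative support. The module name is assumed from the record's path, and the module itself is written for Python 2, so this would need to run under the same interpreter.

from pprint import pprint
from apriori_sequential import is_subsequence, apriori_sequential  # assumed module name

seqs = [((1, 2, 4), (2, 3), (5,)),
        ((1, 2), (2, 3, 4)),
        ((1, 2), (2, 3, 4), (2, 4, 5)),
        ((2,), (3, 4), (4, 5)),
        ((1, 3), (2, 4, 5))]

print(is_subsequence(((2,), (3, 5)), ((2, 4), (3, 5, 6), (8,))))  # True
pprint(apriori_sequential(seqs, minsup=0.8))  # e.g. ((1,), (2,)) with support 0.8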
batermj/algorithm-challenger
code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/test/test_json/test_decode.py
1
4355
import decimal from io import StringIO from collections import OrderedDict from test.test_json import PyTest, CTest class TestDecode: def test_decimal(self): rval = self.loads('1.1', parse_float=decimal.Decimal) self.assertTrue(isinstance(rval, decimal.Decimal)) self.assertEqual(rval, decimal.Decimal('1.1')) def test_float(self): rval = self.loads('1', parse_int=float) self.assertTrue(isinstance(rval, float)) self.assertEqual(rval, 1.0) def test_empty_objects(self): self.assertEqual(self.loads('{}'), {}) self.assertEqual(self.loads('[]'), []) self.assertEqual(self.loads('""'), "") def test_object_pairs_hook(self): s = '{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}' p = [("xkd", 1), ("kcw", 2), ("art", 3), ("hxm", 4), ("qrt", 5), ("pad", 6), ("hoy", 7)] self.assertEqual(self.loads(s), eval(s)) self.assertEqual(self.loads(s, object_pairs_hook=lambda x: x), p) self.assertEqual(self.json.load(StringIO(s), object_pairs_hook=lambda x: x), p) od = self.loads(s, object_pairs_hook=OrderedDict) self.assertEqual(od, OrderedDict(p)) self.assertEqual(type(od), OrderedDict) # the object_pairs_hook takes priority over the object_hook self.assertEqual(self.loads(s, object_pairs_hook=OrderedDict, object_hook=lambda x: None), OrderedDict(p)) # check that empty object literals work (see #17368) self.assertEqual(self.loads('{}', object_pairs_hook=OrderedDict), OrderedDict()) self.assertEqual(self.loads('{"empty": {}}', object_pairs_hook=OrderedDict), OrderedDict([('empty', OrderedDict())])) def test_decoder_optimizations(self): # Several optimizations were made that skip over calls to # the whitespace regex, so this test is designed to try and # exercise the uncommon cases. The array cases are already covered. rval = self.loads('{ "key" : "value" , "k":"v" }') self.assertEqual(rval, {"key":"value", "k":"v"}) def check_keys_reuse(self, source, loads): rval = loads(source) (a, b), (c, d) = sorted(rval[0]), sorted(rval[1]) self.assertIs(a, c) self.assertIs(b, d) def test_keys_reuse(self): s = '[{"a_key": 1, "b_\xe9": 2}, {"a_key": 3, "b_\xe9": 4}]' self.check_keys_reuse(s, self.loads) decoder = self.json.decoder.JSONDecoder() self.check_keys_reuse(s, decoder.decode) self.assertFalse(decoder.memo) def test_extra_data(self): s = '[1, 2, 3]5' msg = 'Extra data' self.assertRaisesRegex(self.JSONDecodeError, msg, self.loads, s) def test_invalid_escape(self): s = '["abc\\y"]' msg = 'escape' self.assertRaisesRegex(self.JSONDecodeError, msg, self.loads, s) def test_invalid_input_type(self): msg = 'the JSON object must be str' for value in [1, 3.14, [], {}, None]: self.assertRaisesRegex(TypeError, msg, self.loads, value) def test_string_with_utf8_bom(self): # see #18958 bom_json = "[1,2,3]".encode('utf-8-sig').decode('utf-8') with self.assertRaises(self.JSONDecodeError) as cm: self.loads(bom_json) self.assertIn('BOM', str(cm.exception)) with self.assertRaises(self.JSONDecodeError) as cm: self.json.load(StringIO(bom_json)) self.assertIn('BOM', str(cm.exception)) # make sure that the BOM is not detected in the middle of a string bom_in_str = '"{}"'.format(''.encode('utf-8-sig').decode('utf-8')) self.assertEqual(self.loads(bom_in_str), '\ufeff') self.assertEqual(self.json.load(StringIO(bom_in_str)), '\ufeff') def test_negative_index(self): d = self.json.JSONDecoder() self.assertRaises(ValueError, d.raw_decode, 'a'*42, -50000) def test_deprecated_encode(self): with self.assertWarns(DeprecationWarning): self.loads('{}', encoding='fake') class TestPyDecode(TestDecode, PyTest): pass class 
TestCDecode(TestDecode, CTest): pass
apache-2.0
4,445,666,602,125,950,000
41.281553
77
0.583008
false
3.608119
true
false
false
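A small standalone illustration of the behaviour the tests above exercise: object_pairs_hook receives the key/value pairs in document order and takes priority over object_hook, so it can preserve ordering or inspect duplicates.

import json
from collections import OrderedDict

s = '{"xkd": 1, "kcw": 2, "art": 3}'
print(json.loads(s, object_pairs_hook=OrderedDict))          # OrderedDict keeps order
print(json.loads(s, object_pairs_hook=lambda pairs: pairs))  # raw list of (key, value) pairs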
kishori82/MetaPathways_Python.3.0
utilities/compute_status.py
1
18299
#!/usr/bin/python # File created on 27 Jan 2012. from __future__ import division __author__ = "Kishori M Konwar" __copyright__ = "Copyright 2013, MetaPathways" __credits__ = ["r"] __version__ = "1.0" __maintainer__ = "Kishori M Konwar" __status__ = "Release" try: import sys, os, re, math, gzip from glob import glob from os import makedirs, sys, remove, rename, path from optparse import OptionParser except: print """ Could not load some user defined module functions""" print """ Make sure your typed 'source MetaPathwaysrc'""" print """ """ sys.exit(3) DBLIST = ["COG-14-2016-10-20", "kegg-uniprot-2016-10-20", "metacyc-2016-10-31", "refseq-2016-10-06-rel-78", "eggnog-v4-2016-10-30"] MAX_NUM = 10000000 def printf(fmt, *args): sys.stdout.write(fmt % args) sys.stdout.flush() def eprintf(fmt, *args): sys.stderr.write(fmt % args) sys.stderr.flush() class FastaRecord(): def __init__(self, longname, sequence): self.longname = longname self.sequence = sequence fields = [ x.strip() for x in self.longname.split(' ') ] if len(fields) > 0: self.name = fields[0] else: self.name = None class FastaReader(): """Parses a fasta record from a string or file.""" stop = False START_PATTERN = re.compile(r'^>') name = None future_name =None sequence="" def __init__(self, fasta_filename): try: self.file = open(fasta_filename, 'r') except IOError: print "Cannot open fasta file " + fasta_filename def __iter__(self): return self def close(self): self.file.close() def next(self): if self.stop: raise StopIteration try: if not self.name: self.name = self.file.readline().strip() line = self.file.readline() except: line = None if not line: self.stop = True raise StopIteration fragments = [] while line and not self.START_PATTERN.search(line): fragments.append(line.strip()) line = self.file.readline() # print line if self.future_name: self.name = self.future_name if line: self.future_name = line.strip() self.sequence =''.join(fragments) self.seqname = self.name return FastaRecord(self.name, self.sequence) usage= sys.argv[0] + """ -i file.fna """ parser = None def createParser(): global parser epilog = """ This script computes the sequence stats for the fasta files """ epilog = re.sub(r'[ \t\f\v]+',' ', epilog) parser = OptionParser(usage=usage, epilog=epilog) parser.add_option("-f", dest="folders", action='append', help='add the folder to be examined, it expects a input and output folders under this folder') parser.add_option("-s", dest="stages", default=[], action='append', help=''' INPUT : 1\n ORFs : 2\n B/LAST : 3\n PARSE : 4\n ANNOT : 5\n PGDB : 6\n add the folder to be examined''') parser.add_option("-t", dest="type", default='1', choices=['1', '2', '3', '4'], help=''' present : 1 isNonEmpty : 2 num lines : 3 file size : 4 turns on the cumulative mod''') parser.add_option("-c", action="store_false", dest="cumul", default=True, help="print the preceeding stages") parser.add_option("-m", dest="top", type='int', default=100000000, help='max number of samples to read [default : 100000000]') parser.add_option("-n", dest="max_num", type='int', default=100000000, help='max number of items to count to save time [default : 100000000]') def valid_arguments(opts, args): state = True if opts.folders == None : print 'ERROR: Did not specify any folder' state = False return state def isAminoAcidSequence(sequence): if sequence: count = 0 list = [ 'a', 't', 'c', 'g', 'A', 'T', 'C', 'G'] for x in sequence: if x in list: count+=1 if count/len(sequence) < 0.80: return True else: return False return True def filter_sequence(sequence): if 
isAminoAcidSequence(sequence): return sequence sequence = re.sub(r'[^atcgATCG]','-', sequence.strip()) subsequences = sequence.split('-') max_length = 0; longest_sequence = ""; for seq in subsequences: if len(seq) > max_length : longest_sequence = seq max_length = len(seq) return longest_sequence class FastaRecord(object): def __init__(self, name, sequence): self.name = name self.sequence = sequence # return FastaRecord(title, sequence) def read_fasta_records(input_file): records = [] sequence="" name="" while 1: line = input_file.readline() if line == "": if sequence!="" and name!="": records.append(FastaRecord(name, sequence)) return records if line=='\n': continue line = line.rstrip() if line.startswith(">") : if sequence!="" and name!="": records.append(FastaRecord(name, sequence)) name = line.rstrip() sequence ="" else: sequence = sequence + line.rstrip() return records def numLinesPf(filename): global MAX_NUM count = 0 commPATT = re.compile(r'^NAME') fh = None if path.exists(filename): fh = open(filename) elif path.exists(filename + ".gz"): fh = gzip.open(filename + ".gz") else: return 0 for line in fh: if commPATT.search(line): count += 1 if count > MAX_NUM: break fh.close() return count def numLines(filename): global MAX_NUM count = 0 commPATT = re.compile(r'^#') fh = None if path.exists(filename): fh = open(filename) elif path.exists(filename + ".gz"): fh = gzip.open(filename + ".gz") else: return 0 for line in fh: if not commPATT.search(line): count += 1 if count > MAX_NUM: break fh.close() return count def numSeqFasta(file): """ process one fasta sequence at a time """ global MAX_NUM fastareader= FastaReader(file) count = 0 for record in fastareader: seqname = record.name seq = record.sequence length = len(seq) count += 1 if count > MAX_NUM: break fastareader.close() return count def maxSizeFasta(file): """ process one fasta sequence at a time """ fastareader= FastaReader(file) max_length=0 count = 0 for record in fastareader: if count > 10000: break seqname = record.name seq = record.sequence length = len(seq) count += 1 if length > max_length: max_length =length fastareader.close() return max_length def avgSizeFasta(file): """ process one fasta sequence at a time """ fastareader= FastaReader(file) tot_length=0 count = 0 for record in fastareader: if count > 10000: break seqname = record.name seq = record.sequence length = len(seq) tot_length += length count += 1 fastareader.close() avg = tot_length/count return avg def extractSampleName(name): sample_name = name sample_name = re.sub(r'^.*/','',sample_name, re.I) sample_name = re.sub(r'^.*\\','',sample_name, re.I) sample_name = re.sub(r'\.fasta$','',sample_name, re.I) sample_name = re.sub(r'\.fna$','',sample_name, re.I) sample_name = re.sub(r'\.faa$','',sample_name, re.I) sample_name = re.sub(r'\.fas$','',sample_name, re.I) sample_name = re.sub(r'\.fa$','',sample_name, re.I) return sample_name def add_samples(folder, folders_samples, top): files = glob( folder + '/input/*.fasta') count = top for file in sorted(files): if count==0: break count -= 1 sample_name = extractSampleName(file) if not folder in folders_samples: folders_samples[folder] = {} folders_samples[folder][sample_name] = {} def check_file(file): if not path.exists(file): return path.exists(file + '.gz') return path.exists(file) def isNotEmpty(file): size = 0 if path.exists(file): try: size = path.getsize(file) except: pass return size def check_type1(folders_samples, folder, stages, top): count = top for sample in sorted(folders_samples[folder].keys()): if 
count==0: break count -=1 if '1' in stages: status = check_file(folder + '/input/' + sample+'.fasta') if status: folders_samples[folder][sample]['1'] = 'Present' else: folders_samples[folder][sample]['1'] = 'Absent' if '2' in stages: filename =folder + '/output/' + sample+ '/orf_prediction/' + sample + '.qced.faa' status = check_file(filename) if status: folders_samples[folder][sample]['2'] = 'Present' else: folders_samples[folder][sample]['2'] = 'Absent' for db in get_db_names(stages, '3'): filename =folder + '/output/' + sample+ '/blast_results/' + sample + "." + db +".LASTout" status = check_file(filename) if status: folders_samples[folder][sample]['3:' +db] = 'Present' else: folders_samples[folder][sample]['3:' + db] = 'Absent' for db in get_db_names(stages, '4'): filename =folder + '/output/' + sample+ '/blast_results/' + sample + "." + db +".LASTout.parsed.txt" status = check_file(filename) if status: folders_samples[folder][sample]['4:' + db] = 'Present' else: folders_samples[folder][sample]['4:' + db] = 'Absent' def check_type2(folders_samples, folder, stages, top): count = top for sample in sorted(folders_samples[folder].keys()): if count==0: break count -= 1 if '1' in stages: filename = folder + '/input/' + sample+'.fasta' size = isNotEmpty(filename) folders_samples[folder][sample]['1'] = size if '2' in stages: filename =folder + '/output/' + sample+ '/orf_prediction/' + sample + '.qced.faa' size = isNotEmpty(filename) folders_samples[folder][sample]['2'] = size for db in get_db_names(stages, '3'): filename =folder + '/output/' + sample+ '/blast_results/' + sample + "." + db +".LASTout" size = isNotEmpty(filename) if size: folders_samples[folder][sample]['3:' + db ] = int(size) else: folders_samples[folder][sample]['3:' + db] = int(size) for db in get_db_names(stages, '4'): filename =folder + '/output/' + sample+ '/blast_results/' + sample + "." + db +".LASTout.parsed.txt" size = isNotEmpty(filename) if size: folders_samples[folder][sample]['4:' + db ] = int(size) else: folders_samples[folder][sample]['4:' + db ] = int(size) def check_type3(folders_samples, folder, stages, top): count = top i = 1 for sample in sorted(folders_samples[folder].keys()): if count==0: break count -= 1 eprintf(" %3d\t%s\n",i, sample) i+=1 if '1' in stages: filename = folder + '/input/' + sample+'.fasta' size = numSeqFasta(filename) folders_samples[folder][sample]['1'] = int(size) if '2' in stages: filename =folder + '/output/' + sample+ '/orf_prediction/' + sample + '.qced.faa' size = numLines(filename) folders_samples[folder][sample]['2'] = size for db in get_db_names(stages, '3'): filename =folder + '/output/' + sample+ '/blast_results/' + sample + "." + db +".LASTout" size = numLines(filename) folders_samples[folder][sample]['3:' + db ] = int(size) for db in get_db_names(stages, '4'): filename =folder + '/output/' + sample+ '/blast_results/' + sample + "." 
+ db +".LASTout.parsed.txt" size = numLines(filename) folders_samples[folder][sample]['4:' + db ] = int(size) if '5' in stages: filename =folder + '/output/' + sample+ '/results/annotation_table/' + sample + ".ORF_annotation_table.txt" size = numLines(filename) folders_samples[folder][sample]['5'] = int(size) if '6' in stages: filename =folder + '/output/' + sample+ '/results/annotation_table/' + sample + ".functional_and_taxonomic_table.txt" size = numLines(filename) folders_samples[folder][sample]['6'] = int(size) if '7' in stages: filename =folder + '/output/' + sample+ '/ptools/' + "0.pf" size = numLinesPf(filename) folders_samples[folder][sample]['7'] = int(size) def get_db_names(stages, c): threePATT = re.compile(r'^' + c + ':') stage_suffix = {} for stage in stages.keys(): result = threePATT.search(stage) if result: fields = [x.strip() for x in stage.split(':') ] stage_suffix[fields[1]] = True return sorted(stage_suffix.keys()) def check_type(folder_samples, folder, stages, type, top): if type=='1': check_type1(folder_samples, folder, stages, top) return if type=='2': check_type2(folder_samples, folder, stages, top) return if type=='3': check_type3(folder_samples, folder, stages, top) return def print_status(folders_samples, folder, sample, stage, type) : # print folders_samples printf('\t' + str(folders_samples[folder][sample][stage])) # the main function SIZE = 1000 def main(argv, errorlogger = None, runstatslogger = None): global parser (opts, args) = parser.parse_args(argv) if not valid_arguments(opts, args): print usage sys.exit(0) stages = {} for stage in opts.stages: stages[stage] = True type = opts.type # adding the sampls folders_samples = {} for folder in opts.folders: add_samples(folder, folders_samples, opts.top) #MAX_NUM global MAX_NUM MAX_NUM = opts.max_num # stage processing count = opts.top for folder in opts.folders: eprintf("%s\n",folder) check_type(folders_samples, folder, stages, opts.type, opts.top) printf("#FOLDER\tSAMPLE") if '1' in stages: printf("\tINPUT_FILE") if '2' in stages: printf("\tORF_FILE") for db in get_db_names(stages, '3'): if '3:'+db in stages: printf("\t" + db + ".LASTout") for db in get_db_names(stages, '4'): if '4:'+db in stages: printf("\t" + db + ".LASTout.parsed.txt") if '5' in stages: printf("\tORF_ANNOTATION") if '6' in stages: printf("\tTAXONOMIC_FUNCTIONAL") if '7' in stages: printf("\tPTOOLS_ORF") printf("\n") status1 = '' status2 = '' if type=='1': status1 = 'Y/N' status2 = 'Y/N' if type=='2': status1 = 'Size' status2 = 'Size' if type=='3': status1 = 'Avg Len' status2 = 'Num Lines' printf("#Name\tName") if '1' in stages: printf("\t"+ status1 + "(1)") if '2' in stages: printf("\t"+ status2 + "(2)") for db in get_db_names(stages, '3'): if '3:'+db in stages: printf("\t"+ status2 + "(3)") for db in get_db_names(stages, '4'): if '4:'+db in stages: printf("\t"+ status2 + "(4)" ) if '5' in stages: printf("\t"+ status2 + "(5)") if '6' in stages: printf("\t"+ status2 + "(6)") if '7' in stages: printf("\t"+ status2 + "(7)") printf("\n") count = opts.top for folder in opts.folders: for sample in sorted(folders_samples[folder].keys()): if count ==0: break count -= 1 printf("%s\t%s",folder, sample) if '1' in stages: print_status(folders_samples, folder, sample, '1', opts.type) if '2' in stages: print_status(folders_samples, folder, sample, '2', opts.type) for db in get_db_names(stages, '3'): if '3:'+db in stages: print_status(folders_samples, folder, sample, '3:'+ db, opts.type) for db in get_db_names(stages, '4'): if '4:'+db in stages: 
print_status(folders_samples, folder, sample, '4:'+ db, opts.type) if '5' in stages: print_status(folders_samples, folder, sample, '5', opts.type) if '6' in stages: print_status(folders_samples, folder, sample, '6', opts.type) if '7' in stages: print_status(folders_samples, folder, sample, '7', opts.type) printf("\n") # print folders_samples # python ~/MetaPathways_Python.3.0/utilities/compute_status.py -f . -s 1 -s 2 -s 3:COG-14-2016-10-20 -s 3:kegg-uniprot-2016-10-20 -s 3:metacyc-2016-10-31 -s 3:refseq-2016-10-06-rel-78 -s 3:eggnog-v4-2016-10-30 -s 4:COG-14-2016-10-20 -s 4:kegg-uniprot-2016-10-20 -s 4:metacyc-2016-10-31 -s 4:refseq-2016-10-06-rel-78 -s 4:eggnog-v4-2016-10-30 -s 5 -s 6 -s 7 -t 3 -m 4 -n 400 # the main function of metapaths if __name__ == "__main__": createParser() main(sys.argv[1:])
mit
3,113,348,630,334,160,000
25.713869
384
0.539155
false
3.608558
false
false
false
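The compute_status.py record above gathers per-sample FASTA statistics with a streaming reader rather than loading whole files; a minimal standalone sketch of that counting pattern, with illustrative file and function names:

# Illustrative sketch, not part of the record above: streaming FASTA statistics
# in the spirit of compute_status.py's FastaReader-based counters.
def fasta_stats(path):
    """Return (record_count, mean_sequence_length) for a FASTA file, read line by line."""
    count, total = 0, 0
    with open(path) as fh:
        for line in fh:
            if line.startswith('>'):
                count += 1
            else:
                total += len(line.strip())
    return count, (float(total) / count if count else 0.0)

# e.g. n, avg = fasta_stats('sample.fasta')   # 'sample.fasta' is a hypothetical input file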
YYKI/blog-webapp
www/apis.py
1
2900
#!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = 'syuson' ''' JSON API definition ''' import json, logging, inspect, functools class APIError(Exception): """ the base APIError which contains error(required), data(optional) and message(optional) """ def __init__(self, error, data='', message=''): super(APIError, self).__init__(message) self.error = error self.data = data self.message = message class APIValueError(APIError): """ Indicate the input value has error or invalid. The data specifies the error field of input form. """ def __init__(self, field, message=''): super(APIValueError, self).__init__('value:invalid', field, message) class APIResourceNotFoundError(APIError): """ Indicate the resource was not found. The data specifies the resource name. """ def __init__(self, field, message=''): super(APIResourceNotFoundError, self).__init__('value:notfound', field, message) class APIPermissionError(APIError): """ Indicate the api has no permission. """ def __init__(self, message=''): super(APIPermissionError, self).__init__('permission:forbidden', 'permission', message) self.arg = message class Page(object): """docstring for Page""" def __init__(self, item_count, page_index=1, page_size=3): ''' Init Pagination by item_count, page_index and page_size. >>> p1 = Page(100, 1) >>> p1.page_count 10 >>> p1.offset 0 >>> p1.limit 10 >>> p2 = Page(90, 9, 10) >>> p2.page_count 9 >>> p2.offset 80 >>> p2.limit 10 >>> p3 = Page(91, 10, 10) >>> p3.page_count 10 >>> p3.offset 90 >>> p3.limit 10 ''' self.item_count = item_count self.page_size = page_size self.page_count = item_count // page_size + (1 if item_count % page_size > 0 else 0) if (item_count == 0) or (page_index > self.page_count): self.offset = 0 self.limit = 0 self.page_index = 1 else: self.page_index = page_index self.offset = self.page_size * (page_index - 1) self.limit = self.page_size self.has_next = self.page_index < self.page_count self.has_previous = self.page_index > 1 print('item_count: %s, page_count: %s, page_index: %s, page_size: %s, offset: %s, limit: %s' % ( self.item_count, self.page_count, self.page_index, self.page_size, self.offset, self.limit)) def __str__(self): return 'item_count: %s, page_count: %s, page_index: %s, page_size: %s, offset: %s, limit: %s' % ( self.item_count, self.page_count, self.page_index, self.page_size, self.offset, self.limit) __repr__ = __str__
gpl-3.0
-8,884,925,662,730,124,000
27.712871
105
0.561034
false
3.575832
false
false
false
faylau/oVirt3.3WebAPITest
src/TestData/Host/ITC03010304_CreateHost_NameVerify.py
1
2575
#encoding:utf-8 __authors__ = ['"Liu Fei" <[email protected]>'] __version__ = "V0.1" ''' # ChangeLog: #--------------------------------------------------------------------------------- # Version Date Desc Author #--------------------------------------------------------------------------------- # V0.1 2014/10/17 初始版本 Liu Fei #--------------------------------------------------------------------------------- ''' '''----------------------------------------------------------------------------------------- @note: Pre-TestData -----------------------------------------------------------------------------------------''' '''----------------------------------------------------------------------------------------- @note: Test-Data -----------------------------------------------------------------------------------------''' # 主机名称:(1)包含特殊字符;(2)超过255个字符. host_name_list = ['node-ITC03010304-~!@#$%^', 'node-ITC03010304-abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz01234' ] xml_host_info = ''' <data_driver> <host> <name>%s</name> <address>10.1.167.4</address> <root_password>qwer1234</root_password> </host> <host> <name>%s</name> <address>10.1.167.4</address> <root_password>qwer1234</root_password> </host> </data_driver> ''' % (host_name_list[0], host_name_list[1]) '''----------------------------------------------------------------------------------------- @note: Post-TestData -----------------------------------------------------------------------------------------''' xml_del_option = ''' <action> <force>true</force> <async>false</async> </action> ''' '''----------------------------------------------------------------------------------------- @note: ExpectedResult -----------------------------------------------------------------------------------------''' expected_status_code = 400 expected_info_list = [ ''' <fault> <reason>Operation Failed</reason> <detail>[Host name must be formed of alphanumeric characters, numbers or "-_."]</detail> </fault> ''', ''' <fault> <reason>Operation Failed</reason> <detail>[size must be between 1 and 255]</detail> </fault> ''' ]
apache-2.0
5,178,600,399,952,293,000
35.608696
276
0.366337
false
4.809524
false
true
false
rabrahm/ceres
utils/FastRotators/spfr.py
1
18831
from pylab import * import pyfits from PyAstronomy import pyasl import scipy from scipy import interpolate from scipy import ndimage from scipy import signal import pickle from matplotlib.backends.backend_pdf import PdfPages import os #from pyevolve import G1DList #from pyevolve import GSimpleGA from multiprocessing import Pool import time def download_models(webpage='http://svo2.cab.inta-csic.es/theory/models/coelho/high/data/',dest='../../data/'): os.system('mkdir '+dest+'/COELHO2014') cwd = os.getcwd() os.chdir(dest+'/COELHO2014') tf = np.arange(6000,10001,250) gf = np.arange(2.5,4.6,0.5) #gf = np.array([2.5]) zf = np.array([-1.,-0.5,0.0,0.2]) for t in tf: for g in gf: for z in zf: modname = get_modname(t,g,z) if z<0: sz = 'm' else: sz = 'p' sz = sz+str(float(np.absolute(z))).replace('.','')+'p00/' os.system('wget ' + webpage+sz+modname+'.fits') os.system('wget ' + webpage+sz+modname+'plc.fits') os.chdir(cwd) return True def n_Edlen(l): sigma = 1e4 / l sigma2 = sigma*sigma n = 1 + 1e-8 * (8342.13 + 2406030 / (130-sigma2) + 15997/(38.9-sigma2)) return n def n_Morton(l): sigma = 1e4 / l sigma2 = sigma*sigma n = 1 + 6.4328e-5 + 2.94981e-2 / (146.-sigma2) + 2.5540e-4/(41.-sigma2) return n def ToAir(l): return (l / n_Edlen(l)) def ToVacuum(l): cond = 1 l_prev = l.copy() while(cond): l_new = n_Edlen(l_prev) * l if (max(np.absolute(l_new - l_prev)) < 1e-10): cond = 0 l_prev = l_new return l_prev def get_modname(t,g,z): st = str(int(t)) if t<10000: st = '0'+st sg = '+'+str(np.around(g,1)) if z < 0: sz = 'm' else: sz = 'p' z=float(z) sz = sz + str(np.around(np.absolute(z),1)) sz = sz.replace('.','') return 't'+st+'_g'+sg+'_'+sz+'p00_hr' def get_model(t,g,z,model_path='../../data/COELHO2014/'): modname = model_path + get_modname(t,g,z) try: out = pyfits.getdata(modname+'.fits') except: out = pyfits.getdata(modname+'plc.fits') return out def get_near(x,vec): if x == vec[0]: mmin = vec[0] mmax = vec[1] elif x == vec[-1]: mmin = vec[-2] mmax = vec[-1] else: tvec = vec - x In = np.where(tvec < 0)[0] mmin = tvec[In].max() + x Ix = np.where(tvec >= 0)[0] mmax = tvec[Ix].min() + x return mmin,mmax def trilinear_interpolation(t,g,z,model_path='../../data/COELHO2014/'): teffs = np.arange(6000,10001,250) loggs = np.arange(2.5,4.6,0.5) fehs = np.array([-1.,-0.5,0.0,0.2]) x0,x1 = get_near(t,teffs) y0,y1 = get_near(g,loggs) z0,z1 = get_near(z,fehs) xd = (t-x0)/(x1-x0) yd = (g-y0)/(y1-y0) zd = (z-z0)/(z1-z0) try: hd = pyfits.getheader(model_path+get_modname(x0,y0,z0)+'.fits') except: hd = pyfits.getheader(model_path+get_modname(x0,y0,z0)+'plc.fits') c000 = get_model(x0,y0,z0,model_path) c001 = get_model(x0,y0,z1,model_path) c010 = get_model(x0,y1,z0,model_path) c100 = get_model(x1,y0,z0,model_path) c110 = get_model(x1,y1,z0,model_path) c101 = get_model(x1,y0,z1,model_path) c011 = get_model(x0,y1,z1,model_path) c111 = get_model(x1,y1,z1,model_path) wav = np.arange(len(c111))*hd['CDELT1'] + hd['CRVAL1'] c00 = c000*(1-xd) + c100*xd c01 = c001*(1-xd) + c101*xd c10 = c010*(1-xd) + c110*xd c11 = c011*(1-xd) + c111*xd c0 = c00*(1-yd) + c10*yd c1 = c01*(1-yd) + c11*yd c = c0*(1-zd) + c1*zd return wav,c def normalize_model(w,f): ow = w.copy() of = f.copy() #plot(w,f) while True: #medflts = scipy.signal.medfilt(f,1001) coef = np.polyfit(w,f,6) fited = np.polyval(coef,w) res = f - fited I = np.where(res > -np.sqrt(np.var(res)))[0] w,f = w[I],f[I] if len(w) < 0.3* len(ow): break #plot(ow,np.polyval(coef,ow)) #show() return coef def spec_ccf(sw,sf,mw,mf,vi,vf,dv): mf = mf -1 mf = -mf #plot(mw,mf) tck = 
interpolate.splrep(mw,mf,k=1) v = vi retccf = [] vels = [] while v<=vf: swt = sw * (1 + v/299792.458) mft = interpolate.splev(swt,tck) #if v == 0: # plot(swt,mft) # plot(swt,sft) # show() mft -= np.mean(mft) sft = sf - np.mean(sf) #sft = sf.copy() #print np.sum(mft**2),np.sum(sft**2) retccf.append(np.sum(mft*sft)/np.sqrt(np.sum(mft**2)*np.sum(sft**2))) vels.append(v) v+=dv return np.array(vels),np.array(retccf) def ccf_fft(swt,sft,mwt,mft): mf = mft -1 mf = -mf #plot(mw,mf) tck = interpolate.splrep(np.log(mwt),mf,k=1) sw = np.log(swt) tck2 = interpolate.splrep(sw,sft,k=1) nsw = np.linspace(sw[0], sw[-1], 5000) sf = interpolate.splev(nsw,tck2) mf = interpolate.splev(nsw,tck) sf -= np.mean(sf) mf -= np.mean(mf) plot(nsw,sf) plot(nsw,mf) show() retccf = np.fft.ifft(np.conj(np.fft.fft(sf))*np.fft.fft(mf)) retccf = np.hstack((retccf[2500:],retccf[:2500])) retvels = np.arange(len(retccf)) - 0.5*len(retccf) retvels *= (nsw[1]-nsw[0]) retvels = 299792.458*(np.exp(retvels)-1.) return retvels, retccf def ccf_simple(sw,sf,mw,mf,rv): mf = mf -1 mf = -mf #plot(mw,mf) tck = interpolate.splrep(mw,mf,k=1) swt = sw * (1 + rv/299792.458) mft = interpolate.splev(swt,tck) mft -= np.mean(mft) sft = sf - np.mean(sf) return np.sum(mft*sft)/np.sqrt(np.sum(mft**2)*np.sum(sft**2)) def clean_strong_lines(mw,sc,mode=1): if mode==1: #"""" I = np.where((mw>6520)&(mw<6600))[0] sc[I] = 1. I = np.where((mw>5888)&(mw<5897))[0] sc[I] = 1. I = np.where((mw>4310)&(mw<4360))[0] sc[I] = 1. I = np.where((mw>4840)&(mw<4880))[0] sc[I] = 1. I = np.where((mw>4070)&(mw<4130))[0] sc[I] = 1. I = np.where((mw>3875)&(mw<3900))[0] sc[I] = 1. I = np.where((mw>3920)&(mw<3945))[0] sc[I] = 1. I = np.where((mw>3955)&(mw<3980))[0] sc[I] = 1. I = np.where(mw<3850)[0] sc[I] = 1. #""" if mode==2: #"""" I = np.where((mw>6550)&(mw<6570))[0] sc[I] = 1. I = np.where((mw>5888)&(mw<5897))[0] sc[I] = 1. I = np.where((mw>4320)&(mw<4350))[0] sc[I] = 1. I = np.where((mw>4850)&(mw<4870))[0] sc[I] = 1. I = np.where((mw>4090)&(mw<4110))[0] sc[I] = 1. I = np.where((mw>3875)&(mw<3900))[0] sc[I] = 1. I = np.where((mw>3920)&(mw<3945))[0] sc[I] = 1. I = np.where((mw>3955)&(mw<3980))[0] sc[I] = 1. I = np.where(mw<3850)[0] sc[I] = 1. #""" return sc def RVforFR(wavs,flxs,teff=6700,logg=4.0,feh=-1.0,vsini=100.,model_path='../../data/COELHO2014/',vmin=-1000.,vmax=1000.,vstep=10.): def fitfunc(p,x): ret = p[3] + p[0] * np.exp(-.5*((x-p[1])/p[2])**2) return ret errfunc = lambda p,x,y: np.ravel( (fitfunc(p,x)-y) ) #sc = get_model(teff,logg,feh) #hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits') #wav = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1'] teff = float(teff) try: sc = get_model(teff,logg,feh) hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits') mw = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1'] except: mw,sc = trilinear_interpolation(teff,logg,feh,model_path) for order in range(len(flxs)): flxs[order] = clean_strong_lines(wavs[order],flxs[order]) sc = clean_strong_lines(mw,sc) II = np.where(sc != 1)[0] JJ = np.where(sc == 1)[0] coef = normalize_model(mw[II],sc[II]) sc /= np.polyval(coef,mw) sc[JJ] = 1. 
mw = ToVacuum(mw) weis1 = [] ccftot = [] for i in range(wavs.shape[0]): #plot(wavs[i],flxs[i]) scf = flxs[i] scw = wavs[i] J = np.where(scf!=0)[0] scw,scf = scw[J],scf[J] I = np.where((mw>scw[0]-100) & (mw<scw[-1]+100)) tmf = pyasl.fastRotBroad(mw[I], sc[I], 0.5, vsini) #plot(mw[I],tmf) J = np.where(scf!=1)[0] if len(J)>100: ccv,ccf = spec_ccf(scw,scf,mw[I],tmf,vmin,vmax,vstep) #plot(ccv,ccf) #show() #ccf = np.array(ccf) wei1 = len(np.where(scf!=1)[0])**2 weis1.append(wei1) if len(ccftot)==0: ccftot = ccf.copy()*wei1 else: ccftot = np.vstack((ccftot,ccf.copy()*wei1)) #show() weis1 = np.array(weis1) ccftot = np.sum(ccftot,axis=0)/ np.sum(weis1) p0 = [ccftot.min(),ccv[np.argmin(ccftot)],vsini,ccftot[0]] p1, success = scipy.optimize.leastsq(errfunc,p0, args=(ccv,ccftot)) return p1,ccv,ccftot,fitfunc(p1,ccv) def calc_bss2(vels,xc,coef, bot_i=0.15, bot_f=0.4, top_i=0.6, top_f=0.9, dt=0.01): try: I1 = np.where((vels>coef[1]-3*coef[2]) & (vels<coef[1]) )[0] I2 = np.where((vels<coef[1]+3*coef[2]) & (vels>coef[1]) )[0] I3 = np.where(vels<coef[1]-4*coef[2])[0] I4 = np.where(vels>coef[1]+4*coef[2])[0] I = np.hstack((I3,I4)) base = np.median(xc[I]) xc = base - xc xc /= xc.max() v1,x1 = vels[I1],xc[I1] v2,x2 = vels[I2],xc[I2] #plot(v1,x1) #plot(v2,x2) #show() dp = top_f vect = [] while dp >= top_i: lb = np.where(x1>dp)[0][0] m = (v1[lb] - v1[lb-1])/(x1[lb]-x1[lb-1]) n = v1[lb] - m*x1[lb] bs1 = m*dp+n lb = np.where(x2>dp)[0][-1] m = (v2[lb] - v2[lb+1])/(x2[lb]-x2[lb+1]) n = v2[lb] - m*x2[lb] bs2 = m*dp+n vect.append(0.5*(bs2+bs1)) dp-=dt vect = np.array(vect) dp = bot_f vecb = [] while dp >= bot_i: lb = np.where(x1>dp)[0][0] m = (v1[lb] - v1[lb-1])/(x1[lb]-x1[lb-1]) n = v1[lb] - m*x1[lb] bs1 = m*dp+n lb = np.where(x2>dp)[0][-1] m = (v2[lb] - v2[lb+1])/(x2[lb]-x2[lb+1]) n = v2[lb] - m*x2[lb] bs2 = m*dp+n vecb.append(0.5*(bs2+bs1)) dp-=dt vecb = np.array(vecb) return np.median(vecb) - np.median(vect) except: return -999.0 """ def lnlike(theta, W, F, Ferr): mw,sc = trilinear_interpolation(int(theta[0]),theta[1],theta[2]) sct = clean_strong_lines(mw,sc.copy()) #plot(mw,sc) #show() coef = normalize_model(mw,sct) sc /= np.polyval(coef,mw) #print gfd mw = ToVacuum(mw) mw *= 1 + theta[3]/299792.458 totD,totM,totE = np.array([]),np.array([]),np.array([]) for i in range(W.shape[0]): scf = F[i] scw = W[i] scfe = Ferr[i] J = np.where(scf!=0)[0] scw,scf,scfe = scw[J],scf[J],scfe[J] I = np.where((mw>scw[0]-10) & (mw<scw[-1]+10)) tmf = pyasl.fastRotBroad(mw[I], sc[I], 0.5, theta[4]) tck = interpolate.splrep(mw[I],tmf,k=1) tmf = interpolate.splev(scw,tck) tmf = clean_strong_lines(scw,tmf.copy()) I = np.where(tmf!=1)[0] #plot(scw,tmf) #plot(scw[I],tmf[I]) #plot(scw[I],scf[I]) #show() #print gfd tmf = tmf[I] scf = scf[I] scfe = scfe[I] tmf /= np.sum(tmf) tsf = scf/np.sum(scf) tse = scfe*(np.sum(scf)**2) totD = np.hstack((totD,tsf)) totM = np.hstack((totM,tmf)) totE = np.hstack((totE,tse)) #plot(scw[I],tsf) #plot(scw[I],tmf) #plot(scw[I],tsf + 1./np.sqrt(tse)) #show() #print fds #print theta #show() #print gvfd #ret = -np.log(2*np.pi) + np.log(np.sum(np.exp(-0.5*((y-model)/yerr)**2)/yerr)) #ret = -0.5*(np.sum(inv_sigma2*(F-model)**2 - np.log(inv_sigma2))) ret = -0.5*(np.sum(totE*(totD-totM)**2 - np.log(totE))) #for i in range(len(F)): # errorbar(Y,F[i],yerr=Ferr[i],fmt='b') #for j in model: # plot(Y,j,'r') #show() #print theta, ret if np.isnan(ret): return -np.inf else: return ret def lnprior(theta): if 6000 < theta[0] < 9000 and 3.0 < theta[1] < 4.5 and -1 < theta[2] < 0.2 and -500 < theta[3] < 500 and 1. 
< theta[4] < 500.: return 0.0 return -np.inf def lnprob(theta, W,F,Ferr): lp = lnprior(theta) if not np.isfinite(lp): return -np.inf return lp + lnlike(theta,W,F,Ferr) """ def multiccf(pars): teff,logg,feh,vsini=pars[0],pars[1],pars[2],pars[3] vmin=-500 vmax=500. vstep=20. sc = get_model(teff,logg,feh) hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits') wav = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1'] try: sc = get_model(teff,logg,feh) hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits') mw = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1'] except: mw,sc = trilinear_interpolation(teff,logg,feh,model_path) sc = clean_strong_lines(mw,sc) II = np.where(sc != 1)[0] JJ = np.where(sc == 1)[0] coef = normalize_model(mw[II],sc[II]) sc /= np.polyval(coef,mw) sc[JJ] = 1. mw = ToVacuum(mw) weis1 = [] ccftot = [] for i in range(wavs.shape[0]): scf = flxs[i].copy() scw = wavs[i].copy() J = np.where(scf!=0)[0] scw,scf = scw[J],scf[J] I = np.where((mw>scw[0]-100) & (mw<scw[-1]+100)) tmf = pyasl.fastRotBroad(mw[I], sc[I], 0.5, vsini) #plot(mw[I],tmf) J = np.where(scf!=1)[0] if len(J)>100: ccv,ccf = spec_ccf(scw,scf,mw[I],tmf,vmin,vmax,vstep) #ccv,ccf = ccf_fft(scw,scf,mw[I],tmf) #plot(ccv,ccf) #show() wei1 = len(np.where(scf!=1)[0])**2 weis1.append(wei1) if len(ccftot)==0: ccftot = ccf.copy()*wei1 else: ccftot = np.vstack((ccftot,ccf.copy()*wei1)) weis1 = np.array(weis1) ccftot = np.sum(ccftot,axis=0)/ np.sum(weis1) #print gfds #ccftot = np.mean(ccftot,axis=0) #print pars, ccftot.min() return ccftot.min() def get_pars_fr(wavst,flxst,model_patht='../../data/COELHO2014/',npools=4,fixG=1.0): for order in range(len(flxst)): flxst[order] = clean_strong_lines(wavst[order],flxst[order],mode=1) t0 = time.time() global wavs,flxs global model_path wavs,flxs=wavst.copy(),flxst.copy() model_path=model_patht gt = np.array([6000,7000,8000,9000,10000]) gg = np.array([2.5,3.0,3.5,4.0,4.5]) if fixG != -1: gg = np.array([fixG]) gz = np.array([-1,-0.5,0.0,0.2]) gr = np.array([10.,50.,100.,150.,200.,250.,300.]) #""" tr = np.tile(gr,len(gt)*len(gg)*len(gz)) tg = np.repeat(np.tile(gg,len(gt)),len(gr)*len(gz)) tz = np.repeat(np.tile(gz,len(gt)*len(gg)),len(gr)) tt = np.repeat(gt,len(gg)*len(gr)*len(gz)) tot = np.vstack((tt,tg,tz,tr)).T #for pars in tot: # pars = [8000,4.0,-0.5,40.0] # print pars, multiccf(pars) p = Pool(npools) vals = np.array((p.map(multiccf, list(tot)))) p.terminate() I = np.argmin(vals) best_vals = tot[I] bt,bg,bz,br = best_vals[0],best_vals[1],best_vals[2],best_vals[3] #""" t1 = time.time() print bt,bg,bz,br, (t1-t0)/60.,'mins' #bt,bg,bz,br = 7000.,4.5, 0.2, 100.0 gt = np.arange(bt-1000,bt+1001,250) I = np.where((gt>=6000) & (gt<=10000))[0] gt = gt[I] gr = np.arange(br-60.,br+61.,20.) 
I = np.where(gr>=10)[0] gr = gr[I] tr = np.tile(gr,len(gt)*len(gg)*len(gz)) tg = np.repeat(np.tile(gg,len(gt)),len(gr)*len(gz)) tz = np.repeat(np.tile(gz,len(gt)*len(gg)),len(gr)) tt = np.repeat(gt,len(gg)*len(gr)*len(gz)) tot = np.vstack((tt,tg,tz,tr)).T p = Pool(npools) vals = np.array((p.map(multiccf, list(tot)))) p.terminate() I = np.argmin(vals) best_vals = tot[I] bt,bg,bz,br = best_vals[0],best_vals[1],best_vals[2],best_vals[3] t2 = time.time() print bt,bg,bz,br, (t2-t1)/60.,'mins' #np.savetxt('temp_grid.txt',vals) if fixG==-1: grid = np.reshape(vals,(len(gt),len(gg),len(gz),len(gr))) tckt = interpolate.splrep(gt,np.arange(len(gt)),k=1) tckg = interpolate.splrep(gg,np.arange(len(gg)),k=1) tckz = interpolate.splrep(gz,np.arange(len(gz)),k=1) tckr = interpolate.splrep(gr,np.arange(len(gr)),k=1) itckt = interpolate.splrep(np.arange(len(gt)),gt,k=1) itckg = interpolate.splrep(np.arange(len(gg)),gg,k=1) itckz = interpolate.splrep(np.arange(len(gz)),gz,k=1) itckr = interpolate.splrep(np.arange(len(gr)),gr,k=1) st = np.arange(gt[0],gt[-1]+1,10.) sg = np.arange(gg[0],gg[-1]+0.01,0.1) sz = np.arange(gz[0],gz[-1]+0.01,0.1) sr = np.arange(gr[0],gr[-1]+1.,5.) st = interpolate.splev(st,tckt) sg = interpolate.splev(sg,tckg) sz = interpolate.splev(sz,tckz) sr = interpolate.splev(sr,tckr) tr2 = np.tile(sr,len(st)*len(sg)*len(sz)) tg2 = np.repeat(np.tile(sg,len(st)),len(sr)*len(sz)) tz2 = np.repeat(np.tile(sz,len(st)*len(sg)),len(sr)) tt2 = np.repeat(st,len(sg)*len(sr)*len(sz)) tot2 = np.vstack((tt2,tg2,tz2,tr2)) zi = ndimage.map_coordinates(grid, tot2, order=3, mode='nearest') I = np.argmin(zi) minval = tot2[:,I] mint = interpolate.splev(minval[0],itckt) ming = interpolate.splev(minval[1],itckg) minz = interpolate.splev(minval[2],itckz) minr = interpolate.splev(minval[3],itckr) else: grid = np.reshape(vals,(len(gt),len(gz),len(gr))) tckt = interpolate.splrep(gt,np.arange(len(gt)),k=1) tckz = interpolate.splrep(gz,np.arange(len(gz)),k=1) tckr = interpolate.splrep(gr,np.arange(len(gr)),k=1) itckt = interpolate.splrep(np.arange(len(gt)),gt,k=1) itckz = interpolate.splrep(np.arange(len(gz)),gz,k=1) itckr = interpolate.splrep(np.arange(len(gr)),gr,k=1) st = np.arange(gt[0],gt[-1]+1,10.) sz = np.arange(gz[0],gz[-1]+0.01,0.1) sr = np.arange(gr[0],gr[-1]+1.,5.) 
st = interpolate.splev(st,tckt) sz = interpolate.splev(sz,tckz) sr = interpolate.splev(sr,tckr) tr2 = np.tile(sr,len(st)*len(sz)) tz2 = np.repeat(np.tile(sz,len(st)),len(sr)) tt2 = np.repeat(st,len(sr)*len(sz)) tot2 = np.vstack((tt2,tz2,tr2)) zi = ndimage.map_coordinates(grid, tot2, order=3, mode='nearest') I = np.argmin(zi) minval = tot2[:,I] mint = interpolate.splev(minval[0],itckt) ming = fixG minz = interpolate.splev(minval[1],itckz) minr = interpolate.splev(minval[2],itckr) #d = {'grid':grid, 'zi':zi, 'tot2':tot2, 'gt':gt, 'gg':gg, 'gz':gz, 'gr':gr} #pickle.dump(d,open('temp_dict.pkl')) return float(mint),float(ming),float(minz),float(minr) def plot_CCF_FR(xc_dict,path='XC.pdf'): vels = xc_dict['vels'] xc_av = xc_dict['xc_av'] XCmodelgau = xc_dict['XCmodelgau'] #refvel = xc_dict['refvel'] p1gau = xc_dict['p1gau'] f1 = figure() pp = PdfPages(path) ax1 = f1.add_subplot(111) ax1.plot(vels, xc_av,'b.', label='CCF') ax1.plot(vels, XCmodelgau,'r-',label='Gaussian fit') xlabel('Velocity (km/s)') ylabel('XC') ax1.axvline(p1gau[1],linestyle=':',color='r') ax1.axhline(0.0,linestyle='-') title('Average Cross-Correlation Function + Fit') handles, labels = ax1.get_legend_handles_labels() ax1.legend(handles[::-1], labels[::-1],prop={'size':6}) pp.savefig() pp.close() clf() pass """ def trans_chromosome(chromosome): teff = chromosome[0]*100.+chromosome[1]*10.+chromosome[2] m = (10000.- 6000.)/999. n = 6000. teff = teff*m + n logg = chromosome[3] + chromosome[4]*0.1 m = (4.5 - 3.0)/9.9 n = 3. logg = logg*m + n feh = chromosome[5] + chromosome[6]*0.1 m = (0.2 - -1.)/9.9 n = -1. feh = feh*m + n vsini = chromosome[7]*10. + chromosome[8] m = (300. - 10.)/99. n = 10. vsini = vsini*m + n return teff, logg, feh, vsini global wavs, flxs def find_pars_GA(wavs,flxs,model_path='../../data/COELHO2014/'): def eval_func(chromosome): print list(chromosome) teff, logg, feh, vsini = trans_chromosome(chromosome) print teff, logg, feh, vsini pt,vels,ccf,mod = RVforFR(wavs,flxs,teff=teff,logg=logg,feh=feh,vsini=vsini,model_path=model_path) score = -ccf.min() return score genome = G1DList.G1DList(9) genome.evaluator.set(eval_func) ga = GSimpleGA.GSimpleGA(genome, interactiveMode=True) ga.setGenerations(40) ga.setMutationRate(0.2) ga.setPopulationSize(20) #ga.setCrossoverRate(1.0) genome.setParams(rangemin=0, rangemax=9) #ga.setMultiProcessing(True) ga.evolve(freq_stats=10) print ga.bestIndividual() print trans_chromosome(ga.bestIndividual()) """
mit
-4,745,925,225,003,760,000
24.208835
131
0.602676
false
2.022012
false
false
false
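The velocity search in spfr.py above hinges on spec_ccf: invert the model, spline it, Doppler-shift the observed wavelength grid for each trial velocity, and take a normalized dot product. A compact restatement of that loop, assuming only numpy and scipy:

# Illustrative restatement of spec_ccf above; assumes sw, sf, mw, mf are numpy arrays.
import numpy as np
from scipy import interpolate

def simple_ccf(sw, sf, mw, mf, vi, vf, dv, c_kms=299792.458):
    tmpl = -(mf - 1.0)                          # invert the absorption-line model, as the record does
    tck = interpolate.splrep(mw, tmpl, k=1)     # linear spline of the inverted template
    vels, ccf = [], []
    v = vi
    while v <= vf:
        shifted = sw * (1.0 + v / c_kms)        # Doppler-shift the observed wavelength grid
        m = interpolate.splev(shifted, tck)
        m = m - m.mean()
        s = sf - sf.mean()
        ccf.append(np.sum(m * s) / np.sqrt(np.sum(m * m) * np.sum(s * s)))
        vels.append(v)
        v += dv
    return np.array(vels), np.array(ccf)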
nexcess/r1soft-admin-console
rac/forms.py
1
3719
# -*- coding: utf-8 -*- # Nexcess.net r1soft-admin-console # Copyright (C) 2015 Nexcess.net L.L.C. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. from rac.models import R1softHost, PolicyTemplate from wtforms_alchemy import model_form_factory from flask.ext.wtf import Form from wtforms import IntegerField, BooleanField, StringField, RadioField, SelectMultipleField from wtforms.validators import NumberRange, InputRequired import wtforms.widgets ModelForm = model_form_factory(Form) class MultiCheckboxField(SelectMultipleField): widget = wtforms.widgets.ListWidget(prefix_label=False) option_widget = wtforms.widgets.CheckboxInput() class R1softHostForm(ModelForm): class Meta: model = R1softHost class PolicyTemplateForm(ModelForm): class Meta: model = PolicyTemplate class HostConfigurationForm(Form): hard_quota = IntegerField('Manager Disk Space (Hard Quota)', [NumberRange(min=1, max=99)]) soft_quota = IntegerField('Manager Disk Space (Soft Quota)', [NumberRange(min=1, max=99)]) task_history_limit = IntegerField('Days to retain Task History', [NumberRange(min=1, max=365)]) http_enabled = BooleanField('Enabled') http_port = IntegerField('Port', [NumberRange(min=1, max=65535)]) http_max_conn = IntegerField('Max Connections', [NumberRange(min=1, max=9999)]) https_enabled = BooleanField('Enabled') https_port = IntegerField('Port', [NumberRange(min=1, max=65535)]) https_keystore = StringField('Keystore Path') https_max_conn = IntegerField('Max Connections', [NumberRange(min=1, max=9999)]) class RestoreForm(Form): base_path = StringField('Base Path') file_names = SelectMultipleField('Files to Restore', choices=[]) restore_target = RadioField('Restore Target', default='original_host', choices=[ ('original_host', 'Original Host'), ('alt_host', 'Alternate Host')]) alt_restore_location = StringField('Alternate Location') alt_restore_host = StringField('Alternate Host', []) alt_restore_port = IntegerField('Alternate Host Port', [NumberRange(min=1, max=65535)]) overwrite_existing = BooleanField('Overwrite Existing Files', [InputRequired()]) use_compression = BooleanField('Use Compression', [InputRequired()]) estimate_size = BooleanField('Estimate Restore Size', [InputRequired()])
gpl-2.0
8,345,511,016,906,627,000
42.752941
92
0.59317
false
4.666248
false
false
false
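MultiCheckboxField above is the usual WTForms recipe for rendering a SelectMultipleField as a list of checkboxes; a minimal usage sketch, using the modern flask_wtf import and made-up field and choice names:

# Illustrative usage sketch; flask_wtf.FlaskForm is the modern spelling of the Form import
# used in the record, and the field/choice names here are invented for the example.
from flask_wtf import FlaskForm
from wtforms import SelectMultipleField, widgets

class MultiCheckboxField(SelectMultipleField):
    widget = widgets.ListWidget(prefix_label=False)
    option_widget = widgets.CheckboxInput()

class HostSelectForm(FlaskForm):
    hosts = MultiCheckboxField('Hosts', choices=[('1', 'server01'), ('2', 'server02')])

# After a POST, form.hosts.data is the list of checked values, e.g. ['1', '2'].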
teampheenix/StarCraft-Casting-Tool
scctool/matchgrabber/alpha.py
1
6950
"""Provide match grabber for AlphaTL.""" import logging from datetime import datetime, timedelta, timezone from urllib.request import urlopen, urlretrieve import scctool.settings import scctool.settings.translation from scctool.matchgrabber.custom import MatchGrabber as MatchGrabberParent # create logger module_logger = logging.getLogger(__name__) _ = scctool.settings.translation.gettext class MatchGrabber(MatchGrabberParent): """Grabs match data from Alpha SC2 Teamleague.""" _provider = "AlphaSC2" def __init__(self, *args): """Init match grabber.""" super().__init__(*args) self._urlprefix = "https://alpha.tl/match/" self._apiprefix = "https://alpha.tl/api?match=" def updateCountdown(self, datetime_str): if not datetime_str or not scctool.settings.config.parser.getboolean( "Countdown", "matchgrabber_update"): return dt_obj = datetime.strptime( datetime_str, '%Y-%m-%d %H:%M:%S') dt_obj = dt_obj.replace(tzinfo=timezone(timedelta(hours=0))) dt_obj = dt_obj.astimezone() self._controller.view.countdownTab.setFromTimestamp(dt_obj.timestamp()) def grabData(self, metaChange=False, logoManager=None): """Grab match data.""" data = self._getJson() if(data['code'] != 200): msg = 'API-Error: ' + data['error'] raise ValueError(msg) else: self._rawData = data overwrite = (metaChange or self._matchData.getURL().strip() != self.getURL().strip()) with self._matchData.emitLock(overwrite, self._matchData.metaChanged): self._matchData.setNoSets(5, 1, resetPlayers=overwrite) self._matchData.setMinSets(3) self._matchData.setSolo(False) self._matchData.setNoVetoes(0) self._matchData.resetLabels() if overwrite: self._matchData.resetSwap() self.updateCountdown(data.get('datetime', '')) league = data['tournament'] if not isinstance(league, str): league = "TBD" league = league.replace('Non-pro', 'Non-Pro') league = league.replace('Semi-pro', 'Semi-Pro') self._matchData.setLeague( self._matchData.setLeague(self._aliasLeague(league))) for idx, mapname in enumerate(data['maps']): if not isinstance(mapname, str): mapname = "TBD" self._matchData.setMap(idx, mapname) for team_idx in range(2): for set_idx, player in enumerate( data[f'lineup{team_idx + 1}']): try: playername = self._aliasPlayer(player['nickname']) if not isinstance(playername, str): playername = "TBD" self._matchData.setPlayer( self._matchData.getSwappedIdx(team_idx), set_idx, playername, str(player['race'])) except Exception: self._matchData.setPlayer( self._matchData.getSwappedIdx(team_idx), set_idx, 'TBD', 'Random') team = data[f'team{team_idx + 1}'] name, tag = team['name'], team['tag'] if not isinstance(name, str): name = "TBD" if not isinstance(tag, str): tag = "" self._matchData.setTeam( self._matchData.getSwappedIdx(team_idx), self._aliasTeam(name), tag) for set_idx in range(5): try: score = int(data['games'][set_idx]) * 2 - 3 except Exception: score = 0 self._matchData.setMapScore( set_idx, score, overwrite, True) self._matchData.setAllKill(False) self._matchData.autoSetMyTeam( swap=scctool.settings.config.parser.getboolean( "SCT", "swap_myteam")) if logoManager is not None: self.downloadLogos(logoManager) def downloadLogos(self, logoManager): """Download team logos.""" if self._rawData is None: raise ValueError( "Error: No raw data.") for idx in range(2): try: logo_idx = self._matchData.getSwappedIdx(idx) + 1 oldLogo = logoManager.getTeam(logo_idx) logo = logoManager.newLogo() url = self._rawData[f'team{idx + 1}']['logo'] if url: new_logo = logo.fromURL( self._rawData[f'team{idx + 1}']['logo'], localFile=oldLogo.getAbsFile()) if new_logo: 
logoManager.setTeamLogo(logo_idx, logo) else: module_logger.info("Logo download is not needed.") except Exception: module_logger.exception("message") def downloadBanner(self): """Download team logos.""" data_dir = scctool.settings.casting_data_dir transparent = scctool.settings.config.parser.getboolean( "SCT", "transparent_match_banner") if self._rawData is None: raise ValueError( "Error: No raw data.") fname = data_dir + "/matchbanner.png" url = "https://alpha.tl/announcement/"\ + str(self.getID()) if transparent: url = url + "?transparent" else: url = url + "?vs" localFile = scctool.settings.getAbsPath(fname) needs_download = True size = 1024 * 400 try: with open(localFile, "rb") as in_file: local_byte = in_file.read(size) file = urlopen(url) data = file.read(size) if(data == local_byte): needs_download = False except FileNotFoundError: module_logger.warning("Match banner not found.") except Exception: module_logger.exception("message") if needs_download: try: urlretrieve(url, scctool.settings.getAbsPath(fname)) except Exception: module_logger.exception("message") else: module_logger.info('No need to redownload match banner')
gpl-3.0
1,358,352,931,005,396,000
37.611111
79
0.501439
false
4.446577
false
false
false
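updateCountdown above turns AlphaTL's UTC timestamp string into a local epoch value by attaching a UTC tzinfo and converting with astimezone(); the same conversion as a standalone sketch:

# Standalone sketch of the UTC-string-to-local-timestamp conversion in updateCountdown above.
from datetime import datetime, timedelta, timezone

def to_local_timestamp(datetime_str):
    dt = datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')
    dt = dt.replace(tzinfo=timezone(timedelta(hours=0)))   # treat the feed value as UTC, as the record does
    return dt.astimezone().timestamp()                     # epoch seconds, shifted into the local zone

# to_local_timestamp('2019-05-01 18:00:00') gives the matching local Unix time.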
five-elephants/a_game
pygame_hello.py
1
2613
#!/usr/bin/env python import pygame from pygame.locals import * import random class Wandering_text: def __init__(self, text, color, center, bbox): self.font = pygame.font.Font(None, 36) self.surface = self.font.render(text, 1, color) self.pos = self.surface.get_rect(center=center) self.bbox = bbox def draw(self, screen): screen.blit(self.surface, self.pos) def update(self): self.pos.left += (random.random() - 0.5) * 10.0 self.pos.top += (random.random() - 0.5) * 10.0 if self.pos.left < self.bbox.left: self.pos.left = self.bbox.left + (self.bbox.left - self.pos.left) elif self.pos.right > self.bbox.right: self.pos.right = self.bbox.right - (self.bbox.right - self.pos.right) elif self.pos.top < self.bbox.top: self.pos.top = self.bbox.top + (self.bbox.top - self.pos.top) elif self.pos.bottom > self.bbox.bottom: self.pos.bottom = self.bbox.bottom - (self.bbox.bottom - self.bbox.top) def show_fps(screen, fps): font = pygame.font.Font(None, 18) surf = font.render("fps: %.1f" % (fps), 1, (180, 10, 10)) frame = surf.get_rect() frame.right = screen.get_width() screen.blit(surf, frame) def main(): if not pygame.font: print "Error: no fonts" return if not pygame.mixer: print "Error: no sound" return random.seed() pygame.init() screen = pygame.display.set_mode((640, 480)) pygame.display.set_caption('Hello World') background = pygame.Surface(screen.get_size()) background = background.convert() background.fill((250, 250, 250)) screen.blit(background, (0,0)) #if pygame.font: #font = pygame.font.Font(None, 36) #text = font.render("Hello World", 1, (10, 10, 10)) #textpos = text.get_rect(centerx=background.get_width()/2) #background.blit(text, textpos) hello = Wandering_text("Hello World", (10, 10, 10), center=(screen.get_width()/2, screen.get_height()/2), bbox=screen.get_rect()) pygame.display.flip() clock = pygame.time.Clock() while True: clock.tick(60) for event in pygame.event.get(): if event.type == QUIT: return elif event.type == KEYDOWN and event.key == K_ESCAPE: return screen.blit(background, (0,0)) hello.update() hello.draw(screen) show_fps(screen, clock.get_fps()) pygame.display.flip() if __name__ == '__main__': main()
gpl-3.0
-6,722,202,947,777,309,000
28.693182
83
0.580176
false
3.371613
false
false
false
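Wandering_text.update() above reflects the text rect back inside its bounding box after each random nudge; the reflection reduces to a small helper, sketched one-dimensionally here:

# Illustrative helper: the reflect-at-the-edge rule Wandering_text.update() applies to each side.
def reflect(value, lo, hi):
    if value < lo:
        return lo + (lo - value)
    if value > hi:
        return hi - (value - hi)
    return value

# reflect(-3, 0, 100) -> 3    reflect(104, 0, 100) -> 96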
CitrineInformatics/refkit
lookup/arxiv.py
1
4957
""" Functions for working with the arXiv.org api. """ import urllib import requests from xml.etree import ElementTree from refkit.util import arxivid from refkit.metadata import Metadata from refkit.format import author def search(lookup): """ Search for a reference on arXiv.org given a lookup string. Since the arXiv.org api can return mutiple references for a single query, this function raises an error in the case that more than one reference was returned. :param lookup: String with the lookup to search for on arXiv.org :raises ValueError: If a reference with the specified lookup could not be found on arXiv.org :returns: Metadata object with information about the reference that was identified """ try: id = arxivid.extract(lookup) arxivData = _getMetadataFromArxivId(id) return _saveMetadata(arxivData) except Exception: raise ValueError('Could not match query to arXiv') def _getMetadataFromArxivId(id): """ Get metadata from arXiv.org given an arXiv identifier. :param id: arXiv identifier to look up :returns: Result from arXiv api query """ url = 'http://export.arxiv.org/api/query?id_list=' + id + '&start=0&max_results=2' return requests.get(url) def _saveMetadata(data): """ Convert the results of an arXiv api call to a Metadata object. :param data: Results of the arXiv api call :raises: ValueError if the metadata could not be saved :returns: Metadata object with the content of data """ try: root = ElementTree.fromstring(data.content) entry = _getEntry(root) return _saveMetadataFromEntry(entry) except Exception: raise def _getEntry(root): """ Get the node in the xml data that contains the result from the query to save. If multiple entries are found in the query result, this function raises an error. :param root: Root of the XML data from the arXiv query :raises: ValueError is the entry cannot be extracted from the XML data :returns: Node that contains the results from the query """ entry = None for i in root: if i.tag.endswith('entry'): if entry is not None: raise ValueError('Multiple entries in result') entry = i return entry def _saveMetadataFromEntry(entry): """ Save the metadata from an entry returned by an arXiv query. :param entry: Entry from which to save metadata :returns: Metadata object with the results in the entry """ metadata = Metadata() metadata.publisher = 'arXiv.org' _saveValue(metadata, 'title', entry, 'title') _saveValue(metadata, 'url', entry, 'id') _saveValue(metadata, 'doi', entry, 'doi') _saveYear(metadata, entry) _saveAuthors(metadata, entry) metadata.tidy() if len(metadata.author) == 0 and len(metadata.title) == 0: raise RuntimeError() return metadata def _saveValue(metadata, attribute, entry, tag): """ Extract a value from an XML object and save it in a Metadata object. :param metadata: Metadata object to save the value in :param attribute: Name of the attribute to save the value as in metadata :param entry: XML entry with the value to save :param tag: Tag of the value in entry to save """ for i in entry: if i.tag.endswith(tag): try: setattr(metadata, attribute, i.text) except Exception,e: pass break def _saveYear(metadata, entry): """ Extract the year in which the article was last updated. arXiv api query results include both the published and updated dates. This function saves the updated year. 
:param metadata: Metadata object to save the year in :param entry: XML entry with the value to save """ for i in entry: if i.tag.endswith('updated'): try: setattr(metadata, 'year', i.text.split('-')[0]) except Exception: pass break def _saveAuthors(metadata, entry): """ Extract the authors from an XML object and convert them to given and family names. :param metadata: Metadata object to save the authors in :param entry: XML entry with the authors to save """ for i in entry: if i.tag.endswith('author'): try: metadata.author.append(_getName(i)) except Exception: pass def _getName(entry): """ Extract the name for an XML object. :param entry: XML entry with the name to save :raises: ValueError if a name cannot be found :returns: Dictionary with the given and family name in the entry """ for i in entry: if i.tag.endswith('name'): try: return author.splitName(i.text) except Exception: raise
mit
-8,469,765,585,758,530,000
32.268456
116
0.643938
false
4.240376
false
false
false
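The arXiv lookup above fetches the export API feed and walks the Atom XML with ElementTree, matching tags by suffix to sidestep namespaces; a minimal sketch of that flow using the same URL pattern (the identifier in the usage line is only an example):

# Illustrative sketch of the arXiv export-API flow used by the record above.
import requests
from xml.etree import ElementTree

def arxiv_title_and_year(arxiv_id):
    url = 'http://export.arxiv.org/api/query?id_list=' + arxiv_id + '&start=0&max_results=2'
    root = ElementTree.fromstring(requests.get(url).content)
    title = year = None
    for entry in root:
        if not entry.tag.endswith('entry'):
            continue
        for node in entry:
            if node.tag.endswith('title'):
                title = node.text
            elif node.tag.endswith('updated'):
                year = node.text.split('-')[0]
    return title, year

# title, year = arxiv_title_and_year('1207.7214')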
xiaoweih/DLV
networks/imageNet.py
1
1666
import os, struct from array import array as pyarray from cvxopt.base import matrix import numpy as np import PIL.Image # FIXME: need actual class names def LABELS(index): ls = labels() if len(ls) > 0: return ls[index] else: return range(1000)[index] def labels(): file = open('networks/imageNet/caffe_ilsvrc12/synset_words.txt', 'r') data = file.readlines() ls = [] for line in data: words = line.split() ls.append(' '.join(words[1:])) return ls def save(layer,image,filename): """ """ import cv2 import copy image_cv = copy.deepcopy(image) image_cv = image_cv.transpose(1, 2, 0) image_cv[:,:,0] += 103.939 image_cv[:,:,1] += 116.779 image_cv[:,:,2] += 123.68 #print(np.amax(image_cv),np.amin(image_cv)) cv2.imwrite(filename, image_cv) # from matplotlib import pyplot # import matplotlib as mpl # fig = pyplot.figure() # ax = fig.add_subplot(1,1,1) # # image = image.reshape(3,32,32).transpose(1,2,0) # imgplot = ax.imshow(image.T, cmap=mpl.cm.Greys) # imgplot.set_interpolation('nearest') # ax.xaxis.set_ticks_position('top') # ax.yaxis.set_ticks_position('left') # pyplot.savefig(filename) def show(image): """ """ from matplotlib import pyplot import matplotlib as mpl fig = pyplot.figure() ax = fig.add_subplot(1,1,1) #image = image.reshape(3,32,32).transpose(1,2,0) imgplot = ax.imshow(image.T, cmap=mpl.cm.Greys) imgplot.set_interpolation('nearest') ax.xaxis.set_ticks_position('top') ax.yaxis.set_ticks_position('left') pyplot.show()
gpl-3.0
3,160,891,197,615,542,300
23.5
73
0.612845
false
3.131579
false
false
false
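save() in the imageNet.py record above undoes Caffe-style preprocessing before writing: it transposes the array from channel-first (3, H, W) to (H, W, 3) and adds the ILSVRC per-channel means back. A standalone sketch; the final clip to uint8 is an added assumption, not something the record does:

# Standalone sketch of the un-preprocessing done by save() above.
import numpy as np

ILSVRC_MEANS = (103.939, 116.779, 123.68)        # per-channel (BGR) means used in the record

def to_displayable(image_chw):
    img = np.asarray(image_chw, dtype=np.float64).transpose(1, 2, 0)   # (3, H, W) -> (H, W, 3)
    img = img + np.array(ILSVRC_MEANS)                                  # add the subtracted means back
    return np.clip(img, 0, 255).astype(np.uint8)

# cv2.imwrite('out.png', to_displayable(net_input))   # cv2 and 'net_input' assumed, not from the record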
pidydx/grr
grr/lib/aff4.py
1
100047
#!/usr/bin/env python """AFF4 interface implementation. This contains an AFF4 data model implementation. """ import __builtin__ import abc import itertools import StringIO import threading import time import zlib import logging from grr.lib import access_control from grr.lib import config_lib from grr.lib import data_store from grr.lib import lexer from grr.lib import rdfvalue from grr.lib import registry from grr.lib import stats from grr.lib import type_info from grr.lib import utils from grr.lib.rdfvalues import aff4_rdfvalues from grr.lib.rdfvalues import crypto as rdf_crypto from grr.lib.rdfvalues import paths as rdf_paths from grr.lib.rdfvalues import protodict as rdf_protodict # Factor to convert from seconds to microseconds MICROSECONDS = 1000000 # Age specifications for opening AFF4 objects. NEWEST_TIME = "NEWEST_TIME" ALL_TIMES = "ALL_TIMES" # Just something to write on an index attribute to make it exist. EMPTY_DATA = "X" AFF4_PREFIXES = set(["aff4:", "metadata:"]) class Error(Exception): pass class LockError(Error): pass class OversizedRead(Error, IOError): pass class InstantiationError(Error, IOError): pass class ChunkNotFoundError(IOError): pass class BadGetAttributeError(Exception): pass class MissingChunksError(Exception): def __init__(self, message, missing_chunks=None): super(MissingChunksError, self).__init__(message) self.missing_chunks = missing_chunks or [] class DeletionPool(object): """Pool used to optimize deletion of large object hierarchies.""" def __init__(self, token=None): super(DeletionPool, self).__init__() if token is None: raise ValueError("token can't be None") self._objects_cache = {} self._children_lists_cache = {} self._urns_for_deletion = set() self._token = token def _ObjectKey(self, urn, mode): return u"%s:%s" % (mode, utils.SmartUnicode(urn)) def Open(self, urn, aff4_type=None, mode="r"): """Opens the named object. DeletionPool will only open the object if it's not in the pool already. Otherwise it will just return the cached version. Objects are cached based on their urn and mode. I.e. same object opened with mode="r" and mode="rw" will be actually opened two times and cached separately. DeletionPool's Open() also doesn't follow symlinks. Args: urn: The urn to open. aff4_type: If this parameter is set, we raise an IOError if the object is not an instance of this type. mode: The mode to open the file with. Returns: An AFF4Object instance. Raises: IOError: If the object is not of the required type. 
""" key = self._ObjectKey(urn, mode) try: obj = self._objects_cache[key] except KeyError: obj = FACTORY.Open( urn, mode=mode, follow_symlinks=False, token=self._token) self._objects_cache[key] = obj if aff4_type is not None and not isinstance(obj, aff4_type): raise InstantiationError( "Object %s is of type %s, but required_type is %s" % (urn, obj.__class__.__name__, aff4_type.__name__)) return obj def MultiOpen(self, urns, aff4_type=None, mode="r"): """Opens many urns efficiently, returning cached objects when possible.""" not_opened_urns = [] aff4_type = _ValidateAFF4Type(aff4_type) for urn in urns: key = self._ObjectKey(urn, mode) try: result = self._objects_cache[key] if aff4_type is not None and not isinstance(result, aff4_type): continue yield result except KeyError: not_opened_urns.append(urn) if not_opened_urns: for obj in FACTORY.MultiOpen( not_opened_urns, follow_symlinks=False, mode=mode, token=self._token): key = self._ObjectKey(obj.urn, mode) self._objects_cache[key] = obj if aff4_type is not None and not isinstance(obj, aff4_type): continue yield obj def ListChildren(self, urn): """Lists children of a given urn. Resulting list is cached.""" result = self.MultiListChildren([urn]) try: return result[urn] except KeyError: return [] def MultiListChildren(self, urns): """Lists children of a bunch of given urns. Results are cached.""" result = {} not_listed_urns = [] for urn in urns: try: result[urn] = self._children_lists_cache[urn] except KeyError: not_listed_urns.append(urn) if not_listed_urns: for urn, children in FACTORY.MultiListChildren( not_listed_urns, token=self._token): result[urn] = self._children_lists_cache[urn] = children for urn in not_listed_urns: self._children_lists_cache.setdefault(urn, []) result.setdefault(urn, []) return result def RecursiveMultiListChildren(self, urns): """Recursively lists given urns. 
Results are cached.""" result = {} checked_urns = set() not_cached_urns = [] urns_to_check = urns while True: found_children = [] for urn in urns_to_check: try: children = result[urn] = self._children_lists_cache[urn] found_children.extend(children) except KeyError: not_cached_urns.append(urn) checked_urns.update(urns_to_check) urns_to_check = set(found_children) - checked_urns if not urns_to_check: break for urn, children in FACTORY.RecursiveMultiListChildren( not_cached_urns, token=self._token): result[urn] = self._children_lists_cache[urn] = children return result def MarkForDeletion(self, urn): """Marks object and all of its children for deletion.""" self.MultiMarkForDeletion([urn]) def MultiMarkForDeletion(self, urns): """Marks multiple urns (and their children) for deletion.""" all_children_urns = self.RecursiveMultiListChildren(urns) urns += list(itertools.chain.from_iterable(all_children_urns.values())) self._urns_for_deletion.update(urns) for obj in self.MultiOpen(urns): obj.OnDelete(deletion_pool=self) @property def root_urns_for_deletion(self): """Roots of the graph of urns marked for deletion.""" roots = set() for urn in self._urns_for_deletion: new_root = True str_urn = utils.SmartUnicode(urn) fake_roots = [] for root in roots: str_root = utils.SmartUnicode(root) if str_urn.startswith(str_root): new_root = False break elif str_root.startswith(str_urn): fake_roots.append(root) if new_root: roots -= set(fake_roots) roots.add(urn) return roots @property def urns_for_deletion(self): """Urns marked for deletion.""" return self._urns_for_deletion def _ValidateAFF4Type(aff4_type): """Validates and normalizes aff4_type to class object.""" if aff4_type is None: return None # Check that we have the right type. if not isinstance(aff4_type, type): raise TypeError("aff4_type=%s must be a type" % aff4_type) if not issubclass(aff4_type, AFF4Object): raise TypeError("aff4_type=%s must be a subclass of AFF4Object." % aff4_type) return aff4_type class Factory(object): """A central factory for AFF4 objects.""" def __init__(self): self.intermediate_cache = utils.AgeBasedCache( max_size=config_lib.CONFIG["AFF4.intermediate_cache_max_size"], max_age=config_lib.CONFIG["AFF4.intermediate_cache_age"]) # Create a token for system level actions. This token is used by other # classes such as HashFileStore and NSRLFilestore to create entries under # aff4:/files, as well as to create top level paths like aff4:/foreman self.root_token = access_control.ACLToken( username="GRRSystem", reason="Maintenance").SetUID() self.notification_rules = [] self.notification_rules_timestamp = 0 @classmethod def ParseAgeSpecification(cls, age): """Parses an aff4 age and returns a datastore age specification.""" try: return (0, int(age)) except (ValueError, TypeError): pass if age == NEWEST_TIME: return data_store.DB.NEWEST_TIMESTAMP elif age == ALL_TIMES: return data_store.DB.ALL_TIMESTAMPS elif len(age) == 2: start, end = age return (int(start), int(end)) raise RuntimeError("Unknown age specification: %s" % age) def GetAttributes(self, urns, token=None, age=NEWEST_TIME): """Retrieves all the attributes for all the urns.""" urns = set([utils.SmartUnicode(u) for u in urns]) to_read = {urn: self._MakeCacheInvariant(urn, token, age) for urn in urns} # Urns not present in the cache we need to get from the database. if to_read: for subject, values in data_store.DB.MultiResolvePrefix( to_read, AFF4_PREFIXES, timestamp=self.ParseAgeSpecification(age), token=token, limit=None): # Ensure the values are sorted. 
        values.sort(key=lambda x: x[-1], reverse=True)

        yield utils.SmartUnicode(subject), values

  def SetAttributes(self, urn, attributes, to_delete, add_child_index=True,
                    mutation_pool=None, sync=False, token=None):
    """Sets the attributes in the data store."""
    attributes[AFF4Object.SchemaCls.LAST] = [
        rdfvalue.RDFDatetime.Now().SerializeToDataStore()
    ]
    to_delete.add(AFF4Object.SchemaCls.LAST)
    if mutation_pool:
      mutation_pool.MultiSet(
          urn, attributes, replace=False, to_delete=to_delete)
    else:
      data_store.DB.MultiSet(
          urn,
          attributes,
          token=token,
          replace=False,
          sync=sync,
          to_delete=to_delete)

    if add_child_index:
      self._UpdateChildIndex(urn, token, mutation_pool=mutation_pool)

  def _UpdateChildIndex(self, urn, token, mutation_pool=None):
    """Update the child indexes.

    This function maintains the index for direct child relations. When we set
    an AFF4 path, we always add an attribute like index:dir/%(childname)s to
    its parent. This is written asynchronously to its parent.

    In order to query for all direct children of an AFF4 object, we then
    simply get the attributes which match the regex index:dir/.+ which are the
    direct children.

    Args:
      urn: The AFF4 object for which we update the index.
      token: The token to use.
      mutation_pool: An optional MutationPool object to write to. If not
        given, the data_store is used directly.
    """
    try:
      # Create navigation aids by touching intermediate subject names.
      while urn.Path() != "/":
        basename = urn.Basename()
        dirname = rdfvalue.RDFURN(urn.Dirname())

        try:
          self.intermediate_cache.Get(urn)
          return
        except KeyError:
          attributes = {
              # This updates the directory index.
              "index:dir/%s" % utils.SmartStr(basename): [EMPTY_DATA],
          }
          # This is a performance optimization. On the root there is no point
          # setting the last access time since it gets accessed all the time.
          # TODO(user): Can we get rid of the index in the root node entirely?
          # It's too big to query anyways...
          if dirname != u"/":
            attributes[AFF4Object.SchemaCls.LAST] = [
                rdfvalue.RDFDatetime.Now().SerializeToDataStore()
            ]

          if mutation_pool:
            mutation_pool.MultiSet(dirname, attributes, replace=True)
          else:
            data_store.DB.MultiSet(
                dirname, attributes, token=token, replace=True, sync=False)

          self.intermediate_cache.Put(urn, 1)

          urn = dirname

    except access_control.UnauthorizedAccess:
      pass

  def _DeleteChildFromIndex(self, urn, token, mutation_pool=None):
    if mutation_pool:
      pool = mutation_pool
    else:
      pool = data_store.DB.GetMutationPool(token=token)

    try:
      basename = urn.Basename()
      dirname = rdfvalue.RDFURN(urn.Dirname())

      try:
        self.intermediate_cache.ExpireObject(urn.Path())
      except KeyError:
        pass

      pool.DeleteAttributes(dirname,
                            ["index:dir/%s" % utils.SmartStr(basename)])
      to_set = {
          AFF4Object.SchemaCls.LAST:
              [rdfvalue.RDFDatetime.Now().SerializeToDataStore()]
      }
      pool.MultiSet(dirname, to_set, replace=True)
      if mutation_pool is None:
        pool.Flush()
    except access_control.UnauthorizedAccess:
      pass

  def _MakeCacheInvariant(self, urn, token, age):
    """Returns an invariant key for an AFF4 object.

    The object will be cached based on this key. This function is specifically
    extracted to ensure that we encapsulate all security critical aspects of
    the AFF4 object so that objects do not leak across security boundaries.

    Args:
      urn: The urn of the object.
      token: The access token used to receive the object.
      age: The age policy used to build this object. Should be one of
        ALL_TIMES, NEWEST_TIME or a range.

    Returns:
      A key into the cache.
""" return "%s:%s:%s" % (utils.SmartStr(urn), utils.SmartStr(token), self.ParseAgeSpecification(age)) def CreateWithLock(self, urn, aff4_type, token=None, age=NEWEST_TIME, force_new_version=True, blocking=True, blocking_lock_timeout=10, blocking_sleep_interval=1, lease_time=100): """Creates a new object and locks it. Similar to OpenWithLock below, this creates a locked object. The difference is that when you call CreateWithLock, the object does not yet have to exist in the data store. Args: urn: The object to create. aff4_type: The desired type for this object. token: The Security Token to use for opening this item. age: The age policy used to build this object. Only makes sense when mode has "r". force_new_version: Forces the creation of a new object in the data_store. blocking: When True, wait and repeatedly try to grab the lock. blocking_lock_timeout: Maximum wait time when sync is True. blocking_sleep_interval: Sleep time between lock grabbing attempts. Used when blocking is True. lease_time: Maximum time the object stays locked. Lock will be considered released when this time expires. Returns: An AFF4 object of the desired type and mode. Raises: AttributeError: If the mode is invalid. """ transaction = self._AcquireLock( urn, token=token, blocking=blocking, blocking_lock_timeout=blocking_lock_timeout, blocking_sleep_interval=blocking_sleep_interval, lease_time=lease_time) # Since we now own the data store subject, we can simply create the aff4 # object in the usual way. return self.Create( urn, aff4_type, mode="rw", token=token, age=age, force_new_version=force_new_version, transaction=transaction) def OpenWithLock(self, urn, aff4_type=None, token=None, age=NEWEST_TIME, blocking=True, blocking_lock_timeout=10, blocking_sleep_interval=1, lease_time=100): """Open given urn and locks it. Opens an object and locks it for 'lease_time' seconds. OpenWithLock can only be used in 'with ...' statement. The lock is released when code execution leaves 'with ...' block. The urn is always opened in "rw" mode. Symlinks are not followed in OpenWithLock() due to possible race conditions. Args: urn: The urn to open. aff4_type: If this optional parameter is set, we raise an InstantiationError if the object exists and is not an instance of this type. This check is important when a different object can be stored in this location. token: The Security Token to use for opening this item. age: The age policy used to build this object. Should be one of NEWEST_TIME, ALL_TIMES or a time range given as a tuple (start, end) in microseconds since Jan 1st, 1970. blocking: When True, wait and repeatedly try to grab the lock. blocking_lock_timeout: Maximum wait time when sync is True. blocking_sleep_interval: Sleep time between lock grabbing attempts. Used when blocking is True. lease_time: Maximum time the object stays locked. Lock will be considered released when this time expires. Raises: ValueError: The URN passed in is None. Returns: Context manager to be used in 'with ...' statement. """ transaction = self._AcquireLock( urn, token=token, blocking=blocking, blocking_lock_timeout=blocking_lock_timeout, blocking_sleep_interval=blocking_sleep_interval, lease_time=lease_time) # Since we now own the data store subject, we can simply read the aff4 # object in the usual way. 
return self.Open( urn, aff4_type=aff4_type, mode="rw", token=token, age=age, follow_symlinks=False, transaction=transaction) def _AcquireLock(self, urn, token=None, blocking=None, blocking_lock_timeout=None, lease_time=None, blocking_sleep_interval=None): """This actually acquires the lock for a given URN.""" if token is None: token = data_store.default_token if urn is None: raise ValueError("URN cannot be None") urn = rdfvalue.RDFURN(urn) try: return data_store.DB.LockRetryWrapper( urn, retrywrap_timeout=blocking_sleep_interval, retrywrap_max_timeout=blocking_lock_timeout, blocking=blocking, lease_time=lease_time, token=token) except data_store.DBSubjectLockError as e: raise LockError(e) def Copy(self, old_urn, new_urn, age=NEWEST_TIME, token=None, limit=None, update_timestamps=False, sync=False): """Make a copy of one AFF4 object to a different URN.""" if token is None: token = data_store.default_token new_urn = rdfvalue.RDFURN(new_urn) if update_timestamps and age != NEWEST_TIME: raise ValueError( "Can't update timestamps unless reading the latest version.") values = {} for predicate, value, ts in data_store.DB.ResolvePrefix( old_urn, AFF4_PREFIXES, timestamp=self.ParseAgeSpecification(age), token=token, limit=limit): if update_timestamps: values.setdefault(predicate, []).append((value, None)) else: values.setdefault(predicate, []).append((value, ts)) if values: data_store.DB.MultiSet( new_urn, values, token=token, replace=False, sync=sync) self._UpdateChildIndex(new_urn, token) def Open(self, urn, aff4_type=None, mode="r", token=None, local_cache=None, age=NEWEST_TIME, follow_symlinks=True, transaction=None): """Opens the named object. This instantiates the object from the AFF4 data store. Note that the root aff4:/ object is a container for all other objects. Opening it for reading will instantiate a AFF4Volume instance, even if the row does not exist. The mode parameter specifies, how the object should be opened. A read only mode will raise when calling Set() on it, while a write only object will never read from the data store. Note that its impossible to open an object with pure write support (since we have no idea what type it should be without reading the data base) - use Create() instead for purely write mode. Args: urn: The urn to open. aff4_type: If this parameter is set, we raise an IOError if the object is not an instance of this type. This check is important when a different object can be stored in this location. If mode is "w", this parameter will determine the type of the object and is mandatory. mode: The mode to open the file with. token: The Security Token to use for opening this item. local_cache: A dict containing a cache as returned by GetAttributes. If set, this bypasses the factory cache. age: The age policy used to build this object. Should be one of NEWEST_TIME, ALL_TIMES or a time range given as a tuple (start, end) in microseconds since Jan 1st, 1970. follow_symlinks: If object opened is a symlink, follow it. transaction: A lock in case this object is opened under lock. Returns: An AFF4Object instance. Raises: IOError: If the object is not of the required type. AttributeError: If the requested mode is incorrect. 
""" aff4_type = _ValidateAFF4Type(aff4_type) if mode not in ["w", "r", "rw"]: raise AttributeError("Invalid mode %s" % mode) if mode == "w": if aff4_type is None: raise AttributeError("Need a type to open in write only mode.") return self.Create( urn, aff4_type, mode=mode, token=token, age=age, force_new_version=False, transaction=transaction) urn = rdfvalue.RDFURN(urn) if token is None: token = data_store.default_token if "r" in mode and (local_cache is None or urn not in local_cache): local_cache = dict(self.GetAttributes([urn], age=age, token=token)) # Read the row from the table. We know the object already exists if there is # some data in the local_cache already for this object. result = AFF4Object( urn, mode=mode, token=token, local_cache=local_cache, age=age, follow_symlinks=follow_symlinks, object_exists=bool(local_cache.get(urn)), transaction=transaction) result.aff4_type = aff4_type # Now we have a AFF4Object, turn it into the type it is currently supposed # to be as specified by Schema.TYPE. existing_type = result.Get(result.Schema.TYPE, default="AFF4Volume") if existing_type: try: result = result.Upgrade(AFF4Object.classes[existing_type]) except KeyError: raise InstantiationError("Unable to open %s, type %s unknown." % (urn, existing_type)) if aff4_type is not None and not isinstance(result, aff4_type): raise InstantiationError( "Object %s is of type %s, but required_type is %s" % (urn, result.__class__.__name__, aff4_type.__name__)) return result def MultiOpen(self, urns, mode="rw", token=None, aff4_type=None, age=NEWEST_TIME, follow_symlinks=True): """Opens a bunch of urns efficiently.""" if token is None: token = data_store.default_token if mode not in ["w", "r", "rw"]: raise RuntimeError("Invalid mode %s" % mode) symlinks = {} aff4_type = _ValidateAFF4Type(aff4_type) for urn, values in self.GetAttributes(urns, token=token, age=age): try: obj = self.Open( urn, mode=mode, token=token, local_cache={urn: values}, age=age, follow_symlinks=False) # We can't pass aff4_type to Open since it will raise on AFF4Symlinks. # Setting it here, if needed, so that BadGetAttributeError checking # works. if aff4_type: obj.aff4_type = aff4_type if follow_symlinks and isinstance(obj, AFF4Symlink): target = obj.Get(obj.Schema.SYMLINK_TARGET) if target is not None: symlinks.setdefault(target, []).append(obj.urn) elif aff4_type: if isinstance(obj, aff4_type): yield obj else: yield obj except IOError: pass if symlinks: for obj in self.MultiOpen( symlinks, mode=mode, token=token, aff4_type=aff4_type, age=age): to_link = symlinks[obj.urn] for additional_symlink in to_link[1:]: clone = obj.__class__(obj.urn, clone=obj) clone.symlink_urn = additional_symlink yield clone obj.symlink_urn = symlinks[obj.urn][0] yield obj def OpenDiscreteVersions(self, urn, mode="r", token=None, local_cache=None, age=ALL_TIMES, follow_symlinks=True): """Returns all the versions of the object as AFF4 objects. Args: urn: The urn to open. mode: The mode to open the file with. token: The Security Token to use for opening this item. local_cache: A dict containing a cache as returned by GetAttributes. If set, this bypasses the factory cache. age: The age policy used to build this object. Should be one of ALL_TIMES or a time range follow_symlinks: If object opened is a symlink, follow it. Yields: An AFF4Object for each version. Raises: IOError: On bad open or wrong time range specified. This iterates through versions of an object, returning the newest version first, then each older version until the beginning of time. 
Note that versions are defined by changes to the TYPE attribute, and this takes the version between two TYPE attributes. In many cases as a user you don't want this, as you want to be returned an object with as many attributes as possible, instead of the subset of them that were Set between these two times. """ if age == NEWEST_TIME or len(age) == 1: raise IOError("Bad age policy NEWEST_TIME for OpenDiscreteVersions.") if len(age) == 2: oldest_age = age[1] else: oldest_age = 0 aff4object = FACTORY.Open( urn, mode=mode, token=token, local_cache=local_cache, age=age, follow_symlinks=follow_symlinks) # TYPE is always written last so we trust it to bound the version. # Iterate from newest to oldest. type_iter = aff4object.GetValuesForAttribute(aff4object.Schema.TYPE) version_list = [(t.age, str(t)) for t in type_iter] version_list.append((oldest_age, None)) for i in range(0, len(version_list) - 1): age_range = (version_list[i + 1][0], version_list[i][0]) # Create a subset of attributes for use in the new object that represents # this version. clone_attrs = {} for k, values in aff4object.synced_attributes.iteritems(): reduced_v = [] for v in values: if v.age > age_range[0] and v.age <= age_range[1]: reduced_v.append(v) clone_attrs.setdefault(k, []).extend(reduced_v) obj_cls = AFF4Object.classes[version_list[i][1]] new_obj = obj_cls( urn, mode=mode, parent=aff4object.parent, clone=clone_attrs, token=token, age=age_range, local_cache=local_cache, follow_symlinks=follow_symlinks) new_obj.Initialize() # This is required to set local attributes. yield new_obj def Stat(self, urns, token=None): """Returns metadata about all urns. Currently the metadata include type, and last update time. Args: urns: The urns of the objects to open. token: The token to use. Yields: A dict of metadata. Raises: RuntimeError: A string was passed instead of an iterable. """ if token is None: token = data_store.default_token if isinstance(urns, basestring): raise RuntimeError("Expected an iterable, not string.") for subject, values in data_store.DB.MultiResolvePrefix( urns, ["aff4:type", "metadata:last"], token=token): res = dict(urn=rdfvalue.RDFURN(subject)) for v in values: if v[0] == "aff4:type": res["type"] = v elif v[0] == "metadata:last": res["last"] = rdfvalue.RDFDatetime(v[1]) yield res def Create(self, urn, aff4_type, mode="w", token=None, age=NEWEST_TIME, force_new_version=True, object_exists=False, mutation_pool=None, transaction=None): """Creates the urn if it does not already exist, otherwise opens it. If the urn exists and is of a different type, this will also promote it to the specified type. Args: urn: The object to create. aff4_type: The desired type for this object. mode: The desired mode for this object. token: The Security Token to use for opening this item. age: The age policy used to build this object. Only makes sense when mode has "r". force_new_version: Forces the creation of a new object in the data_store. object_exists: If we know the object already exists we can skip index creation. mutation_pool: An optional MutationPool object to write to. If not given, the data_store is used directly. transaction: For locked objects, a lock is passed to the object. Returns: An AFF4 object of the desired type and mode. Raises: AttributeError: If the mode is invalid. 
""" if mode not in ["w", "r", "rw"]: raise AttributeError("Invalid mode %s" % mode) if token is None: token = data_store.default_token if urn is not None: urn = rdfvalue.RDFURN(urn) aff4_type = _ValidateAFF4Type(aff4_type) if "r" in mode: # Check to see if an object already exists. try: existing = self.Open( urn, mode=mode, token=token, age=age, transaction=transaction) result = existing.Upgrade(aff4_type) # We can't pass aff4_type into the Open call since it will raise with a # type mismatch. We set it like this so BadGetAttributeError checking # works. if aff4_type: result.aff4_type = aff4_type.__name__ if force_new_version and existing.Get( result.Schema.TYPE) != aff4_type.__name__: result.ForceNewVersion() return result except IOError: pass result = aff4_type( urn, mode=mode, token=token, age=age, aff4_type=aff4_type.__name__, object_exists=object_exists, mutation_pool=mutation_pool, transaction=transaction) result.Initialize() if force_new_version: result.ForceNewVersion() return result def MultiDelete(self, urns, token=None): """Drop all the information about given objects. DANGEROUS! This recursively deletes all objects contained within the specified URN. Args: urns: Urns of objects to remove. token: The Security Token to use for opening this item. Raises: RuntimeError: If one of the urns is too short. This is a safety check to ensure the root is not removed. """ urns = [rdfvalue.RDFURN(urn) for urn in urns] if token is None: token = data_store.default_token for urn in urns: if urn.Path() == "/": raise RuntimeError("Can't delete root URN. Please enter a valid URN") deletion_pool = DeletionPool(token=token) deletion_pool.MultiMarkForDeletion(urns) marked_root_urns = deletion_pool.root_urns_for_deletion marked_urns = deletion_pool.urns_for_deletion logging.debug(u"Found %d objects to remove when removing %s", len(marked_urns), utils.SmartUnicode(urns)) logging.debug(u"Removing %d root objects when removing %s: %s", len(marked_root_urns), utils.SmartUnicode(urns), utils.SmartUnicode(marked_root_urns)) pool = data_store.DB.GetMutationPool(token=token) for root in marked_root_urns: # Only the index of the parent object should be updated. Everything # below the target object (along with indexes) is going to be # deleted. self._DeleteChildFromIndex(root, token, mutation_pool=pool) for urn_to_delete in marked_urns: try: self.intermediate_cache.ExpireObject(urn_to_delete.Path()) except KeyError: pass pool.DeleteSubjects(marked_urns) pool.Flush() # Ensure this is removed from the cache as well. self.Flush() logging.debug("Removed %d objects", len(marked_urns)) def Delete(self, urn, token=None): """Drop all the information about this object. DANGEROUS! This recursively deletes all objects contained within the specified URN. Args: urn: The object to remove. token: The Security Token to use for opening this item. Raises: RuntimeError: If the urn is too short. This is a safety check to ensure the root is not removed. """ self.MultiDelete([urn], token=token) def MultiListChildren(self, urns, token=None, limit=None, age=NEWEST_TIME): """Lists bunch of directories efficiently. Args: urns: List of urns to list children. token: Security token. limit: Max number of children to list (NOTE: this is per urn). age: The age of the items to retrieve. Should be one of ALL_TIMES, NEWEST_TIME or a range. Yields: Tuples of Subjects and a list of children urns of a given subject. 
""" checked_subjects = set() index_prefix = "index:dir/" for subject, values in data_store.DB.MultiResolvePrefix( urns, index_prefix, token=token, timestamp=Factory.ParseAgeSpecification(age), limit=limit): checked_subjects.add(subject) subject_result = [] for predicate, _, timestamp in values: urn = rdfvalue.RDFURN(subject).Add(predicate[len(index_prefix):]) urn.age = rdfvalue.RDFDatetime(timestamp) subject_result.append(urn) yield subject, subject_result for subject in set(urns) - checked_subjects: yield subject, [] def ListChildren(self, urn, token=None, limit=None, age=NEWEST_TIME): """Lists bunch of directories efficiently. Args: urn: Urn to list children. token: Security token. limit: Max number of children to list. age: The age of the items to retrieve. Should be one of ALL_TIMES, NEWEST_TIME or a range. Returns: RDFURNs instances of each child. """ _, children_urns = list( self.MultiListChildren([urn], token=token, limit=limit, age=age))[0] return children_urns def RecursiveMultiListChildren(self, urns, token=None, limit=None, age=NEWEST_TIME): """Recursively lists bunch of directories. Args: urns: List of urns to list children. token: Security token. limit: Max number of children to list (NOTE: this is per urn). age: The age of the items to retrieve. Should be one of ALL_TIMES, NEWEST_TIME or a range. Yields: (subject<->children urns) tuples. RecursiveMultiListChildren will fetch children lists for initial set of urns and then will fetch children's children, etc. For example, for the following objects structure: a-> b -> c -> d RecursiveMultiListChildren(['a']) will return: [('a', ['b']), ('b', ['c', 'd'])] """ checked_urns = set() urns_to_check = urns while True: found_children = [] for subject, values in self.MultiListChildren( urns_to_check, token=token, limit=limit, age=age): found_children.extend(values) yield subject, values checked_urns.update(urns_to_check) urns_to_check = set(found_children) - checked_urns if not urns_to_check: break def Flush(self): data_store.DB.Flush() self.intermediate_cache.Flush() class Attribute(object): """AFF4 schema attributes are instances of this class.""" description = "" # A global registry of attributes by name. This ensures we do not accidentally # define the same attribute with conflicting types. PREDICATES = {} # A human readable name to be used in filter queries. NAMES = {} def __init__(self, predicate, attribute_type=rdfvalue.RDFString, description="", name=None, _copy=False, default=None, index=None, versioned=True, lock_protected=False, creates_new_object_version=True): """Constructor. Args: predicate: The name of this attribute - must look like a URL (e.g. aff4:contains). Will be used to store the attribute. attribute_type: The RDFValue type of this attributes. description: A one line description of what this attribute represents. name: A human readable name for the attribute to be used in filters. _copy: Used internally to create a copy of this object without registering. default: A default value will be returned if the attribute is not set on an object. This can be a constant or a callback which receives the fd itself as an arg. index: The name of the index to use for this attribute. If None, the attribute will not be indexed. versioned: Should this attribute be versioned? Non-versioned attributes always overwrite other versions of the same attribute. lock_protected: If True, this attribute may only be set if the object was opened via OpenWithLock(). 
creates_new_object_version: If this is set, a write to this attribute will also write a new version of the parent attribute. This should be False for attributes where lots of entries are collected like logs. """ self.name = name self.predicate = predicate self.attribute_type = attribute_type self.description = description self.default = default self.index = index self.versioned = versioned self.lock_protected = lock_protected self.creates_new_object_version = creates_new_object_version # Field names can refer to a specific component of an attribute self.field_names = [] if not _copy: # Check the attribute registry for conflicts try: old_attribute = Attribute.PREDICATES[predicate] if old_attribute.attribute_type != attribute_type: msg = "Attribute %s defined with conflicting types (%s, %s)" % ( predicate, old_attribute.attribute_type.__class__.__name__, attribute_type.__class__.__name__) logging.error(msg) raise RuntimeError(msg) except KeyError: pass # Register self.PREDICATES[predicate] = self if name: self.NAMES[name] = self def Copy(self): """Return a copy without registering in the attribute registry.""" return Attribute( self.predicate, self.attribute_type, self.description, self.name, _copy=True) def __call__(self, semantic_value=None, **kwargs): """A shortcut allowing us to instantiate a new type from an attribute.""" result = semantic_value if semantic_value is None: result = self.attribute_type(**kwargs) # Coerce the value into the required type if needed. elif not isinstance(semantic_value, self.attribute_type): result = self.attribute_type(semantic_value, **kwargs) # We try to reuse the provided value and tag it as belonging to this # attribute. However, if the value is reused, we must make a copy. if getattr(result, "attribute_instance", None): result = result.Copy() result.attribute_instance = self return result def __str__(self): return self.predicate def __repr__(self): return "<Attribute(%s, %s)>" % (self.name, self.predicate) def __hash__(self): return hash(self.predicate) def __eq__(self, other): return str(self.predicate) == str(other) def __ne__(self, other): return str(self.predicate) != str(other) def __getitem__(self, item): result = self.Copy() result.field_names = item.split(".") return result def __len__(self): return len(self.field_names) def Fields(self): return self.attribute_type.Fields() @classmethod def GetAttributeByName(cls, name): # Support attribute names with a . in them: try: if "." in name: name, field = name.split(".", 1) return cls.NAMES[name][field] return cls.NAMES[name] except KeyError: raise AttributeError("Invalid attribute %s" % name) def GetRDFValueType(self): """Returns this attribute's RDFValue class.""" result = self.attribute_type for field_name in self.field_names: # Support the new semantic protobufs. try: result = result.type_infos.get(field_name).type except AttributeError: raise AttributeError("Invalid attribute %s" % field_name) return result def _GetSubField(self, value, field_names): for field_name in field_names: if value.HasField(field_name): value = getattr(value, field_name, None) else: value = None break if value is not None: yield value def GetSubFields(self, fd, field_names): """Gets all the subfields indicated by field_names. This resolves specifications like "Users.special_folders.app_data" where for each entry in the Users protobuf the corresponding app_data folder entry should be returned. Args: fd: The base RDFValue or Array. field_names: A list of strings indicating which subfields to get. 
Yields: All the subfields matching the field_names specification. """ if isinstance(fd, rdf_protodict.RDFValueArray): for value in fd: for res in self._GetSubField(value, field_names): yield res else: for res in self._GetSubField(fd, field_names): yield res def GetValues(self, fd): """Return the values for this attribute as stored in an AFF4Object.""" result = None for result in fd.new_attributes.get(self, []): # We need to interpolate sub fields in this rdfvalue. if self.field_names: for x in self.GetSubFields(result, self.field_names): yield x else: yield result for result in fd.synced_attributes.get(self, []): result = result.ToRDFValue() # We need to interpolate sub fields in this rdfvalue. if result is not None: if self.field_names: for x in self.GetSubFields(result, self.field_names): yield x else: yield result if result is None: default = self.GetDefault(fd) if default is not None: yield default def GetDefault(self, fd=None, default=None): """Returns a default attribute if it is not set.""" if callable(self.default): return self.default(fd) if self.default is not None: # We can't return mutable objects here or the default might change for all # objects of this class. if isinstance(self.default, rdfvalue.RDFValue): default = self.default.Copy() default.attribute_instance = self return self(default) else: return self(self.default) if isinstance(default, rdfvalue.RDFValue): default = default.Copy() default.attribute_instance = self return default class SubjectAttribute(Attribute): """An attribute which virtualises the subject.""" def __init__(self): Attribute.__init__(self, "aff4:subject", rdfvalue.Subject, "A subject pseudo attribute", "subject") def GetValues(self, fd): return [rdfvalue.Subject(fd.urn)] class AFF4Attribute(rdfvalue.RDFString): """An AFF4 attribute name.""" def Validate(self): try: Attribute.GetAttributeByName(self._value) except (AttributeError, KeyError): raise type_info.TypeValueError("Value %s is not an AFF4 attribute name" % self._value) class ClassProperty(property): """A property which comes from the class object.""" def __get__(self, _, owner): return self.fget.__get__(None, owner)() class ClassInstantiator(property): """A property which instantiates the class on getting.""" def __get__(self, _, owner): return self.fget() class LazyDecoder(object): """An object which delays serialize and unserialize as late as possible. The current implementation requires the proxied object to be immutable. """ def __init__(self, rdfvalue_cls=None, serialized=None, age=None, decoded=None): self.rdfvalue_cls = rdfvalue_cls self.serialized = serialized self.age = age self.decoded = decoded def ToRDFValue(self): if self.decoded is None: try: self.decoded = self.rdfvalue_cls.FromSerializedString( self.serialized, age=self.age) except rdfvalue.DecodeError: return None return self.decoded def FromRDFValue(self): return self.serialized class AFF4Object(object): """Base class for all objects.""" # We are a registered class. __metaclass__ = registry.MetaclassRegistry # This property is used in GUIs to define behaviours. These can take arbitrary # values as needed. Behaviours are read only and set in the class definition. _behaviours = frozenset() # Should this object be synced back to the data store. _dirty = False # The data store transaction this object uses while it is being locked. 
  transaction = None

  @property
  def locked(self):
    """Is this object currently locked?"""
    return self.transaction is not None

  @ClassProperty
  @classmethod
  def behaviours(cls):  # pylint: disable=g-bad-name
    return cls._behaviours

  # URN of the index for labels for generic AFF4Objects.
  labels_index_urn = rdfvalue.RDFURN("aff4:/index/labels/generic")

  # We define the parts of the schema for each AFF4 Object as an internal
  # class. As new objects extend this, they can add more attributes to their
  # schema by extending their parents. Note that the class must be named
  # SchemaCls.
  class SchemaCls(object):
    """The standard AFF4 schema."""

    # We use child indexes to navigate the direct children of an object.
    # If the additional storage requirements for the indexes are not worth it
    # then ADD_CHILD_INDEX should be False. Note however that it will no
    # longer be possible to find all the children of the parent object.
    ADD_CHILD_INDEX = True

    TYPE = Attribute("aff4:type", rdfvalue.RDFString,
                     "The name of the AFF4Object derived class.", "type")

    SUBJECT = SubjectAttribute()

    STORED = Attribute("aff4:stored", rdfvalue.RDFURN,
                       "The AFF4 container in which this object is stored.")

    LAST = Attribute(
        "metadata:last",
        rdfvalue.RDFDatetime,
        "The last time any attribute of this object was written.",
        creates_new_object_version=False)

    # Note labels should not be Set directly but should be manipulated via
    # the AddLabels method.
    LABELS = Attribute(
        "aff4:labels_list",
        aff4_rdfvalues.AFF4ObjectLabelsList,
        "Any object can have labels applied to it.",
        "Labels",
        creates_new_object_version=False,
        versioned=False)

    LEASED_UNTIL = Attribute(
        "aff4:lease",
        rdfvalue.RDFDatetime,
        "The time until which the object is leased by a "
        "particular caller.",
        versioned=False,
        creates_new_object_version=False)

    LAST_OWNER = Attribute(
        "aff4:lease_owner",
        rdfvalue.RDFString,
        "The owner of the lease.",
        versioned=False,
        creates_new_object_version=False)

    def __init__(self, aff4_type=None):
      """Init.

      Args:
        aff4_type: aff4 type string e.g. 'VFSGRRClient' if specified by the
          user when the aff4 object was created. Or None.
      """
      self.aff4_type = aff4_type

    @classmethod
    def ListAttributes(cls):
      for attr in dir(cls):
        attr = getattr(cls, attr)
        if isinstance(attr, Attribute):
          yield attr

    @classmethod
    def GetAttribute(cls, name):
      for i in cls.ListAttributes():
        # Attributes are accessible by predicate or name
        if i.name == name or i.predicate == name:
          return i

    def __getattr__(self, attr):
      """Handle unknown attributes.

      Often the actual object returned is not the object that is expected. In
      those cases attempting to retrieve a specific named attribute would
      normally raise, e.g.:

        fd = aff4.FACTORY.Open(urn)
        fd.Get(fd.Schema.DOESNTEXIST, default_value)

      In this case we return None to ensure that the default is chosen.

      However, if the caller specifies a specific aff4_type, they expect the
      attributes of that object. If they are referencing a non-existent
      attribute this is an error and we should raise, e.g.:

        fd = aff4.FACTORY.Open(urn, aff4_type=module.SomeClass)
        fd.Get(fd.Schema.DOESNTEXIST, default_value)

      Args:
        attr: Some ignored attribute.

      Raises:
        BadGetAttributeError: if the object was opened with a specific type
      """
      if self.aff4_type:
        raise BadGetAttributeError(
            "Attribute %s does not exist on object opened with aff4_type %s" %
            (utils.SmartStr(attr), self.aff4_type))

      return None

  # Make sure that when someone references the schema, they receive an
  # instance of the class.
  @property
  def Schema(self):  # pylint: disable=g-bad-name
    return self.SchemaCls(self.aff4_type)

  def __init__(self, urn, mode="r", parent=None, clone=None, token=None,
               local_cache=None, age=NEWEST_TIME, follow_symlinks=True,
               aff4_type=None, object_exists=False, mutation_pool=None,
               transaction=None):
    if urn is not None:
      urn = rdfvalue.RDFURN(urn)
    self.urn = urn
    self.mode = mode
    self.parent = parent
    self.token = token
    self.age_policy = age
    self.follow_symlinks = follow_symlinks
    self.lock = threading.RLock()
    self.mutation_pool = mutation_pool
    self.transaction = transaction
    if transaction and mutation_pool:
      raise ValueError("Cannot use a locked object with a mutation pool!")

    # If object was opened through a symlink, "symlink_urn" attribute will
    # contain a symlink urn.
    self.symlink_urn = None

    # The object already exists in the data store - we do not need to update
    # indexes.
    self.object_exists = object_exists

    # This flag will be set whenever an attribute is changed that has the
    # creates_new_object_version flag set.
    self._new_version = False

    # Mark out attributes to delete when Flushing()
    self._to_delete = set()

    # If an explicit aff4 type is requested we store it here so we know to
    # verify aff4 attributes exist in the schema at Get() time.
    self.aff4_type = aff4_type

    # We maintain two attribute caches - self.synced_attributes reflects the
    # attributes which are synced with the data_store, while
    # self.new_attributes are new attributes which still need to be flushed
    # to the data_store. When this object is instantiated we populate
    # self.synced_attributes with the data_store, while the finish method
    # flushes new changes.
    if clone is not None:
      if isinstance(clone, dict):
        # Just use these as the attributes, do not go to the data store. This
        # is a quick way of creating an object with data which was already
        # fetched.
        self.new_attributes = {}
        self.synced_attributes = clone

      elif isinstance(clone, AFF4Object):
        # We were given another object to clone - we do not need to access the
        # data_store now.
        self.new_attributes = clone.new_attributes.copy()
        self.synced_attributes = clone.synced_attributes.copy()

      else:
        raise RuntimeError("Cannot clone from %s." % clone)
    else:
      self.new_attributes = {}
      self.synced_attributes = {}

      if "r" in mode:
        if local_cache:
          try:
            for attribute, value, ts in local_cache[utils.SmartUnicode(urn)]:
              self.DecodeValueFromAttribute(attribute, value, ts)
          except KeyError:
            pass
        else:
          # Populate the caches from the data store.
          for urn, values in FACTORY.GetAttributes(
              [urn], age=age, token=self.token):
            for attribute_name, value, ts in values:
              self.DecodeValueFromAttribute(attribute_name, value, ts)

    if clone is None:
      self.Initialize()

  def Initialize(self):
    """The method is called after construction to initialize the object.

    This will be called after construction, and each time the object is
    unserialized from the datastore.

    An AFF4 object contains attributes which can be populated from the
    database. This method is called to obtain a fully fledged object from
    a collection of attributes.
    """

  def DecodeValueFromAttribute(self, attribute_name, value, ts):
    """Given a serialized value, decode the attribute.

    Only attributes which have been previously defined are permitted.

    Args:
      attribute_name: The string name of the attribute.
      value: The serialized attribute value.
      ts: The timestamp of this attribute.
    """
    try:
      # Get the Attribute object from our schema.
attribute = Attribute.PREDICATES[attribute_name] cls = attribute.attribute_type self._AddAttributeToCache(attribute, LazyDecoder(cls, value, ts), self.synced_attributes) except KeyError: pass except (ValueError, rdfvalue.DecodeError): logging.debug("%s: %s invalid encoding. Skipping.", self.urn, attribute_name) def _AddAttributeToCache(self, attribute_name, value, cache): """Helper to add a new attribute to a cache.""" # If there's another value in cache with the same timestamp, the last added # one takes precedence. This helps a lot in tests that use FakeTime. attribute_list = cache.setdefault(attribute_name, []) if attribute_list and attribute_list[-1].age == value.age: attribute_list.pop() attribute_list.append(value) def CheckLease(self): """Check if our lease has expired, return seconds left. Returns: int: seconds left in the lease, 0 if not locked or lease is expired """ if self.transaction: return self.transaction.CheckLease() return 0 def UpdateLease(self, duration): """Updates the lease and flushes the object. The lease is set to expire after the "duration" time from the present moment. This method is supposed to be used when operation that requires locking may run for a time that exceeds the lease time specified in OpenWithLock(). See flows/hunts locking for an example. Args: duration: Integer number of seconds. Lease expiry time will be set to "time.time() + duration". Raises: LockError: if the object is not currently locked or the lease has expired. """ if not self.locked: raise LockError("Object must be locked to update the lease: %s." % self.urn) if self.CheckLease() == 0: raise LockError("Can not update lease that has already expired.") self.transaction.UpdateLease(duration) def Flush(self, sync=True): """Syncs this object with the data store, maintaining object validity.""" if self.locked and self.CheckLease() == 0: raise LockError("Can not update lease that has already expired.") self._WriteAttributes(sync=sync) self._SyncAttributes() if self.parent: self.parent.Flush(sync=sync) def Close(self, sync=True): """Close and destroy the object. This is similar to Flush, but does not maintain object validity. Hence the object should not be interacted with after Close(). Args: sync: Write the attributes synchronously to the data store. Raises: LockError: The lease for this object has expired. """ if self.locked and self.CheckLease() == 0: raise LockError("Can not update lease that has already expired.") # Always sync when in a lock. if self.locked: sync = True self._WriteAttributes(sync=sync) # Releasing this lock allows another thread to own it. if self.locked: self.transaction.Release() if self.parent: self.parent.Close(sync=sync) # Interacting with a closed object is a bug. We need to catch this ASAP so # we remove all mode permissions from this object. self.mode = "" def OnDelete(self, deletion_pool=None): """Called when the object is about to be deleted. NOTE: If the implementation of this method has to list children or delete other dependent objects, make sure to use DeletionPool's API instead of a generic aff4.FACTORY one. DeletionPool is optimized for deleting large amounts of objects - it minimizes number of expensive data store calls, trying to group as many of them as possible into a single batch, and caches results of these calls. Args: deletion_pool: DeletionPool object used for this deletion operation. Raises: ValueError: if deletion pool is None. 
""" if deletion_pool is None: raise ValueError("deletion_pool can't be None") @utils.Synchronized def _WriteAttributes(self, sync=True): """Write the dirty attributes to the data store.""" # If the object is not opened for writing we do not need to flush it to the # data_store. if "w" not in self.mode: return if self.urn is None: raise RuntimeError("Storing of anonymous AFF4 objects not supported.") to_set = {} for attribute_name, value_array in self.new_attributes.iteritems(): to_set_list = to_set.setdefault(attribute_name, []) for value in value_array: to_set_list.append((value.SerializeToDataStore(), value.age)) if self._dirty: # We determine this object has a new version only if any of the versioned # attributes have changed. Non-versioned attributes do not represent a new # object version. The type of an object is versioned and represents a # version point in the life of the object. if self._new_version: to_set[self.Schema.TYPE] = [( utils.SmartUnicode(self.__class__.__name__), rdfvalue.RDFDatetime.Now())] # We only update indexes if the schema does not forbid it and we are not # sure that the object already exists. add_child_index = self.Schema.ADD_CHILD_INDEX if self.object_exists: add_child_index = False # Write the attributes to the Factory cache. FACTORY.SetAttributes( self.urn, to_set, self._to_delete, add_child_index=add_child_index, mutation_pool=self.mutation_pool, sync=sync, token=self.token) @utils.Synchronized def _SyncAttributes(self): """Sync the new attributes to the synced attribute cache. This maintains object validity. """ # This effectively moves all the values from the new_attributes to the # synced_attributes caches. for attribute, value_array in self.new_attributes.iteritems(): if not attribute.versioned or self.age_policy == NEWEST_TIME: # Store the latest version if there are multiple unsynced versions. value = value_array[-1] self.synced_attributes[attribute] = [ LazyDecoder(decoded=value, age=value.age) ] else: synced_value_array = self.synced_attributes.setdefault(attribute, []) for value in value_array: synced_value_array.append(LazyDecoder(decoded=value, age=value.age)) synced_value_array.sort(key=lambda x: x.age, reverse=True) self.new_attributes = {} self._to_delete.clear() self._dirty = False self._new_version = False def _CheckAttribute(self, attribute, value): """Check that the value is of the expected type. Args: attribute: An instance of Attribute(). value: An instance of RDFValue. Raises: ValueError: when the value is not of the expected type. AttributeError: When the attribute is not of type Attribute(). """ if not isinstance(attribute, Attribute): raise AttributeError("Attribute %s must be of type aff4.Attribute()", attribute) if not isinstance(value, attribute.attribute_type): raise ValueError("Value for attribute %s must be of type %s()" % (attribute, attribute.attribute_type.__name__)) def Copy(self, to_attribute, from_fd, from_attribute): values = from_fd.GetValuesForAttribute(from_attribute) for v in values: self.AddAttribute(to_attribute, v, age=v.age) def Set(self, attribute, value=None): """Set an attribute on this object. Set() is now a synonym for AddAttribute() since attributes are never deleted. Args: attribute: The attribute to set. value: The new value for this attribute. """ # Specifically ignore None here. 
This allows us to safely copy attributes # from one object to another: fd.Set(fd2.Get(..)) if attribute is None: return self.AddAttribute(attribute, value) def AddAttribute(self, attribute, value=None, age=None): """Add an additional attribute to this object. If value is None, attribute is expected to be already initialized with a value. For example: fd.AddAttribute(fd.Schema.CONTAINS("some data")) Args: attribute: The attribute name or an RDFValue derived from the attribute. value: The value the attribute will be set to. age: Age (timestamp) of the attribute. If None, current time is used. Raises: IOError: If this object is read only. """ if "w" not in self.mode: raise IOError("Writing attribute %s to read only object." % attribute) if value is None: value = attribute attribute = value.attribute_instance # Check if this object should be locked in order to add the attribute. # NOTE: We don't care about locking when doing blind writes. if self.mode != "w" and attribute.lock_protected and not self.transaction: raise IOError("Object must be locked to write attribute %s." % attribute) self._CheckAttribute(attribute, value) # Does this represent a new version? if attribute.versioned: if attribute.creates_new_object_version: self._new_version = True # Update the time of this new attribute. if age: value.age = age else: value.age = rdfvalue.RDFDatetime.Now() # Non-versioned attributes always replace previous versions and get written # at the earliest timestamp (so they appear in all objects). else: self._to_delete.add(attribute) self.synced_attributes.pop(attribute, None) self.new_attributes.pop(attribute, None) value.age = 0 self._AddAttributeToCache(attribute, value, self.new_attributes) self._dirty = True @utils.Synchronized def DeleteAttribute(self, attribute): """Clears the attribute from this object.""" if "w" not in self.mode: raise IOError("Deleting attribute %s from read only object." % attribute) # Check if this object should be locked in order to delete the attribute. # NOTE: We don't care about locking when doing blind writes. if self.mode != "w" and attribute.lock_protected and not self.transaction: raise IOError("Object must be locked to delete attribute %s." % attribute) if attribute in self.synced_attributes: self._to_delete.add(attribute) del self.synced_attributes[attribute] if attribute in self.new_attributes: del self.new_attributes[attribute] # Does this represent a new version? if attribute.versioned and attribute.creates_new_object_version: self._new_version = True self._dirty = True def IsAttributeSet(self, attribute): """Determine if the attribute is set. Args: attribute: The attribute to check. Returns: True if set, otherwise False. Checking Get against None doesn't work as Get will return a default attribute value. This determines if the attribute has been manually set. """ return (attribute in self.synced_attributes or attribute in self.new_attributes) def Get(self, attribute, default=None): """Gets the attribute from this object.""" if attribute is None: return default # Allow the user to specify the attribute by name. elif isinstance(attribute, str): attribute = Attribute.GetAttributeByName(attribute) # We can't read attributes from the data_store unless read mode was # specified. It is ok to read new attributes though. if "r" not in self.mode and (attribute not in self.new_attributes and attribute not in self.synced_attributes): raise IOError("Fetching %s from object not opened for reading." 
% attribute) for result in self.GetValuesForAttribute(attribute, only_one=True): try: # The attribute may be a naked string or int - i.e. not an RDFValue at # all. result.attribute_instance = attribute except AttributeError: pass return result return attribute.GetDefault(self, default) def GetValuesForAttribute(self, attribute, only_one=False): """Returns a list of values from this attribute.""" if not only_one and self.age_policy == NEWEST_TIME: raise RuntimeError("Attempting to read all attribute versions for an " "object opened for NEWEST_TIME. This is probably " "not what you want.") if attribute is None: return [] elif isinstance(attribute, basestring): attribute = Attribute.GetAttributeByName(attribute) return attribute.GetValues(self) def Update(self, attribute=None, user=None, priority=None): """Requests the object refresh an attribute from the Schema.""" def Upgrade(self, aff4_class): """Upgrades this object to the type specified. AFF4 Objects can be upgraded on the fly to other type - As long as the new type is derived from the current type. This feature allows creation of placeholder objects which can later be upgraded to the fully featured object. Note: It is not allowed to downgrade an object if that would result in a loss of information (since the new object has a smaller schema). This method tries to store the new object with its new attributes and will fail if any attributes can not be mapped. Args: aff4_class: A string representing the new class. Returns: an instance of the new class with all the same attributes as this current object. Raises: RuntimeError: When the object to upgrade is locked. AttributeError: When the new object can not accept some of the old attributes. InstantiationError: When we cannot instantiate the object type class. """ aff4_class = _ValidateAFF4Type(aff4_class) # We are already of the required type if self.__class__ == aff4_class: return self # Check that we have the right type. if not isinstance(aff4_class, type): raise InstantiationError("aff4_class=%s must be a type" % aff4_class) if not issubclass(aff4_class, AFF4Object): raise InstantiationError( "aff4_class=%s must be a subclass of AFF4Object." % aff4_class) # It's not allowed to downgrade the object if isinstance(self, aff4_class): # TODO(user): check what we should do here: # 1) Nothing # 2) raise # 3) return self # Option 3) seems ok, but we need to be sure that we don't use # Create(mode='r') anywhere where code actually expects the object to be # downgraded. return self # NOTE: It is possible for attributes to become inaccessible here if the old # object has an attribute which the new object does not have in its # schema. The values of these attributes will not be available any longer in # the new object - usually because old attributes do not make sense in the # context of the new object. # Instantiate the class result = aff4_class( self.urn, mode=self.mode, clone=self, parent=self.parent, token=self.token, age=self.age_policy, object_exists=self.object_exists, follow_symlinks=self.follow_symlinks, aff4_type=self.aff4_type, mutation_pool=self.mutation_pool, transaction=self.transaction) result.symlink_urn = self.urn result.Initialize() return result def ForceNewVersion(self): self._dirty = True self._new_version = True def __repr__(self): return "<%s@%X = %s>" % (self.__class__.__name__, hash(self), self.urn) # The following are used to ensure a bunch of AFF4Objects can be sorted on # their URNs. 
def __gt__(self, other): return self.urn > other def __lt__(self, other): return self.urn < other def __nonzero__(self): """We override this because we don't want to fall back to __len__. We want to avoid the case where a nonzero check causes iteration over all items. Subclasses may override as long as their implementation is efficient. Returns: True always """ return True # Support the with protocol. def __enter__(self): return self def __exit__(self, unused_type, unused_value, unused_traceback): try: self.Close() except Exception: # pylint: disable=broad-except # If anything bad happens here, we must abort the lock or the # object will stay locked. if self.transaction: self.transaction.Release() raise def AddLabels(self, *labels_names, **kwargs): """Add labels to the AFF4Object.""" if not self.token and "owner" not in kwargs: raise RuntimeError("Can't set label: No owner specified and " "no access token available.") owner = kwargs.get("owner") or self.token.username current_labels = self.Get(self.Schema.LABELS, self.Schema.LABELS()) for label_name in labels_names: label = aff4_rdfvalues.AFF4ObjectLabel( name=label_name, owner=owner, timestamp=rdfvalue.RDFDatetime.Now()) current_labels.AddLabel(label) self.Set(current_labels) def RemoveLabels(self, *labels_names, **kwargs): """Remove specified labels from the AFF4Object.""" if not self.token and "owner" not in kwargs: raise RuntimeError("Can't remove label: No owner specified and " "no access token available.") owner = kwargs.get("owner") or self.token.username current_labels = self.Get(self.Schema.LABELS) for label_name in labels_names: label = aff4_rdfvalues.AFF4ObjectLabel(name=label_name, owner=owner) current_labels.RemoveLabel(label) self.Set(self.Schema.LABELS, current_labels) def SetLabels(self, *labels_names, **kwargs): self.ClearLabels() self.AddLabels(*labels_names, **kwargs) def ClearLabels(self): self.Set(self.Schema.LABELS, aff4_rdfvalues.AFF4ObjectLabelsList()) def GetLabels(self): return self.Get(self.Schema.LABELS, aff4_rdfvalues.AFF4ObjectLabelsList()).labels def GetLabelsNames(self, owner=None): labels = self.Get(self.Schema.LABELS, aff4_rdfvalues.AFF4ObjectLabelsList()) return labels.GetLabelNames(owner=owner) class AttributeExpression(lexer.Expression): """An expression which is used to filter attributes.""" def SetAttribute(self, attribute): """Checks that attribute is a valid Attribute() instance.""" # Grab the attribute registered for this name self.attribute = attribute self.attribute_obj = Attribute.GetAttributeByName(attribute) if self.attribute_obj is None: raise lexer.ParseError("Attribute %s not defined" % attribute) def SetOperator(self, operator): """Sets the operator for this expression.""" self.operator = operator # Find the appropriate list of operators for this attribute attribute_type = self.attribute_obj.GetRDFValueType() operators = attribute_type.operators # Do we have such an operator? self.number_of_args, self.operator_method = operators.get(operator, (0, None)) if self.operator_method is None: raise lexer.ParseError("Operator %s not defined on attribute '%s'" % (operator, self.attribute)) self.operator_method = getattr(attribute_type, self.operator_method) def Compile(self, filter_implemention): """Returns the data_store filter implementation from the attribute.""" return self.operator_method(self.attribute_obj, filter_implemention, *self.args) class AFF4Volume(AFF4Object): """Volumes contain other objects. The AFF4 concept of a volume abstracts away how objects are stored. 
We simply define an AFF4 volume as a container of other AFF4 objects. The volume may implement any storage mechanism it likes, including virtualizing the objects contained within it. """ _behaviours = frozenset(["Container"]) class SchemaCls(AFF4Object.SchemaCls): CONTAINS = Attribute("aff4:contains", rdfvalue.RDFURN, "An AFF4 object contained in this container.") def ListChildren(self, limit=1000000, age=NEWEST_TIME): """Yields RDFURNs of all the children of this object. Args: limit: Total number of items we will attempt to retrieve. age: The age of the items to retrieve. Should be one of ALL_TIMES, NEWEST_TIME or a range in microseconds. Yields: RDFURNs instances of each child. """ # Just grab all the children from the index. index_prefix = "index:dir/" for predicate, _, timestamp in data_store.DB.ResolvePrefix( self.urn, index_prefix, token=self.token, timestamp=Factory.ParseAgeSpecification(age), limit=limit): urn = self.urn.Add(predicate[len(index_prefix):]) urn.age = rdfvalue.RDFDatetime(timestamp) yield urn def OpenChildren(self, children=None, mode="r", limit=1000000, chunk_limit=100000, age=NEWEST_TIME): """Yields AFF4 Objects of all our direct children. This method efficiently returns all attributes for our children directly, in a few data store round trips. We use the directory indexes to query the data store. Args: children: A list of children RDFURNs to open. If None open all our children. mode: The mode the files should be opened with. limit: Total number of items we will attempt to retrieve. chunk_limit: Maximum number of items to retrieve at a time. age: The age of the items to retrieve. Should be one of ALL_TIMES, NEWEST_TIME or a range. Yields: Instances for each direct child. """ if children is None: subjects = list(self.ListChildren(limit=limit, age=age)) else: subjects = list(children) subjects.sort() # Read at most limit children at a time. while subjects: to_read = subjects[:chunk_limit] subjects = subjects[chunk_limit:] for child in FACTORY.MultiOpen( to_read, mode=mode, token=self.token, age=age): yield child @property def real_pathspec(self): """Returns a pathspec for an aff4 object even if there is none stored.""" pathspec = self.Get(self.Schema.PATHSPEC) stripped_components = [] parent = self # TODO(user): this code is potentially slow due to multiple separate # aff4.FACTORY.Open() calls. OTOH the loop below is executed very rarely - # only when we deal with deep files that got fetched alone and then # one of the directories in their path gets updated. while not pathspec and len(parent.urn.Split()) > 1: # We try to recurse up the tree to get a real pathspec. # These directories are created automatically without pathspecs when a # deep directory is listed without listing the parents. # Note /fs/os or /fs/tsk won't be updateable so we will raise IOError # if we try. stripped_components.append(parent.urn.Basename()) pathspec = parent.Get(parent.Schema.PATHSPEC) parent = FACTORY.Open(parent.urn.Dirname(), token=self.token) if pathspec: if stripped_components: # We stripped pieces of the URL, time to add them back. new_path = utils.JoinPath(*reversed(stripped_components[:-1])) pathspec.Append( rdf_paths.PathSpec(path=new_path, pathtype=pathspec.last.pathtype)) else: raise IOError("Item has no pathspec.") return pathspec class AFF4Root(AFF4Volume): """The root of the VFS.""" class AFF4Symlink(AFF4Object): """This is a symlink to another AFF4 object. This means that opening this object will return the linked to object. 
To create a symlink, one must open the symlink for writing and set the Schema.SYMLINK_TARGET attribute. Opening the object for reading will return the linked to object. """ class SchemaCls(AFF4Object.SchemaCls): SYMLINK_TARGET = Attribute("aff4:symlink_target", rdfvalue.RDFURN, "The target of this link.") def __new__(cls, unused_urn, mode="r", clone=None, token=None, age=NEWEST_TIME, follow_symlinks=True, **_): # When first created, the symlink object is exposed. if mode == "w" or not follow_symlinks: return super(AFF4Symlink, cls).__new__(cls) elif clone is not None: # Get the real object (note, clone shouldn't be None during normal # object creation process): target_urn = clone.Get(cls.SchemaCls.SYMLINK_TARGET) result = FACTORY.Open(target_urn, mode=mode, age=age, token=token) result.symlink_urn = clone.urn return result else: raise RuntimeError("Unable to open symlink.") class AFF4Stream(AFF4Object): """An abstract stream for reading data.""" __metaclass__ = abc.ABCMeta # The read pointer offset. offset = 0 size = 0 class SchemaCls(AFF4Object.SchemaCls): # Note that a file on the remote system might have stat.st_size > 0 but if # we do not have any of the data available to read: size = 0. SIZE = Attribute( "aff4:size", rdfvalue.RDFInteger, "The total size of available data for this stream.", "size", default=0) HASH = Attribute("aff4:hashobject", rdf_crypto.Hash, "Hash object containing all known hash digests for" " the object.") MULTI_STREAM_CHUNK_SIZE = 1024 * 1024 * 8 @classmethod def _MultiStream(cls, fds): """Method overriden by subclasses to optimize the MultiStream behavior.""" for fd in fds: fd.Seek(0) while True: chunk = fd.Read(cls.MULTI_STREAM_CHUNK_SIZE) if not chunk: break yield fd, chunk, None @classmethod def MultiStream(cls, fds): """Effectively streams data from multiple opened AFF4Stream objects. Args: fds: A list of opened AFF4Stream (or AFF4Stream descendants) objects. Yields: Tuples (chunk, fd) where chunk is a binary blob of data and fd is an object from the fds argument. Chunks within one file are not shuffled: every file's chunks are yielded in order and the file is never truncated. The files themselves are grouped by their type and the order of the groups is non-deterministic. The order of the files within a single type group is the same as in the fds argument. Raises: ValueError: If one of items in the fds list is not an AFF4Stream. MissingChunksError: if one or more chunks are missing. This exception is only raised after all the files are read and their respective chunks are yielded. MultiStream does its best to skip the file entirely if one of its chunks is missing, but in case of very large files it's still possible to yield a truncated file. """ for fd in fds: if not isinstance(fd, AFF4Stream): raise ValueError("All object to be streamed have to inherit from " "AFF4Stream (found one inheriting from %s)." % (fd.__class__.__name__)) classes_map = {} for fd in fds: classes_map.setdefault(fd.__class__, []).append(fd) for fd_class, fds in classes_map.items(): # pylint: disable=protected-access for fd, chunk, exception in fd_class._MultiStream(fds): yield fd, chunk, exception # pylint: enable=protected-access def __len__(self): return self.size def Initialize(self): super(AFF4Stream, self).Initialize() # This is the configurable default length for allowing Read to be called # without a specific length. 
self.max_unbound_read = config_lib.CONFIG["Server.max_unbound_read_size"] @abc.abstractmethod def Read(self, length): pass @abc.abstractmethod def Write(self, data): pass @abc.abstractmethod def Tell(self): pass @abc.abstractmethod def Seek(self, offset, whence=0): pass # These are file object conformant namings for library functions that # grr uses, and that expect to interact with 'real' file objects. def read(self, length=None): # pylint: disable=invalid-name if length is None: length = self.size - self.offset if length > self.max_unbound_read: raise OversizedRead("Attempted to read file of size %s when " "Server.max_unbound_read_size is %s" % (self.size, self.max_unbound_read)) return self.Read(length) def GetContentAge(self): return self.Get(self.Schema.TYPE).age seek = utils.Proxy("Seek") tell = utils.Proxy("Tell") close = utils.Proxy("Close") write = utils.Proxy("Write") flush = utils.Proxy("Flush") class AFF4MemoryStreamBase(AFF4Stream): """A stream which keeps all data in memory. This is an abstract class, subclasses must define the CONTENT attribute in the Schema to be versioned or unversioned. """ def Initialize(self): """Try to load the data from the store.""" super(AFF4MemoryStreamBase, self).Initialize() contents = "" if "r" in self.mode: contents = self.Get(self.Schema.CONTENT) try: if contents is not None: contents = zlib.decompress(utils.SmartStr(contents)) except zlib.error: pass self.fd = StringIO.StringIO(contents) self.size = len(contents) self.offset = 0 def Truncate(self, offset=None): if offset is None: offset = self.offset self.fd = StringIO.StringIO(self.fd.getvalue()[:offset]) self.size.Set(offset) def Read(self, length): return self.fd.read(int(length)) def Write(self, data): if isinstance(data, unicode): raise IOError("Cannot write unencoded string.") self._dirty = True self.fd.write(data) self.size = max(self.size, self.fd.tell()) def Tell(self): return self.fd.tell() def Seek(self, offset, whence=0): self.fd.seek(offset, whence) def Flush(self, sync=True): if self._dirty: compressed_content = zlib.compress(self.fd.getvalue()) self.Set(self.Schema.CONTENT(compressed_content)) self.Set(self.Schema.SIZE(self.size)) super(AFF4MemoryStreamBase, self).Flush(sync=sync) def Close(self, sync=True): if self._dirty: compressed_content = zlib.compress(self.fd.getvalue()) self.Set(self.Schema.CONTENT(compressed_content)) self.Set(self.Schema.SIZE(self.size)) super(AFF4MemoryStreamBase, self).Close(sync=sync) def OverwriteAndClose(self, compressed_data, size, sync=True): """Directly overwrite the current contents. Replaces the data currently in the stream with compressed_data, and closes the object. Makes it possible to avoid recompressing the data. Args: compressed_data: The data to write, must be zlib compressed. size: The uncompressed size of the data. sync: Whether the close should be synchronous. 
""" self.Set(self.Schema.CONTENT(compressed_data)) self.Set(self.Schema.SIZE(size)) super(AFF4MemoryStreamBase, self).Close(sync=sync) def GetContentAge(self): return self.Get(self.Schema.CONTENT).age class AFF4MemoryStream(AFF4MemoryStreamBase): """A versioned stream which keeps all data in memory.""" class SchemaCls(AFF4MemoryStreamBase.SchemaCls): CONTENT = Attribute( "aff4:content", rdfvalue.RDFBytes, "Total content of this file.", default="") class AFF4UnversionedMemoryStream(AFF4MemoryStreamBase): """An unversioned stream which keeps all data in memory.""" class SchemaCls(AFF4MemoryStreamBase.SchemaCls): CONTENT = Attribute( "aff4:content", rdfvalue.RDFBytes, "Total content of this file.", default="", versioned=False) class ChunkCache(utils.FastStore): """A cache which closes its objects when they expire.""" def __init__(self, kill_cb=None, *args, **kw): self.kill_cb = kill_cb super(ChunkCache, self).__init__(*args, **kw) def KillObject(self, obj): if self.kill_cb: self.kill_cb(obj) def __getstate__(self): if self.kill_cb: raise NotImplementedError("Can't pickle callback.") return self.__dict__ class AFF4ImageBase(AFF4Stream): """An AFF4 Image is stored in segments. We are both an Image here and a volume (since we store the segments inside us). This is an abstract class, subclasses choose the type to use for chunks. """ NUM_RETRIES = 10 CHUNK_ID_TEMPLATE = "%010X" # This is the chunk size of each chunk. The chunksize can not be changed once # the object is created. chunksize = 64 * 1024 # Subclasses should set the name of the type of stream to use for chunks. STREAM_TYPE = None # How many chunks should be cached. LOOK_AHEAD = 10 class SchemaCls(AFF4Stream.SchemaCls): """The schema for AFF4ImageBase.""" _CHUNKSIZE = Attribute( "aff4:chunksize", rdfvalue.RDFInteger, "Total size of each chunk.", default=64 * 1024) # Note that we can't use CONTENT.age in place of this, since some types # (specifically, AFF4Image) do not have a CONTENT attribute, since they're # stored in chunks. Rather than maximising the last updated time over all # chunks, we store it and update it as an attribute here. CONTENT_LAST = Attribute( "metadata:content_last", rdfvalue.RDFDatetime, "The last time any content was written.", creates_new_object_version=False) @classmethod def _GenerateChunkPaths(cls, fds): for fd in fds: num_chunks = fd.size / fd.chunksize + 1 for chunk in xrange(num_chunks): yield fd.urn.Add(fd.CHUNK_ID_TEMPLATE % chunk), fd MULTI_STREAM_CHUNKS_READ_AHEAD = 1000 @classmethod def _MultiStream(cls, fds): """Effectively streams data from multiple opened AFF4ImageBase objects. Args: fds: A list of opened AFF4Stream (or AFF4Stream descendants) objects. Yields: Tuples (chunk, fd, exception) where chunk is a binary blob of data and fd is an object from the fds argument. If one or more chunks are missing, exception will be a MissingChunksError while chunk will be None. _MultiStream does its best to skip the file entirely if one of its chunks is missing, but in case of very large files it's still possible to yield a truncated file. 
""" missing_chunks_by_fd = {} for chunk_fd_pairs in utils.Grouper( cls._GenerateChunkPaths(fds), cls.MULTI_STREAM_CHUNKS_READ_AHEAD): chunks_map = dict(chunk_fd_pairs) contents_map = {} for chunk_fd in FACTORY.MultiOpen( chunks_map, mode="r", token=fds[0].token): if isinstance(chunk_fd, AFF4Stream): fd = chunks_map[chunk_fd.urn] contents_map[chunk_fd.urn] = chunk_fd.read() for chunk_urn, fd in chunk_fd_pairs: if chunk_urn not in contents_map or not contents_map[chunk_urn]: missing_chunks_by_fd.setdefault(fd, []).append(chunk_urn) for chunk_urn, fd in chunk_fd_pairs: if fd in missing_chunks_by_fd: continue yield fd, contents_map[chunk_urn], None for fd, missing_chunks in missing_chunks_by_fd.iteritems(): e = MissingChunksError( "%d missing chunks (multi-stream)." % len(missing_chunks), missing_chunks=missing_chunks) yield fd, None, e def Initialize(self): """Build a cache for our chunks.""" super(AFF4ImageBase, self).Initialize() self.offset = 0 # A cache for segments. self.chunk_cache = ChunkCache(self._WriteChunk, 100) if "r" in self.mode: self.size = int(self.Get(self.Schema.SIZE)) # pylint: disable=protected-access self.chunksize = int(self.Get(self.Schema._CHUNKSIZE)) # pylint: enable=protected-access self.content_last = self.Get(self.Schema.CONTENT_LAST) else: self.size = 0 self.content_last = None def SetChunksize(self, chunksize): # pylint: disable=protected-access self.Set(self.Schema._CHUNKSIZE(chunksize)) # pylint: enable=protected-access self.chunksize = int(chunksize) self.Truncate(0) def Seek(self, offset, whence=0): # This stream does not support random writing in "w" mode. When the stream # is opened in "w" mode we can not read from the data store and therefore we # can not merge writes with existing data. It only makes sense to append to # existing streams. if self.mode == "w": # Seeking to the end of the stream is ok. 
if not (whence == 2 and offset == 0): raise IOError("Can not seek with an AFF4Image opened for write only.") if whence == 0: self.offset = offset elif whence == 1: self.offset += offset elif whence == 2: self.offset = long(self.size) + offset def Tell(self): return self.offset def Truncate(self, offset=0): self._dirty = True self.size = offset self.offset = offset self.chunk_cache.Flush() def _ReadChunk(self, chunk): self._ReadChunks([chunk]) return self.chunk_cache.Get(chunk) def _ReadChunks(self, chunks): chunk_names = { self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk): chunk for chunk in chunks } for child in FACTORY.MultiOpen( chunk_names, mode="rw", token=self.token, age=self.age_policy): if isinstance(child, AFF4Stream): fd = StringIO.StringIO(child.read()) fd.dirty = False fd.chunk = chunk_names[child.urn] self.chunk_cache.Put(fd.chunk, fd) def _WriteChunk(self, chunk): if chunk.dirty: chunk_name = self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk.chunk) with FACTORY.Create( chunk_name, self.STREAM_TYPE, mode="rw", token=self.token) as fd: fd.write(chunk.getvalue()) def _GetChunkForWriting(self, chunk): """Opens a chunk for writing, creating a new one if it doesn't exist yet.""" try: chunk = self.chunk_cache.Get(chunk) chunk.dirty = True return chunk except KeyError: pass try: chunk = self._ReadChunk(chunk) chunk.dirty = True return chunk except KeyError: pass fd = StringIO.StringIO() fd.chunk = chunk fd.dirty = True self.chunk_cache.Put(chunk, fd) return fd def _GetChunkForReading(self, chunk): """Returns the relevant chunk from the datastore and reads ahead.""" try: return self.chunk_cache.Get(chunk) except KeyError: pass # We don't have this chunk already cached. The most common read # access pattern is contiguous reading so since we have to go to # the data store already, we read ahead to reduce round trips. missing_chunks = [] for chunk_number in range(chunk, chunk + self.LOOK_AHEAD): if chunk_number not in self.chunk_cache: missing_chunks.append(chunk_number) self._ReadChunks(missing_chunks) # This should work now - otherwise we just give up. try: return self.chunk_cache.Get(chunk) except KeyError: raise ChunkNotFoundError("Cannot open chunk %s" % chunk) def _ReadPartial(self, length): """Read as much as possible, but not more than length.""" chunk = self.offset / self.chunksize chunk_offset = self.offset % self.chunksize available_to_read = min(length, self.chunksize - chunk_offset) retries = 0 while retries < self.NUM_RETRIES: fd = self._GetChunkForReading(chunk) if fd: break # Arriving here means we know about blobs that cannot be found in the db. # The most likely reason is that they have not been synced yet so we # retry a couple of times just in case they come in eventually. 
logging.warning("Chunk not found.") time.sleep(1) retries += 1 if retries >= self.NUM_RETRIES: raise IOError("Chunk not found for reading.") fd.seek(chunk_offset) result = fd.read(available_to_read) self.offset += len(result) return result def Read(self, length): """Read a block of data from the file.""" result = "" # The total available size in the file length = int(length) length = min(length, self.size - self.offset) while length > 0: data = self._ReadPartial(length) if not data: break length -= len(data) result += data return result def _WritePartial(self, data): """Writes at most one chunk of data.""" chunk = self.offset / self.chunksize chunk_offset = self.offset % self.chunksize data = utils.SmartStr(data) available_to_write = min(len(data), self.chunksize - chunk_offset) fd = self._GetChunkForWriting(chunk) fd.seek(chunk_offset) fd.write(data[:available_to_write]) self.offset += available_to_write return data[available_to_write:] def Write(self, data): self._dirty = True if isinstance(data, unicode): raise IOError("Cannot write unencoded string.") while data: data = self._WritePartial(data) self.size = max(self.size, self.offset) self.content_last = rdfvalue.RDFDatetime.Now() def Flush(self, sync=True): """Sync the chunk cache to storage.""" if self._dirty: self.Set(self.Schema.SIZE(self.size)) if self.content_last is not None: self.Set(self.Schema.CONTENT_LAST, self.content_last) # Flushing the cache will write all chunks to the blob store. self.chunk_cache.Flush() super(AFF4ImageBase, self).Flush(sync=sync) def Close(self, sync=True): """This method is called to sync our data into storage. Args: sync: Should flushing be synchronous. """ self.Flush(sync=sync) def GetContentAge(self): # TODO(user): make CONTENT_LAST reliable. For some reason, sometimes # CONTENT_LAST gets set even though file's data is not downloaded from the # client. return self.content_last def __getstate__(self): # We can't pickle the callback. if "chunk_cache" in self.__dict__: self.chunk_cache.Flush() res = self.__dict__.copy() del res["chunk_cache"] return res return self.__dict__ def __setstate__(self, state): self.__dict__ = state self.chunk_cache = ChunkCache(self._WriteChunk, 100) class AFF4Image(AFF4ImageBase): """An AFF4 Image containing a versioned stream.""" STREAM_TYPE = AFF4MemoryStream class AFF4UnversionedImage(AFF4ImageBase): """An AFF4 Image containing an unversioned stream.""" STREAM_TYPE = AFF4UnversionedMemoryStream # Utility functions class AFF4InitHook(registry.InitHook): pre = ["ACLInit", "DataStoreInit"] def Run(self): """Delayed loading of aff4 plugins to break import cycles.""" # pylint: disable=unused-variable,global-statement,g-import-not-at-top from grr.lib import aff4_objects global FACTORY FACTORY = Factory() # pylint: disable=g-bad-name # pylint: enable=unused-variable,global-statement,g-import-not-at-top stats.STATS.RegisterCounterMetric("aff4_cache_hits") stats.STATS.RegisterCounterMetric("aff4_cache_misses") class AFF4Filter(object): """A simple filtering system to be used with Query().""" __metaclass__ = registry.MetaclassRegistry def __init__(self, *args): self.args = args @abc.abstractmethod def FilterOne(self, fd): """Filter a single aff4 object.""" def Filter(self, subjects): """A generator which filters the subjects. Args: subjects: An iterator of aff4 objects. Yields: The Objects which pass the filter. 
""" for subject in subjects: if self.FilterOne(subject): yield subject @classmethod def GetFilter(cls, filter_name): return cls.classes[filter_name] # A global registry of all AFF4 classes FACTORY = None ROOT_URN = rdfvalue.RDFURN("aff4:/") def issubclass(obj, cls): # pylint: disable=redefined-builtin,g-bad-name """A sane implementation of issubclass. See http://bugs.python.org/issue10569 Python bare issubclass must be protected by an isinstance test first since it can only work on types and raises when provided something which is not a type. Args: obj: Any object or class. cls: The class to check against. Returns: True if obj is a subclass of cls and False otherwise. """ return isinstance(obj, type) and __builtin__.issubclass(obj, cls) def AuditLogBase(): return ROOT_URN.Add("audit").Add("logs") def CurrentAuditLog(): """Get the rdfurn of the current audit log.""" now_sec = rdfvalue.RDFDatetime.Now().AsSecondsFromEpoch() rollover = config_lib.CONFIG["Logging.aff4_audit_log_rollover"] # This gives us a filename that only changes every # Logging.aff4_audit_log_rollover seconds, but is still a valid timestamp. current_log = (now_sec // rollover) * rollover return AuditLogBase().Add(str(current_log))
apache-2.0
-3,943,746,568,183,437,300
31.335811
80
0.645377
false
4.031552
false
false
false
CranleighAD/isams-tools
settings_example.py
1
2947
# enable or disable the whole program
ENABLED = True

# if we're in testing mode, output more debug and allow testers to add their own email
DEBUG = True

# used with above, you can check the output of emails that would have been sent
SEND_EMAILS = True

# iSAMS Batch API key
API_KEY = "11D497FF-A7D9-4646-A6B8-D9D1B8718FAC"

# iSAMS URL
URL = 'https://isams.school.com'

# Choose which connection method from: JSON, XML, MSSQL
CONNECTION_METHOD = 'JSON'

# Database settings
DATABASE = ''
DATABASE_SERVER = ''
DATABASE_USER = ''
DATABASE_PASSWORD = ''

# specify your own dates to use when testing, e.g. a date for which the register has already been taken
DEBUG_START_DATE = '2016-09-18'
DEBUG_END_DATE = '2016-09-19'

# allows you to specify a file with XML or JSON content to test with rather than using live data
DEBUG_DATA = 'test_data.xml'

# outgoing SMTP details
EMAIL = {
    'server': 'smtp.example.com',
    'port': 465,
    'username': '[email protected]',
    'password': 'p455w0rd',
    'subject': 'Register not completed',
    'from': '[email protected]',
    'to': '[email protected]',
    'cc': '[email protected]',
    'bcc': '[email protected]'
}

# whether to log into the SMTP server
EMAIL_LOGIN = True

# whether to create an SSL connection or not
EMAIL_SSL = True

# Default: Monday - Friday, 0 = Mon, 6 = Sun
WORKING_DAYS = (0, 1, 2, 3, 4)

# weekdays which are not school days
# for help generating these:
# import pandas
# pandas.bdate_range('2016-12-15', '2017-01-07')
HOLIDAYS = (
    # Winter break
    '2016-12-15', '2016-12-16', '2016-12-19', '2016-12-20', '2016-12-21',
    '2016-12-22', '2016-12-23', '2016-12-26', '2016-12-27', '2016-12-28',
    '2016-12-29', '2016-12-30', '2017-01-02', '2017-01-03', '2017-01-04',
    '2017-01-05', '2017-01-06',
)

# email templates
FIRST_EMAIL = """
Dear Teacher,

This is a friendly reminder to complete your register. One or more of your students has not yet been registered.

If you are having problems completing it, please email XXX

If this message is in error, please forward to the helpdesk.

Regards,

iSAMS Bot
"""

SECOND_EMAIL = """
Dear Teacher,

There are still one or more of your students who have not yet been registered.

If you are having problems completing it, please email XXX

If this message is in error, please forward to the helpdesk.

Regards,

iSAMS Bot
"""

# You can use %list_of_missing_registers% for a list in the template
FINAL_EMAIL = """
Here is a list of forms that are still outstanding:

%list_of_missing_registers%

Regards,

iSAMS Bot
"""

# separate with commas if you want more than one recipient
FINAL_EMAIL_TO = "[email protected]"

#######################
# Data Check Settings #
#######################

DATA_CHECK_ENABED = True

# who to email when it fails
DATA_CHECK_FAIL_EMAIL = "[email protected]"

# list of subjects to ignore from checks in single quotes
DATA_CHECK_IGNORE_SUBJECTS = ["Games", "Physical Education"]
gpl-3.0
5,770,833,458,472,531,000
23.773109
112
0.687479
false
3.060228
false
false
false
cliixtech/bigorna
tests/tasks/test_sched.py
1
1892
from unittest import TestCase from unittest.mock import create_autospec from nose.tools import istest from bigorna.commons import Event from bigorna.tasks import TaskScheduler, TaskDefinition, task_status_changed_evt from bigorna.tasks.executor import Executor from bigorna.commons import Config class TaskSchedulerTest(TestCase): def setUp(self): self.config_mock = create_autospec(Config) self.config_mock.concurrent_tasks = 1 self.executor_mock = create_autospec(Executor) self.task_def = TaskDefinition("ls -la", '.') self.scheduler = TaskScheduler(self.config_mock, self.executor_mock) @istest def submit_calls_executor_and_submit_if_no_tasks(self): self.executor_mock.running_tasks_counter = 0 self.scheduler.submit(self.task_def) self.executor_mock.submit.assert_called_once_with(self.task_def) @istest def submit_dont_submit_to_executor_if_too_many_tasks(self): self.executor_mock.running_tasks_counter = 1 self.scheduler.submit(self.task_def) self.executor_mock.submit.assert_not_called() @istest def handle_event_and_submit_to_executor(self): self.executor_mock.running_tasks_counter = 1 self.scheduler.submit(self.task_def) other_task = TaskDefinition("ls -la", None) self.scheduler.submit(other_task) self.executor_mock.running_tasks_counter = 0 self.scheduler._event_handler(Event(task_status_changed_evt, None)) self.executor_mock.submit.assert_called_once_with(self.task_def) self.assertEqual(self.scheduler.pending_tasks_counter, 1) @istest def handle_event_and_not_pending_tasks(self): self.executor_mock.running_tasks_counter = 0 self.scheduler._event_handler(Event(task_status_changed_evt, None)) self.executor_mock.submit.assert_not_called()
gpl-3.0
3,351,970,099,618,253,000
32.192982
80
0.705603
false
3.610687
true
false
false
Runscope/pysaml2
example/idp2/idp.py
1
35623
#!/usr/bin/env python import argparse import base64 import importlib import logging import os import re import socket import time from Cookie import SimpleCookie from hashlib import sha1 from urlparse import parse_qs from saml2 import BINDING_HTTP_ARTIFACT from saml2 import BINDING_URI from saml2 import BINDING_PAOS from saml2 import BINDING_SOAP from saml2 import BINDING_HTTP_REDIRECT from saml2 import BINDING_HTTP_POST from saml2 import server from saml2 import time_util from saml2.authn_context import AuthnBroker from saml2.authn_context import PASSWORD from saml2.authn_context import UNSPECIFIED from saml2.authn_context import authn_context_class_ref from saml2.httputil import Response from saml2.httputil import NotFound from saml2.httputil import geturl from saml2.httputil import get_post from saml2.httputil import Redirect from saml2.httputil import Unauthorized from saml2.httputil import BadRequest from saml2.httputil import ServiceError from saml2.ident import Unknown from saml2.metadata import create_metadata_string from saml2.profile import ecp from saml2.s_utils import rndstr from saml2.s_utils import exception_trace from saml2.s_utils import UnknownPrincipal from saml2.s_utils import UnsupportedBinding from saml2.s_utils import PolicyError from saml2.sigver import verify_redirect_signature from saml2.sigver import encrypt_cert_from_item from idp_user import USERS from idp_user import EXTRA from mako.lookup import TemplateLookup logger = logging.getLogger("saml2.idp") logger.setLevel(logging.WARNING) class Cache(object): def __init__(self): self.user2uid = {} self.uid2user = {} def _expiration(timeout, tformat="%a, %d-%b-%Y %H:%M:%S GMT"): """ :param timeout: :param tformat: :return: """ if timeout == "now": return time_util.instant(tformat) elif timeout == "dawn": return time.strftime(tformat, time.gmtime(0)) else: # validity time should match lifetime of assertions return time_util.in_a_while(minutes=timeout, format=tformat) # ----------------------------------------------------------------------------- def dict2list_of_tuples(d): return [(k, v) for k, v in d.items()] # ----------------------------------------------------------------------------- class Service(object): def __init__(self, environ, start_response, user=None): self.environ = environ logger.debug("ENVIRON: %s" % environ) self.start_response = start_response self.user = user def unpack_redirect(self): if "QUERY_STRING" in self.environ: _qs = self.environ["QUERY_STRING"] return dict([(k, v[0]) for k, v in parse_qs(_qs).items()]) else: return None def unpack_post(self): _dict = parse_qs(get_post(self.environ)) logger.debug("unpack_post:: %s" % _dict) try: return dict([(k, v[0]) for k, v in _dict.items()]) except Exception: return None def unpack_soap(self): try: query = get_post(self.environ) return {"SAMLRequest": query, "RelayState": ""} except Exception: return None def unpack_either(self): if self.environ["REQUEST_METHOD"] == "GET": _dict = self.unpack_redirect() elif self.environ["REQUEST_METHOD"] == "POST": _dict = self.unpack_post() else: _dict = None logger.debug("_dict: %s" % _dict) return _dict def operation(self, saml_msg, binding): logger.debug("_operation: %s" % saml_msg) if not saml_msg or not 'SAMLRequest' in saml_msg: resp = BadRequest('Error parsing request or no request') return resp(self.environ, self.start_response) else: try: _encrypt_cert = encrypt_cert_from_item( saml_msg["req_info"].message) return self.do(saml_msg["SAMLRequest"], binding, saml_msg["RelayState"], encrypt_cert=_encrypt_cert) 
except KeyError: # Can live with no relay state return self.do(saml_msg["SAMLRequest"], binding) def artifact_operation(self, saml_msg): if not saml_msg: resp = BadRequest("Missing query") return resp(self.environ, self.start_response) else: # exchange artifact for request request = IDP.artifact2message(saml_msg["SAMLart"], "spsso") try: return self.do(request, BINDING_HTTP_ARTIFACT, saml_msg["RelayState"]) except KeyError: return self.do(request, BINDING_HTTP_ARTIFACT) def response(self, binding, http_args): resp = None if binding == BINDING_HTTP_ARTIFACT: resp = Redirect() elif http_args["data"]: resp = Response(http_args["data"], headers=http_args["headers"]) else: for header in http_args["headers"]: if header[0] == "Location": resp = Redirect(header[1]) if not resp: resp = ServiceError("Don't know how to return response") return resp(self.environ, self.start_response) def do(self, query, binding, relay_state="", encrypt_cert=None): pass def redirect(self): """ Expects a HTTP-redirect request """ _dict = self.unpack_redirect() return self.operation(_dict, BINDING_HTTP_REDIRECT) def post(self): """ Expects a HTTP-POST request """ _dict = self.unpack_post() return self.operation(_dict, BINDING_HTTP_POST) def artifact(self): # Can be either by HTTP_Redirect or HTTP_POST _dict = self.unpack_either() return self.artifact_operation(_dict) def soap(self): """ Single log out using HTTP_SOAP binding """ logger.debug("- SOAP -") _dict = self.unpack_soap() logger.debug("_dict: %s" % _dict) return self.operation(_dict, BINDING_SOAP) def uri(self): _dict = self.unpack_either() return self.operation(_dict, BINDING_SOAP) def not_authn(self, key, requested_authn_context): ruri = geturl(self.environ, query=False) return do_authentication(self.environ, self.start_response, authn_context=requested_authn_context, key=key, redirect_uri=ruri) # ----------------------------------------------------------------------------- REPOZE_ID_EQUIVALENT = "uid" FORM_SPEC = """<form name="myform" method="post" action="%s"> <input type="hidden" name="SAMLResponse" value="%s" /> <input type="hidden" name="RelayState" value="%s" /> </form>""" # ----------------------------------------------------------------------------- # === Single log in ==== # ----------------------------------------------------------------------------- class AuthenticationNeeded(Exception): def __init__(self, authn_context=None, *args, **kwargs): Exception.__init__(*args, **kwargs) self.authn_context = authn_context class SSO(Service): def __init__(self, environ, start_response, user=None): Service.__init__(self, environ, start_response, user) self.binding = "" self.response_bindings = None self.resp_args = {} self.binding_out = None self.destination = None self.req_info = None self.op_type = "" def verify_request(self, query, binding): """ :param query: The SAML query, transport encoded :param binding: Which binding the query came in over """ resp_args = {} if not query: logger.info("Missing QUERY") resp = Unauthorized('Unknown user') return resp_args, resp(self.environ, self.start_response) if not self.req_info: self.req_info = IDP.parse_authn_request(query, binding) logger.info("parsed OK") _authn_req = self.req_info.message logger.debug("%s" % _authn_req) try: self.binding_out, self.destination = IDP.pick_binding( "assertion_consumer_service", bindings=self.response_bindings, entity_id=_authn_req.issuer.text, request=_authn_req) except Exception as err: logger.error("Couldn't find receiver endpoint: %s" % err) raise logger.debug("Binding: %s, 
destination: %s" % (self.binding_out, self.destination)) resp_args = {} try: resp_args = IDP.response_args(_authn_req) _resp = None except UnknownPrincipal as excp: _resp = IDP.create_error_response(_authn_req.id, self.destination, excp) except UnsupportedBinding as excp: _resp = IDP.create_error_response(_authn_req.id, self.destination, excp) return resp_args, _resp def do(self, query, binding_in, relay_state="", encrypt_cert=None): """ :param query: The request :param binding_in: Which binding was used when receiving the query :param relay_state: The relay state provided by the SP :param encrypt_cert: Cert to use for encryption :return: A response """ try: resp_args, _resp = self.verify_request(query, binding_in) except UnknownPrincipal as excp: logger.error("UnknownPrincipal: %s" % (excp,)) resp = ServiceError("UnknownPrincipal: %s" % (excp,)) return resp(self.environ, self.start_response) except UnsupportedBinding as excp: logger.error("UnsupportedBinding: %s" % (excp,)) resp = ServiceError("UnsupportedBinding: %s" % (excp,)) return resp(self.environ, self.start_response) if not _resp: identity = USERS[self.user].copy() #identity["eduPersonTargetedID"] = get_eptid(IDP, query, session) logger.info("Identity: %s" % (identity,)) if REPOZE_ID_EQUIVALENT: identity[REPOZE_ID_EQUIVALENT] = self.user try: try: metod = self.environ["idp.authn"] except KeyError: pass else: resp_args["authn"] = metod _resp = IDP.create_authn_response( identity, userid=self.user, encrypt_cert=encrypt_cert, **resp_args) except Exception as excp: logging.error(exception_trace(excp)) resp = ServiceError("Exception: %s" % (excp,)) return resp(self.environ, self.start_response) logger.info("AuthNResponse: %s" % _resp) if self.op_type == "ecp": kwargs = {"soap_headers": [ ecp.Response( assertion_consumer_service_url=self.destination)]} else: kwargs = {} http_args = IDP.apply_binding(self.binding_out, "%s" % _resp, self.destination, relay_state, response=True, **kwargs) logger.debug("HTTPargs: %s" % http_args) return self.response(self.binding_out, http_args) def _store_request(self, saml_msg): logger.debug("_store_request: %s" % saml_msg) key = sha1(saml_msg["SAMLRequest"]).hexdigest() # store the AuthnRequest IDP.ticket[key] = saml_msg return key def redirect(self): """ This is the HTTP-redirect endpoint """ logger.info("--- In SSO Redirect ---") saml_msg = self.unpack_redirect() try: _key = saml_msg["key"] saml_msg = IDP.ticket[_key] self.req_info = saml_msg["req_info"] del IDP.ticket[_key] except KeyError: try: self.req_info = IDP.parse_authn_request(saml_msg["SAMLRequest"], BINDING_HTTP_REDIRECT) except KeyError: resp = BadRequest("Message signature verification failure") return resp(self.environ, self.start_response) _req = self.req_info.message if "SigAlg" in saml_msg and "Signature" in saml_msg: # Signed request issuer = _req.issuer.text _certs = IDP.metadata.certs(issuer, "any", "signing") verified_ok = False for cert in _certs: if verify_redirect_signature(saml_msg, cert): verified_ok = True break if not verified_ok: resp = BadRequest("Message signature verification failure") return resp(self.environ, self.start_response) if self.user: if _req.force_authn: saml_msg["req_info"] = self.req_info key = self._store_request(saml_msg) return self.not_authn(key, _req.requested_authn_context) else: return self.operation(saml_msg, BINDING_HTTP_REDIRECT) else: saml_msg["req_info"] = self.req_info key = self._store_request(saml_msg) return self.not_authn(key, _req.requested_authn_context) else: return 
self.operation(saml_msg, BINDING_HTTP_REDIRECT) def post(self): """ The HTTP-Post endpoint """ logger.info("--- In SSO POST ---") saml_msg = self.unpack_either() self.req_info = IDP.parse_authn_request( saml_msg["SAMLRequest"], BINDING_HTTP_POST) _req = self.req_info.message if self.user: if _req.force_authn: saml_msg["req_info"] = self.req_info key = self._store_request(saml_msg) return self.not_authn(key, _req.requested_authn_context) else: return self.operation(saml_msg, BINDING_HTTP_POST) else: saml_msg["req_info"] = self.req_info key = self._store_request(saml_msg) return self.not_authn(key, _req.requested_authn_context) # def artifact(self): # # Can be either by HTTP_Redirect or HTTP_POST # _req = self._store_request(self.unpack_either()) # if isinstance(_req, basestring): # return self.not_authn(_req) # return self.artifact_operation(_req) def ecp(self): # The ECP interface logger.info("--- ECP SSO ---") resp = None try: authz_info = self.environ["HTTP_AUTHORIZATION"] if authz_info.startswith("Basic "): _info = base64.b64decode(authz_info[6:]) logger.debug("Authz_info: %s" % _info) try: (user, passwd) = _info.split(":") if PASSWD[user] != passwd: resp = Unauthorized() self.user = user self.environ[ "idp.authn"] = AUTHN_BROKER.get_authn_by_accr( PASSWORD) except ValueError: resp = Unauthorized() else: resp = Unauthorized() except KeyError: resp = Unauthorized() if resp: return resp(self.environ, self.start_response) _dict = self.unpack_soap() self.response_bindings = [BINDING_PAOS] # Basic auth ?! self.op_type = "ecp" return self.operation(_dict, BINDING_SOAP) # ----------------------------------------------------------------------------- # === Authentication ==== # ----------------------------------------------------------------------------- def do_authentication(environ, start_response, authn_context, key, redirect_uri): """ Display the login form """ logger.debug("Do authentication") auth_info = AUTHN_BROKER.pick(authn_context) if len(auth_info): method, reference = auth_info[0] logger.debug("Authn chosen: %s (ref=%s)" % (method, reference)) return method(environ, start_response, reference, key, redirect_uri) else: resp = Unauthorized("No usable authentication method") return resp(environ, start_response) # ----------------------------------------------------------------------------- PASSWD = { "daev0001": "qwerty", "haho0032": "qwerty", "roland": "dianakra", "babs": "howes", "upper": "crust"} def username_password_authn(environ, start_response, reference, key, redirect_uri): """ Display the login form """ logger.info("The login page") headers = [] resp = Response(mako_template="login.mako", template_lookup=LOOKUP, headers=headers) argv = { "action": "/verify", "login": "", "password": "", "key": key, "authn_reference": reference, "redirect_uri": redirect_uri } logger.info("do_authentication argv: %s" % argv) return resp(environ, start_response, **argv) def verify_username_and_password(dic): global PASSWD # verify username and password if PASSWD[dic["login"][0]] == dic["password"][0]: return True, dic["login"][0] else: return False, "" def do_verify(environ, start_response, _): query = parse_qs(get_post(environ)) logger.debug("do_verify: %s" % query) try: _ok, user = verify_username_and_password(query) except KeyError: _ok = False user = None if not _ok: resp = Unauthorized("Unknown user or wrong password") else: uid = rndstr(24) IDP.cache.uid2user[uid] = user IDP.cache.user2uid[user] = uid logger.debug("Register %s under '%s'" % (user, uid)) kaka = set_cookie("idpauthn", "/", 
uid, query["authn_reference"][0]) lox = "%s?id=%s&key=%s" % (query["redirect_uri"][0], uid, query["key"][0]) logger.debug("Redirect => %s" % lox) resp = Redirect(lox, headers=[kaka], content="text/html") return resp(environ, start_response) def not_found(environ, start_response): """Called if no URL matches.""" resp = NotFound() return resp(environ, start_response) # ----------------------------------------------------------------------------- # === Single log out === # ----------------------------------------------------------------------------- #def _subject_sp_info(req_info): # # look for the subject # subject = req_info.subject_id() # subject = subject.text.strip() # sp_entity_id = req_info.message.issuer.text.strip() # return subject, sp_entity_id class SLO(Service): def do(self, request, binding, relay_state="", encrypt_cert=None): logger.info("--- Single Log Out Service ---") try: _, body = request.split("\n") logger.debug("req: '%s'" % body) req_info = IDP.parse_logout_request(body, binding) except Exception as exc: logger.error("Bad request: %s" % exc) resp = BadRequest("%s" % exc) return resp(self.environ, self.start_response) msg = req_info.message if msg.name_id: lid = IDP.ident.find_local_id(msg.name_id) logger.info("local identifier: %s" % lid) if lid in IDP.cache.user2uid: uid = IDP.cache.user2uid[lid] if uid in IDP.cache.uid2user: del IDP.cache.uid2user[uid] del IDP.cache.user2uid[lid] # remove the authentication try: IDP.session_db.remove_authn_statements(msg.name_id) except KeyError as exc: logger.error("ServiceError: %s" % exc) resp = ServiceError("%s" % exc) return resp(self.environ, self.start_response) resp = IDP.create_logout_response(msg, [binding]) try: hinfo = IDP.apply_binding(binding, "%s" % resp, "", relay_state) except Exception as exc: logger.error("ServiceError: %s" % exc) resp = ServiceError("%s" % exc) return resp(self.environ, self.start_response) #_tlh = dict2list_of_tuples(hinfo["headers"]) delco = delete_cookie(self.environ, "idpauthn") if delco: hinfo["headers"].append(delco) logger.info("Header: %s" % (hinfo["headers"],)) resp = Response(hinfo["data"], headers=hinfo["headers"]) return resp(self.environ, self.start_response) # ---------------------------------------------------------------------------- # Manage Name ID service # ---------------------------------------------------------------------------- class NMI(Service): def do(self, query, binding, relay_state="", encrypt_cert=None): logger.info("--- Manage Name ID Service ---") req = IDP.parse_manage_name_id_request(query, binding) request = req.message # Do the necessary stuff name_id = IDP.ident.handle_manage_name_id_request( request.name_id, request.new_id, request.new_encrypted_id, request.terminate) logger.debug("New NameID: %s" % name_id) _resp = IDP.create_manage_name_id_response(request) # It's using SOAP binding hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % _resp, "", relay_state, response=True) resp = Response(hinfo["data"], headers=hinfo["headers"]) return resp(self.environ, self.start_response) # ---------------------------------------------------------------------------- # === Assertion ID request === # ---------------------------------------------------------------------------- # Only URI binding class AIDR(Service): def do(self, aid, binding, relay_state="", encrypt_cert=None): logger.info("--- Assertion ID Service ---") try: assertion = IDP.create_assertion_id_request_response(aid) except Unknown: resp = NotFound(aid) return resp(self.environ, self.start_response) hinfo = 
IDP.apply_binding(BINDING_URI, "%s" % assertion, response=True) logger.debug("HINFO: %s" % hinfo) resp = Response(hinfo["data"], headers=hinfo["headers"]) return resp(self.environ, self.start_response) def operation(self, _dict, binding, **kwargs): logger.debug("_operation: %s" % _dict) if not _dict or "ID" not in _dict: resp = BadRequest('Error parsing request or no request') return resp(self.environ, self.start_response) return self.do(_dict["ID"], binding, **kwargs) # ---------------------------------------------------------------------------- # === Artifact resolve service === # ---------------------------------------------------------------------------- class ARS(Service): def do(self, request, binding, relay_state="", encrypt_cert=None): _req = IDP.parse_artifact_resolve(request, binding) msg = IDP.create_artifact_response(_req, _req.artifact.text) hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % msg, "", "", response=True) resp = Response(hinfo["data"], headers=hinfo["headers"]) return resp(self.environ, self.start_response) # ---------------------------------------------------------------------------- # === Authn query service === # ---------------------------------------------------------------------------- # Only SOAP binding class AQS(Service): def do(self, request, binding, relay_state="", encrypt_cert=None): logger.info("--- Authn Query Service ---") _req = IDP.parse_authn_query(request, binding) _query = _req.message msg = IDP.create_authn_query_response(_query.subject, _query.requested_authn_context, _query.session_index) logger.debug("response: %s" % msg) hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % msg, "", "", response=True) resp = Response(hinfo["data"], headers=hinfo["headers"]) return resp(self.environ, self.start_response) # ---------------------------------------------------------------------------- # === Attribute query service === # ---------------------------------------------------------------------------- # Only SOAP binding class ATTR(Service): def do(self, request, binding, relay_state="", encrypt_cert=None): logger.info("--- Attribute Query Service ---") _req = IDP.parse_attribute_query(request, binding) _query = _req.message name_id = _query.subject.name_id uid = name_id.text logger.debug("Local uid: %s" % uid) identity = EXTRA[uid] # Comes in over SOAP so only need to construct the response args = IDP.response_args(_query, [BINDING_SOAP]) msg = IDP.create_attribute_response(identity, name_id=name_id, **args) logger.debug("response: %s" % msg) hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % msg, "", "", response=True) resp = Response(hinfo["data"], headers=hinfo["headers"]) return resp(self.environ, self.start_response) # ---------------------------------------------------------------------------- # Name ID Mapping service # When an entity that shares an identifier for a principal with an identity # provider wishes to obtain a name identifier for the same principal in a # particular format or federation namespace, it can send a request to # the identity provider using this protocol. 
# ---------------------------------------------------------------------------- class NIM(Service): def do(self, query, binding, relay_state="", encrypt_cert=None): req = IDP.parse_name_id_mapping_request(query, binding) request = req.message # Do the necessary stuff try: name_id = IDP.ident.handle_name_id_mapping_request( request.name_id, request.name_id_policy) except Unknown: resp = BadRequest("Unknown entity") return resp(self.environ, self.start_response) except PolicyError: resp = BadRequest("Unknown entity") return resp(self.environ, self.start_response) info = IDP.response_args(request) _resp = IDP.create_name_id_mapping_response(name_id, **info) # Only SOAP hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % _resp, "", "", response=True) resp = Response(hinfo["data"], headers=hinfo["headers"]) return resp(self.environ, self.start_response) # ---------------------------------------------------------------------------- # Cookie handling # ---------------------------------------------------------------------------- def info_from_cookie(kaka): logger.debug("KAKA: %s" % kaka) if kaka: cookie_obj = SimpleCookie(kaka) morsel = cookie_obj.get("idpauthn", None) if morsel: try: key, ref = base64.b64decode(morsel.value).split(":") return IDP.cache.uid2user[key], ref except KeyError: return None, None else: logger.debug("No idpauthn cookie") return None, None def delete_cookie(environ, name): kaka = environ.get("HTTP_COOKIE", '') logger.debug("delete KAKA: %s" % kaka) if kaka: cookie_obj = SimpleCookie(kaka) morsel = cookie_obj.get(name, None) cookie = SimpleCookie() cookie[name] = "" cookie[name]['path'] = "/" logger.debug("Expire: %s" % morsel) cookie[name]["expires"] = _expiration("dawn") return tuple(cookie.output().split(": ", 1)) return None def set_cookie(name, _, *args): cookie = SimpleCookie() cookie[name] = base64.b64encode(":".join(args)) cookie[name]['path'] = "/" cookie[name]["expires"] = _expiration(5) # 5 minutes from now logger.debug("Cookie expires: %s" % cookie[name]["expires"]) return tuple(cookie.output().split(": ", 1)) # ---------------------------------------------------------------------------- # map urls to functions AUTHN_URLS = [ # sso (r'sso/post$', (SSO, "post")), (r'sso/post/(.*)$', (SSO, "post")), (r'sso/redirect$', (SSO, "redirect")), (r'sso/redirect/(.*)$', (SSO, "redirect")), (r'sso/art$', (SSO, "artifact")), (r'sso/art/(.*)$', (SSO, "artifact")), # slo (r'slo/redirect$', (SLO, "redirect")), (r'slo/redirect/(.*)$', (SLO, "redirect")), (r'slo/post$', (SLO, "post")), (r'slo/post/(.*)$', (SLO, "post")), (r'slo/soap$', (SLO, "soap")), (r'slo/soap/(.*)$', (SLO, "soap")), # (r'airs$', (AIDR, "uri")), (r'ars$', (ARS, "soap")), # mni (r'mni/post$', (NMI, "post")), (r'mni/post/(.*)$', (NMI, "post")), (r'mni/redirect$', (NMI, "redirect")), (r'mni/redirect/(.*)$', (NMI, "redirect")), (r'mni/art$', (NMI, "artifact")), (r'mni/art/(.*)$', (NMI, "artifact")), (r'mni/soap$', (NMI, "soap")), (r'mni/soap/(.*)$', (NMI, "soap")), # nim (r'nim$', (NIM, "soap")), (r'nim/(.*)$', (NIM, "soap")), # (r'aqs$', (AQS, "soap")), (r'attr$', (ATTR, "soap")) ] NON_AUTHN_URLS = [ #(r'login?(.*)$', do_authentication), (r'verify?(.*)$', do_verify), (r'sso/ecp$', (SSO, "ecp")), ] # ---------------------------------------------------------------------------- def metadata(environ, start_response): try: path = args.path if path is None or len(path) == 0: path = os.path.dirname(os.path.abspath( __file__ )) if path[-1] != "/": path += "/" metadata = create_metadata_string(path+args.config, IDP.config, 
args.valid, args.cert, args.keyfile, args.id, args.name, args.sign) start_response('200 OK', [('Content-Type', "text/xml")]) return metadata except Exception as ex: logger.error("An error occured while creating metadata:" + ex.message) return not_found(environ, start_response) def staticfile(environ, start_response): try: path = args.path if path is None or len(path) == 0: path = os.path.dirname(os.path.abspath(__file__)) if path[-1] != "/": path += "/" path += environ.get('PATH_INFO', '').lstrip('/') start_response('200 OK', [('Content-Type', "text/xml")]) return open(path, 'r').read() except Exception as ex: logger.error("An error occured while creating metadata:" + ex.message) return not_found(environ, start_response) def application(environ, start_response): """ The main WSGI application. Dispatch the current request to the functions from above and store the regular expression captures in the WSGI environment as `myapp.url_args` so that the functions from above can access the url placeholders. If nothing matches, call the `not_found` function. :param environ: The HTTP application environment :param start_response: The application to run when the handling of the request is done :return: The response as a list of lines """ path = environ.get('PATH_INFO', '').lstrip('/') if path == "metadata": return metadata(environ, start_response) kaka = environ.get("HTTP_COOKIE", None) logger.info("<application> PATH: %s" % path) if kaka: logger.info("= KAKA =") user, authn_ref = info_from_cookie(kaka) if authn_ref: environ["idp.authn"] = AUTHN_BROKER[authn_ref] else: try: query = parse_qs(environ["QUERY_STRING"]) logger.debug("QUERY: %s" % query) user = IDP.cache.uid2user[query["id"][0]] except KeyError: user = None url_patterns = AUTHN_URLS if not user: logger.info("-- No USER --") # insert NON_AUTHN_URLS first in case there is no user url_patterns = NON_AUTHN_URLS + url_patterns for regex, callback in url_patterns: match = re.search(regex, path) if match is not None: try: environ['myapp.url_args'] = match.groups()[0] except IndexError: environ['myapp.url_args'] = path logger.debug("Callback: %s" % (callback,)) if isinstance(callback, tuple): cls = callback[0](environ, start_response, user) func = getattr(cls, callback[1]) return func() return callback(environ, start_response, user) if re.search(r'static/.*', path) is not None: return staticfile(environ, start_response) return not_found(environ, start_response) # ---------------------------------------------------------------------------- if __name__ == '__main__': from wsgiref.simple_server import make_server parser = argparse.ArgumentParser() parser.add_argument('-p', dest='path', help='Path to configuration file.') parser.add_argument('-v', dest='valid', help="How long, in days, the metadata is valid from the time of creation") parser.add_argument('-c', dest='cert', help='certificate') parser.add_argument('-i', dest='id', help="The ID of the entities descriptor") parser.add_argument('-k', dest='keyfile', help="A file with a key to sign the metadata with") parser.add_argument('-n', dest='name') parser.add_argument('-s', dest='sign', action='store_true', help="sign the metadata") parser.add_argument('-m', dest='mako_root', default="./") parser.add_argument(dest="config") args = parser.parse_args() AUTHN_BROKER = AuthnBroker() AUTHN_BROKER.add(authn_context_class_ref(PASSWORD), username_password_authn, 10, "http://%s" % socket.gethostname()) AUTHN_BROKER.add(authn_context_class_ref(UNSPECIFIED), "", 0, "http://%s" % socket.gethostname()) CONFIG = 
importlib.import_module(args.config) IDP = server.Server(args.config, cache=Cache()) IDP.ticket = {} _rot = args.mako_root LOOKUP = TemplateLookup(directories=[_rot + 'templates', _rot + 'htdocs'], module_directory=_rot + 'modules', input_encoding='utf-8', output_encoding='utf-8') HOST = CONFIG.HOST PORT = CONFIG.PORT SRV = make_server(HOST, PORT, application) print "IdP listening on %s:%s" % (HOST, PORT) SRV.serve_forever()
bsd-2-clause
900,716,577,187,012,400
34.551896
98
0.528703
false
4.037516
false
false
false
docusign/docusign-python-client
docusign_esign/apis/workspaces_api.py
1
66547
# coding: utf-8 """ DocuSign REST API The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501 OpenAPI spec version: v2.1 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..client.configuration import Configuration from ..client.api_client import ApiClient class WorkspacesApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def create_workspace(self, account_id, **kwargs): """ Create a Workspace Creates a new workspace. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_workspace(account_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param Workspace workspace: :return: Workspace If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.create_workspace_with_http_info(account_id, **kwargs) else: (data) = self.create_workspace_with_http_info(account_id, **kwargs) return data def create_workspace_with_http_info(self, account_id, **kwargs): """ Create a Workspace Creates a new workspace. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_workspace_with_http_info(account_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param Workspace workspace: :return: Workspace If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'workspace'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_workspace" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params) or (params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `create_workspace`") collection_formats = {} resource_path = '/v2.1/accounts/{accountId}/workspaces'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None if 'workspace' in params: body_params = params['workspace'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Workspace', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def create_workspace_file(self, account_id, folder_id, workspace_id, **kwargs): """ Creates a workspace file. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_workspace_file(account_id, folder_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str folder_id: The ID of the folder being accessed. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :return: WorkspaceItem If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.create_workspace_file_with_http_info(account_id, folder_id, workspace_id, **kwargs) else: (data) = self.create_workspace_file_with_http_info(account_id, folder_id, workspace_id, **kwargs) return data def create_workspace_file_with_http_info(self, account_id, folder_id, workspace_id, **kwargs): """ Creates a workspace file. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_workspace_file_with_http_info(account_id, folder_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str folder_id: The ID of the folder being accessed. (required) :param str workspace_id: Specifies the workspace ID GUID. 
(required) :return: WorkspaceItem If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'folder_id', 'workspace_id'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_workspace_file" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params) or (params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `create_workspace_file`") # verify the required parameter 'folder_id' is set if ('folder_id' not in params) or (params['folder_id'] is None): raise ValueError("Missing the required parameter `folder_id` when calling `create_workspace_file`") # verify the required parameter 'workspace_id' is set if ('workspace_id' not in params) or (params['workspace_id'] is None): raise ValueError("Missing the required parameter `workspace_id` when calling `create_workspace_file`") collection_formats = {} resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}/folders/{folderId}/files'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] if 'folder_id' in params: path_params['folderId'] = params['folder_id'] if 'workspace_id' in params: path_params['workspaceId'] = params['workspace_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='WorkspaceItem', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_workspace(self, account_id, workspace_id, **kwargs): """ Delete Workspace Deletes an existing workspace (logically). This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_workspace(account_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :return: Workspace If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_workspace_with_http_info(account_id, workspace_id, **kwargs) else: (data) = self.delete_workspace_with_http_info(account_id, workspace_id, **kwargs) return data def delete_workspace_with_http_info(self, account_id, workspace_id, **kwargs): """ Delete Workspace Deletes an existing workspace (logically). This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_workspace_with_http_info(account_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :return: Workspace If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'workspace_id'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_workspace" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params) or (params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `delete_workspace`") # verify the required parameter 'workspace_id' is set if ('workspace_id' not in params) or (params['workspace_id'] is None): raise ValueError("Missing the required parameter `workspace_id` when calling `delete_workspace`") collection_formats = {} resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] if 'workspace_id' in params: path_params['workspaceId'] = params['workspace_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Workspace', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_workspace_folder_items(self, account_id, folder_id, workspace_id, **kwargs): """ Deletes workspace one or more specific files/folders from the given folder or root. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_workspace_folder_items(account_id, folder_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str folder_id: The ID of the folder being accessed. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :param WorkspaceItemList workspace_item_list: :return: None If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_workspace_folder_items_with_http_info(account_id, folder_id, workspace_id, **kwargs) else: (data) = self.delete_workspace_folder_items_with_http_info(account_id, folder_id, workspace_id, **kwargs) return data def delete_workspace_folder_items_with_http_info(self, account_id, folder_id, workspace_id, **kwargs): """ Deletes workspace one or more specific files/folders from the given folder or root. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_workspace_folder_items_with_http_info(account_id, folder_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str folder_id: The ID of the folder being accessed. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :param WorkspaceItemList workspace_item_list: :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'folder_id', 'workspace_id', 'workspace_item_list'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_workspace_folder_items" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params) or (params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `delete_workspace_folder_items`") # verify the required parameter 'folder_id' is set if ('folder_id' not in params) or (params['folder_id'] is None): raise ValueError("Missing the required parameter `folder_id` when calling `delete_workspace_folder_items`") # verify the required parameter 'workspace_id' is set if ('workspace_id' not in params) or (params['workspace_id'] is None): raise ValueError("Missing the required parameter `workspace_id` when calling `delete_workspace_folder_items`") collection_formats = {} resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}/folders/{folderId}'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] if 'folder_id' in params: path_params['folderId'] = params['folder_id'] if 'workspace_id' in params: path_params['workspaceId'] = params['workspace_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None if 'workspace_item_list' in params: body_params = params['workspace_item_list'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), 
_request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_workspace(self, account_id, workspace_id, **kwargs): """ Get Workspace Retrives properties about a workspace given a unique workspaceId. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_workspace(account_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :return: Workspace If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_workspace_with_http_info(account_id, workspace_id, **kwargs) else: (data) = self.get_workspace_with_http_info(account_id, workspace_id, **kwargs) return data def get_workspace_with_http_info(self, account_id, workspace_id, **kwargs): """ Get Workspace Retrives properties about a workspace given a unique workspaceId. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_workspace_with_http_info(account_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :return: Workspace If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'workspace_id'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_workspace" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params) or (params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_workspace`") # verify the required parameter 'workspace_id' is set if ('workspace_id' not in params) or (params['workspace_id'] is None): raise ValueError("Missing the required parameter `workspace_id` when calling `get_workspace`") collection_formats = {} resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] if 'workspace_id' in params: path_params['workspaceId'] = params['workspace_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Workspace', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_workspace_file(self, account_id, file_id, folder_id, workspace_id, **kwargs): """ Get Workspace File Retrieves a workspace file (the binary). This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_workspace_file(account_id, file_id, folder_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str file_id: Specifies the room file ID GUID. (required) :param str folder_id: The ID of the folder being accessed. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :param str is_download: When set to **true**, the Content-Disposition header is set in the response. The value of the header provides the filename of the file. Default is **false**. :param str pdf_version: When set to **true** the file returned as a PDF. :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_workspace_file_with_http_info(account_id, file_id, folder_id, workspace_id, **kwargs) else: (data) = self.get_workspace_file_with_http_info(account_id, file_id, folder_id, workspace_id, **kwargs) return data def get_workspace_file_with_http_info(self, account_id, file_id, folder_id, workspace_id, **kwargs): """ Get Workspace File Retrieves a workspace file (the binary). 
This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_workspace_file_with_http_info(account_id, file_id, folder_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str file_id: Specifies the room file ID GUID. (required) :param str folder_id: The ID of the folder being accessed. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :param str is_download: When set to **true**, the Content-Disposition header is set in the response. The value of the header provides the filename of the file. Default is **false**. :param str pdf_version: When set to **true** the file returned as a PDF. :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'file_id', 'folder_id', 'workspace_id', 'is_download', 'pdf_version'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_workspace_file" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params) or (params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `get_workspace_file`") # verify the required parameter 'file_id' is set if ('file_id' not in params) or (params['file_id'] is None): raise ValueError("Missing the required parameter `file_id` when calling `get_workspace_file`") # verify the required parameter 'folder_id' is set if ('folder_id' not in params) or (params['folder_id'] is None): raise ValueError("Missing the required parameter `folder_id` when calling `get_workspace_file`") # verify the required parameter 'workspace_id' is set if ('workspace_id' not in params) or (params['workspace_id'] is None): raise ValueError("Missing the required parameter `workspace_id` when calling `get_workspace_file`") collection_formats = {} resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}/folders/{folderId}/files/{fileId}'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] if 'file_id' in params: path_params['fileId'] = params['file_id'] if 'folder_id' in params: path_params['folderId'] = params['folder_id'] if 'workspace_id' in params: path_params['workspaceId'] = params['workspace_id'] query_params = {} if 'is_download' in params: query_params['is_download'] = params['is_download'] if 'pdf_version' in params: query_params['pdf_version'] = params['pdf_version'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_workspace_file_pages(self, account_id, file_id, folder_id, workspace_id, **kwargs): """ List File Pages Retrieves a workspace file as rasterized pages. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_workspace_file_pages(account_id, file_id, folder_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str file_id: Specifies the room file ID GUID. (required) :param str folder_id: The ID of the folder being accessed. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :param str count: The maximum number of results to be returned by this request. :param str dpi: Number of dots per inch for the resulting image. The default if not used is 94. The range is 1-310. :param str max_height: Sets the maximum height (in pixels) of the returned image. :param str max_width: Sets the maximum width (in pixels) of the returned image. :param str start_position: The position within the total result set from which to start returning values. The value **thumbnail** may be used to return the page image. :return: PageImages If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.list_workspace_file_pages_with_http_info(account_id, file_id, folder_id, workspace_id, **kwargs) else: (data) = self.list_workspace_file_pages_with_http_info(account_id, file_id, folder_id, workspace_id, **kwargs) return data def list_workspace_file_pages_with_http_info(self, account_id, file_id, folder_id, workspace_id, **kwargs): """ List File Pages Retrieves a workspace file as rasterized pages. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_workspace_file_pages_with_http_info(account_id, file_id, folder_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str file_id: Specifies the room file ID GUID. (required) :param str folder_id: The ID of the folder being accessed. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :param str count: The maximum number of results to be returned by this request. :param str dpi: Number of dots per inch for the resulting image. The default if not used is 94. The range is 1-310. :param str max_height: Sets the maximum height (in pixels) of the returned image. :param str max_width: Sets the maximum width (in pixels) of the returned image. :param str start_position: The position within the total result set from which to start returning values. The value **thumbnail** may be used to return the page image. :return: PageImages If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'file_id', 'folder_id', 'workspace_id', 'count', 'dpi', 'max_height', 'max_width', 'start_position'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_workspace_file_pages" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params) or (params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `list_workspace_file_pages`") # verify the required parameter 'file_id' is set if ('file_id' not in params) or (params['file_id'] is None): raise ValueError("Missing the required parameter `file_id` when calling `list_workspace_file_pages`") # verify the required parameter 'folder_id' is set if ('folder_id' not in params) or (params['folder_id'] is None): raise ValueError("Missing the required parameter `folder_id` when calling `list_workspace_file_pages`") # verify the required parameter 'workspace_id' is set if ('workspace_id' not in params) or (params['workspace_id'] is None): raise ValueError("Missing the required parameter `workspace_id` when calling `list_workspace_file_pages`") collection_formats = {} resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}/folders/{folderId}/files/{fileId}/pages'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] if 'file_id' in params: path_params['fileId'] = params['file_id'] if 'folder_id' in params: path_params['folderId'] = params['folder_id'] if 'workspace_id' in params: path_params['workspaceId'] = params['workspace_id'] query_params = {} if 'count' in params: query_params['count'] = params['count'] if 'dpi' in params: query_params['dpi'] = params['dpi'] if 'max_height' in params: query_params['max_height'] = params['max_height'] if 'max_width' in params: query_params['max_width'] = params['max_width'] if 'start_position' in params: query_params['start_position'] = params['start_position'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PageImages', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_workspace_folder_items(self, account_id, folder_id, workspace_id, **kwargs): """ List Workspace Folder Contents Retrieves workspace folder contents, which can include sub folders and files. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. 
>>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_workspace_folder_items(account_id, folder_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str folder_id: The ID of the folder being accessed. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :param str count: The maximum number of results to be returned by this request. :param str include_files: When set to **true**, file information is returned in the response along with folder information. The default is **false**. :param str include_sub_folders: When set to **true**, information about the sub-folders of the current folder is returned. The default is **false**. :param str include_thumbnails: When set to **true**, thumbnails are returned as part of the response. The default is **false**. :param str include_user_detail: Set to **true** to return extended details about the user. The default is **false**. :param str start_position: The position within the total result set from which to start returning values. :param str workspace_user_id: If set, then the results are filtered to those associated with the specified userId. :return: WorkspaceFolderContents If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.list_workspace_folder_items_with_http_info(account_id, folder_id, workspace_id, **kwargs) else: (data) = self.list_workspace_folder_items_with_http_info(account_id, folder_id, workspace_id, **kwargs) return data def list_workspace_folder_items_with_http_info(self, account_id, folder_id, workspace_id, **kwargs): """ List Workspace Folder Contents Retrieves workspace folder contents, which can include sub folders and files. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_workspace_folder_items_with_http_info(account_id, folder_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str folder_id: The ID of the folder being accessed. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :param str count: The maximum number of results to be returned by this request. :param str include_files: When set to **true**, file information is returned in the response along with folder information. The default is **false**. :param str include_sub_folders: When set to **true**, information about the sub-folders of the current folder is returned. The default is **false**. :param str include_thumbnails: When set to **true**, thumbnails are returned as part of the response. The default is **false**. :param str include_user_detail: Set to **true** to return extended details about the user. The default is **false**. :param str start_position: The position within the total result set from which to start returning values. :param str workspace_user_id: If set, then the results are filtered to those associated with the specified userId. 
:return: WorkspaceFolderContents If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'folder_id', 'workspace_id', 'count', 'include_files', 'include_sub_folders', 'include_thumbnails', 'include_user_detail', 'start_position', 'workspace_user_id'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_workspace_folder_items" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params) or (params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `list_workspace_folder_items`") # verify the required parameter 'folder_id' is set if ('folder_id' not in params) or (params['folder_id'] is None): raise ValueError("Missing the required parameter `folder_id` when calling `list_workspace_folder_items`") # verify the required parameter 'workspace_id' is set if ('workspace_id' not in params) or (params['workspace_id'] is None): raise ValueError("Missing the required parameter `workspace_id` when calling `list_workspace_folder_items`") collection_formats = {} resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}/folders/{folderId}'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] if 'folder_id' in params: path_params['folderId'] = params['folder_id'] if 'workspace_id' in params: path_params['workspaceId'] = params['workspace_id'] query_params = {} if 'count' in params: query_params['count'] = params['count'] if 'include_files' in params: query_params['include_files'] = params['include_files'] if 'include_sub_folders' in params: query_params['include_sub_folders'] = params['include_sub_folders'] if 'include_thumbnails' in params: query_params['include_thumbnails'] = params['include_thumbnails'] if 'include_user_detail' in params: query_params['include_user_detail'] = params['include_user_detail'] if 'start_position' in params: query_params['start_position'] = params['start_position'] if 'workspace_user_id' in params: query_params['workspace_user_id'] = params['workspace_user_id'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='WorkspaceFolderContents', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def list_workspaces(self, account_id, **kwargs): """ List Workspaces Gets information about the Workspaces that have been created. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. 
>>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_workspaces(account_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :return: WorkspaceList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.list_workspaces_with_http_info(account_id, **kwargs) else: (data) = self.list_workspaces_with_http_info(account_id, **kwargs) return data def list_workspaces_with_http_info(self, account_id, **kwargs): """ List Workspaces Gets information about the Workspaces that have been created. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.list_workspaces_with_http_info(account_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :return: WorkspaceList If the method is called asynchronously, returns the request thread. """ all_params = ['account_id'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method list_workspaces" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params) or (params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `list_workspaces`") collection_formats = {} resource_path = '/v2.1/accounts/{accountId}/workspaces'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='WorkspaceList', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def update_workspace(self, account_id, workspace_id, **kwargs): """ Update Workspace Updates information about a specific workspace. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_workspace(account_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. 
(required) :param str workspace_id: Specifies the workspace ID GUID. (required) :param Workspace workspace: :return: Workspace If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.update_workspace_with_http_info(account_id, workspace_id, **kwargs) else: (data) = self.update_workspace_with_http_info(account_id, workspace_id, **kwargs) return data def update_workspace_with_http_info(self, account_id, workspace_id, **kwargs): """ Update Workspace Updates information about a specific workspace. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_workspace_with_http_info(account_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :param Workspace workspace: :return: Workspace If the method is called asynchronously, returns the request thread. """ all_params = ['account_id', 'workspace_id', 'workspace'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method update_workspace" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params) or (params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `update_workspace`") # verify the required parameter 'workspace_id' is set if ('workspace_id' not in params) or (params['workspace_id'] is None): raise ValueError("Missing the required parameter `workspace_id` when calling `update_workspace`") collection_formats = {} resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] if 'workspace_id' in params: path_params['workspaceId'] = params['workspace_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None if 'workspace' in params: body_params = params['workspace'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Workspace', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def update_workspace_file(self, account_id, file_id, folder_id, workspace_id, **kwargs): """ Update Workspace File Metadata Updates workspace item metadata for one or more specific files/folders. This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_workspace_file(account_id, file_id, folder_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str file_id: Specifies the room file ID GUID. (required) :param str folder_id: The ID of the folder being accessed. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :return: WorkspaceItem If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.update_workspace_file_with_http_info(account_id, file_id, folder_id, workspace_id, **kwargs) else: (data) = self.update_workspace_file_with_http_info(account_id, file_id, folder_id, workspace_id, **kwargs) return data def update_workspace_file_with_http_info(self, account_id, file_id, folder_id, workspace_id, **kwargs): """ Update Workspace File Metadata Updates workspace item metadata for one or more specific files/folders. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_workspace_file_with_http_info(account_id, file_id, folder_id, workspace_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str account_id: The external account number (int) or account ID Guid. (required) :param str file_id: Specifies the room file ID GUID. (required) :param str folder_id: The ID of the folder being accessed. (required) :param str workspace_id: Specifies the workspace ID GUID. (required) :return: WorkspaceItem If the method is called asynchronously, returns the request thread. 
""" all_params = ['account_id', 'file_id', 'folder_id', 'workspace_id'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method update_workspace_file" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in params) or (params['account_id'] is None): raise ValueError("Missing the required parameter `account_id` when calling `update_workspace_file`") # verify the required parameter 'file_id' is set if ('file_id' not in params) or (params['file_id'] is None): raise ValueError("Missing the required parameter `file_id` when calling `update_workspace_file`") # verify the required parameter 'folder_id' is set if ('folder_id' not in params) or (params['folder_id'] is None): raise ValueError("Missing the required parameter `folder_id` when calling `update_workspace_file`") # verify the required parameter 'workspace_id' is set if ('workspace_id' not in params) or (params['workspace_id'] is None): raise ValueError("Missing the required parameter `workspace_id` when calling `update_workspace_file`") collection_formats = {} resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}/folders/{folderId}/files/{fileId}'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['accountId'] = params['account_id'] if 'file_id' in params: path_params['fileId'] = params['file_id'] if 'folder_id' in params: path_params['folderId'] = params['folder_id'] if 'workspace_id' in params: path_params['workspaceId'] = params['workspace_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='WorkspaceItem', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
mit
-6,547,864,770,429,699,000
47.788123
197
0.578058
false
4.696662
false
false
false
xuweiliang/Codelibrary
openstack_dashboard/dashboards/admin/access_and_security/keypairs/urls.py
1
1463
# Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.conf.urls import url from openstack_dashboard.dashboards.admin.access_and_security.keypairs \ import views urlpatterns = [ url(r'^create/$', views.CreateView.as_view(), name='create'), url(r'^import/$', views.ImportView.as_view(), name='import'), url(r'^(?P<keypair_name>[^/]+)/download/$', views.DownloadView.as_view(), name='download'), url(r'^(?P<keypair_name>[^/]+)/generate/$', views.GenerateView.as_view(), name='generate'), url(r'^(?P<keypair_name>[^/]+)/(?P<optional>[^/]+)/generate/$', views.GenerateView.as_view(), name='generate'), url(r'^(?P<keypair_name>[^/]+)/$', views.DetailView.as_view(), name='detail'), ]
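# A usage sketch: these named routes are normally resolved with Django's reverse();
# the 'horizon:admin:access_and_security:keypairs:...' namespace is an assumption
# inferred from this file's dashboard/panel path, not something declared here.
#
#   reverse('horizon:admin:access_and_security:keypairs:download',
#           kwargs={'keypair_name': 'my-keypair'})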
apache-2.0
-5,623,635,435,713,008,000
39.638889
78
0.678742
false
3.839895
false
false
false
FlintHill/SUAS-Competition
UpdatedImageProcessing/UpdatedImageProcessing/ShapeDetection/utils/bounding_box.py
1
3445
import cv2
import numpy
from alpha_fill import alpha_fill

class BoundingBox(object):

    def __init__(self, pil_img, show_plot=False):
        self.mask_img = alpha_fill(pil_img)
        self.set_bounding_box()
        self.set_side_lengths()
        self.set_areas()

        if show_plot:
            self.show_plot()

    def set_bounding_box(self):
        # Minimum-area rotated rectangle around the filled alpha mask.
        _,contours,_ = cv2.findContours(self.mask_img,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        self.contour = contours[0]
        rect = cv2.minAreaRect(self.contour)
        self.box = cv2.boxPoints(rect)
        #self.box = numpy.int0(box)

    def set_side_lengths(self):
        point1 = self.box[0]
        point2 = self.box[1]
        point3 = self.box[2]

        diffx = numpy.abs(point1[0]-point2[0])
        diffy = numpy.abs(point1[1]-point2[1])
        side1 = numpy.hypot(diffx,diffy)

        diffx = numpy.abs(point2[0]-point3[0])
        diffy = numpy.abs(point2[1]-point3[1])
        side2 = numpy.hypot(diffx,diffy)

        self.bounding_box_side_lengths = (side1,side2)

    """
    def set_areas(self):
        self.hull = cv2.convexHull(self.contour)

        self.unpacked_hull = []
        for i in range(len(self.hull)):
            self.unpacked_hull.append((self.hull[i][0][0],self.hull[i][0][1]))

        self.hull_area = self.polygon_area(self.unpacked_hull)
        self.bounding_box_area = self.bounding_box_side_lengths[0]*self.bounding_box_side_lengths[1]
    """

    def set_areas(self):
        # Approximate the contour, then compare its polygon area to the box area.
        epsilon = 0.001*cv2.arcLength(self.contour,True)
        self.contour_approx = cv2.approxPolyDP(self.contour,epsilon,True)

        self.unpacked_contour_approx = []
        for i in range(len(self.contour_approx)):
            self.unpacked_contour_approx.append((self.contour_approx[i][0][0],self.contour_approx[i][0][1]))

        self.contour_approx_area = self.polygon_area(self.unpacked_contour_approx)
        self.bounding_box_area = self.bounding_box_side_lengths[0]*self.bounding_box_side_lengths[1]

    def show_plot(self):
        color_img = cv2.cvtColor(self.mask_img, cv2.COLOR_GRAY2RGB)
        #cv2.drawContours(color_img,self.hull,0,(0,0,255),4)
        cv2.drawContours(color_img,[self.contour_approx],0,(0,0,255),1)
        cv2.drawContours(color_img,[numpy.int0(self.box)],0,(0,255,0),1)
        cv2.imshow('image',color_img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def get_box(self):
        return self.box

    def get_side_lengths(self):
        return self.bounding_box_side_lengths

    def get_side_length_difference(self):
        side_lengths = self.get_side_lengths()
        return numpy.abs(side_lengths[0]-side_lengths[1])

    def get_area_difference(self):
        return numpy.abs(self.contour_approx_area-self.bounding_box_area)

    def polygon_area(self, corners):
        # Shoelace formula over the corners sorted by angle around the centroid.
        n = len(corners)
        cx = float(sum(x for x, y in corners)) / n
        cy = float(sum(y for x, y in corners)) / n
        cornersWithAngles = []
        for x, y in corners:
            an = (numpy.arctan2(y - cy, x - cx) + 2.0 * numpy.pi) % (2.0 * numpy.pi)
            cornersWithAngles.append((x, y, an))
        cornersWithAngles.sort(key = lambda tup: tup[2])
        corners = [(x, y) for x, y, an in cornersWithAngles]
        area = 0.0
        for i in range(n):
            j = (i + 1) % n
            area += corners[i][0] * corners[j][1]
            area -= corners[j][0] * corners[i][1]
        area = abs(area) / 2.0
        return area
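# A usage sketch, assuming an RGBA PIL image of a cropped target and that
# alpha_fill returns a single-channel mask; the file name is a placeholder,
# not something referenced by this module.
#
#   from PIL import Image
#   img = Image.open('target_crop.png')
#   bbox = BoundingBox(img)
#   width, height = bbox.get_side_lengths()   # sides of the minimum-area rectangle
#   gap = bbox.get_area_difference()          # contour area vs. bounding box area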
mit
5,891,784,764,528,450,000
33.45
108
0.598549
false
3.035242
false
false
false
locuslab/dreaml
dreaml/dataframe/transform.py
1
5833
from abc import ABCMeta, abstractmethod from threading import Thread from time import sleep, time class Transform(object): __metaclass__ = ABCMeta def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs @abstractmethod def apply(self,target_df=None): """ Apply func(df, ...) and return the resulting dataframe Some restrictions: 1. func must take in df as the first argument (this is to allow for automated generation of entries when necessary) 2. func must return a dataframe """ pass # return self.func(target_df,*self.args, **self.kwargs) # def apply_init(self,target_df=None): # if self.init_func is not None: # self.init_func(target_df,*self.args,**self.kwargs) def _apply_function_to(self, target, f): val = f(target, *self.args, **self.kwargs) if val == None: return i_j = target._row_query,target._col_query # If the transform returns something, it should not be empty. if val.shape[0] == 0 or val.shape[1] == 0: raise ValueError # TODO: Remove this code, and require the user to specify the reshaping # in the init function? # If the target is empty, nothing depends on it yet and we can treat it # as a new partition # TODO: If the target is a matrix or an integer, we should handle that # as well. # If the target is empty, then we can just set the value. if target.empty(): target.set_dataframe(val) # If the target is non-empty and shapes are wrong, then extend it elif val.shape != target.shape: for k_l in target._top_df._get_all_dependents(i_j): target._top_df._propogate_stop(k_l) # Extend the DF, set the value, and set to green. target._extend(val._row_index.keys(),val._col_index.keys()) target.set_dataframe(val) target._top_df._graph.node[i_j]["status"] = target.STATUS_GREEN # Now restart all the rest for k_l in target._top_df._get_all_dependents(i_j): target._top_df._propogate_start(k_l,ignore=i_j) # If the target is non-empty but the value matches, then set the data else: target.set_dataframe(val) # def apply_continuous(self, target): # """ Apply a function continuously in a thread, and return the thread. # """ # # Run at least once # print "running continuously" # thread = Thread(target = self._continuous_wrapper, args=(target,)) # thread.start() # return thread class BatchTransform(Transform): def apply(self, target_df): self._apply_function_to(target_df,self.func) @abstractmethod def func(self, target_df, *args, **kwargs): pass class ContinuousTransform(Transform): def __init__(self, *args, **kwargs): self.threaded = kwargs.pop('threaded',True) self.delay = kwargs.pop('delay',0) self.max_iters = kwargs.pop('max_iters',None) super(ContinuousTransform,self).__init__(*args,**kwargs) def apply(self, target_df): self.init_func(target_df, *self.args, **self.kwargs) if self.threaded: thread = Thread(target = self._continuous_wrapper, args=(target_df,)) thread.start() return thread else: self._continuous_wrapper(target_df) @abstractmethod def init_func(self, target_df, *args, **kwargs): pass @abstractmethod def continuous_func(self, target_df, *args, **kwargs): pass def _continuous_wrapper(self, target_df): i_j = (target_df._row_query,target_df._col_query) graph = target_df._top_df._graph niters = 0 while(graph.node[i_j]["status"] is not target_df.STATUS_RED): # Apply the function self._apply_function_to(target_df,self.continuous_func) # Increment iteration counter if max_iters is set, and break if # threshold is exceeded. 
if self.max_iters != None: niters += 1 if niters >= self.max_iters: graph.node[i_j]["status"] = target_df.STATUS_RED # If delay requested, sleep for the desired amount of time. if self.delay > 0: sleep(self.delay) else: while(graph.node[i_j]["status"] is not target_df.STATUS_RED): self._apply_function_to(target_df,self.continuous_func) def _eval_init_func(self,target): return self.init_func(target,*self.args,**self.kwargs) def _eval_continuous_func(self,target): return self.continuous_func(target,*self.args,**self.kwargs) Transform.register(BatchTransform) Transform.register(ContinuousTransform) from bokeh.client import push_session from bokeh.io import curdoc from bokeh.embed import autoload_server class FigureTransform(ContinuousTransform): def apply(self,target_df): self.init_func(target_df, *self.args, **self.kwargs) self.session = push_session(curdoc()) tag = autoload_server(self.p,session_id=self.session.id) target_df._top_df._plots.append(tag) thread = Thread(target = self._continuous_wrapper, args=(target_df,)) thread.start() return thread def init_func(self,target_df,*args,**kwargs): self.p = self.create_figure(target_df,*args,**kwargs) @abstractmethod def create_figure(self, target_df, *args, **kwargs): pass def continuous_func(self,target_df,*args,**kwargs): self.update(self.p) @abstractmethod def update(self,p): pass
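# A minimal sketch of the subclassing contract, using invented names: `func`
# receives the target DataFrame partition plus the constructor's args/kwargs and
# returns the frame to store (or mutates the target and returns None). The `scale`
# parameter, the element-wise multiply, and the indexing syntax in the last line
# are illustrative assumptions, not part of this module.
#
#   class ScaleRows(BatchTransform):
#       def func(self, target_df, scale=1.0):
#           return target_df * scale
#
#   # df["scaled/", "features/"] = ScaleRows(scale=0.5)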
apache-2.0
-404,568,103,778,613,500
33.116959
81
0.60312
false
3.852708
false
false
false
synclab/radclock
python-module/setup.py
1
2076
# Copyright (C) 2006-2011 Julien Ridoux <[email protected]> # # This file is part of the radclock program. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA from distutils.core import setup, Extension import os, re OS_LINUX = 'Linux' OS_FREEBSD = 'FreeBSD' os_uname = os.uname() OS = os_uname[0] module_radclock_linux = Extension('radclock', include_dirs = ['../libradclock'], libraries = ['radclock', 'nl'], library_dirs = ['/usr/local/lib'], sources = [ 'radclockmodule.c' ] ) module_radclock_freebsd = Extension('radclock', include_dirs = ['../libradclock'], libraries = ['radclock'], library_dirs = ['/usr/local/lib'], sources = [ 'radclockmodule.c' ] ) if OS == OS_LINUX: module_radclock = module_radclock_linux if OS == OS_FREEBSD: module_radclock = module_radclock_freebsd setup ( name = 'python-radclock', version = '0.2.2', description = 'This package provides python bindings to the libradclock C library.', author = 'Julien Ridoux', author_email = '[email protected]', url = 'http://www.synclab.org/tscclock/', long_description = ''' This package provides python bindings to the libradclock C library. It provides ways of creating a radclock instance and get the time as created by the radclock. It provides all basic functions of the libradclock library: absolute clock, difference clock, clock status and system data. ''', ext_modules = [module_radclock] )
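# Build note: as a standard distutils extension module, this is typically built and
# installed with the stock commands (generic distutils usage, not defined by this
# file); the libradclock headers and libraries must already be where include_dirs
# and library_dirs point.
#
#   python setup.py build_ext --inplace
#   python setup.py install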
bsd-2-clause
8,208,464,072,412,713,000
28.657143
86
0.71869
false
3.269291
false
false
false
hsoft/musicguru
core/manualfs.py
1
9237
# Created By: Virgil Dupras
# Created On: 2004-12-27
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license

import hsfs as fs
from jobprogress import job
from hscommon.util import nonone
from hscommon.conflict import get_conflicted_name, is_conflicted

class _CopyOf:
    #--- Public
    def copy(self, refnode):
        self.copyof = refnode

    def detach_copy(self, keep_original_files=False, keep_original_dirs=False):
        if self.is_container:
            keep = keep_original_dirs
        else:
            keep = keep_original_files
        if keep:
            self.copyof = self.original
        else:
            self.copyof = None
        for child in self:
            child.detach_copy(keep_original_files,keep_original_dirs)

    #--- Properties
    copyof = None
    @property
    def original(self):
        if hasattr(self.copyof, 'original'):
            return self.copyof.original
        else:
            return nonone(self.copyof, self)


class Node(fs.Node):
    #--- Override
    def __init__(self, parent=None, name=''):
        try:
            super(Node, self).__init__(parent,name)
        except fs.AlreadyExistsError:
            newname = parent._resolve_conflict(parent[name], self, name)
            if newname:
                if isinstance(newname, str):
                    super(Node, self).__init__(parent, newname)
            else:
                raise

    def _set_name(self, newname):
        try:
            super(Node, self)._set_name(newname)
        except fs.AlreadyExistsError:
            newname = self.parent._resolve_conflict(self.parent[newname], self, newname)
            if newname:
                if isinstance(newname, str):
                    super(Node, self)._set_name(newname)
            else:
                raise

    #--- Public
    def delete(self):
        self.parent = None

    def move(self, dest, newname=None):
        dest.add_child(self, newname)

    def rename(self, newname):
        self.name = newname


class File(fs.File, Node, _CopyOf):
    #--- Public
    def copy(self, reffile):
        super(File,self).copy(reffile)
        for attrname in reffile.INITIAL_INFO:
            if attrname in reffile.__dict__:
                setattr(self, attrname, getattr(reffile, attrname))
        self.INITIAL_INFO = reffile.INITIAL_INFO


class Directory(fs.Directory, Node, _CopyOf):
    """A Directory that you can manipulate at will

    This is the opposite of auto.Directory. When you subclass this, you have
    to manually add/delete/move everything.

    A few notes:

    You might notice that some AlreadyExistsError exceptions are raised in
    this unit. You might think "hey, fs.Directory covers all possible
    occurrences of AlreadyExistsError, why do you duplicate code here?" It is
    true that fs.Directory takes care of all this. However, if you look at
    the code after the raise (in this unit), you will see that, first, it
    only happens in move. And what's special about move funcs is that you can
    change the name as you move. And to do this, you must delete the child
    from its former parent before you add it to its new parent. If you don't
    check for conflicts *before* and a conflict occurs, you're left with a
    parentless child.
""" #--- Class Attributes cls_file_class = File #--- Overrides def __init__(self, parent=None, dirname=''): if isinstance(parent, Directory): self.__case_sensitive = parent.case_sensitive else: self.__case_sensitive = True self._attrs_to_read = None super(Directory, self).__init__(parent, dirname) def _do_hash(self, value): if (not self.case_sensitive) and isinstance(value, str): return value.lower() else: return value #--- Protected def _conflict_check(self, name, node): if name in self: newname = self._resolve_conflict(self[name], node, name) if newname: return newname else: raise fs.AlreadyExistsError(name, self) else: return name def _resolve_conflict(self, offended, offender, conflicted_name): # Virtual """Override this to automatically resolve a name conflict instead of raising an AlreadyExistsError. If you return something else than None or '', there will be a second try to add name. There is no third try. if the result of ResolveConflict is also conflictual, an error will be raised. You can also return a True value that is not a string, and it will cancel the exception raise, but not make a second try. """ #--- Public def add_child(self, child, newname=None): if child in self: return child if not newname: newname = child.name newname = self._conflict_check(newname, child) if not isinstance(newname, str): return child #Just don't perform the add, _resolve_conflict has taken #care of everything child.parent = None child.name = newname child.parent = self if isinstance(child, Directory): child.case_sensitive = self.case_sensitive return child def add_dir_copy(self, refdir, newname='', job=job.nulljob): if not newname: newname = refdir.name result = self._create_sub_dir(newname, False) result.copy(refdir, job) self.add_child(result) return result def add_file_copy(self, reffile, newname=''): if not newname: newname = reffile.name reffile._read_all_info(self._attrs_to_read) result = self._create_sub_file(newname, False) result.copy(reffile) self.add_child(result) return result def add_path(self, path): """ Creates the first item of path (a tuple), and calls _AddPath in this new directory. If the directory already exists, uses this directory. Returns the added (or found) directory. 
""" if not path: return self else: try: founddir = self[path[0]] if not isinstance(founddir, Directory): raise fs.InvalidPath(founddir) except KeyError: founddir = self._create_sub_dir(path[0]) return founddir.add_path(path[1:]) def clean_empty_dirs(self): for directory in self.dirs: directory.clean_empty_dirs() to_delete = (d for d in self.dirs if not len(d)) for directory in to_delete: directory.delete() def copy(self, refdir, job=job.nulljob): super(Directory, self).copy(refdir) filecount = refdir.filecount dircount = refdir.dircount if filecount > 0: job = job.start_subjob(dircount + 1) job.start_job(filecount) else: job = job.start_subjob(dircount) for myfile in refdir.files: self.add_file_copy(myfile) job.add_progress() for directory in refdir.dirs: self.add_dir_copy(directory, '', job) def new_directory(self, name): return self._create_sub_dir(name) def new_file(self, name): return self._create_sub_file(name) #--- Properties @property def case_sensitive(self): return self.__case_sensitive @case_sensitive.setter def case_sensitive(self, value): if value != self.__case_sensitive: self.__case_sensitive = value self._rebuild_hashes() for subdir in self: if isinstance(subdir, Directory): subdir.case_sensitive = value class AutoResolve(Directory): #---Override def _resolve_conflict(self, offended, offender, conflicted_name): if offended.is_container and offender.is_container: should_merge = self.on_should_merge(offender, offended) if should_merge: # There's a circular reference problem from .fs_utils import smart_move smart_move(offender, offended) offender.delete() return True return get_conflicted_name(self, conflicted_name) #---Events def on_should_merge(self, source, dest): if (self.parent is not None) and hasattr(self.parent, 'on_should_merge'): return self.parent.on_should_merge(source, dest) #---Properties @property def allconflicts(self): return self.get_stat('conflicts', []) @property def conflicts(self): return [y for y in self.files if is_conflicted(y.name)] class AutoMerge(AutoResolve): def on_should_merge(self, source, dest): return True
bsd-3-clause
-26,157,494,945,306,680
32.835165
88
0.591642
false
4.212038
false
false
false
almarklein/imageio
imageio/plugins/npz.py
1
3389
# -*- coding: utf-8 -*-
# Copyright (c) 2014, imageio contributors
# imageio is distributed under the terms of the (new) BSD License.

""" Storage of image data in npz format. Not a great format, but at least
it supports volumetric data. And it's less than 100 lines.
"""

from __future__ import absolute_import, print_function, division

import numpy as np

from imageio import formats
from imageio.core import Format


class NpzFormat(Format):
    """ NPZ is a file format by numpy that provides storage of array
    data using gzip compression. This imageio plugin supports data of any
    shape, and also supports multiple images per file.

    However, the npz format does not provide streaming; all data is
    read/saved at once. Further, there is no support for meta data.

    Beware that the numpy npz format has a bug on a certain combination
    of Python 2.7 and numpy, which can cause the resulting files to
    become unreadable on Python 3. Also, this format is not available
    on Pypy.

    Parameters for reading
    ----------------------
    None

    Parameters for saving
    ---------------------
    None
    """

    def _can_read(self, request):
        if request.filename.lower().endswith('.npz'):
            return True  # We support any kind of image data
        else:
            return False

    def _can_save(self, request):
        if request.filename.lower().endswith('.npz'):
            return True  # We support any kind of image data
        else:
            return False

    # -- reader

    class Reader(Format.Reader):

        def _open(self):
            # Load npz file, which provides another file like object
            self._npz = np.load(self.request.get_file())
            assert isinstance(self._npz, np.lib.npyio.NpzFile)
            # Get list of names, ordered by name, but smarter
            sorter = lambda x: x.split('_')[-1]
            self._names = sorted(self._npz.files, key=sorter)

        def _close(self):
            self._npz.close()

        def _get_length(self):
            return len(self._names)

        def _get_data(self, index):
            # Get data
            if index < 0 or index >= len(self._names):
                raise IndexError('Index out of range while reading from npz')
            im = self._npz[self._names[index]]
            # Return array and empty meta data
            return im, {}

        def _get_meta_data(self, index):
            # Get the meta data for the given index
            raise RuntimeError('The npz format does not support meta data.')

    # -- writer

    class Writer(Format.Writer):

        def _open(self):
            # Npz is not such a great format. We cannot stream to the file.
            # So we remember all images and write them to file at the end.
            self._images = []

        def _close(self):
            # Write everything
            np.savez_compressed(self.request.get_file(), *self._images)

        def _append_data(self, im, meta):
            self._images.append(im)  # discard meta data

        def set_meta_data(self, meta):
            raise RuntimeError('The npz format does not support meta data.')


# Register
format = NpzFormat('npz', "Numpy's compressed array format", 'npz', 'iIvV')
formats.add_format(format)
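As a rough usage sketch — assuming the standard imageio top-level helpers imageio.mimsave and imageio.mimread are available and the plugin above is registered; exact helper names can differ between imageio versions — writing and reading a multi-image .npz file looks like this.

import numpy as np
import imageio

# Three arrays of arbitrary shape; the npz plugin does not care about dimensionality.
frames = [(np.random.rand(16, 16) * 255).astype('uint8') for _ in range(3)]

# The writer buffers everything in memory and writes once when it is closed.
imageio.mimsave('frames.npz', frames)

# Reading returns the arrays in name order; no meta data is available.
loaded = imageio.mimread('frames.npz')
assert len(loaded) == 3
assert np.array_equal(loaded[0], frames[0])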
bsd-2-clause
-7,003,940,651,712,403,000
31.902913
77
0.590145
false
4.262893
false
false
false
xelphene/swaf
swaf/wrap.py
1
3089
import types import logging import inspect import pprint import operator import re from operator import isCallable from resp import * import swaf.error import swaf.misc from swaf.misc import isListOfFuncs DEBUG=1 def wrapFilter(f, filter_name, filters): newf = chainWrap(f) newf.__name__ = f.__name__ newf.__module__ = f.__module__ newf = debugWrap(newf) newf.swaf_orig = f newf.swaf_filter_name = filter_name newf.swaf_filters = filters return newf def makeWrappedNext(chain): '''given a list of callables (chain), return a function which takes one param (req). When this returned function is called, it will call chain[0]( req, chain[1:] ). ''' assert isListOfFuncs(chain) def next(req): nextf = chain.pop(0) return nextf(req, chain) next.chain = chain return next def chainWrap(handler): def newhandler(req, arg2): if isListOfFuncs(arg2): arg2 = makeWrappedNext(arg2) return handler(req, arg2) return newhandler def debugWrap(f): description = swaf.misc.describeObject(f) pp = pprint.PrettyPrinter() logger = logging.getLogger('swaf.wrap') def debug(req, chain): if DEBUG: if not hasattr(debug,'_traceIndent'): debug._traceIndent = 0 if len(chain)>0: chain[0]._traceIndent = debug._traceIndent+1 indent = '. '*debug._traceIndent cn = [fi.__name__ for fi in chain] cn = ' -> '.join(cn) logger.debug('%s| about to call %s' % (indent, description)) logger.debug('%s| chain=%s' % (indent, cn)) logger.debug('%s| req=%s' % (indent, repr(req) ) ) rv = f(req,chain) logger.debug('%s| %s.%s returned with %s' % (indent, f.__module__, f.__name__, repr(rv))) return rv else: return f(req, chain) debug.__name__ = f.__name__ debug.__module__ = f.__module__ debug.swaf_orig = f if hasattr(f,'swaf_description'): debug.swaf_description = f.swaf_description if hasattr(f,'swaf_route'): debug.swaf_route = f.swaf_route if hasattr(f,'swaf_filters'): debug.swaf_filters = f.swaf_filters return debug def wrapHandler(handler, route, filters, description): if type(handler)==types.InstanceType: (args, varargs, varkw, defaults) = inspect.getargspec(handler.__call__) # remove the 'self' arg from __call__ assert len(args)>0 args = args[1:] elif type(handler)==types.MethodType: (args, varargs, varkw, defaults) = inspect.getargspec(handler) # remove the 'self' arg assert len(args)>0 args = args[1:] else: (args, varargs, varkw, defaults) = inspect.getargspec(handler) def callHandler(req, next): if varkw==None: req = swaf.misc.filterDict(req, args) if set(req.keys()) != set(args): raise swaf.error.LeafUnknownParamError(handler,req) return handler(**req) else: return handler(**req) callHandler.swaf_orig = handler callHandler.swaf_route = route callHandler.swaf_filters = filters callHandler.swaf_description = description callHandler = debugWrap(callHandler) callHandler.swaf_orig = handler callHandler.swaf_route = route callHandler.swaf_filters = filters callHandler.swaf_description = description return callHandler
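The makeWrappedNext/chainWrap pair above implements the filter chain this module is built around. Below is a stripped-down, self-contained illustration of the same calling convention — filter(req, next) — with the swaf-specific imports stubbed out; the filter and handler names are made up for the example, and this is a sketch of the idea rather than code from swaf itself.

def make_next(chain):
    # Same idea as makeWrappedNext(): calling next(req) pops the next
    # callable off the chain and hands it the remainder of the chain.
    def next(req):
        f = chain.pop(0)
        return f(req, make_next(chain))
    return next

def auth_filter(req, next):
    # A filter may inspect or extend the request before passing it along.
    req = dict(req, user='anonymous')
    return next(req)

def hello_handler(req, next):
    # The last callable in the chain ignores 'next' and builds the response.
    return 'hello %s' % req['user']

entry = make_next([auth_filter, hello_handler])
assert entry({'path': '/'}) == 'hello anonymous'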
gpl-3.0
8,237,832,620,152,168,000
25.177966
92
0.690838
false
2.978785
false
false
false