repo_name | path | copies | size | content | license | var_hash | doc_hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|---|
ambikeshwar1991/gnuradio-3.7.4 | gr-atsc/python/atsc/__init__.py | 57 | 1161 | #
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# The presence of this file turns this directory into a Python package
'''
Blocks and utilities for ATSC (Advanced Television Systems Committee) module.
'''
import os
try:
from atsc_swig import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "..", "..", "swig"))
from atsc_swig import *
| gpl-3.0 | 4,108,158,103,008,780,300 | 5,170,766,065,559,399,000 | 33.147059 | 77 | 0.732989 | false |
skoppisetty/secret-bugfixes | lib/cherrypy/lib/caching.py | 88 | 17413 | """
CherryPy implements a simple caching system as a pluggable Tool. This tool tries
to be an (in-process) HTTP/1.1-compliant cache. It's not quite there yet, but
it's probably good enough for most sites.
In general, GET responses are cached (along with selecting headers) and, if
another request arrives for the same resource, the caching Tool will return 304
Not Modified if possible, or serve the cached response otherwise. It also sets
request.cached to True if serving a cached representation, and sets
request.cacheable to False (so it doesn't get cached again).
If POST, PUT, or DELETE requests are made for a cached resource, they invalidate
(delete) any cached response.
Usage
=====
Configuration file example::
[/]
tools.caching.on = True
tools.caching.delay = 3600
You may use a class other than the default
:class:`MemoryCache<cherrypy.lib.caching.MemoryCache>` by supplying the config
entry ``cache_class``; supply the full dotted name of the replacement class
as the config value. It must implement the basic methods ``get``, ``put``,
``delete``, and ``clear``.
You may set any attribute, including overriding methods, on the cache
instance by providing them in config. The above sets the
:attr:`delay<cherrypy.lib.caching.MemoryCache.delay>` attribute, for example.
"""
import datetime
import sys
import threading
import time
import cherrypy
from cherrypy.lib import cptools, httputil
from cherrypy._cpcompat import copyitems, ntob, set_daemon, sorted
class Cache(object):
"""Base class for Cache implementations."""
def get(self):
"""Return the current variant if in the cache, else None."""
raise NotImplemented
def put(self, obj, size):
"""Store the current variant in the cache."""
raise NotImplemented
def delete(self):
"""Remove ALL cached variants of the current resource."""
raise NotImplemented
def clear(self):
"""Reset the cache to its initial, empty state."""
raise NotImplemented
# ------------------------------- Memory Cache ------------------------------- #
class AntiStampedeCache(dict):
"""A storage system for cached items which reduces stampede collisions."""
def wait(self, key, timeout=5, debug=False):
"""Return the cached value for the given key, or None.
If timeout is not None, and the value is already
being calculated by another thread, wait until the given timeout has
elapsed. If the value is available before the timeout expires, it is
returned. If not, None is returned, and a sentinel placed in the cache
to signal other threads to wait.
If timeout is None, no waiting is performed nor sentinels used.
"""
value = self.get(key)
if isinstance(value, threading._Event):
if timeout is None:
# Ignore the other thread and recalc it ourselves.
if debug:
cherrypy.log('No timeout', 'TOOLS.CACHING')
return None
# Wait until it's done or times out.
if debug:
cherrypy.log('Waiting up to %s seconds' % timeout, 'TOOLS.CACHING')
value.wait(timeout)
if value.result is not None:
# The other thread finished its calculation. Use it.
if debug:
cherrypy.log('Result!', 'TOOLS.CACHING')
return value.result
# Timed out. Stick an Event in the slot so other threads wait
# on this one to finish calculating the value.
if debug:
cherrypy.log('Timed out', 'TOOLS.CACHING')
e = threading.Event()
e.result = None
dict.__setitem__(self, key, e)
return None
elif value is None:
# Stick an Event in the slot so other threads wait
# on this one to finish calculating the value.
if debug:
cherrypy.log('Timed out', 'TOOLS.CACHING')
e = threading.Event()
e.result = None
dict.__setitem__(self, key, e)
return value
def __setitem__(self, key, value):
"""Set the cached value for the given key."""
existing = self.get(key)
dict.__setitem__(self, key, value)
if isinstance(existing, threading._Event):
# Set Event.result so other threads waiting on it have
# immediate access without needing to poll the cache again.
existing.result = value
existing.set()
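# --- illustrative sketch (added example, not part of the upstream file) ---
# Minimal sketch of the anti-stampede calling pattern described above. The
# first caller gets None back (and a sentinel Event is parked in the slot),
# computes the value itself and stores it; threads that arrived in the
# meantime are woken by __setitem__ with the finished result.
# `render_page` is a hypothetical callable producing the cacheable value.
def _antistampede_usage_sketch(cache, key, render_page):
    variant = cache.wait(key, timeout=5)
    if variant is None:
        # Nothing usable was cached (or waiting timed out), so this thread
        # is now responsible for producing the value.
        variant = render_page()
        cache[key] = variant  # releases any threads blocked in wait()
    return variant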
class MemoryCache(Cache):
"""An in-memory cache for varying response content.
Each key in self.store is a URI, and each value is an AntiStampedeCache.
The response for any given URI may vary based on the values of
"selecting request headers"; that is, those named in the Vary
response header. We assume the list of header names to be constant
for each URI throughout the lifetime of the application, and store
that list in ``self.store[uri].selecting_headers``.
The items contained in ``self.store[uri]`` have keys which are tuples of
request header values (in the same order as the names in its
selecting_headers), and values which are the actual responses.
"""
maxobjects = 1000
"""The maximum number of cached objects; defaults to 1000."""
maxobj_size = 100000
"""The maximum size of each cached object in bytes; defaults to 100 KB."""
maxsize = 10000000
"""The maximum size of the entire cache in bytes; defaults to 10 MB."""
delay = 600
"""Seconds until the cached content expires; defaults to 600 (10 minutes)."""
antistampede_timeout = 5
"""Seconds to wait for other threads to release a cache lock."""
expire_freq = 0.1
"""Seconds to sleep between cache expiration sweeps."""
debug = False
def __init__(self):
self.clear()
# Run self.expire_cache in a separate daemon thread.
t = threading.Thread(target=self.expire_cache, name='expire_cache')
self.expiration_thread = t
set_daemon(t, True)
t.start()
def clear(self):
"""Reset the cache to its initial, empty state."""
self.store = {}
self.expirations = {}
self.tot_puts = 0
self.tot_gets = 0
self.tot_hist = 0
self.tot_expires = 0
self.tot_non_modified = 0
self.cursize = 0
def expire_cache(self):
"""Continuously examine cached objects, expiring stale ones.
This function is designed to be run in its own daemon thread,
referenced at ``self.expiration_thread``.
"""
# It's possible that "time" will be set to None
# arbitrarily, so we check "while time" to avoid exceptions.
# See tickets #99 and #180 for more information.
while time:
now = time.time()
# Must make a copy of expirations so it doesn't change size
# during iteration
for expiration_time, objects in copyitems(self.expirations):
if expiration_time <= now:
for obj_size, uri, sel_header_values in objects:
try:
del self.store[uri][tuple(sel_header_values)]
self.tot_expires += 1
self.cursize -= obj_size
except KeyError:
# the key may have been deleted elsewhere
pass
del self.expirations[expiration_time]
time.sleep(self.expire_freq)
def get(self):
"""Return the current variant if in the cache, else None."""
request = cherrypy.serving.request
self.tot_gets += 1
uri = cherrypy.url(qs=request.query_string)
uricache = self.store.get(uri)
if uricache is None:
return None
header_values = [request.headers.get(h, '')
for h in uricache.selecting_headers]
variant = uricache.wait(key=tuple(sorted(header_values)),
timeout=self.antistampede_timeout,
debug=self.debug)
if variant is not None:
self.tot_hist += 1
return variant
def put(self, variant, size):
"""Store the current variant in the cache."""
request = cherrypy.serving.request
response = cherrypy.serving.response
uri = cherrypy.url(qs=request.query_string)
uricache = self.store.get(uri)
if uricache is None:
uricache = AntiStampedeCache()
uricache.selecting_headers = [
e.value for e in response.headers.elements('Vary')]
self.store[uri] = uricache
if len(self.store) < self.maxobjects:
total_size = self.cursize + size
# checks if there's space for the object
if (size < self.maxobj_size and total_size < self.maxsize):
# add to the expirations list
expiration_time = response.time + self.delay
bucket = self.expirations.setdefault(expiration_time, [])
bucket.append((size, uri, uricache.selecting_headers))
# add to the cache
header_values = [request.headers.get(h, '')
for h in uricache.selecting_headers]
uricache[tuple(sorted(header_values))] = variant
self.tot_puts += 1
self.cursize = total_size
def delete(self):
"""Remove ALL cached variants of the current resource."""
uri = cherrypy.url(qs=cherrypy.serving.request.query_string)
self.store.pop(uri, None)
def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs):
"""Try to obtain cached output. If fresh enough, raise HTTPError(304).
If POST, PUT, or DELETE:
* invalidates (deletes) any cached response for this resource
* sets request.cached = False
* sets request.cacheable = False
else if a cached copy exists:
* sets request.cached = True
* sets request.cacheable = False
* sets response.headers to the cached values
* checks the cached Last-Modified response header against the
current If-(Un)Modified-Since request headers; raises 304
if necessary.
* sets response.status and response.body to the cached values
* returns True
otherwise:
* sets request.cached = False
* sets request.cacheable = True
* returns False
"""
request = cherrypy.serving.request
response = cherrypy.serving.response
if not hasattr(cherrypy, "_cache"):
# Make a process-wide Cache object.
cherrypy._cache = kwargs.pop("cache_class", MemoryCache)()
# Take all remaining kwargs and set them on the Cache object.
for k, v in kwargs.items():
setattr(cherrypy._cache, k, v)
cherrypy._cache.debug = debug
# POST, PUT, DELETE should invalidate (delete) the cached copy.
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.10.
if request.method in invalid_methods:
if debug:
cherrypy.log('request.method %r in invalid_methods %r' %
(request.method, invalid_methods), 'TOOLS.CACHING')
cherrypy._cache.delete()
request.cached = False
request.cacheable = False
return False
if 'no-cache' in [e.value for e in request.headers.elements('Pragma')]:
request.cached = False
request.cacheable = True
return False
cache_data = cherrypy._cache.get()
request.cached = bool(cache_data)
request.cacheable = not request.cached
if request.cached:
# Serve the cached copy.
max_age = cherrypy._cache.delay
for v in [e.value for e in request.headers.elements('Cache-Control')]:
atoms = v.split('=', 1)
directive = atoms.pop(0)
if directive == 'max-age':
if len(atoms) != 1 or not atoms[0].isdigit():
raise cherrypy.HTTPError(400, "Invalid Cache-Control header")
max_age = int(atoms[0])
break
elif directive == 'no-cache':
if debug:
cherrypy.log('Ignoring cache due to Cache-Control: no-cache',
'TOOLS.CACHING')
request.cached = False
request.cacheable = True
return False
if debug:
cherrypy.log('Reading response from cache', 'TOOLS.CACHING')
s, h, b, create_time = cache_data
age = int(response.time - create_time)
if (age > max_age):
if debug:
cherrypy.log('Ignoring cache due to age > %d' % max_age,
'TOOLS.CACHING')
request.cached = False
request.cacheable = True
return False
# Copy the response headers. See http://www.cherrypy.org/ticket/721.
response.headers = rh = httputil.HeaderMap()
for k in h:
dict.__setitem__(rh, k, dict.__getitem__(h, k))
# Add the required Age header
response.headers["Age"] = str(age)
try:
# Note that validate_since depends on a Last-Modified header;
# this was put into the cached copy, and should have been
# resurrected just above (response.headers = cache_data[1]).
cptools.validate_since()
except cherrypy.HTTPRedirect:
x = sys.exc_info()[1]
if x.status == 304:
cherrypy._cache.tot_non_modified += 1
raise
# serve it & get out from the request
response.status = s
response.body = b
else:
if debug:
cherrypy.log('request is not cached', 'TOOLS.CACHING')
return request.cached
def tee_output():
"""Tee response output to cache storage. Internal."""
# Used by CachingTool by attaching to request.hooks
request = cherrypy.serving.request
if 'no-store' in request.headers.values('Cache-Control'):
return
def tee(body):
"""Tee response.body into a list."""
if ('no-cache' in response.headers.values('Pragma') or
'no-store' in response.headers.values('Cache-Control')):
for chunk in body:
yield chunk
return
output = []
for chunk in body:
output.append(chunk)
yield chunk
# save the cache data
body = ntob('').join(output)
cherrypy._cache.put((response.status, response.headers or {},
body, response.time), len(body))
response = cherrypy.serving.response
response.body = tee(response.body)
def expires(secs=0, force=False, debug=False):
"""Tool for influencing cache mechanisms using the 'Expires' header.
secs
Must be either an int or a datetime.timedelta, and indicates the
number of seconds between response.time and when the response should
expire. The 'Expires' header will be set to response.time + secs.
If secs is zero, the 'Expires' header is set one year in the past, and
the following "cache prevention" headers are also set:
* Pragma: no-cache
* Cache-Control': no-cache, must-revalidate
force
If False, the following headers are checked:
* Etag
* Last-Modified
* Age
* Expires
If any are already present, none of the above response headers are set.
"""
response = cherrypy.serving.response
headers = response.headers
cacheable = False
if not force:
# some header names that indicate that the response can be cached
for indicator in ('Etag', 'Last-Modified', 'Age', 'Expires'):
if indicator in headers:
cacheable = True
break
if not cacheable and not force:
if debug:
cherrypy.log('request is not cacheable', 'TOOLS.EXPIRES')
else:
if debug:
cherrypy.log('request is cacheable', 'TOOLS.EXPIRES')
if isinstance(secs, datetime.timedelta):
secs = (86400 * secs.days) + secs.seconds
if secs == 0:
if force or ("Pragma" not in headers):
headers["Pragma"] = "no-cache"
if cherrypy.serving.request.protocol >= (1, 1):
if force or "Cache-Control" not in headers:
headers["Cache-Control"] = "no-cache, must-revalidate"
# Set an explicit Expires date in the past.
expiry = httputil.HTTPDate(1169942400.0)
else:
expiry = httputil.HTTPDate(response.time + secs)
if force or "Expires" not in headers:
headers["Expires"] = expiry
| gpl-3.0 | -9,031,835,959,777,974,000 | 1,234,860,714,220,528,000 | 36.447312 | 83 | 0.582726 | false |
sekaiamber/thefuck | thefuck/rules/ssh_known_hosts.py | 11 | 1044 | import re
patterns = [
r'WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!',
r'WARNING: POSSIBLE DNS SPOOFING DETECTED!',
r"Warning: the \S+ host key for '([^']+)' differs from the key for the IP address '([^']+)'",
]
offending_pattern = re.compile(
r'(?:Offending (?:key for IP|\S+ key)|Matching host key) in ([^:]+):(\d+)',
re.MULTILINE)
commands = ['ssh', 'scp']
def match(command, settings):
if not command.script:
return False
if not command.script.split()[0] in commands:
return False
if not any([re.findall(pattern, command.stderr) for pattern in patterns]):
return False
return True
def get_new_command(command, settings):
return command.script
def side_effect(command, settings):
offending = offending_pattern.findall(command.stderr)
for filepath, lineno in offending:
with open(filepath, 'r') as fh:
lines = fh.readlines()
del lines[int(lineno) - 1]
with open(filepath, 'w') as fh:
fh.writelines(lines)
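# --- illustrative sketch (added example, not part of the upstream file) ---
# Rough illustration of how this rule behaves. ``match`` fires when an
# ssh/scp command fails with a changed-host-key warning, ``side_effect``
# strips the offending line from the known_hosts file named in the error,
# and ``get_new_command`` simply re-runs the original command. The
# namedtuple below is only a stand-in for thefuck's real Command object.
from collections import namedtuple

_FakeCommand = namedtuple('_FakeCommand', ['script', 'stdout', 'stderr'])

def _ssh_rule_usage_sketch():
    stderr = ('WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!\n'
              'Offending RSA key in /home/me/.ssh/known_hosts:42\n')
    cmd = _FakeCommand(script='ssh host.example.com', stdout='', stderr=stderr)
    assert match(cmd, settings=None)
    # side_effect(cmd, None) would drop line 42 from that known_hosts file.
    return get_new_command(cmd, settings=None)  # -> 'ssh host.example.com'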
| mit | -3,783,820,167,844,992,500 | -6,552,601,658,363,466,000 | 28 | 97 | 0.628352 | false |
opps/opps | opps/api/__init__.py | 4 | 4177 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.contrib.auth import authenticate
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from piston.handler import BaseHandler as Handler
from piston.emitters import JSONEmitter, Emitter
from opps.api.models import ApiKey
class UncachedEmitter(JSONEmitter):
""" In websites running under varnish or another cache
caching the api can mess the results and return the wrong data
this emmitter injects No-Cache headers in response"""
def render(self, request):
content = super(UncachedEmitter, self).render(request)
response = HttpResponse(content)
response['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response['Content-Type'] = 'application/json; charset=utf-8'
response['Pragma'] = 'no-cache'
response['Expires'] = 0
return response
Emitter.register('json', UncachedEmitter, 'application/json; charset=utf-8')
class BaseHandler(Handler):
limit = 20
limit_arg = 'paginate_limit'
meta = {}
blackfield = ['num_pages', 'page_range', 'total_objects', 'per_page',
'page', 'has_next', 'has_previous', 'has_other_pages',
'end_index', 'start_index', 'start_index']
def include_meta(self, d):
obj = {'meta': self.meta, 'objects': d}
return obj
def paginate_queryset(self, queryset, request):
limit = request.GET.get(self.limit_arg, self.meta.get(self.limit_arg))
paginator = Paginator(queryset, limit or self.limit)
self.meta['num_pages'] = paginator.num_pages
self.meta['page_range'] = paginator.page_range
self.meta['total_objects'] = paginator.count
self.meta['per_page'] = paginator.per_page
page = self.meta.get('page', request.GET.get('page', 1))
try:
results = paginator.page(page)
except PageNotAnInteger:
results = paginator.page(1)
except EmptyPage:
results = paginator.page(paginator.num_pages)
self.meta['has_next'] = results.has_next()
self.meta['has_previous'] = results.has_previous()
self.meta['has_other_pages'] = results.has_other_pages()
self.meta['end_index'] = results.end_index()
self.meta['start_index'] = results.start_index()
self.meta['page_number'] = results.number
return results
def read(self, request):
base = self.model.objects
if request.GET.items():
items = request.GET.dict()
self.meta[self.limit_arg] = items.pop(self.limit_arg, None)
self.meta['page'] = items.pop('page', 1)
qs = base.filter(**items)
else:
qs = base.all()
self.meta['total_objects'] = qs.count()
return qs
def _limit(self, request):
limit = request.GET.get(self.limit_arg, self.limit)
return int(limit) * int(request.GET.get('page', 1))
def _page(self, request):
page = int(request.GET.get('page', 1))
if page == 1:
return 0
limit = int(request.GET.get(self.limit_arg, self.limit))
return limit * page - page
def appendModel(Model, Filters):
m = Model.objects.filter(**Filters)
l = []
for i in m:
l.append(i.__dict__)
return l
class ApiKeyAuthentication(object):
def __init__(self, auth_func=authenticate, method=['GET']):
self.auth_func = auth_func
self.method = method
def is_authenticated(self, request):
if request.method == 'GET' and 'GET' in self.method:
return True
try:
method = getattr(request, request.method)
except:
method = request.GET
try:
ApiKey.objects.get(
user__username=method.get('api_username'),
key=method.get('api_key'))
except ApiKey.DoesNotExist:
return False
return True
def challenge(self):
resp = HttpResponse("Authorization Required")
resp.status_code = 401
return resp
| mit | -3,402,773,335,074,756,600 | 887,009,849,889,845,100 | 31.379845 | 78 | 0.604501 | false |
ShalY/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | ExamplesFromChapters/Chapter3/ClusteringWithGaussians.py | 90 | 1034 | import numpy as np
import pymc as pm
data = np.loadtxt("../../Chapter3_MCMC/data/mixture_data.csv", delimiter=",")
p = pm.Uniform("p", 0, 1)
assignment = pm.Categorical("assignment", [p, 1 - p], size=data.shape[0])
taus = 1.0 / pm.Uniform("stds", 0, 100, size=2) ** 2 # notice the size!
centers = pm.Normal("centers", [150, 150], [0.001, 0.001], size=2)
"""
The deterministic functions below map an assignment, in this case 0 or 1,
to a set of parameters, located in the (1,2) arrays `taus` and `centers`.
"""
@pm.deterministic
def center_i(assignment=assignment, centers=centers):
return centers[assignment]
@pm.deterministic
def tau_i(assignment=assignment, taus=taus):
return taus[assignment]
# and to combine it with the observations:
observations = pm.Normal("obs", center_i, tau_i,
value=data, observed=True)
# below we create a model class
model = pm.Model([p, assignment, taus, centers])
map_ = pm.MAP(model)
map_.fit()
mcmc = pm.MCMC(model)
mcmc.sample(100000, 50000)
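# --- illustrative sketch (added example, not part of the upstream file) ---
# Pulling the posterior draws back out of the sampler. With PyMC 2 the kept
# samples (100,000 drawn above, the first 50,000 discarded as burn-in) are
# exposed through mcmc.trace(); the axis-0 mean gives a point estimate of
# the two cluster centers and of the mixing weight p.
center_trace = mcmc.trace("centers")[:]   # array of shape (50000, 2)
p_trace = mcmc.trace("p")[:]              # array of shape (50000,)
print(center_trace.mean(axis=0))
print(p_trace.mean())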
| mit | -2,297,191,644,570,411,300 | -2,827,020,274,873,748,000 | 24.219512 | 78 | 0.667311 | false |
googleapis/python-container | google/cloud/container_v1beta1/services/cluster_manager/transports/__init__.py | 2 | 1194 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import ClusterManagerTransport
from .grpc import ClusterManagerGrpcTransport
from .grpc_asyncio import ClusterManagerGrpcAsyncIOTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[ClusterManagerTransport]]
_transport_registry["grpc"] = ClusterManagerGrpcTransport
_transport_registry["grpc_asyncio"] = ClusterManagerGrpcAsyncIOTransport
__all__ = (
"ClusterManagerTransport",
"ClusterManagerGrpcTransport",
"ClusterManagerGrpcAsyncIOTransport",
)
| apache-2.0 | -860,788,326,812,010,200 | 6,695,835,525,433,714,000 | 35.181818 | 85 | 0.777219 | false |
lepistone/stock-logistics-warehouse | __unported__/stock_reserve_sale/model/stock_reserve.py | 4 | 1887 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class stock_reservation(orm.Model):
_inherit = 'stock.reservation'
_columns = {
'sale_line_id': fields.many2one(
'sale.order.line',
string='Sale Order Line',
ondelete='cascade'),
'sale_id': fields.related(
'sale_line_id', 'order_id',
type='many2one',
relation='sale.order',
string='Sale Order')
}
def release(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'sale_line_id': False}, context=context)
return super(stock_reservation, self).release(
cr, uid, ids, context=context)
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default['sale_line_id'] = False
return super(stock_reservation, self).copy_data(
cr, uid, id, default=default, context=context)
| agpl-3.0 | -6,747,506,836,104,621,000 | -3,616,926,951,223,473,000 | 36.74 | 78 | 0.582936 | false |
rhjdjong/SlipLib | src/sliplib/slipsocket.py | 2 | 10133 | # Copyright (c) 2020. Ruud de Jong
# This file is part of the SlipLib project which is released under the MIT license.
# See https://github.com/rhjdjong/SlipLib for details.
"""
SlipSocket
----------
.. autoclass:: SlipSocket(sock)
:show-inheritance:
Class :class:`SlipSocket` offers the following methods in addition to the methods
offered by its base class :class:`SlipWrapper`:
.. automethod:: accept
.. automethod:: create_connection
.. note::
The :meth:`accept` and :meth:`create_connection` methods
do not magically turn the
socket at the remote address into a SlipSocket.
For the connection to work properly,
the remote socket must already
have been configured to use the SLIP protocol.
The following commonly used :class:`socket.socket` methods are exposed through
a :class:`SlipSocket` object.
These methods are simply delegated to the wrapped `socket` instance.
.. automethod:: bind
.. automethod:: close
.. automethod:: connect
.. automethod:: connect_ex
.. automethod:: getpeername
.. automethod:: getsockname
.. automethod:: listen([backlog])
.. automethod:: shutdown
Since the wrapped socket is available as the :attr:`socket` attribute,
any other :class:`socket.socket`
method can be invoked through that attribute.
.. warning::
Avoid using :class:`socket.socket`
methods that affect the bytes that are sent or received through the socket.
Doing so will invalidate the internal state of the enclosed :class:`Driver` instance,
resulting in corrupted SLIP messages.
In particular, do not use any of the :meth:`recv*` or :meth:`send*` methods
on the :attr:`socket` attribute.
A :class:`SlipSocket` instance has the following attributes in addition to the attributes
offered by its base class :class:`SlipWrapper`:
.. attribute:: socket
The wrapped `socket`.
This is actually just an alias for the :attr:`stream` attribute in the base class.
.. autoattribute:: family
.. autoattribute:: type
.. autoattribute:: proto
"""
import socket
import warnings
from typing import Optional, Tuple
from .slipwrapper import SlipWrapper
class SlipSocket(SlipWrapper):
"""Class that wraps a TCP :class:`socket` with a :class:`Driver`
:class:`SlipSocket` combines a :class:`Driver` instance with a
:class:`socket`.
The :class:`SlipStream` class has all the methods from its base class :class:`SlipWrapper`.
In addition it directly exposes all methods and attributes of
the contained :obj:`socket`, except for the following:
* :meth:`send*` and :meth:`recv*`. These methods are not
supported, because byte-oriented send and receive operations
would invalidate the internal state maintained by :class:`SlipSocket`.
* Similarly, :meth:`makefile` is not supported, because byte- or line-oriented
read and write operations would invalidate the internal state.
* :meth:`share` (Windows only) and :meth:`dup`. The internal state of
the :class:`SlipSocket` would have to be duplicated and shared to make these methods meaningful.
Because of the lack of a convincing use case for this, sharing and duplication is
not supported.
* The :meth:`accept` method is delegated to the contained :class:`socket`,
but the socket that is returned by the :class:`socket`'s :meth:`accept` method
is automatically wrapped in a :class:`SlipSocket` object.
In stead of the :class:`socket`'s :meth:`send*` and :meth:`recv*` methods
a :class:`SlipSocket` provides the method :meth:`send_msg` and :meth:`recv_msg`
to send and receive SLIP-encoded messages.
.. deprecated:: 0.6
Direct access to the methods and attributes of the contained :obj:`socket`
other than `family`, `type`, and `proto` will be removed in version 1.0
Only TCP sockets are supported. Using the SLIP protocol on
UDP sockets is not supported for the following reasons:
* UDP is datagram-based. Using SLIP with UDP therefore
introduces ambiguity: should SLIP packets be allowed to span
multiple UDP datagrams or not?
* UDP does not guarantee delivery, and does not guarantee that
datagrams are delivered in the correct order.
"""
_chunk_size = 4096
def __init__(self, sock: socket.SocketType):
# pylint: disable=missing-raises-doc
"""
To instantiate a :class:`SlipSocket`, the user must provide
a pre-constructed TCP `socket`.
An alternative way to instantiate s SlipSocket is to use the
class method :meth:`create_connection`.
Args:
sock (socket.socket): An existing TCP socket, i.e.
a socket with type :const:`socket.SOCK_STREAM`
"""
if not isinstance(sock, socket.socket) or sock.type != socket.SOCK_STREAM:
raise ValueError('Only sockets with type SOCK_STREAM are supported')
super().__init__(sock)
self.socket = self.stream
def send_bytes(self, packet: bytes) -> None:
"""See base class"""
self.socket.sendall(packet)
def recv_bytes(self) -> bytes:
"""See base class"""
return self.socket.recv(self._chunk_size)
def accept(self) -> Tuple['SlipSocket', Tuple]:
"""Accepts an incoming connection.
Returns:
Tuple[:class:`~SlipSocket`, Tuple]: A (`SlipSocket`, remote_address) pair.
The :class:`SlipSocket` object
can be used to exchange SLIP-encoded data with the socket at the `remote_address`.
See Also:
:meth:`socket.socket.accept`
"""
conn, address = self.socket.accept()
return self.__class__(conn), address
def bind(self, address: Tuple) -> None:
"""Bind the `SlipSocket` to `address`.
Args:
address: The IP address to bind to.
See Also:
:meth:`socket.socket.bind`
"""
self.socket.bind(address)
def close(self) -> None:
"""Close the `SlipSocket`.
See Also:
:meth:`socket.socket.close`
"""
self.socket.close()
def connect(self, address: Tuple) -> None:
"""Connect `SlipSocket` to a remote socket at `address`.
Args:
address: The IP address of the remote socket.
See Also:
:meth:`socket.socket.connect`
"""
self.socket.connect(address)
def connect_ex(self, address: Tuple) -> None:
"""Connect `SlipSocket` to a remote socket at `address`.
Args:
address: The IP address of the remote socket.
See Also:
:meth:`socket.socket.connect_ex`
"""
self.socket.connect_ex(address)
def getpeername(self) -> Tuple:
"""Get the IP address of the remote socket to which `SlipSocket` is connected.
Returns:
The remote IP address.
See Also:
:meth:`socket.socket.getpeername`
"""
return self.socket.getpeername()
def getsockname(self) -> Tuple:
"""Get `SlipSocket`'s own address.
Returns:
The local IP address.
See Also:
:meth:`socket.socket.getsockname`
"""
return self.socket.getsockname()
def listen(self, backlog: Optional[int] = None) -> None:
"""Enable a `SlipSocket` server to accept connections.
Args:
backlog (int): The maximum number of waiting connections.
See Also:
:meth:`socket.socket.listen`
"""
if backlog is None:
self.socket.listen()
else:
self.socket.listen(backlog)
def shutdown(self, how: int) -> None:
"""Shutdown the connection.
Args:
how: Flag to indicate which halves of the connection must be shut down.
See Also:
:meth:`socket.socket.shutdown`
"""
self.socket.shutdown(how)
@property
def family(self) -> int:
# pylint: disable=line-too-long
"""The wrapped socket's address family. Usually :const:`socket.AF_INET` (IPv4) or :const:`socket.AF_INET6` (IPv6)."""
return self.socket.family
@property
def type(self) -> int:
"""The wrapped socket's type. Always :const:`socket.SOCK_STREAM`."""
return self.socket.type
@property
def proto(self) -> int:
"""The wrapped socket's protocol number. Usually 0."""
return self.socket.proto
def __getattr__(self, attribute):
if attribute.startswith('recv') or attribute.startswith('send') or attribute in (
'makefile', 'share', 'dup',
):
raise AttributeError("'{}' object has no attribute '{}'".
format(self.__class__.__name__, attribute))
warnings.warn("Direct access to the enclosed socket attributes and methods will be removed in version 1.0",
DeprecationWarning, stacklevel=2)
return getattr(self.socket, attribute)
@classmethod
def create_connection(cls, address: Tuple, timeout: Optional[float] = None,
source_address: Optional[Tuple] = None) -> 'SlipSocket':
"""Create a SlipSocket connection.
This convenience method creates a connection to a socket at the specified address
using the :func:`socket.create_connection` function.
The socket that is returned from that call is automatically wrapped in
a :class:`SlipSocket` object.
Args:
address (Address): The remote address.
timeout (float): Optional timeout value.
source_address (Address): Optional local address for the near socket.
Returns:
:class:`~SlipSocket`: A `SlipSocket` that is connected to the socket at the remote address.
See Also:
:func:`socket.create_connection`
"""
sock = socket.create_connection(address[0:2], timeout, source_address) # type: ignore
return cls(sock)
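# --- illustrative sketch (added example, not part of the upstream file) ---
# Typical client-side use of the class above: open a SLIP connection and
# exchange one message. send_msg()/recv_msg() come from the SlipWrapper
# base class; the address is a placeholder and the remote peer is assumed
# to speak SLIP already.
def _slipsocket_usage_sketch():
    sock = SlipSocket.create_connection(('192.0.2.10', 8000))
    try:
        sock.send_msg(b'hello')
        reply = sock.recv_msg()
    finally:
        sock.close()
    return reply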
| mit | -2,195,961,916,809,133,300 | 5,635,094,292,373,387,000 | 33.941379 | 125 | 0.633376 | false |
SublimeLinter/SublimeLinter3 | tests/test_loose_lintmatch.py | 1 | 5173 | from unittesting import DeferrableTestCase
from SublimeLinter.lint.linter import LintMatch
class TestLooseLintMatch(DeferrableTestCase):
def test_attribute_access(self):
m = object()
match = {
"match": m,
"line": 1,
"col": 2,
"error": "error_txt",
"warning": "warning_txt",
"message": "message_txt",
"near": "near_txt"
}
rv = LintMatch(**match)
self.assertEqual(rv.match, m)
self.assertEqual(rv.line, 1)
self.assertEqual(rv.col, 2)
self.assertEqual(rv.error, "error_txt")
self.assertEqual(rv.warning, "warning_txt")
self.assertEqual(rv.message, "message_txt")
self.assertEqual(rv.near, "near_txt")
def test_attribute_access_returns_defaults_for_missing_common_names(self):
rv = LintMatch()
for k in (
"match", "line", "col", "error", "warning", "message", "near",
"filename", "error_type", "code",
):
self.assertEqual(getattr(rv, k), '' if k == 'message' else None)
def test_unknown_keys_raise_on_attribute_access(self):
rv = LintMatch()
try:
rv.foo
except AttributeError as e:
self.assertEqual(str(e), "'LintMatch' object has no attribute 'foo'")
except Exception:
self.fail('Should have thrown AttributeError.')
else:
self.fail('Should have thrown AttributeError.')
def test_self_repr(self):
rv = LintMatch(foo='bar')
self.assertEqual(str(rv), "LintMatch({'foo': 'bar'})")
self.assertEqual(eval(repr(rv)), rv)
def test_copy_lint_match(self):
rv = LintMatch(foo='bar')
self.assertEqual(rv.copy(), rv)
self.assertEqual(type(rv.copy()), LintMatch)
def test_double_star_unpacking_to_dict(self):
m = object()
match = {
"match": m,
"line": 1,
"col": 2,
"error": "error_txt",
"warning": "warning_txt",
"message": "message_txt",
"near": "near_txt"
}
expected = LintMatch(match)
actual = dict(**expected)
self.assertEqual(actual, expected)
def test_tuple_like_unpacking(self):
m = object()
match = {
"match": m,
"line": 1,
"col": 2,
"error": "error_txt",
"warning": "warning_txt",
"message": "message_txt",
"near": "near_txt"
}
rv = LintMatch(**match)
match, line, col, error, warning, message, near = rv
self.assertEqual(match, m)
self.assertEqual(line, 1)
self.assertEqual(col, 2)
self.assertEqual(error, "error_txt")
self.assertEqual(warning, "warning_txt")
self.assertEqual(message, "message_txt")
self.assertEqual(near, "near_txt")
def test_tuple_like_index_access(self):
m = object()
match = {
"match": m,
"line": 1,
"col": 2,
"error": "error_txt",
"warning": "warning_txt",
"message": "message_txt",
"near": "near_txt"
}
rv = LintMatch(**match)
self.assertEqual(rv[0], m)
self.assertEqual(rv[1], 1)
self.assertEqual(rv[2], 2)
self.assertEqual(rv[3], "error_txt")
self.assertEqual(rv[4], "warning_txt")
self.assertEqual(rv[5], "message_txt")
self.assertEqual(rv[6], "near_txt")
self.assertRaises(IndexError, lambda: rv[7])
def test_namedtuple_like_mutating(self):
rv = LintMatch({'foo': 'bar'})
rv2 = rv._replace(foo='baz')
self.assertEqual(rv2.foo, 'baz')
# unlike namedtuple LintMatch is mutable
self.assertEqual(rv.foo, 'baz')
def test_standard_items_access(self):
m = object()
match = {
"match": m,
"line": 1,
"col": 2,
"error": "error_txt",
"warning": "warning_txt",
"message": "message_txt",
"near": "near_txt"
}
rv = LintMatch(**match)
self.assertEqual(rv['match'], m)
self.assertEqual(rv['line'], 1)
self.assertEqual(rv['col'], 2)
self.assertEqual(rv['error'], "error_txt")
self.assertEqual(rv['warning'], "warning_txt")
self.assertEqual(rv['message'], "message_txt")
self.assertEqual(rv['near'], "near_txt")
def test_standard_item_access_throws_on_unknown_keys(self):
rv = LintMatch()
self.assertRaises(KeyError, lambda: rv['line'])
def test_create_from_tuple(self):
m = object()
match = (m, 1, 2, "error_txt", "warning_txt", "message_txt", "near_txt")
actual = LintMatch(*match)
expected = LintMatch({
"match": m,
"line": 1,
"col": 2,
"error": "error_txt",
"warning": "warning_txt",
"message": "message_txt",
"near": "near_txt"
})
self.assertEqual(actual, expected)
| mit | -3,822,240,807,406,042,600 | -4,657,284,689,149,954,000 | 28.901734 | 81 | 0.520974 | false |
williamsjj/twisted_cql | examples/session_client_example.py | 1 | 1142 | # -*- coding: utf-8-*-
# ###################################################################
# FILENAME: examples/session_client_example.py
# PROJECT:
# DESCRIPTION: Cassandra session wrappers.
#
# ###################################################################
# (C)2015 DigiTar, All Rights Reserved
# ###################################################################
from twisted.internet import task
from twisted.internet.defer import inlineCallbacks
from cassandra import ConsistencyLevel
from twisted_cql import session as cql_session
@inlineCallbacks
def main_datastax(reactor):
session = cql_session.CassandraSession(["localhost"],
port=9042,
keyspace="testkeyspace",
username="someuser",
password="somepass")
rows = yield session.execute_query("SELECT * FROM testtable;",
consistency_level=ConsistencyLevel.ONE)
print repr(rows)
if __name__ == '__main__':
task.react(main_datastax) | bsd-2-clause | 1,429,452,789,521,492,700 | 6,481,460,100,238,366,000 | 34.71875 | 78 | 0.465849 | false |
overtherain/scriptfile | software/googleAppEngine/lib/django_1_4/tests/regressiontests/multiple_database/models.py | 43 | 2251 | from __future__ import absolute_import
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.db import models
class Review(models.Model):
source = models.CharField(max_length=100)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey()
def __unicode__(self):
return self.source
class Meta:
ordering = ('source',)
class PersonManager(models.Manager):
def get_by_natural_key(self, name):
return self.get(name=name)
class Person(models.Model):
objects = PersonManager()
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Meta:
ordering = ('name',)
# This book manager doesn't do anything interesting; it just
# exists to strip out the 'extra_arg' argument to certain
# calls. This argument is used to establish that the BookManager
# is actually getting used when it should be.
class BookManager(models.Manager):
def create(self, *args, **kwargs):
kwargs.pop('extra_arg', None)
return super(BookManager, self).create(*args, **kwargs)
def get_or_create(self, *args, **kwargs):
kwargs.pop('extra_arg', None)
return super(BookManager, self).get_or_create(*args, **kwargs)
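# --- illustrative sketch (added example, not part of the upstream file) ---
# Minimal demonstration of the point made in the comment above: because the
# manager pops 'extra_arg', the call below behaves exactly like a plain
# create(), and a test can assert that BookManager handled it. The title and
# date are arbitrary.
def _book_manager_usage_sketch():
    import datetime
    return Book.objects.create(
        title="Dive into Python",
        published=datetime.date(2009, 5, 4),
        extra_arg=True,  # silently discarded by BookManager.create()
    )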
class Book(models.Model):
objects = BookManager()
title = models.CharField(max_length=100)
published = models.DateField()
authors = models.ManyToManyField(Person)
editor = models.ForeignKey(Person, null=True, related_name='edited')
reviews = generic.GenericRelation(Review)
pages = models.IntegerField(default=100)
def __unicode__(self):
return self.title
class Meta:
ordering = ('title',)
class Pet(models.Model):
name = models.CharField(max_length=100)
owner = models.ForeignKey(Person)
def __unicode__(self):
return self.name
class Meta:
ordering = ('name',)
class UserProfile(models.Model):
user = models.OneToOneField(User, null=True)
flavor = models.CharField(max_length=100)
class Meta:
ordering = ('flavor',)
| mit | -2,171,590,376,698,010,400 | -2,302,337,888,204,794,000 | 27.858974 | 72 | 0.682363 | false |
CEG-FYP-OpenStack/scheduler | nova/tests/unit/api/openstack/compute/test_attach_interfaces.py | 8 | 21320 | # Copyright 2012 SINA Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.api.openstack.compute import attach_interfaces \
as attach_interfaces_v21
from nova.api.openstack.compute.legacy_v2.contrib import attach_interfaces \
as attach_interfaces_v2
from nova.compute import api as compute_api
from nova import exception
from nova.network import api as network_api
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_network_cache_model
from webob import exc
FAKE_UUID1 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID2 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
FAKE_PORT_ID1 = '11111111-1111-1111-1111-111111111111'
FAKE_PORT_ID2 = '22222222-2222-2222-2222-222222222222'
FAKE_PORT_ID3 = '33333333-3333-3333-3333-333333333333'
FAKE_NOT_FOUND_PORT_ID = '00000000-0000-0000-0000-000000000000'
FAKE_NET_ID1 = '44444444-4444-4444-4444-444444444444'
FAKE_NET_ID2 = '55555555-5555-5555-5555-555555555555'
FAKE_NET_ID3 = '66666666-6666-6666-6666-666666666666'
FAKE_BAD_NET_ID = '00000000-0000-0000-0000-000000000000'
port_data1 = {
"id": FAKE_PORT_ID1,
"network_id": FAKE_NET_ID1,
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "aa:aa:aa:aa:aa:aa",
"fixed_ips": ["10.0.1.2"],
"device_id": FAKE_UUID1,
}
port_data2 = {
"id": FAKE_PORT_ID2,
"network_id": FAKE_NET_ID2,
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "bb:bb:bb:bb:bb:bb",
"fixed_ips": ["10.0.2.2"],
"device_id": FAKE_UUID1,
}
port_data3 = {
"id": FAKE_PORT_ID3,
"network_id": FAKE_NET_ID3,
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "bb:bb:bb:bb:bb:bb",
"fixed_ips": ["10.0.2.2"],
"device_id": '',
}
fake_networks = [FAKE_NET_ID1, FAKE_NET_ID2]
ports = [port_data1, port_data2, port_data3]
def fake_list_ports(self, *args, **kwargs):
result = []
for port in ports:
if port['device_id'] == kwargs['device_id']:
result.append(port)
return {'ports': result}
def fake_show_port(self, context, port_id, **kwargs):
for port in ports:
if port['id'] == port_id:
return {'port': port}
else:
raise exception.PortNotFound(port_id=port_id)
def fake_attach_interface(self, context, instance, network_id, port_id,
requested_ip='192.168.1.3'):
if not network_id:
# if no network_id is given when add a port to an instance, use the
# first default network.
network_id = fake_networks[0]
if network_id == FAKE_BAD_NET_ID:
raise exception.NetworkNotFound(network_id=network_id)
if not port_id:
port_id = ports[fake_networks.index(network_id)]['id']
if port_id == FAKE_NOT_FOUND_PORT_ID:
raise exception.PortNotFound(port_id=port_id)
vif = fake_network_cache_model.new_vif()
vif['id'] = port_id
vif['network']['id'] = network_id
vif['network']['subnets'][0]['ips'][0]['address'] = requested_ip
return vif
def fake_detach_interface(self, context, instance, port_id):
for port in ports:
if port['id'] == port_id:
return
raise exception.PortNotFound(port_id=port_id)
def fake_get_instance(self, *args, **kwargs):
return objects.Instance(uuid=FAKE_UUID1)
class InterfaceAttachTestsV21(test.NoDBTestCase):
controller_cls = attach_interfaces_v21.InterfaceAttachmentController
validate_exc = exception.ValidationError
in_use_exc = exc.HTTPConflict
not_found_exc = exc.HTTPNotFound
not_usable_exc = exc.HTTPBadRequest
def setUp(self):
super(InterfaceAttachTestsV21, self).setUp()
self.flags(timeout=30, group='neutron')
self.stubs.Set(network_api.API, 'show_port', fake_show_port)
self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
self.stubs.Set(compute_api.API, 'get', fake_get_instance)
self.expected_show = {'interfaceAttachment':
{'net_id': FAKE_NET_ID1,
'port_id': FAKE_PORT_ID1,
'mac_addr': port_data1['mac_address'],
'port_state': port_data1['status'],
'fixed_ips': port_data1['fixed_ips'],
}}
self.attachments = self.controller_cls()
self.req = fakes.HTTPRequest.blank('')
@mock.patch.object(compute_api.API, 'get',
side_effect=exception.InstanceNotFound(instance_id=''))
def _test_instance_not_found(self, func, args, mock_get, kwargs=None):
if not kwargs:
kwargs = {}
self.assertRaises(exc.HTTPNotFound, func, self.req, *args, **kwargs)
def test_show_instance_not_found(self):
self._test_instance_not_found(self.attachments.show, ('fake', 'fake'))
def test_index_instance_not_found(self):
self._test_instance_not_found(self.attachments.index, ('fake', ))
def test_detach_interface_instance_not_found(self):
self._test_instance_not_found(self.attachments.delete,
('fake', 'fake'))
def test_attach_interface_instance_not_found(self):
self._test_instance_not_found(self.attachments.create, ('fake', ),
kwargs={'body': {'interfaceAttachment': {}}})
def test_show(self):
result = self.attachments.show(self.req, FAKE_UUID1, FAKE_PORT_ID1)
self.assertEqual(self.expected_show, result)
def test_show_with_port_not_found(self):
self.assertRaises(exc.HTTPNotFound,
self.attachments.show, self.req, FAKE_UUID2,
FAKE_PORT_ID1)
@mock.patch.object(network_api.API, 'show_port',
side_effect=exception.Forbidden)
def test_show_forbidden(self, show_port_mock):
self.assertRaises(exc.HTTPForbidden,
self.attachments.show, self.req, FAKE_UUID1,
FAKE_PORT_ID1)
def test_delete(self):
self.stubs.Set(compute_api.API, 'detach_interface',
fake_detach_interface)
result = self.attachments.delete(self.req, FAKE_UUID1, FAKE_PORT_ID1)
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.attachments,
attach_interfaces_v21.InterfaceAttachmentController):
status_int = self.attachments.delete.wsgi_code
else:
status_int = result.status_int
self.assertEqual(202, status_int)
def test_detach_interface_instance_locked(self):
def fake_detach_interface_from_locked_server(self, context,
instance, port_id):
raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)
self.stubs.Set(compute_api.API,
'detach_interface',
fake_detach_interface_from_locked_server)
self.assertRaises(exc.HTTPConflict,
self.attachments.delete,
self.req,
FAKE_UUID1,
FAKE_PORT_ID1)
def test_delete_interface_not_found(self):
self.stubs.Set(compute_api.API, 'detach_interface',
fake_detach_interface)
self.assertRaises(exc.HTTPNotFound,
self.attachments.delete,
self.req,
FAKE_UUID1,
'invaid-port-id')
def test_attach_interface_instance_locked(self):
def fake_attach_interface_to_locked_server(self, context,
instance, network_id, port_id, requested_ip):
raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)
self.stubs.Set(compute_api.API,
'attach_interface',
fake_attach_interface_to_locked_server)
body = {}
self.assertRaises(exc.HTTPConflict,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
def test_attach_interface_without_network_id(self):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
body = {}
result = self.attachments.create(self.req, FAKE_UUID1, body=body)
self.assertEqual(result['interfaceAttachment']['net_id'],
FAKE_NET_ID1)
def test_attach_interface_with_network_id(self):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
body = {'interfaceAttachment': {'net_id': FAKE_NET_ID2}}
result = self.attachments.create(self.req, FAKE_UUID1, body=body)
self.assertEqual(result['interfaceAttachment']['net_id'],
FAKE_NET_ID2)
def _attach_interface_bad_request_case(self, body):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
self.assertRaises(exc.HTTPBadRequest,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
def _attach_interface_not_found_case(self, body):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
self.assertRaises(self.not_found_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
def test_attach_interface_with_port_and_network_id(self):
body = {
'interfaceAttachment': {
'port_id': FAKE_PORT_ID1,
'net_id': FAKE_NET_ID2
}
}
self._attach_interface_bad_request_case(body)
def test_attach_interface_with_not_found_network_id(self):
body = {
'interfaceAttachment': {
'net_id': FAKE_BAD_NET_ID
}
}
self._attach_interface_not_found_case(body)
def test_attach_interface_with_not_found_port_id(self):
body = {
'interfaceAttachment': {
'port_id': FAKE_NOT_FOUND_PORT_ID
}
}
self._attach_interface_not_found_case(body)
def test_attach_interface_with_invalid_state(self):
def fake_attach_interface_invalid_state(*args, **kwargs):
raise exception.InstanceInvalidState(
instance_uuid='', attr='', state='',
method='attach_interface')
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface_invalid_state)
body = {'interfaceAttachment': {'net_id': FAKE_NET_ID1}}
self.assertRaises(exc.HTTPConflict,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
@mock.patch.object(compute_api.API, 'attach_interface',
side_effect=NotImplementedError())
def test_attach_interface_with_not_implemented(self, _mock):
body = {'interfaceAttachment': {'net_id': FAKE_NET_ID1}}
self.assertRaises(exc.HTTPNotImplemented,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
def test_detach_interface_with_invalid_state(self):
def fake_detach_interface_invalid_state(*args, **kwargs):
raise exception.InstanceInvalidState(
instance_uuid='', attr='', state='',
method='detach_interface')
self.stubs.Set(compute_api.API, 'detach_interface',
fake_detach_interface_invalid_state)
self.assertRaises(exc.HTTPConflict,
self.attachments.delete,
self.req,
FAKE_UUID1,
FAKE_NET_ID1)
@mock.patch.object(compute_api.API, 'detach_interface',
side_effect=NotImplementedError())
def test_detach_interface_with_not_implemented(self, _mock):
self.assertRaises(exc.HTTPNotImplemented,
self.attachments.delete,
self.req, FAKE_UUID1, FAKE_NET_ID1)
def test_attach_interface_invalid_fixed_ip(self):
body = {
'interfaceAttachment': {
'net_id': FAKE_NET_ID1,
'fixed_ips': [{'ip_address': 'invalid_ip'}]
}
}
self.assertRaises(self.validate_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
@mock.patch.object(compute_api.API, 'get')
@mock.patch.object(compute_api.API, 'attach_interface')
def test_attach_interface_fixed_ip_already_in_use(self,
attach_mock,
get_mock):
fake_instance = objects.Instance(uuid=FAKE_UUID1)
get_mock.return_value = fake_instance
attach_mock.side_effect = exception.FixedIpAlreadyInUse(
address='10.0.2.2', instance_uuid=FAKE_UUID1)
body = {}
self.assertRaises(self.in_use_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
ctxt = self.req.environ['nova.context']
attach_mock.assert_called_once_with(ctxt, fake_instance, None,
None, None)
get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
want_objects=True,
expected_attrs=None)
@mock.patch.object(compute_api.API, 'get')
@mock.patch.object(compute_api.API, 'attach_interface')
def test_attach_interface_port_in_use(self,
attach_mock,
get_mock):
fake_instance = objects.Instance(uuid=FAKE_UUID1)
get_mock.return_value = fake_instance
attach_mock.side_effect = exception.PortInUse(
port_id=FAKE_PORT_ID1)
body = {}
self.assertRaises(self.in_use_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
ctxt = self.req.environ['nova.context']
attach_mock.assert_called_once_with(ctxt, fake_instance, None,
None, None)
get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
want_objects=True,
expected_attrs=None)
@mock.patch.object(compute_api.API, 'get')
@mock.patch.object(compute_api.API, 'attach_interface')
def test_attach_interface_port_not_usable(self,
attach_mock,
get_mock):
fake_instance = objects.Instance(uuid=FAKE_UUID1)
get_mock.return_value = fake_instance
attach_mock.side_effect = exception.PortNotUsable(
port_id=FAKE_PORT_ID1,
instance=fake_instance.uuid)
body = {}
self.assertRaises(self.not_usable_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
ctxt = self.req.environ['nova.context']
attach_mock.assert_called_once_with(ctxt, fake_instance, None,
None, None)
get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
want_objects=True,
expected_attrs=None)
@mock.patch.object(compute_api.API, 'get')
@mock.patch.object(compute_api.API, 'attach_interface')
def test_attach_interface_failed_no_network(self, attach_mock, get_mock):
fake_instance = objects.Instance(uuid=FAKE_UUID1,
project_id=FAKE_UUID2)
get_mock.return_value = fake_instance
attach_mock.side_effect = (
exception.InterfaceAttachFailedNoNetwork(project_id=FAKE_UUID2))
self.assertRaises(exc.HTTPBadRequest, self.attachments.create,
self.req, FAKE_UUID1, body={})
ctxt = self.req.environ['nova.context']
attach_mock.assert_called_once_with(ctxt, fake_instance, None,
None, None)
get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
want_objects=True,
expected_attrs=None)
@mock.patch.object(compute_api.API, 'get')
@mock.patch.object(compute_api.API, 'attach_interface')
def test_attach_interface_no_more_fixed_ips(self,
attach_mock,
get_mock):
fake_instance = objects.Instance(uuid=FAKE_UUID1)
get_mock.return_value = fake_instance
attach_mock.side_effect = exception.NoMoreFixedIps(
net=FAKE_NET_ID1)
body = {}
self.assertRaises(exc.HTTPBadRequest,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
ctxt = self.req.environ['nova.context']
attach_mock.assert_called_once_with(ctxt, fake_instance, None,
None, None)
get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
want_objects=True,
expected_attrs=None)
def _test_attach_interface_with_invalid_parameter(self, param):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
body = {'interface_attachment': param}
self.assertRaises(exception.ValidationError,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
def test_attach_interface_instance_with_non_uuid_net_id(self):
param = {'net_id': 'non_uuid'}
self._test_attach_interface_with_invalid_parameter(param)
def test_attach_interface_instance_with_non_uuid_port_id(self):
param = {'port_id': 'non_uuid'}
self._test_attach_interface_with_invalid_parameter(param)
def test_attach_interface_instance_with_non_array_fixed_ips(self):
param = {'fixed_ips': 'non_array'}
self._test_attach_interface_with_invalid_parameter(param)
class InterfaceAttachTestsV2(InterfaceAttachTestsV21):
controller_cls = attach_interfaces_v2.InterfaceAttachmentController
validate_exc = exc.HTTPBadRequest
in_use_exc = exc.HTTPBadRequest
def test_attach_interface_instance_with_non_uuid_net_id(self):
pass
def test_attach_interface_instance_with_non_uuid_port_id(self):
pass
def test_attach_interface_instance_with_non_array_fixed_ips(self):
pass
class AttachInterfacesPolicyEnforcementv21(test.NoDBTestCase):
def setUp(self):
super(AttachInterfacesPolicyEnforcementv21, self).setUp()
self.controller = \
attach_interfaces_v21.InterfaceAttachmentController()
self.req = fakes.HTTPRequest.blank('')
self.rule_name = "os_compute_api:os-attach-interfaces"
self.policy.set_rules({self.rule_name: "project:non_fake"})
def test_index_attach_interfaces_policy_failed(self):
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index, self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
def test_show_attach_interfaces_policy_failed(self):
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.show, self.req, fakes.FAKE_UUID, FAKE_PORT_ID1)
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
def test_create_attach_interfaces_policy_failed(self):
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.create, self.req, fakes.FAKE_UUID, body={})
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
def test_delete_attach_interfaces_policy_failed(self):
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.delete, self.req, fakes.FAKE_UUID, FAKE_PORT_ID1)
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
| apache-2.0 | 7,825,512,240,032,606,000 | 9,119,616,469,241,473,000 | 40 | 78 | 0.584428 | false |
marcelveldt/plugin.video.flix2kodi | lib/keyring/http.py | 13 | 1255 | """
urllib2.HTTPPasswordMgr object using the keyring, for use with the
urllib2.HTTPBasicAuthHandler.
usage:
import urllib2
handlers = [urllib2.HTTPBasicAuthHandler(PasswordMgr())]
urllib2.install_opener(handlers)
urllib2.urlopen(...)
This will prompt for a password if one is required and isn't already
in the keyring. Then, it adds it to the keyring for subsequent use.
"""
import getpass
from . import get_password, delete_password, set_password
class PasswordMgr(object):
def get_username(self, realm, authuri):
return getpass.getuser()
def add_password(self, realm, authuri, password):
user = self.get_username(realm, authuri)
set_password(realm, user, password)
def find_user_password(self, realm, authuri):
user = self.get_username(realm, authuri)
password = get_password(realm, user)
if password is None:
prompt = 'password for %(user)s@%(realm)s for '\
'%(authuri)s: ' % vars()
password = getpass.getpass(prompt)
set_password(realm, user, password)
return user, password
def clear_password(self, realm, authuri):
user = self.get_username(realm, authuri)
delete_password(realm, user)
| gpl-2.0 | -5,596,698,237,351,113,000 | -9,049,023,399,268,306,000 | 31.179487 | 68 | 0.666932 | false |
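To make the usage described in the docstring above concrete, here is a minimal sketch of wiring PasswordMgr into urllib2 (Python 2, matching the file); the URL is a placeholder and the import path assumes the package layout shown in the record.

import urllib2

from keyring.http import PasswordMgr

# Build an opener whose basic-auth handler asks the keyring for credentials.
opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(PasswordMgr()))
urllib2.install_opener(opener)

# The first request prompts for the password and stores it in the keyring;
# later requests reuse the stored credential.
response = urllib2.urlopen("https://intranet.example.com/protected/report")
print(response.getcode())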
jmighion/ansible | lib/ansible/modules/network/nxos/nxos_static_route.py | 23 | 8490 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_static_route
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages static route configuration
description:
- Manages static route configuration
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- If no vrf is supplied, vrf is set to default.
- If C(state=absent), the route will be removed, regardless of the
non-required parameters.
options:
prefix:
description:
- Destination prefix of static route.
required: true
next_hop:
description:
- Next hop address or interface of static route.
If interface, it must be the fully-qualified interface name.
required: true
vrf:
description:
- VRF for static route.
required: false
default: default
tag:
description:
- Route tag value (numeric).
required: false
default: null
route_name:
description:
- Name of the route. Used with the name parameter on the CLI.
required: false
default: null
pref:
description:
- Preference or administrative difference of route (range 1-255).
required: false
default: null
state:
description:
- Manage the state of the resource.
required: true
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_static_route:
prefix: "192.168.20.64/24"
next_hop: "3.3.3.3"
route_name: testing
pref: 100
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["ip route 192.168.20.0/24 3.3.3.3 name testing 100"]
'''
import re
from ansible.module_utils.nxos import get_config, load_config
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
def reconcile_candidate(module, candidate, prefix):
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
state = module.params['state']
set_command = set_route_command(module, prefix)
remove_command = remove_route_command(module, prefix)
parents = []
commands = []
if module.params['vrf'] == 'default':
config = netcfg.get_section(set_command)
if config and state == 'absent':
commands = [remove_command]
elif not config and state == 'present':
commands = [set_command]
else:
parents = ['vrf context {0}'.format(module.params['vrf'])]
config = netcfg.get_section(parents)
if not isinstance(config, list):
config = config.split('\n')
config = [line.strip() for line in config]
if set_command in config and state == 'absent':
commands = [remove_command]
elif set_command not in config and state == 'present':
commands = [set_command]
if commands:
candidate.add(commands, parents=parents)
def fix_prefix_to_regex(prefix):
prefix = prefix.replace('.', r'\.').replace('/', r'\/')
return prefix
def get_existing(module, prefix, warnings):
key_map = ['tag', 'pref', 'route_name', 'next_hop']
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
parents = 'vrf context {0}'.format(module.params['vrf'])
prefix_to_regex = fix_prefix_to_regex(prefix)
route_regex = r'.*ip\sroute\s{0}\s(?P<next_hop>\S+)(\sname\s(?P<route_name>\S+))?(\stag\s(?P<tag>\d+))?(\s(?P<pref>\d+))?.*'.format(prefix_to_regex)
if module.params['vrf'] == 'default':
config = str(netcfg)
else:
config = netcfg.get_section(parents)
if config:
try:
match_route = re.match(route_regex, config, re.DOTALL)
group_route = match_route.groupdict()
for key in key_map:
if key not in group_route:
group_route[key] = ''
group_route['prefix'] = prefix
group_route['vrf'] = module.params['vrf']
except (AttributeError, TypeError):
group_route = {}
else:
group_route = {}
msg = ("VRF {0} didn't exist.".format(module.params['vrf']))
if msg not in warnings:
warnings.append(msg)
return group_route
def remove_route_command(module, prefix):
return 'no ip route {0} {1}'.format(prefix, module.params['next_hop'])
def set_route_command(module, prefix):
route_cmd = 'ip route {0} {1}'.format(prefix, module.params['next_hop'])
if module.params['route_name']:
route_cmd += ' name {0}'.format(module.params['route_name'])
if module.params['tag']:
route_cmd += ' tag {0}'.format(module.params['tag'])
if module.params['pref']:
route_cmd += ' {0}'.format(module.params['pref'])
return route_cmd
def get_dotted_mask(mask):
bits = 0
for i in range(32-mask, 32):
bits |= (1 << i)
mask = ("%d.%d.%d.%d" % ((bits & 0xff000000) >> 24, (bits & 0xff0000) >> 16, (bits & 0xff00) >> 8, (bits & 0xff)))
return mask
def get_network_start(address, netmask):
address = address.split('.')
netmask = netmask.split('.')
return [str(int(address[x]) & int(netmask[x])) for x in range(0, 4)]
def network_from_string(address, mask, module):
octects = address.split('.')
if len(octects) > 4:
module.fail_json(msg='Incorrect address format.', address=address)
for octect in octects:
try:
if int(octect) < 0 or int(octect) > 255:
module.fail_json(msg='Address may contain invalid values.',
address=address)
except ValueError:
module.fail_json(msg='Address may contain non-integer values.',
address=address)
try:
if int(mask) < 0 or int(mask) > 32:
module.fail_json(msg='Incorrect mask value.', mask=mask)
except ValueError:
module.fail_json(msg='Mask may contain non-integer values.', mask=mask)
netmask = get_dotted_mask(int(mask))
return '.'.join(get_network_start(address, netmask))
def normalize_prefix(module, prefix):
splitted_prefix = prefix.split('/')
address = splitted_prefix[0]
if len(splitted_prefix) > 2:
module.fail_json(msg='Incorrect address format.', address=address)
elif len(splitted_prefix) == 2:
mask = splitted_prefix[1]
network = network_from_string(address, mask, module)
normalized_prefix = str(network) + '/' + str(mask)
else:
normalized_prefix = prefix + '/' + str(32)
return normalized_prefix
def main():
argument_spec = dict(
prefix=dict(required=True, type='str'),
next_hop=dict(required=True, type='str'),
vrf=dict(type='str', default='default'),
tag=dict(type='str'),
route_name=dict(type='str'),
pref=dict(type='str'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
prefix = normalize_prefix(module, module.params['prefix'])
candidate = CustomNetworkConfig(indent=3)
reconcile_candidate(module, candidate, prefix)
if candidate:
candidate = candidate.items_text()
load_config(module, candidate)
result['commands'] = candidate
result['changed'] = True
else:
result['commands'] = []
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,600,086,004,943,110,000 | 8,747,688,105,188,412,000 | 29.539568 | 152 | 0.625088 | false |
golismero/golismero | thirdparty_libs/geopy/format.py | 84 | 2758 | from geopy import units
# Unicode characters for symbols that appear in coordinate strings.
DEGREE = unichr(176)
PRIME = unichr(8242)
DOUBLE_PRIME = unichr(8243)
ASCII_DEGREE = ''
ASCII_PRIME = "'"
ASCII_DOUBLE_PRIME = '"'
LATIN1_DEGREE = chr(176)
HTML_DEGREE = '&deg;'
HTML_PRIME = '&prime;'
HTML_DOUBLE_PRIME = '&Prime;'
XML_DECIMAL_DEGREE = '&#176;'
XML_DECIMAL_PRIME = '&#8242;'
XML_DECIMAL_DOUBLE_PRIME = '&#8243;'
XML_HEX_DEGREE = '&xB0;'
XML_HEX_PRIME = '&x2032;'
XML_HEX_DOUBLE_PRIME = '&x2033;'
ABBR_DEGREE = 'deg'
ABBR_ARCMIN = 'arcmin'
ABBR_ARCSEC = 'arcsec'
DEGREES_FORMAT = "%(degrees)d%(deg)s %(minutes)d%(arcmin)s %(seconds)s%(arcsec)s"
UNICODE_SYMBOLS = {'deg': DEGREE, 'arcmin': PRIME, 'arcsec': DOUBLE_PRIME}
ASCII_SYMBOLS = {'deg': ASCII_DEGREE, 'arcmin': ASCII_PRIME, 'arcsec': ASCII_DOUBLE_PRIME}
LATIN1_SYMBOLS = {'deg': LATIN1_DEGREE, 'arcmin': ASCII_PRIME, 'arcsec': ASCII_DOUBLE_PRIME}
HTML_SYMBOLS = {'deg': HTML_DEGREE, 'arcmin': HTML_PRIME, 'arcsec': HTML_DOUBLE_PRIME}
XML_SYMBOLS = {'deg': XML_DECIMAL_DEGREE, 'arcmin': XML_DECIMAL_PRIME, 'arcsec': XML_DECIMAL_DOUBLE_PRIME}
ABBR_SYMBOLS = {'deg': ABBR_DEGREE, 'arcmin': ABBR_ARCMIN, 'arcsec': ABBR_ARCSEC}
def format_degrees(degrees, format=DEGREES_FORMAT, symbols=ASCII_SYMBOLS):
arcminutes = units.arcminutes(degrees=degrees - int(degrees))
arcseconds = units.arcseconds(arcminutes=arcminutes - int(arcminutes))
format_dict = dict(
symbols,
degrees=degrees,
minutes=abs(arcminutes),
seconds=abs(arcseconds)
)
return format % format_dict
DISTANCE_FORMAT = "%(magnitude)s%(unit)s"
DISTANCE_UNITS = {
'km': lambda d: d,
'm': lambda d: units.meters(kilometers=d),
'mi': lambda d: units.miles(kilometers=d),
'ft': lambda d: units.feet(kilometers=d),
'nm': lambda d: units.nautical(kilometers=d),
'nmi': lambda d: units.nautical(kilometers=d)
}
def format_distance(kilometers, format=DISTANCE_FORMAT, unit='km'):
magnitude = DISTANCE_UNITS[unit](kilometers)
return format % {'magnitude': magnitude, 'unit': unit}
_DIRECTIONS = [
('north', 'N'),
('north by east', 'NbE'),
('north-northeast', 'NNE'),
('northeast by north', 'NEbN'),
('northeast', 'NE'),
('northeast by east', 'NEbE'),
('east-northeast', 'ENE'),
('east by north', 'EbN'),
('east', 'E'),
('east by south', 'EbS'),
('east-southeast', 'ESE'),
('southeast by east', 'SEbE'),
('southeast', 'SE'),
('southeast by south', 'SEbS'),
]
DIRECTIONS, DIRECTIONS_ABBR = zip(*_DIRECTIONS)
ANGLE_DIRECTIONS = dict((n * 11.25, d) for n, d in enumerate(DIRECTIONS))
ANGLE_DIRECTIONS_ABBR = dict((n * 11.25, d) for n, d in enumerate(DIRECTIONS_ABBR))
def format_direction(degrees):
    pass
| gpl-2.0 | -8,377,402,419,444,308,000 | -4,374,378,924,851,149,000 | 34.831169 | 106 | 0.655185 | false
matmutant/sl4a | python/src/Demo/xml/roundtrip.py | 37 | 1201 | """
A simple demo that reads in an XML document and spits out an equivalent,
but not necessarily identical, document.
"""
import sys, string
from xml.sax import saxutils, handler, make_parser
# --- The ContentHandler
class ContentGenerator(handler.ContentHandler):
def __init__(self, out = sys.stdout):
handler.ContentHandler.__init__(self)
self._out = out
# ContentHandler methods
def startDocument(self):
self._out.write('<?xml version="1.0" encoding="iso-8859-1"?>\n')
def startElement(self, name, attrs):
self._out.write('<' + name)
for (name, value) in attrs.items():
self._out.write(' %s="%s"' % (name, saxutils.escape(value)))
self._out.write('>')
def endElement(self, name):
self._out.write('</%s>' % name)
def characters(self, content):
self._out.write(saxutils.escape(content))
def ignorableWhitespace(self, content):
self._out.write(content)
def processingInstruction(self, target, data):
self._out.write('<?%s %s?>' % (target, data))
# --- The main program
parser = make_parser()
parser.setContentHandler(ContentGenerator())
parser.parse(sys.argv[1])
| apache-2.0 | -6,420,708,665,451,836,000 | -7,732,349,964,583,107,000 | 25.688889 | 72 | 0.636969 | false |
mobify/python-driver | cassandra/decoder.py | 10 | 1632 | # Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import wraps
import warnings
import cassandra.query
import logging
log = logging.getLogger(__name__)
_have_warned = False
def warn_once(f):
@wraps(f)
def new_f(*args, **kwargs):
global _have_warned
if not _have_warned:
msg = "cassandra.decoder.%s has moved to cassandra.query.%s" % (f.__name__, f.__name__)
warnings.warn(msg, DeprecationWarning)
log.warning(msg)
_have_warned = True
return f(*args, **kwargs)
return new_f
tuple_factory = warn_once(cassandra.query.tuple_factory)
"""
Deprecated: use :meth:`cassandra.query.tuple_factory()`
"""
named_tuple_factory = warn_once(cassandra.query.named_tuple_factory)
"""
Deprecated: use :meth:`cassandra.query.named_tuple_factory()`
"""
dict_factory = warn_once(cassandra.query.dict_factory)
"""
Deprecated: use :meth:`cassandra.query.dict_factory()`
"""
ordered_dict_factory = warn_once(cassandra.query.ordered_dict_factory)
"""
Deprecated: use :meth:`cassandra.query.ordered_dict_factory()`
"""
| apache-2.0 | -8,637,745,282,531,957,000 | 3,086,064,962,202,758,000 | 27.137931 | 99 | 0.702819 | false |
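The decoder module above is essentially a deprecation shim: each public name wraps the relocated implementation so that the first call through the old import path warns once. A generic sketch of that pattern follows; the names are made up for illustration.

import warnings
from functools import wraps

def moved(new_func, old_path, new_path):
    """Wrap new_func so the first call through the old path warns once."""
    state = {"warned": False}

    @wraps(new_func)
    def shim(*args, **kwargs):
        if not state["warned"]:
            warnings.warn("%s has moved to %s" % (old_path, new_path),
                          DeprecationWarning)
            state["warned"] = True
        return new_func(*args, **kwargs)
    return shim

# e.g. old_module.dict_factory = moved(new_module.dict_factory,
#                                      "old_module.dict_factory",
#                                      "new_module.dict_factory")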
abhijitbangera/ecommerce | src/products/models.py | 1 | 4589 | from django.db import models
from django.core.urlresolvers import reverse
from django.db.models.signals import post_save
from django.utils.text import slugify
from django.utils.safestring import mark_safe
# Create your models here.
class ProductQuerySet(models.query.QuerySet):
def active(self):
return self.filter(active=True)
class ProductManager(models.Manager):
def get_queryset(self):
return ProductQuerySet(self.model, using=self._db)
def all(self,*args,**kwargs):
return self.get_queryset().active()
def get_related(self,instance):
products_one=self.get_queryset().filter(categories__in=instance.categories.all())
products_two=self.get_queryset().filter(default=instance.default)
qs=(products_one|products_two).exclude(id=instance.id).distinct()
return qs
class Product(models.Model):
title =models.CharField(max_length=120)
description=models.TextField(blank=True,null=True)
price=models.DecimalField(decimal_places=2,max_digits=20)
active=models.BooleanField(default=True)
categories=models.ManyToManyField('Category',blank=True)
default=models.ForeignKey('Category',related_name='default_category',null=True,blank=True)
objects=ProductManager()
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("product_details",kwargs={"pk":self.pk})
# def get_image_url(self):
# img=self.productimage_set.first()
# if img:
# return img.image.url
# return img
#slug
#inventory
class Variation(models.Model):
product=models.ForeignKey(Product)
title=models.CharField(max_length=120)
price=models.DecimalField(decimal_places=2,max_digits=20)
sale_price=models.DecimalField(decimal_places=2,max_digits=20,null=True,blank=True)
active=models.BooleanField(default=True)
    inventory=models.IntegerField(null=True,blank=True)  # None means an unlimited amount
def __str__(self):
return self.title
def get_price(self):
if self.sale_price is not None:
return self.sale_price
else:
return self.price
def get_html_price(self):
if self.sale_price is not None:
html_text="<span class='sale-price'>%s</span> <span class='og-price'>%s</span>" %(self.sale_price,self.price)
else:
html_text="<span class='price'>%s</span>" %(self.price)
return mark_safe(html_text)
def get_absolute_url(self):
return self.product.get_absolute_url()
def add_to_cart(self):
return "%s?item=%s&qty=1" %(reverse("cart"),self.id)
def remove_from_cart(self):
return "%s?item=%s&qty=1&delete=True" %(reverse("cart"),self.id)
def product_post_saved_receiver(sender,instance,created,*args,**kwargs):
print(sender)
product=instance
variations=product.variation_set.all() #variations=Variation.objects.filter(product=Product)
if variations.count()==0:
new_var=Variation()
new_var.product=product
new_var.title="Default"
new_var.price=product.price
new_var.save()
print(created)
post_save.connect(product_post_saved_receiver,sender=Product)
def image_upload_to(instance,filename):
title=instance.product.title
slug=slugify(title)
file_extension=filename.split(".")[1]
new_filename="%s.%s" %(instance.id,file_extension)
return "products/%s/%s" %(slug,new_filename)
#Product Image
class ProductImage(models.Model):
product=models.ForeignKey(Product)
image=models.ImageField(upload_to=image_upload_to)
def __str__(self):
return self.product.title
class Category(models.Model):
title=models.CharField(max_length=120,unique=True)
slug=models.SlugField(unique=True)
description=models.TextField(null=True,blank=True)
active=models.BooleanField(default=True)
timestamp=models.DateTimeField(auto_now_add=True,auto_now=False)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("category_detail",kwargs={"slug": self.slug})
def image_upload_to_featured(instance,filename):
title=instance.product.title
slug=slugify(title)
file_extension=filename.split(".")[1]
new_filename="%s.%s" %(instance.id,file_extension)
return "products/%s/featured/%s" %(slug,new_filename)
class ProductFeatured(models.Model):
product=models.ForeignKey(Product)
image=models.ImageField(upload_to=image_upload_to_featured)
title=models.CharField(max_length=120,null=True,blank=True)
text=models.CharField(max_length=220,null=True,blank=True)
text_right=models.BooleanField(default=False)
text_css_color=models.CharField(max_length=6,null=True,blank=True)
show_price=models.BooleanField(default=False)
make_image_background=models.BooleanField(default=False)
active=models.BooleanField(default=True)
def __str__(self):
        return self.product.title
| mit | -7,034,631,537,040,636,000 | -1,573,012,372,804,171,500 | 31.323944 | 112 | 0.755066 | false
elijah513/django | django/core/serializers/pyyaml.py | 439 | 2843 | """
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
import collections
import decimal
import sys
from io import StringIO
import yaml
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.db import models
from django.utils import six
# Use the C (faster) implementation if possible
try:
from yaml import CSafeLoader as SafeLoader
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
class DjangoSafeDumper(SafeDumper):
def represent_decimal(self, data):
return self.represent_scalar('tag:yaml.org,2002:str', str(data))
def represent_ordered_dict(self, data):
return self.represent_mapping('tag:yaml.org,2002:map', data.items())
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
DjangoSafeDumper.add_representer(collections.OrderedDict, DjangoSafeDumper.represent_ordered_dict)
class Serializer(PythonSerializer):
"""
Convert a queryset to YAML.
"""
internal_use_only = False
def handle_field(self, obj, field):
# A nasty special case: base YAML doesn't support serialization of time
# types (as opposed to dates or datetimes, which it does support). Since
# we want to use the "safe" serializer for better interoperability, we
# need to do something with those pesky times. Converting 'em to strings
# isn't perfect, but it's better than a "!!python/time" type which would
# halt deserialization under any other language.
if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
self._current[field.name] = str(getattr(obj, field.name))
else:
super(Serializer, self).handle_field(obj, field)
def end_serialization(self):
yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
def getvalue(self):
# Grand-parent super
return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of YAML data.
"""
if isinstance(stream_or_string, bytes):
stream_or_string = stream_or_string.decode('utf-8')
if isinstance(stream_or_string, six.string_types):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
try:
for obj in PythonDeserializer(yaml.load(stream, Loader=SafeLoader), **options):
yield obj
except GeneratorExit:
raise
except Exception as e:
# Map to deserializer error
six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
| bsd-3-clause | 1,094,514,417,292,426,100 | -6,020,783,172,976,550,000 | 32.845238 | 98 | 0.704537 | false |
suiyuan2009/tensorflow | tensorflow/python/debug/examples/debug_errors.py | 150 | 2655 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of debugging TensorFlow runtime errors using tfdbg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python import debug as tf_debug
def main(_):
sess = tf.Session()
# Construct the TensorFlow network.
ph_float = tf.placeholder(tf.float32, name="ph_float")
x = tf.transpose(ph_float, name="x")
v = tf.Variable(np.array([[-2.0], [-3.0], [6.0]], dtype=np.float32), name="v")
m = tf.constant(
np.array([[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]]),
dtype=tf.float32,
name="m")
y = tf.matmul(m, x, name="y")
z = tf.matmul(m, v, name="z")
if FLAGS.debug:
sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type=FLAGS.ui_type)
if FLAGS.error == "shape_mismatch":
print(sess.run(y, feed_dict={ph_float: np.array([[0.0], [1.0], [2.0]])}))
elif FLAGS.error == "uninitialized_variable":
print(sess.run(z))
elif FLAGS.error == "no_error":
print(sess.run(y, feed_dict={ph_float: np.array([[0.0, 1.0, 2.0]])}))
else:
raise ValueError("Unrecognized error type: " + FLAGS.error)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--error",
type=str,
default="shape_mismatch",
help="""\
Type of the error to generate (shape_mismatch | uninitialized_variable |
no_error).\
""")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--debug",
type="bool",
nargs="?",
const=True,
default=False,
help="Use debugger to track down bad values during training")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 | 8,469,840,889,044,540,000 | 9,129,050,204,459,562,000 | 32.1875 | 80 | 0.636535 | false |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/shelve.py | 34 | 8527 | """Manage shelves of pickled objects.
A "shelf" is a persistent, dictionary-like object. The difference
with dbm databases is that the values (not the keys!) in a shelf can
be essentially arbitrary Python objects -- anything that the "pickle"
module can handle. This includes most class instances, recursive data
types, and objects containing lots of shared sub-objects. The keys
are ordinary strings.
To summarize the interface (key is a string, data is an arbitrary
object):
import shelve
d = shelve.open(filename) # open, with (g)dbm filename -- no suffix
d[key] = data # store data at key (overwrites old data if
# using an existing key)
data = d[key] # retrieve a COPY of the data at key (raise
# KeyError if no such key) -- NOTE that this
# access returns a *copy* of the entry!
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = key in d # true if the key exists
list = d.keys() # a list of all existing keys (slow!)
d.close() # close it
Dependent on the implementation, closing a persistent dictionary may
or may not be necessary to flush changes to disk.
Normally, d[key] returns a COPY of the entry. This needs care when
mutable entries are mutated: for example, if d[key] is a list,
d[key].append(anitem)
does NOT modify the entry d[key] itself, as stored in the persistent
mapping -- it only modifies the copy, which is then immediately
discarded, so that the append has NO effect whatsoever. To append an
item to d[key] in a way that will affect the persistent mapping, use:
data = d[key]
data.append(anitem)
d[key] = data
To avoid the problem with mutable entries, you may pass the keyword
argument writeback=True in the call to shelve.open. When you use:
d = shelve.open(filename, writeback=True)
then d keeps a cache of all entries you access, and writes them all back
to the persistent mapping when you call d.close(). This ensures that
such usage as d[key].append(anitem) works as intended.
However, using keyword argument writeback=True may consume vast amount
of memory for the cache, and it may make d.close() very slow, if you
access many of d's entries after opening it in this way: d has no way to
check which of the entries you access are mutable and/or which ones you
actually mutate, so it must cache, and write back at close, all of the
entries that you access. You can call d.sync() to write back all the
entries in the cache, and empty the cache (d.sync() also synchronizes
the persistent dictionary on disk, if feasible).
"""
from pickle import Pickler, Unpickler
from io import BytesIO
import collections.abc
__all__ = ["Shelf", "BsdDbShelf", "DbfilenameShelf", "open"]
class _ClosedDict(collections.abc.MutableMapping):
'Marker for a closed dict. Access attempts raise a ValueError.'
def closed(self, *args):
raise ValueError('invalid operation on closed shelf')
__iter__ = __len__ = __getitem__ = __setitem__ = __delitem__ = keys = closed
def __repr__(self):
return '<Closed Dictionary>'
class Shelf(collections.abc.MutableMapping):
"""Base class for shelf implementations.
This is initialized with a dictionary-like object.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False,
keyencoding="utf-8"):
self.dict = dict
if protocol is None:
protocol = 3
self._protocol = protocol
self.writeback = writeback
self.cache = {}
self.keyencoding = keyencoding
def __iter__(self):
for k in self.dict.keys():
yield k.decode(self.keyencoding)
def __len__(self):
return len(self.dict)
def __contains__(self, key):
return key.encode(self.keyencoding) in self.dict
def get(self, key, default=None):
if key.encode(self.keyencoding) in self.dict:
return self[key]
return default
def __getitem__(self, key):
try:
value = self.cache[key]
except KeyError:
f = BytesIO(self.dict[key.encode(self.keyencoding)])
value = Unpickler(f).load()
if self.writeback:
self.cache[key] = value
return value
def __setitem__(self, key, value):
if self.writeback:
self.cache[key] = value
f = BytesIO()
p = Pickler(f, self._protocol)
p.dump(value)
self.dict[key.encode(self.keyencoding)] = f.getvalue()
def __delitem__(self, key):
del self.dict[key.encode(self.keyencoding)]
try:
del self.cache[key]
except KeyError:
pass
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
if self.dict is None:
return
try:
self.sync()
try:
self.dict.close()
except AttributeError:
pass
finally:
# Catch errors that may happen when close is called from __del__
# because CPython is in interpreter shutdown.
try:
self.dict = _ClosedDict()
except:
self.dict = None
def __del__(self):
if not hasattr(self, 'writeback'):
# __init__ didn't succeed, so don't bother closing
# see http://bugs.python.org/issue1339007 for details
return
self.close()
def sync(self):
if self.writeback and self.cache:
self.writeback = False
for key, entry in self.cache.items():
self[key] = entry
self.writeback = True
self.cache = {}
if hasattr(self.dict, 'sync'):
self.dict.sync()
class BsdDbShelf(Shelf):
"""Shelf implementation using the "BSD" db interface.
This adds methods first(), next(), previous(), last() and
set_location() that have no counterpart in [g]dbm databases.
The actual database must be opened using one of the "bsddb"
modules "open" routines (i.e. bsddb.hashopen, bsddb.btopen or
bsddb.rnopen) and passed to the constructor.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False,
keyencoding="utf-8"):
Shelf.__init__(self, dict, protocol, writeback, keyencoding)
def set_location(self, key):
(key, value) = self.dict.set_location(key)
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def next(self):
(key, value) = next(self.dict)
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def previous(self):
(key, value) = self.dict.previous()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def first(self):
(key, value) = self.dict.first()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def last(self):
(key, value) = self.dict.last()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
class DbfilenameShelf(Shelf):
"""Shelf implementation using the "dbm" generic dbm interface.
This is initialized with the filename for the dbm database.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, filename, flag='c', protocol=None, writeback=False):
import dbm
Shelf.__init__(self, dbm.open(filename, flag), protocol, writeback)
def open(filename, flag='c', protocol=None, writeback=False):
"""Open a persistent dictionary for reading and writing.
The filename parameter is the base filename for the underlying
database. As a side-effect, an extension may be added to the
filename and more than one file may be created. The optional flag
parameter has the same interpretation as the flag parameter of
dbm.open(). The optional protocol parameter specifies the
version of the pickle protocol.
See the module's __doc__ string for an overview of the interface.
"""
return DbfilenameShelf(filename, flag, protocol, writeback)
| apache-2.0 | 5,951,117,210,223,876,000 | 3,137,299,233,687,346,000 | 34.090535 | 80 | 0.628826 | false |
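A short, self-contained demonstration of the writeback caveat described in the module docstring above; the shelf file names are placeholders.

import shelve

# Without writeback, mutating a retrieved object does not persist the change.
d = shelve.open("/tmp/demo_shelf")
d["items"] = []
d["items"].append("lost")        # mutates a temporary copy only
print(d["items"])                # -> []

data = d["items"]                # explicit read-modify-write does persist
data.append("kept")
d["items"] = data
print(d["items"])                # -> ['kept']
d.close()

# With writeback=True the cache catches the mutation and sync()/close() flushes it.
d = shelve.open("/tmp/demo_shelf_wb", writeback=True)
d["items"] = []
d["items"].append("kept too")
d.sync()
print(d["items"])                # -> ['kept too']
d.close()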
837468220/python-for-android | python-modules/twisted/twisted/test/test_doc.py | 99 | 3683 | from twisted.trial import unittest
import inspect, glob, os
from os import path
from twisted.python import reflect
import twisted
def errorInFile(f, line=17, name=''):
"""Return a filename formatted so emacs will recognize it as an error point
@param line: Line number in file. Defaults to 17 because that's about how
long the copyright headers are.
"""
return '%s:%d:%s' % (f, line, name)
# return 'File "%s", line %d, in %s' % (f, line, name)
class DocCoverage(unittest.TestCase):
def setUp(self):
remove = len(os.path.dirname(os.path.dirname(twisted.__file__)))+1
def visit(dirlist, directory, files):
if '__init__.py' in files:
d = directory[remove:].replace('/','.')
dirlist.append(d)
self.packageNames = []
os.path.walk(os.path.dirname(twisted.__file__),
visit, self.packageNames)
def testModules(self):
"""Looking for docstrings in all modules."""
docless = []
for packageName in self.packageNames:
if packageName in ('twisted.test',):
# because some stuff in here behaves oddly when imported
continue
try:
package = reflect.namedModule(packageName)
except ImportError, e:
# This is testing doc coverage, not importability.
# (Really, I don't want to deal with the fact that I don't
# have pyserial installed.)
# print e
pass
else:
docless.extend(self.modulesInPackage(packageName, package))
self.failIf(docless, "No docstrings in module files:\n"
"%s" % ('\n'.join(map(errorInFile, docless)),))
def modulesInPackage(self, packageName, package):
docless = []
directory = path.dirname(package.__file__)
for modfile in glob.glob(path.join(directory, '*.py')):
moduleName = inspect.getmodulename(modfile)
if moduleName == '__init__':
# These are tested by test_packages.
continue
elif moduleName in ('spelunk_gnome','gtkmanhole'):
# argh special case pygtk evil argh. How does epydoc deal
# with this?
continue
try:
module = reflect.namedModule('.'.join([packageName,
moduleName]))
except Exception, e:
# print moduleName, "misbehaved:", e
pass
else:
if not inspect.getdoc(module):
docless.append(modfile)
return docless
def testPackages(self):
"""Looking for docstrings in all packages."""
docless = []
for packageName in self.packageNames:
try:
package = reflect.namedModule(packageName)
except Exception, e:
# This is testing doc coverage, not importability.
# (Really, I don't want to deal with the fact that I don't
# have pyserial installed.)
# print e
pass
else:
if not inspect.getdoc(package):
docless.append(package.__file__.replace('.pyc','.py'))
self.failIf(docless, "No docstrings for package files\n"
"%s" % ('\n'.join(map(errorInFile, docless),)))
# This test takes a while and doesn't come close to passing. :(
testModules.skip = "Activate me when you feel like writing docstrings, and fixing GTK crashing bugs."
| apache-2.0 | -6,734,098,438,976,674,000 | 7,503,486,755,985,714,000 | 39.032609 | 105 | 0.547108 | false |
tencrance/profiling | profiling/viewer.py | 5 | 20846 | # -*- coding: utf-8 -*-
"""
profiling.viewer
~~~~~~~~~~~~~~~~
A text user interface application which inspects statistics. To run it
easily do:
.. sourcecode:: console
$ python -m profiling view SOURCE
::
viewer = StatisticsViewer()
loop = viewer.loop()
loop.run()
"""
from __future__ import absolute_import
from collections import deque
import urwid
from urwid import connect_signal as on
from . import sortkeys
__all__ = ['StatisticsTable', 'StatisticsViewer']
class Formatter(object):
def _markup(get_string, get_attr=None):
def markup(self, *args, **kwargs):
string = get_string(self, *args, **kwargs)
if get_attr is None:
return string
attr = get_attr(self, *args, **kwargs)
return (attr, string)
return markup
_numeric = {'align': 'right', 'wrap': 'clip'}
def _make_text(get_markup, **text_kwargs):
def make_text(self, *args, **kwargs):
markup = get_markup(self, *args, **kwargs)
return urwid.Text(markup, **text_kwargs)
return make_text
# percent
def format_percent(self, ratio, denom=1):
try:
ratio /= denom
except ZeroDivisionError:
ratio = 0
ratio = round(ratio, 4)
if ratio >= 1:
precision = 0
elif ratio >= 0.1:
precision = 1
else:
precision = 2
return ('{:.' + str(precision) + '%}').format(ratio)
def attr_ratio(self, ratio, denom=1):
try:
ratio /= denom
except ZeroDivisionError:
ratio = 0
if ratio > 0.9:
return 'danger'
elif ratio > 0.7:
return 'caution'
elif ratio > 0.3:
return 'warning'
elif ratio > 0.1:
return 'notice'
elif ratio <= 0:
return 'zero'
markup_percent = _markup(format_percent, attr_ratio)
make_percent_text = _make_text(markup_percent, **_numeric)
# int
def format_int(self, num):
return '{:.0f}'.format(num)
def attr_int(self, num):
return None if num else 'zero'
markup_int = _markup(format_int, attr_int)
make_int_text = _make_text(markup_int, **_numeric)
# int or n/a
def format_int_or_na(self, num):
if not num:
return 'n/a'
return self.format_int(num)
markup_int_or_na = _markup(format_int_or_na, attr_int)
make_int_or_na_text = _make_text(markup_int_or_na, **_numeric)
# time
def format_time(self, sec):
if sec == 0:
return '0'
elif sec < 1:
return '{:,.0f}'.format(sec * 1e6).replace(',', '.')
else:
return '{:.2f}s'.format(sec)
def attr_time(self, sec):
if sec == 0:
return 'zero'
elif sec < 1:
return 'usec'
else:
return 'sec'
markup_time = _markup(format_time, attr_time)
make_time_text = _make_text(markup_time, **_numeric)
# stat
def markup_stat(self, stat):
if stat.name:
loc = '({0}:{1})'.format(stat.module or stat.filename, stat.lineno)
return [('name', stat.name), ' ', ('loc', loc)]
else:
return ('loc', stat.module or stat.filename)
make_stat_text = _make_text(markup_stat, wrap='clip')
del _markup
del _make_text
fmt = Formatter()
class StatWidget(urwid.TreeWidget):
signals = ['expanded', 'collapsed']
icon_chars = ('+', '-', ' ') # collapsed, expanded, leaf
def __init__(self, node):
super(StatWidget, self).__init__(node)
self._w = urwid.AttrWrap(self._w, None, StatisticsViewer.focus_map)
@property
def expanded(self):
return self._expanded
@expanded.setter
def expanded(self, expanded):
in_init = not hasattr(self, 'expanded')
self._expanded = expanded
if in_init:
return
if expanded:
urwid.emit_signal(self, 'expanded')
else:
urwid.emit_signal(self, 'collapsed')
def selectable(self):
return True
def load_inner_widget(self):
node = self.get_node()
stat = node.get_value()
stats = node.get_root().get_value()
return StatisticsTable.make_columns([
fmt.make_stat_text(stat),
fmt.make_percent_text(stat.total_time, stats.cpu_time),
fmt.make_percent_text(stat.own_time, stats.cpu_time),
fmt.make_int_or_na_text(stat.calls),
fmt.make_time_text(stat.total_time),
fmt.make_time_text(stat.total_time_per_call),
fmt.make_time_text(stat.own_time),
fmt.make_time_text(stat.own_time_per_call)])
def get_indented_widget(self):
icon = self.get_mark()
widget = self.get_inner_widget()
node = self.get_node()
widget = urwid.Columns([('fixed', 1, icon), widget], 1)
indent = (node.get_depth() - 1)
widget = urwid.Padding(widget, left=indent)
return widget
def get_mark(self):
if self.is_leaf:
char = self.icon_chars[2]
else:
char = self.icon_chars[int(self.expanded)]
return urwid.SelectableIcon(('mark', char), 0)
def update_mark(self):
widget = self._w.base_widget
try:
widget.widget_list[0] = self.get_mark()
except (AttributeError, TypeError):
pass
def update_expanded_icon(self):
self.update_mark()
def expand(self):
self.expanded = True
self.update_mark()
def collapse(self):
self.expanded = False
self.update_mark()
def keypress(self, size, key):
command = self._command_map[key]
if command == urwid.ACTIVATE:
key = '-' if self.expanded else '+'
elif command == urwid.CURSOR_RIGHT:
key = '+'
elif self.expanded and command == urwid.CURSOR_LEFT:
key = '-'
return super(StatWidget, self).keypress(size, key)
class EmptyWidget(urwid.Widget):
"""A widget which doesn't render anything."""
def __init__(self, rows=0):
super(EmptyWidget, self).__init__()
self._rows = rows
def rows(self, size, focus=False):
return self._rows
def render(self, size, focus=False):
return urwid.SolidCanvas(' ', size[0], self.rows(size, focus))
class StatisticsWidget(StatWidget):
def load_inner_widget(self):
return EmptyWidget()
def get_indented_widget(self):
return self.get_inner_widget()
def get_mark(self):
raise TypeError('Statistics widget has no mark')
def update(self):
pass
def unexpand(self):
pass
class StatNodeBase(urwid.TreeNode):
def __init__(self, stat=None, parent=None, key=None, depth=None,
table=None):
super(StatNodeBase, self).__init__(stat, parent, key, depth)
self.table = table
def get_focus(self):
widget, focus = super(StatNodeBase, self).get_focus()
if self.table is not None:
self.table.walker.set_focus(self)
return widget, focus
def get_widget(self, reload=False):
if self._widget is None or reload:
self._widget = self.load_widget()
self.setup_widget(self._widget)
return self._widget
def load_widget(self):
return self._widget_class(self)
def setup_widget(self, widget):
if self.table is None:
return
stat = self.get_value()
if hash(stat) in self.table._expanded_stat_hashes:
widget.expand()
class NullStatWidget(StatWidget):
def __init__(self, node):
urwid.TreeWidget.__init__(self, node)
def get_indented_widget(self):
widget = urwid.Text(('weak', '- Not Available -'), align='center')
widget = urwid.Filler(widget)
widget = urwid.BoxAdapter(widget, 3)
return widget
class NullStatNode(StatNodeBase):
_widget_class = NullStatWidget
class LeafStatNode(StatNodeBase):
_widget_class = StatWidget
class StatNode(StatNodeBase, urwid.ParentNode):
def total_usage(self):
stat = self.get_value()
stats = self.get_root().get_value()
try:
return stat.total_time / stats.cpu_time
except AttributeError:
return 0.0
def load_widget(self):
if self.is_root():
widget_class = StatisticsWidget
else:
widget_class = StatWidget
widget = widget_class(self)
widget.collapse()
return widget
def setup_widget(self, widget):
super(StatNode, self).setup_widget(widget)
if self.get_depth() == 0:
# just expand the root node
widget.expand()
return
table = self.table
if table is None:
return
on(widget, 'expanded', table._widget_expanded, widget)
on(widget, 'collapsed', table._widget_collapsed, widget)
def load_child_keys(self):
stat = self.get_value()
if stat is None:
return ()
return stat.sorted(self.table.order)
def load_child_node(self, stat):
depth = self.get_depth() + 1
node_class = StatNode if len(stat) else LeafStatNode
return node_class(stat, self, stat, depth, self.table)
class StatisticsListBox(urwid.TreeListBox):
signals = ['focus_changed']
def change_focus(self, *args, **kwargs):
super(StatisticsListBox, self).change_focus(*args, **kwargs)
focus = self.get_focus()
urwid.emit_signal(self, 'focus_changed', focus)
class StatisticsWalker(urwid.TreeWalker):
signals = ['focus_changed']
def set_focus(self, focus):
super(StatisticsWalker, self).set_focus(focus)
urwid.emit_signal(self, 'focus_changed', focus)
class StatisticsTable(urwid.WidgetWrap):
#: The column declarations.
columns = [
# name, align, width, order
('FUNCTION', 'left', ('weight', 1), sortkeys.by_function),
('TOTAL%', 'right', (6,), None),
('OWN%', 'right', (6,), None),
('CALLS', 'right', (6,), sortkeys.by_calls),
('TOTAL', 'right', (10,), sortkeys.by_total_time),
('/CALL', 'right', (6,), sortkeys.by_total_time_per_call),
('OWN', 'right', (10,), sortkeys.by_own_time),
('/CALL', 'right', (6,), sortkeys.by_own_time_per_call),
]
#: The initial order.
order = sortkeys.by_total_time
#: Whether the viewer is active.
active = False
#: Whether the viewer is paused.
paused = False
title = None
stats = None
time = None
def __init__(self):
cls = type(self)
self._expanded_stat_hashes = set()
self.walker = StatisticsWalker(NullStatNode())
on(self.walker, 'focus_changed', self._walker_focus_changed)
tbody = StatisticsListBox(self.walker)
thead = urwid.AttrMap(cls.make_columns([
urwid.Text(name, align, 'clip')
for name, align, __, __ in self.columns
]), None)
header = urwid.Columns([])
widget = urwid.Frame(tbody, urwid.Pile([header, thead]))
super(StatisticsTable, self).__init__(widget)
self.update_frame()
@classmethod
def make_columns(cls, column_widgets):
widget_list = []
widths = (width for __, __, width, __ in cls.columns)
for width, widget in zip(widths, column_widgets):
widget_list.append(width + (widget,))
return urwid.Columns(widget_list, 1)
@property
def tbody(self):
return self._w.body
@tbody.setter
def tbody(self, body):
self._w.body = body
@property
def thead(self):
return self._w.header.contents[1][0]
@thead.setter
def thead(self, thead):
self._w.header.contents[1] = (thead, ('pack', None))
@property
def header(self):
return self._w.header.contents[0][0]
@header.setter
def header(self, header):
self._w.header.contents[0] = (header, ('pack', None))
@property
def footer(self):
return self._w.footer
@footer.setter
def footer(self, footer):
self._w.footer = footer
def get_focus(self):
return self.tbody.get_focus()
def set_focus(self, focus):
self.tbody.set_focus(focus)
def get_path(self):
"""Gets the path to the focused statistic. Each step is a hash of
statistic object.
"""
path = deque()
__, node = self.get_focus()
while not node.is_root():
stat = node.get_value()
path.appendleft(hash(stat))
node = node.get_parent()
return path
def find_node(self, node, path):
"""Finds a node by the given path from the given node."""
for hash_value in path:
if isinstance(node, LeafStatNode):
break
for stat in node.get_child_keys():
if hash(stat) == hash_value:
node = node.get_child_node(stat)
break
else:
break
return node
def get_stats(self):
return self.stats
def set_stats(self, stats, title=None, time=None):
self.stats = stats
self.title = title
self.time = time
if not self.paused:
self.activate()
self.refresh()
def sort_stats(self, order=sortkeys.by_total_time):
assert callable(order)
self.order = order
self.refresh()
def shift_order(self, delta):
orders = [order for __, __, __, order in self.columns if order]
x = orders.index(self.order)
order = orders[(x + delta) % len(orders)]
self.sort_stats(order)
def pause(self):
self.paused = True
self.update_frame()
def resume(self):
self.paused = False
try:
stats, title, time = self._pending
except AttributeError:
self.activate()
else:
del self._pending
self.set_stats(stats, title, time)
def activate(self):
self.active = True
self.update_frame()
def inactivate(self):
self.active = False
self.update_frame()
def refresh(self):
stats = self.get_stats()
node = StatNode(stats, table=self)
path = self.get_path()
node = self.find_node(node, path)
self.set_focus(node)
def update_frame(self, focus=None):
# set thead attr
if self.paused:
thead_attr = 'thead.paused'
elif not self.active:
thead_attr = 'thead.inactive'
else:
thead_attr = 'thead'
self.thead.set_attr_map({None: thead_attr})
# set sorting column in thead attr
for x, (__, __, __, order) in enumerate(self.columns):
attr = thead_attr + '.sorted' if order is self.order else None
widget = self.thead.base_widget.contents[x][0]
text, __ = widget.get_text()
widget.set_text((attr, text))
if self.paused:
return
# update header
stats = self.get_stats()
if stats is None:
return
title = self.title
time = self.time
if title or time:
if time is not None:
time_string = '{:%H:%M:%S}'.format(time)
if title and time:
markup = [('weak', title), ' ', time_string]
elif title:
markup = title
else:
markup = time_string
meta_info = urwid.Text(markup, align='right')
else:
meta_info = None
fraction_string = '({0}/{1})'.format(
fmt.format_time(stats.cpu_time),
fmt.format_time(stats.wall_time))
cpu_info = urwid.Text([
'CPU ', fmt.markup_percent(stats.cpu_usage),
' ', ('weak', fraction_string)])
# set header columns
col_opts = ('weight', 1, False)
self.header.contents = \
[(w, col_opts) for w in [cpu_info, meta_info] if w]
def focus_hotspot(self, size):
widget, __ = self.tbody.get_focus()
while widget:
node = widget.get_node()
widget.expand()
widget = widget.first_child()
self.tbody.change_focus(size, node)
def defocus(self):
__, node = self.get_focus()
self.set_focus(node.get_root())
def keypress(self, size, key):
base = super(StatisticsTable, self)
command = self._command_map[key]
if key == ']':
self.shift_order(+1)
return True
elif key == '[':
self.shift_order(-1)
return True
elif key == '>':
self.focus_hotspot(size)
return True
elif command == self._command_map['esc']:
self.defocus()
return True
elif command == self._command_map['right']:
widget, node = self.tbody.get_focus()
if widget.expanded:
heavy_widget = widget.first_child()
if heavy_widget is not None:
heavy_node = heavy_widget.get_node()
self.tbody.change_focus(size, heavy_node)
return True
elif command == self._command_map['left']:
widget, node = self.tbody.get_focus()
if not widget.expanded:
parent_node = node.get_parent()
if not parent_node.is_root():
self.tbody.change_focus(size, parent_node)
return True
elif command == self._command_map[' ']:
if self.paused:
self.resume()
else:
self.pause()
return True
return base.keypress(size, key)
# signal handlers
def _walker_focus_changed(self, focus):
self.update_frame(focus)
def _widget_expanded(self, widget):
stat = widget.get_node().get_value()
self._expanded_stat_hashes.add(hash(stat))
def _widget_collapsed(self, widget):
stat = widget.get_node().get_value()
self._expanded_stat_hashes.discard(hash(stat))
class StatisticsViewer(object):
weak_color = 'light green'
palette = [
('weak', weak_color, ''),
('focus', 'standout', '', 'standout'),
# ui
('thead', 'dark cyan, standout', '', 'standout'),
('thead.paused', 'dark red, standout', '', 'standout'),
('thead.inactive', 'brown, standout', '', 'standout'),
('mark', 'dark cyan', ''),
# risk
('danger', 'dark red', '', 'blink'),
('caution', 'light red', '', 'blink'),
('warning', 'brown', '', 'blink'),
('notice', 'dark green', '', 'blink'),
# clock
('sec', 'brown', ''),
('msec', 'dark green', ''),
('usec', '', ''),
# etc
('zero', weak_color, ''),
('name', 'bold', ''),
('loc', 'dark blue', ''),
]
# add thead.*.sorted palette entries
for entry in palette[:]:
attr = entry[0]
if attr is None or not attr.startswith('thead'):
continue
palette.append((attr + '.sorted', entry[1] + ', underline',
entry[2], entry[3] + ', underline'))
focus_map = {None: 'focus'}
focus_map.update((x[0], 'focus') for x in palette)
def unhandled_input(self, key):
if key in ('q', 'Q'):
raise urwid.ExitMainLoop()
def __init__(self):
self.table = StatisticsTable()
self.widget = urwid.Padding(self.table, right=1)
def loop(self, *args, **kwargs):
kwargs.setdefault('unhandled_input', self.unhandled_input)
loop = urwid.MainLoop(self.widget, self.palette, *args, **kwargs)
return loop
def set_stats(self, stats, title=None, time=None):
self.table.set_stats(stats, title, time)
def activate(self):
return self.table.activate()
def inactivate(self):
return self.table.inactivate()
def use_vim_command_map(self):
urwid.command_map['h'] = urwid.command_map['left']
urwid.command_map['j'] = urwid.command_map['down']
urwid.command_map['k'] = urwid.command_map['up']
urwid.command_map['l'] = urwid.command_map['right']
def use_game_command_map(self):
urwid.command_map['a'] = urwid.command_map['left']
urwid.command_map['s'] = urwid.command_map['down']
urwid.command_map['w'] = urwid.command_map['up']
urwid.command_map['d'] = urwid.command_map['right']
| bsd-3-clause | 4,430,965,880,251,224,000 | 2,140,637,383,048,723,500 | 27.912621 | 79 | 0.548115 | false |
bradleyhd/netsim | nodes_vs_routing_speed.py | 1 | 2878 | import matplotlib.pyplot as plt
import numpy as np
import math
from scipy.optimize import curve_fit
def linear(x, a, b):
return a * x + b
def quadratic(x, a, b, c):
return a * x**2 + b * x + c
def exponential(x, a, b, c):
return a * x**b + c
fig = plt.figure(num=None, figsize=(12, 8), dpi=300, facecolor='k', edgecolor='k')
xs = [[1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292]]
ys = [[0.00013309850001519408, 0.00059208550001699223, 0.002604027000003839, 0.004665461000030291, 0.014662985999962075, 0.023410306499954459, 0.041176939000251878], [0.00014861549998101964, 0.00055641999999522795, 0.002577900000005684, 0.0054275369999459144, 0.021226498000032734, 0.029786237500047719, 0.059782716000881919], [0.00012334000000180367, 0.00043368899999052246, 0.0020054734999632728, 0.005848614000001362, 0.014609930999995413, 0.019599954500336025, 0.028973604500606598], [0.00012613299999486571, 0.00044437049999146438, 0.0021501399999692694, 0.0055929929999933847, 0.019908546500118973, 0.039582631500252319, 0.054390303499531001]]
ys = np.array(ys) * 1000
def graph(i, label, color, marker, l_marker):
y = np.array(ys[i])
x = np.array(xs[i])
xl = np.linspace(np.min(x), np.max(x), 500)
popt, pcov = curve_fit(exponential, x, y)
plt.scatter(x, y, label=label, color=color, marker=marker)
plt.plot(xl, exponential(xl, *popt), color=color, linestyle=l_marker)
blue = '#5738FF'
purple = '#E747E7'
orange = '#E7A725'
green = '#A1FF47'
red = '#FF1E43'
gray = '#333333'
white = 'w'
graph(0, 'EDS5 - original graph', red, 'o', '--')
graph(1, 'N5 - original graph', purple, 's', '--')
graph(2, 'EDS5 - decision graph', blue, '^', '--')
graph(3, 'N5 - decision graph', white, 'D', '--')
ax = fig.gca()
plt.title('Effects of Node Ordering on Routing Speed', color=white)
plt.xlabel('Effective $\\vert V\/\\vert$')
plt.ylabel('Routing Time (ms)')
plt.axes().set_axis_bgcolor('black')
ax.xaxis.label.set_color(white)
ax.yaxis.label.set_color(white)
ax.tick_params(axis='x', colors=white)
ax.tick_params(axis='y', colors=white)
ax.spines['bottom'].set_color(white)
ax.spines['top'].set_color(white)
ax.spines['left'].set_color(white)
ax.spines['right'].set_color(white)
legend = plt.legend(loc=0, numpoints=1, framealpha=0.0)
legend.get_frame().set_facecolor('k')
max_x = np.max(np.array(xs))
max_y = np.max(np.array(ys))
min_x = np.min(np.array(xs))
min_y = 0 - (max_y * 0.01)
min_x = 0 - (max_x * 0.01)
max_x *= 1.01
max_y *= 1.01
plt.axes().set_xlim([min_x, max_x])
plt.axes().set_ylim([min_y, max_y])
for text in legend.get_texts():
text.set_color(white)
# plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.savefig('nodes_vs_routing_speed.png', transparent=True)
#plt.show()
| gpl-3.0 | -6,620,598,401,275,449,000 | 336,851,470,793,031,230 | 34.9875 | 649 | 0.69458 | false
marcydoty/geraldo | site/newsite/site-geraldo/appengine_django/serializer/python.py | 20 | 4799 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A Python "serializer", based on the default Django python serializer.
The only customisation is in the deserialization process which needs to take
special care to resolve the name and parent attributes of the key for each
entity and also recreate the keys for any references appropriately.
"""
from django.conf import settings
from django.core.serializers import base
from django.core.serializers import python
from django.db import models
from google.appengine.api import datastore_types
from google.appengine.ext import db
from django.utils.encoding import smart_unicode
Serializer = python.Serializer
class FakeParent(object):
"""Fake parent 'model' like object.
This class exists to allow a parent object to be provided to a new model
without having to load the parent instance itself.
"""
def __init__(self, parent_key):
self._entity = parent_key
def Deserializer(object_list, **options):
"""Deserialize simple Python objects back into Model instances.
It's expected that you pass the Python objects themselves (instead of a
stream or a string) to the constructor
"""
models.get_apps()
for d in object_list:
# Look up the model and starting build a dict of data for it.
Model = python._get_model(d["model"])
data = {}
key = resolve_key(Model._meta.module_name, d["pk"])
if key.name():
data["key_name"] = key.name()
parent = None
if key.parent():
parent = FakeParent(key.parent())
m2m_data = {}
# Handle each field
for (field_name, field_value) in d["fields"].iteritems():
if isinstance(field_value, str):
field_value = smart_unicode(
field_value, options.get("encoding",
settings.DEFAULT_CHARSET),
strings_only=True)
field = Model.properties()[field_name]
if isinstance(field, db.Reference):
# Resolve foreign key references.
data[field.name] = resolve_key(Model._meta.module_name, field_value)
if not data[field.name].name():
raise base.DeserializationError(u"Cannot load Reference with "
"unnamed key: '%s'" % field_value)
else:
data[field.name] = field.validate(field_value)
    # Create the new model instance with all its data, but no parent.
object = Model(**data)
# Now add the parent into the hidden attribute, bypassing the type checks
# in the Model's __init__ routine.
object._parent = parent
# When the deserialized object is saved our replacement DeserializedObject
# class will set object._parent to force the real parent model to be loaded
# the first time it is referenced.
yield base.DeserializedObject(object, m2m_data)
def resolve_key(model, key_data):
"""Creates a Key instance from a some data.
Args:
model: The name of the model this key is being resolved for. Only used in
the fourth case below (a plain key_name string).
key_data: The data to create a key instance from. May be in four formats:
* The str() output of a key instance. Eg. A base64 encoded string.
* The repr() output of a key instance. Eg. A string for eval().
* A list of arguments to pass to db.Key.from_path.
* A single string value, being the key_name of the instance. When this
format is used the resulting key has no parent, and is for the model
named in the model parameter.
Returns:
An instance of db.Key. If the data cannot be used to create a Key instance
an error will be raised.
"""
if isinstance(key_data, list):
# The key_data is a from_path sequence.
return db.Key.from_path(*key_data)
elif isinstance(key_data, basestring):
if key_data.find("from_path") != -1:
# key_data is encoded in repr(key) format
return eval(key_data)
else:
try:
# key_data encoded a str(key) format
return db.Key(key_data)
except datastore_types.datastore_errors.BadKeyError, e:
# Final try, assume it's a plain key name for the model.
return db.Key.from_path(model, key_data)
else:
raise base.DeserializationError(u"Invalid key data: '%s'" % key_data)
| lgpl-3.0 | -646,229,918,262,810,800 | -2,609,397,152,650,659,300 | 35.915385 | 79 | 0.684309 | false |
nnikhilsingh/terrence | tao/caching.py | 1 | 20893 | # -*- coding: utf-8 -*-
import os
import time
import hashlib
import gzip
import bz2
import re
from sqlalchemy.orm.exc import NoResultFound
from tao.database import SearchEngineResultsPage
from tao.parsing import parse_serp
from tao.outputconverter import store_serp_result
import logging
"""
GoogleScraper is a complex application and thus searching is error prone. While developing,
you may need to repeat the same searches several times and you might end up being banned by
the search engine providers. This is why all searches are cached by default.
Every SERP page is cached in a separate file. In the future, it might be more straightforward to
cache scraping jobs in archives (zip files).
What determines the uniqueness of a SERP result?
- The complete url (because in URLs search queries and params are included)
- The scrape mode: Raw Http might request different resources than a browser.
- Optionally the http headers (because different User-Agents yield different results)
Using these three pieces of information would guarantee that we cache only unique requests,
but then we couldn't read back the information of the cache files, since these parameters
are only available at runtime of the scrapers. So we have to be satisfied with the
keyword, search_engine and scrapemode as identifying params.
How does caching work on a higher level?
Assume the user interrupted a scrape job at 1000/2000 keywords, leaving quite a few
keywords still to be scraped. In that case the previously parsed 1000 results are already
stored in the database and shouldn't be added a second time.
"""
logger = logging.getLogger(__name__)
ALLOWED_COMPRESSION_ALGORITHMS = ('gz', 'bz2')
class InvalidConfigurationFileException(Exception):
"""
Used when the cache module cannot
determine the kind (compression for instance) of a
configuration file
"""
pass
class CompressedFile(object):
"""Read and write the data of a compressed file.
    Used to cache files for tao.
Supported algorithms: gz, bz2
>>> import os
>>> f = CompressedFile('/tmp/test.txt', algorithm='gz')
>>> f.write('hello world')
>>> assert os.path.exists('/tmp/test.txt.gz')
>>> f2 = CompressedFile('/tmp/test.txt.gz', algorithm='gz')
>>> assert f2.read() == 'hello world'
"""
def __init__(self, path, algorithm='gz'):
"""Create a new compressed file to read and write data to.
Args:
algorithm: Which algorithm to use.
path: A valid file path to the file to read/write. Depends
on the action called.
@todo: it would be a better approach to pass an Algorithm object instead of a string
"""
self.algorithm = algorithm
assert self.algorithm in ALLOWED_COMPRESSION_ALGORITHMS, \
            '{algo} is not a supported compression algorithm'.format(algo=self.algorithm)
if path.endswith(self.algorithm):
self.path = path
else:
self.path = '{path}.{ext}'.format(path=path, ext=algorithm)
self.readers = {
'gz': self.read_gz,
'bz2': self.read_bz2
}
self.writers = {
'gz': self.write_gz,
'bz2': self.write_bz2
}
def read_gz(self):
with gzip.open(self.path, 'rb') as f:
return f.read().decode()
def read_bz2(self):
with bz2.open(self.path, 'rb') as f:
return f.read().decode()
def write_gz(self, data):
with gzip.open(self.path, 'wb') as f:
f.write(data)
def write_bz2(self, data):
with bz2.open(self.path, 'wb') as f:
f.write(data)
def read(self):
assert os.path.exists(self.path)
return self.readers[self.algorithm]()
def write(self, data):
if not isinstance(data, bytes):
data = data.encode()
return self.writers[self.algorithm](data)
class CacheManager():
"""
Manages caching for tao.
"""
def __init__(self, config):
self.config = config
self.maybe_create_cache_dir()
def maybe_create_cache_dir(self):
if self.config.get('do_caching', True):
cd = self.config.get('cachedir', '.scrapecache')
if not os.path.exists(cd):
os.mkdir(cd)
def maybe_clean_cache(self):
"""
Clean the cache.
Clean all cached searches (the obtained html code) in the cache directory iff
the respective files are older than specified in the configuration. Defaults to 12 hours.
"""
cachedir = self.config.get('cachedir', '.scrapecache')
if os.path.exists(cachedir):
for fname in os.listdir(cachedir):
path = os.path.join(cachedir, fname)
if time.time() > os.path.getmtime(path) + (60 * 60 * int(self.config.get('clean_cache_after', 48))):
# Remove the whole directory if necessary
if os.path.isdir(path):
import shutil
shutil.rmtree(path)
else:
os.remove(os.path.join(cachedir, fname))
def cached_file_name(self, keyword, search_engine, scrape_mode, page_number):
"""Make a unique file name from the search engine search request.
Important! The order of the sequence is darn important! If search queries have the same
words but in a different order, they are unique searches.
Args:
keyword: The keyword that was used in the search.
search_engine: The search engine the keyword was scraped for.
scrapemode: The scrapemode that was used.
page_number: The number of the SERP page.
Returns:
A unique file name based on the parameters of the search request.
"""
assert isinstance(keyword, str), 'Keyword {} must be a string'.format(keyword)
assert isinstance(search_engine, str), 'Search engine {} must be a string'.format(search_engine)
assert isinstance(scrape_mode, str), 'Scrapemode {} needs to be a string'.format(scrape_mode)
assert isinstance(page_number, int), 'Page_number {} needs to be an int'.format(page_number)
unique = [keyword, search_engine, scrape_mode, page_number]
sha = hashlib.sha256()
sha.update(b''.join(str(s).encode() for s in unique))
return '{file_name}.{extension}'.format(file_name=sha.hexdigest(), extension='cache')
def get_cached(self, keyword, search_engine, scrapemode, page_number):
"""Loads a cached SERP result.
Args:
keyword: The keyword that was used in the search.
search_engine: The search engine the keyword was scraped for.
scrapemode: The scrapemode that was used.
page_number: page_number
Returns:
            The contents of the HTML that was shipped while searching. False if no cached
            file could be found based on the above params.
"""
if self.config.get('do_caching', False):
fname = self.cached_file_name(keyword, search_engine, scrapemode, page_number)
cdir = self.config.get('cachedir', '.scrapecache')
if fname in os.listdir(cdir):
                # If the cached file is older than the configured clean_cache_after
                # threshold (in hours), return False and thus make a new fresh request.
try:
modtime = os.path.getmtime(os.path.join(cdir, fname))
except FileNotFoundError:
return False
                if (time.time() - modtime) / 60 / 60 > int(self.config.get('clean_cache_after', 48)):
return False
path = os.path.join(cdir, fname)
return self.read_cached_file(path)
else:
return False
def read_cached_file(self, path):
"""Read a compressed or uncompressed file.
The compressing schema is determined by the file extension. For example
a file that ends with .gz needs to be gunzipped.
Supported algorithms:
gzip and bzip2
Args:
path: The path to the cached file.
Returns:
The data of the cached file as a string.
Raises:
InvalidConfigurationFileException: When the type of the cached file
cannot be determined.
"""
if self.config.get('do_caching', False):
ext = path.split('.')[-1]
# The path needs to have an extension in any case.
# When uncompressed, ext is 'cache', else it is the
# compressing scheme file ending like .gz or .bz2 ...
assert ext in ALLOWED_COMPRESSION_ALGORITHMS or ext == 'cache', 'Invalid extension: {}'.format(ext)
if ext == 'cache':
with open(path, 'r') as fd:
try:
data = fd.read()
return data
except UnicodeDecodeError as e:
logger.warning(str(e))
# If we get this error, the cache files are probably
# compressed but the 'compress_cached_files' flag was
# set to False. Try to decompress them, but this may
                        # lead to an infinite recursion. This isn't proper coding,
# but convenient for the end user.
self.config['compress_cached_files'] = True
elif ext in ALLOWED_COMPRESSION_ALGORITHMS:
f = CompressedFile(path)
return f.read()
else:
raise InvalidConfigurationFileException('"{}" is a invalid configuration file.'.format(path))
def cache_results(self, parser, query, search_engine, scrape_mode, page_number, db_lock=None):
"""Stores the html of an parser in a file.
The file name is determined by the parameters query, search_engine, scrape_mode and page_number.
See cached_file_name() for more information.
        This will always write (overwrite) the cached file. If compress_cached_files is
True, the page is written in bytes (obviously).
Args:
parser: A parser with the data to cache.
query: The keyword that was used in the search.
search_engine: The search engine the keyword was scraped for.
scrape_mode: The scrapemode that was used.
page_number: The page number that the serp page is.
db_lock: If an db_lock is given, all action are wrapped in this lock.
"""
if self.config.get('do_caching', False):
if db_lock:
db_lock.acquire()
if self.config.get('minimize_caching_files', True):
html = parser.cleaned_html
else:
html = parser.html
fname = self.cached_file_name(query, search_engine, scrape_mode, page_number)
cachedir = self.config.get('cachedir', '.scrapecache')
path = os.path.join(cachedir, fname)
if self.config.get('compress_cached_files'):
algorithm = self.config.get('compressing_algorithm', 'gz')
f = CompressedFile(path, algorithm=algorithm)
f.write(html)
else:
with open(path, 'w') as fd:
if isinstance(html, bytes):
fd.write(html.decode())
else:
fd.write(html)
if db_lock:
db_lock.release()
def _get_all_cache_files(self):
"""Return all files found in the cachedir.
Returns:
All files that have the string "cache" in it within the cache directory.
Files are either uncompressed filename.cache or are compressed with a
compression algorithm: "filename.cache.zip"
"""
files = set()
for dirpath, dirname, filenames in os.walk(self.config.get('cachedir', '.scrapecache')):
for name in filenames:
if 'cache' in name:
files.add(os.path.join(dirpath, name))
return files
def _caching_is_one_to_one(self, keywords, search_engine, scrapemode, page_number):
"""Check whether all keywords map to a unique file name.
Args:
keywords: All keywords for which to check the uniqueness of the hash
search_engine: The search engine the keyword was scraped for.
scrapemode: The scrapemode that was used.
page_number: page_number
Returns:
True if all keywords map to a unique hash and False if not.
"""
mappings = {}
for kw in keywords:
file_hash = self.cached_file_name(kw, search_engine, scrapemode, page_number)
if file_hash not in mappings:
mappings.update({file_hash: [kw, ]})
else:
mappings[file_hash].append(kw)
duplicates = [v for k, v in mappings.items() if len(v) > 1]
if duplicates:
logger.info('Not one-to-one. {}'.format(duplicates))
return False
else:
logger.info('one-to-one')
return True
def parse_all_cached_files(self, scrape_jobs, session, scraper_search):
"""Walk recursively through the cachedir (as given by the Config) and parse all cached files.
Args:
session: An sql alchemy session to add the entities
scraper_search: Abstract object representing the current search.
Returns:
The scrape jobs that couldn't be parsed from the cache directory.
"""
files = self._get_all_cache_files()
num_cached = num_total = 0
mapping = {}
for job in scrape_jobs:
cache_name = self.cached_file_name(
job['query'],
job['search_engine'],
job['scrape_method'],
job['page_number']
)
mapping[cache_name] = job
num_total += 1
for path in files:
            # strip off the extension of the path if it has any
fname = os.path.split(path)[1]
clean_filename = fname
for ext in ALLOWED_COMPRESSION_ALGORITHMS:
if fname.endswith(ext):
clean_filename = fname.rstrip('.' + ext)
job = mapping.get(clean_filename, None)
if job:
# We found a file that contains the keyword, search engine name and
# search mode that fits our description. Let's see if there is already
# an record in the database and link it to our new ScraperSearch object.
serp = self.get_serp_from_database(session, job['query'], job['search_engine'], job['scrape_method'],
job['page_number'])
if not serp:
serp = self.parse_again(fname, job['search_engine'], job['scrape_method'], job['query'])
serp.scraper_searches.append(scraper_search)
session.add(serp)
if num_cached % 200 == 0:
session.commit()
store_serp_result(serp, self.config)
num_cached += 1
scrape_jobs.remove(job)
logger.info('{} cache files found in {}'.format(len(files), self.config.get('cachedir')))
logger.info('{}/{} objects have been read from the cache. {} remain to get scraped.'.format(
num_cached, num_total, num_total - num_cached))
session.add(scraper_search)
session.commit()
return scrape_jobs
def parse_again(self, fname, search_engine, scrape_method, query):
"""
@todo: `scrape_method` is not used here -> check if scrape_method is passed to this function and remove it
"""
path = os.path.join(self.config.get('cachedir', '.scrapecache'), fname)
html = self.read_cached_file(path)
return parse_serp(
self.config,
html=html,
search_engine=search_engine,
query=query
)
def get_serp_from_database(self, session, query, search_engine, scrape_method, page_number):
try:
serp = session.query(SearchEngineResultsPage).filter(
SearchEngineResultsPage.query == query,
SearchEngineResultsPage.search_engine_name == search_engine,
SearchEngineResultsPage.scrape_method == scrape_method,
SearchEngineResultsPage.page_number == page_number).first()
return serp
except NoResultFound:
# that shouldn't happen
# we have a cache file that matches the above identifying information
# but it was never stored to the database.
return False
def clean_cachefiles(self):
"""Clean silly html from all cachefiles in the cachdir"""
if input(
'Do you really want to strip all cache files from bloating tags such as <script> and <style>? ').startswith(
'y'):
import lxml.html
from lxml.html.clean import Cleaner
cleaner = Cleaner()
cleaner.style = True
cleaner.scripts = True
cleaner.javascript = True
for file in self._get_all_cache_files():
cfile = CompressedFile(file)
data = cfile.read()
cleaned = lxml.html.tostring(cleaner.clean_html(lxml.html.fromstring(data)))
cfile.write(cleaned)
logger.info('Cleaned {}. Size before: {}, after {}'.format(file, len(data), len(cleaned)))
def fix_broken_cache_names(self, url, search_engine, scrapemode, page_number):
"""Fix broken cache names.
Args:
url: A list of strings to add to each cached_file_name() call.
@todo: `url` is not used here -> check if scrape_method is passed to this function and remove it
"""
files = self._get_all_cache_files()
logger.debug('{} cache files found in {}'.format(len(files), self.config.get('cachedir', '.scrapecache')))
r = re.compile(r'<title>(?P<kw>.*?) - Google Search</title>')
i = 0
for path in files:
fname = os.path.split(path)[1].strip()
data = self.read_cached_file(path)
infilekws = r.search(data).group('kw')
realname = self.cached_file_name(infilekws, search_engine, scrapemode, page_number)
if fname != realname:
                logger.debug('The search query in the title element of file {} differs from the hash in its name. Fixing...'.format(path))
src = os.path.abspath(path)
dst = os.path.abspath(os.path.join(os.path.split(path)[0], realname))
logger.debug('Renamed from {} => {}'.format(src, dst))
os.rename(src, dst)
i += 1
logger.debug('Renamed {} files.'.format(i))
def cached(self, f, attr_to_cache=None):
"""Decorator that makes return value of functions cachable.
Any function that returns a value and that is decorated with
cached will be supplied with the previously calculated result of
an earlier call. The parameter name with the cached value may
be set with attr_to_cache.
Args:
attr_to_cache: The name of attribute whose data
is cachable.
Returns: The modified and wrapped function.
@todo: `attr_to_cache` is not used here -> check if scrape_method is passed to this function and remove it
"""
def wraps(*args, **kwargs):
cached_value = self.get_cached(*args, params=kwargs)
if cached_value:
f(*args, attr_to_cache=cached_value, **kwargs)
else:
# Nothing was cached for this attribute
value = f(*args, attr_to_cache=None, **kwargs)
self.cache_results(value, *args, params=kwargs)
return wraps
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 | 8,061,523,483,594,115,000 | 3,773,261,140,932,486,000 | 36.90689 | 138 | 0.571866 | false |
Shaps/ansible | test/lib/ansible_test/_internal/test.py | 11 | 14612 | """Classes for storing and processing test results."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import re
from . import types as t
from .util import (
display,
get_ansible_version,
)
from .util_common import (
write_text_test_results,
write_json_test_results,
ResultType,
)
from .config import (
TestConfig,
)
def calculate_best_confidence(choices, metadata):
"""
:type choices: tuple[tuple[str, int]]
:type metadata: Metadata
:rtype: int
"""
best_confidence = 0
for path, line in choices:
confidence = calculate_confidence(path, line, metadata)
best_confidence = max(confidence, best_confidence)
return best_confidence
def calculate_confidence(path, line, metadata):
"""
:type path: str
:type line: int
:type metadata: Metadata
:rtype: int
"""
ranges = metadata.changes.get(path)
# no changes were made to the file
if not ranges:
return 0
# changes were made to the same file and line
    if any(r[0] <= line <= r[1] for r in ranges):
return 100
# changes were made to the same file and the line number is unknown
if line == 0:
return 75
# changes were made to the same file and the line number is different
return 50
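# Illustrative example (assumed data): with metadata.changes == {'lib/foo.py': [(10, 20)]},
# calculate_confidence('lib/foo.py', 15, metadata) returns 100, line 0 of the same file
# returns 75, any other line of that file returns 50, and any unchanged path returns 0.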
class TestResult:
"""Base class for test results."""
def __init__(self, command, test, python_version=None):
"""
:type command: str
:type test: str
:type python_version: str
"""
self.command = command
self.test = test
self.python_version = python_version
self.name = self.test or self.command
if self.python_version:
self.name += '-python-%s' % self.python_version
try:
import junit_xml
except ImportError:
junit_xml = None
self.junit = junit_xml
def write(self, args):
"""
:type args: TestConfig
"""
self.write_console()
self.write_bot(args)
if args.lint:
self.write_lint()
if args.junit:
if self.junit:
self.write_junit(args)
else:
display.warning('Skipping junit xml output because the `junit-xml` python package was not found.', unique=True)
def write_console(self):
"""Write results to console."""
def write_lint(self):
"""Write lint results to stdout."""
def write_bot(self, args):
"""
:type args: TestConfig
"""
def write_junit(self, args):
"""
:type args: TestConfig
"""
def create_result_name(self, extension):
"""
:type extension: str
:rtype: str
"""
name = 'ansible-test-%s' % self.command
if self.test:
name += '-%s' % self.test
if self.python_version:
name += '-python-%s' % self.python_version
name += extension
return name
def save_junit(self, args, test_case, properties=None):
"""
:type args: TestConfig
:type test_case: junit_xml.TestCase
:type properties: dict[str, str] | None
:rtype: str | None
"""
test_suites = [
self.junit.TestSuite(
name='ansible-test',
test_cases=[test_case],
timestamp=datetime.datetime.utcnow().replace(microsecond=0).isoformat(),
properties=properties,
),
]
report = self.junit.TestSuite.to_xml_string(test_suites=test_suites, prettyprint=True, encoding='utf-8')
if args.explain:
return
write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), report)
class TestTimeout(TestResult):
"""Test timeout."""
def __init__(self, timeout_duration):
"""
:type timeout_duration: int
"""
super(TestTimeout, self).__init__(command='timeout', test='')
self.timeout_duration = timeout_duration
def write(self, args):
"""
:type args: TestConfig
"""
message = 'Tests were aborted after exceeding the %d minute time limit.' % self.timeout_duration
# Include a leading newline to improve readability on Shippable "Tests" tab.
# Without this, the first line becomes indented.
output = '''
One or more of the following situations may be responsible:
- Code changes have resulted in tests that hang or run for an excessive amount of time.
- Tests have been added which exceed the time limit when combined with existing tests.
- Test infrastructure and/or external dependencies are operating slower than normal.'''
if args.coverage:
output += '\n- Additional overhead from collecting code coverage has resulted in tests exceeding the time limit.'
output += '\n\nConsult the console log for additional details on where the timeout occurred.'
timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
# hack to avoid requiring junit-xml, which isn't pre-installed on Shippable outside our test containers
xml = '''
<?xml version="1.0" encoding="utf-8"?>
<testsuites disabled="0" errors="1" failures="0" tests="1" time="0.0">
\t<testsuite disabled="0" errors="1" failures="0" file="None" log="None" name="ansible-test" skipped="0" tests="1" time="0" timestamp="%s" url="None">
\t\t<testcase classname="timeout" name="timeout">
\t\t\t<error message="%s" type="error">%s</error>
\t\t</testcase>
\t</testsuite>
</testsuites>
''' % (timestamp, message, output)
write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), xml.lstrip())
class TestSuccess(TestResult):
"""Test success."""
def write_junit(self, args):
"""
:type args: TestConfig
"""
test_case = self.junit.TestCase(classname=self.command, name=self.name)
self.save_junit(args, test_case)
class TestSkipped(TestResult):
"""Test skipped."""
def write_console(self):
"""Write results to console."""
display.info('No tests applicable.', verbosity=1)
def write_junit(self, args):
"""
:type args: TestConfig
"""
test_case = self.junit.TestCase(classname=self.command, name=self.name)
test_case.add_skipped_info('No tests applicable.')
self.save_junit(args, test_case)
class TestFailure(TestResult):
"""Test failure."""
def __init__(self, command, test, python_version=None, messages=None, summary=None):
"""
:type command: str
:type test: str
:type python_version: str | None
:type messages: list[TestMessage] | None
:type summary: unicode | None
"""
super(TestFailure, self).__init__(command, test, python_version)
if messages:
messages = sorted(messages)
else:
messages = []
self.messages = messages
self.summary = summary
def write(self, args):
"""
:type args: TestConfig
"""
if args.metadata.changes:
self.populate_confidence(args.metadata)
super(TestFailure, self).write(args)
def write_console(self):
"""Write results to console."""
if self.summary:
display.error(self.summary)
else:
if self.python_version:
specifier = ' on python %s' % self.python_version
else:
specifier = ''
display.error('Found %d %s issue(s)%s which need to be resolved:' % (len(self.messages), self.test or self.command, specifier))
for message in self.messages:
display.error(message.format(show_confidence=True))
doc_url = self.find_docs()
if doc_url:
display.info('See documentation for help: %s' % doc_url)
def write_lint(self):
"""Write lint results to stdout."""
if self.summary:
command = self.format_command()
message = 'The test `%s` failed. See stderr output for details.' % command
path = ''
message = TestMessage(message, path)
print(message)
else:
for message in self.messages:
print(message)
def write_junit(self, args):
"""
:type args: TestConfig
"""
title = self.format_title()
output = self.format_block()
test_case = self.junit.TestCase(classname=self.command, name=self.name)
# Include a leading newline to improve readability on Shippable "Tests" tab.
# Without this, the first line becomes indented.
test_case.add_failure_info(message=title, output='\n%s' % output)
self.save_junit(args, test_case)
def write_bot(self, args):
"""
:type args: TestConfig
"""
docs = self.find_docs()
message = self.format_title(help_link=docs)
output = self.format_block()
if self.messages:
verified = all((m.confidence or 0) >= 50 for m in self.messages)
else:
verified = False
bot_data = dict(
verified=verified,
docs=docs,
results=[
dict(
message=message,
output=output,
),
],
)
if args.explain:
return
write_json_test_results(ResultType.BOT, self.create_result_name('.json'), bot_data)
def populate_confidence(self, metadata):
"""
:type metadata: Metadata
"""
for message in self.messages:
if message.confidence is None:
message.confidence = calculate_confidence(message.path, message.line, metadata)
def format_command(self):
"""
:rtype: str
"""
command = 'ansible-test %s' % self.command
if self.test:
command += ' --test %s' % self.test
if self.python_version:
command += ' --python %s' % self.python_version
return command
def find_docs(self):
"""
:rtype: str
"""
if self.command != 'sanity':
return None # only sanity tests have docs links
        # Use the major.minor version for the URL only if this is a release that
# matches the pattern 2.4.0, otherwise, use 'devel'
ansible_version = get_ansible_version()
url_version = 'devel'
if re.search(r'^[0-9.]+$', ansible_version):
url_version = '.'.join(ansible_version.split('.')[:2])
testing_docs_url = 'https://docs.ansible.com/ansible/%s/dev_guide/testing' % url_version
url = '%s/%s/' % (testing_docs_url, self.command)
if self.test:
url += '%s.html' % self.test
return url
def format_title(self, help_link=None):
"""
:type help_link: str | None
:rtype: str
"""
command = self.format_command()
if self.summary:
reason = 'the error'
else:
reason = '1 error' if len(self.messages) == 1 else '%d errors' % len(self.messages)
if help_link:
help_link_markup = ' [[explain](%s)]' % help_link
else:
help_link_markup = ''
title = 'The test `%s`%s failed with %s:' % (command, help_link_markup, reason)
return title
def format_block(self):
"""
:rtype: str
"""
if self.summary:
block = self.summary
else:
block = '\n'.join(m.format() for m in self.messages)
message = block.strip()
# Hack to remove ANSI color reset code from SubprocessError messages.
message = message.replace(display.clear, '')
return message
class TestMessage:
"""Single test message for one file."""
def __init__(self, message, path, line=0, column=0, level='error', code=None, confidence=None):
"""
:type message: str
:type path: str
:type line: int
:type column: int
:type level: str
:type code: str | None
:type confidence: int | None
"""
self.__path = path
self.__line = line
self.__column = column
self.__level = level
self.__code = code
self.__message = message
self.confidence = confidence
@property
def path(self): # type: () -> str
"""Return the path."""
return self.__path
@property
def line(self): # type: () -> int
"""Return the line number, or 0 if none is available."""
return self.__line
@property
def column(self): # type: () -> int
"""Return the column number, or 0 if none is available."""
return self.__column
@property
def level(self): # type: () -> str
"""Return the level."""
return self.__level
@property
def code(self): # type: () -> t.Optional[str]
"""Return the code, if any."""
return self.__code
@property
def message(self): # type: () -> str
"""Return the message."""
return self.__message
@property
def tuple(self): # type: () -> t.Tuple[str, int, int, str, t.Optional[str], str]
"""Return a tuple with all the immutable values of this test message."""
return self.__path, self.__line, self.__column, self.__level, self.__code, self.__message
def __lt__(self, other):
return self.tuple < other.tuple
def __le__(self, other):
return self.tuple <= other.tuple
def __eq__(self, other):
return self.tuple == other.tuple
def __ne__(self, other):
return self.tuple != other.tuple
def __gt__(self, other):
return self.tuple > other.tuple
def __ge__(self, other):
return self.tuple >= other.tuple
def __hash__(self):
return hash(self.tuple)
def __str__(self):
return self.format()
def format(self, show_confidence=False):
"""
:type show_confidence: bool
:rtype: str
"""
if self.__code:
msg = '%s: %s' % (self.__code, self.__message)
else:
msg = self.__message
if show_confidence and self.confidence is not None:
msg += ' (%d%%)' % self.confidence
return '%s:%s:%s: %s' % (self.__path, self.__line, self.__column, msg)
| gpl-3.0 | -269,924,671,700,884,160 | 9,149,179,569,751,941,000 | 27.317829 | 150 | 0.564194 | false |
npuichigo/ttsflow | third_party/tensorflow/tensorflow/python/debug/wrappers/hooks.py | 32 | 13374 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tfdbg CLI as SessionRunHook."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import stepper
from tensorflow.python.debug.wrappers import dumping_wrapper
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import grpc_wrapper
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.training import session_run_hook
# The prefix for GRPC endpoint URLs.
_GRPC_ENDPOINT_PREFIX = "grpc://"
class LocalCLIDebugHook(session_run_hook.SessionRunHook,
local_cli_wrapper.LocalCLIDebugWrapperSession):
"""Command-line-interface debugger hook.
Can be used as a monitor/hook for `tf.train.MonitoredSession`s and
`tf.contrib.learn`'s `Estimator`s and `Experiment`s.
"""
def __init__(self,
ui_type="curses",
dump_root=None,
thread_name_filter=None):
"""Create a local debugger command-line interface (CLI) hook.
Args:
ui_type: (str) user-interface type.
dump_root: (`str`) optional path to the dump root directory. Must be a
directory that does not exist or an empty directory. If the directory
does not exist, it will be created by the debugger core during debug
`run()` calls and removed afterwards.
thread_name_filter: Regular-expression white list for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
"""
self._ui_type = ui_type
self._dump_root = dump_root
self._thread_name_filter = thread_name_filter
self._wrapper_initialized = False
self._pending_tensor_filters = {}
def add_tensor_filter(self, filter_name, tensor_filter):
"""Add a tensor filter.
See doc of `LocalCLIDebugWrapperSession.add_tensor_filter()` for details.
Override default behavior to accommodate the possibility of this method being
called prior to the initialization of the underlying
`LocalCLIDebugWrapperSession` object.
Args:
filter_name: See doc of `LocalCLIDebugWrapperSession.add_tensor_filter()`
for details.
tensor_filter: See doc of
`LocalCLIDebugWrapperSession.add_tensor_filter()` for details.
"""
if self._wrapper_initialized:
local_cli_wrapper.LocalCLIDebugWrapperSession.add_tensor_filter(
self, filter_name, tensor_filter)
else:
self._pending_tensor_filters[filter_name] = tensor_filter
def begin(self):
pass
def before_run(self, run_context):
if not self._wrapper_initialized:
local_cli_wrapper.LocalCLIDebugWrapperSession.__init__(
self,
run_context.session,
ui_type=self._ui_type,
dump_root=self._dump_root,
thread_name_filter=self._thread_name_filter)
# Actually register tensor filters registered prior to the construction
# of the underlying LocalCLIDebugWrapperSession object.
for filter_name in self._pending_tensor_filters:
local_cli_wrapper.LocalCLIDebugWrapperSession.add_tensor_filter(
self, filter_name, self._pending_tensor_filters[filter_name])
self._wrapper_initialized = True
# Increment run call counter.
self._run_call_count += 1
# Adapt run_context to an instance of OnRunStartRequest for invoking
# superclass on_run_start().
on_run_start_request = framework.OnRunStartRequest(
run_context.original_args.fetches, run_context.original_args.feed_dict,
None, None, self._run_call_count)
on_run_start_response = self.on_run_start(on_run_start_request)
self._performed_action = on_run_start_response.action
run_args = session_run_hook.SessionRunArgs(
None, feed_dict=None, options=config_pb2.RunOptions())
if self._performed_action == framework.OnRunStartAction.DEBUG_RUN:
self._decorate_options_for_debug(
run_args.options,
run_context.session.graph,
framework.WatchOptions(
node_name_regex_whitelist=(
on_run_start_response.node_name_regex_whitelist),
op_type_regex_whitelist=(
on_run_start_response.op_type_regex_whitelist),
tensor_dtype_regex_whitelist=(
on_run_start_response.tensor_dtype_regex_whitelist),
tolerate_debug_op_creation_failures=(
on_run_start_response.tolerate_debug_op_creation_failures)))
elif self._performed_action == framework.OnRunStartAction.PROFILE_RUN:
self._decorate_run_options_for_profile(run_args.options)
elif self._performed_action == framework.OnRunStartAction.INVOKE_STEPPER:
# The _finalized property must be set to False so that the NodeStepper
# can insert ops for retrieving TensorHandles.
# pylint: disable=protected-access
run_context.session.graph._finalized = False
# pylint: enable=protected-access
with stepper.NodeStepper(
run_context.session,
run_context.original_args.
fetches,
run_context.original_args.feed_dict) as node_stepper:
self.invoke_node_stepper(
node_stepper, restore_variable_values_on_exit=True)
return run_args
def after_run(self, run_context, run_values):
# Adapt run_context and run_values to OnRunEndRequest and invoke superclass
# on_run_end()
on_run_end_request = framework.OnRunEndRequest(self._performed_action,
run_values.run_metadata)
self.on_run_end(on_run_end_request)
def _decorate_options_for_debug(self, options, graph, watch_options):
"""Modify RunOptions.debug_options.debug_tensor_watch_opts for debugging."""
debug_utils.watch_graph(
options,
graph,
debug_urls=self._get_run_debug_urls(),
node_name_regex_whitelist=watch_options.node_name_regex_whitelist,
op_type_regex_whitelist=watch_options.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=watch_options.tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=(
watch_options.tolerate_debug_op_creation_failures))
options.output_partition_graphs = True
class DumpingDebugHook(session_run_hook.SessionRunHook,
dumping_wrapper.DumpingDebugWrapperSession):
"""A debugger hook that dumps debug data to filesystem.
Can be used as a monitor/hook for `tf.train.MonitoredSession`s and
`tf.contrib.learn`'s `Estimator`s and `Experiment`s.
"""
def __init__(self,
session_root,
watch_fn=None,
thread_name_filter=None,
log_usage=True):
"""Create a local debugger command-line interface (CLI) hook.
Args:
session_root: See doc of
`dumping_wrapper.DumpingDebugWrapperSession.__init__`.
watch_fn: See doc of
`dumping_wrapper.DumpingDebugWrapperSession.__init__`.
thread_name_filter: Regular-expression white list for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
log_usage: (bool) Whether usage is to be logged.
"""
self._session_root = session_root
self._watch_fn = watch_fn
self._thread_name_filter = thread_name_filter
self._log_usage = log_usage
self._wrapper_initialized = False
def begin(self):
pass
def before_run(self, run_context):
if not self._wrapper_initialized:
# TODO(cais): Make this hook have a DumpingDebugWrapperSession property
# instead of subclassing DumpingDebugWrapperSession.
dumping_wrapper.DumpingDebugWrapperSession.__init__(
self,
run_context.session,
self._session_root,
watch_fn=self._watch_fn,
thread_name_filter=self._thread_name_filter,
log_usage=self._log_usage)
self._wrapper_initialized = True
self._run_call_count += 1
debug_urls, watch_options = self._prepare_run_watch_config(
run_context.original_args.fetches, run_context.original_args.feed_dict)
run_options = config_pb2.RunOptions()
debug_utils.watch_graph(
run_options,
run_context.session.graph,
debug_urls=debug_urls,
debug_ops=watch_options.debug_ops,
node_name_regex_whitelist=watch_options.node_name_regex_whitelist,
op_type_regex_whitelist=watch_options.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=watch_options.tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=(
watch_options.tolerate_debug_op_creation_failures))
run_args = session_run_hook.SessionRunArgs(
None, feed_dict=None, options=run_options)
return run_args
def after_run(self, run_context, run_values):
pass
class GrpcDebugHook(session_run_hook.SessionRunHook):
"""A hook that streams debugger-related events to any grpc_debug_server.
For example, the debugger data server is a grpc_debug_server. The debugger
data server writes debugger-related events it receives via GRPC to logdir.
This enables debugging features in Tensorboard such as health pills.
  When the arguments of debug_utils.watch_graph change, strongly consider
changing arguments here too so that features are available to tflearn users.
Can be used as a monitor/hook for `tf.train.MonitoredSession`s and
`tf.contrib.learn`'s `Estimator`s and `Experiment`s.
"""
def __init__(self,
grpc_debug_server_addresses,
watch_fn=None,
thread_name_filter=None,
log_usage=True):
"""Constructs a GrpcDebugHook.
Args:
grpc_debug_server_addresses: (`list` of `str`) A list of the gRPC debug
server addresses, in the format of <host:port>, without the "grpc://"
prefix. For example: ["localhost:7000", "192.168.0.2:8000"]
watch_fn: A function that allows for customizing which ops to watch at
which specific steps. See doc of
`dumping_wrapper.DumpingDebugWrapperSession.__init__` for details.
thread_name_filter: Regular-expression white list for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
log_usage: (bool) Whether usage is to be logged.
Raises:
ValueError: if any debugger server addresses start with grpc://.
"""
for address in grpc_debug_server_addresses:
if address.startswith(_GRPC_ENDPOINT_PREFIX):
raise ValueError(
("Debug server address %r starts with %r. It should not because "
"the hook already automatically adds the prefix.") % (
address, _GRPC_ENDPOINT_PREFIX))
# A wrapper session responsible for GRPC communication.
self._grpc_debug_wrapper_session = None
self._thread_name_filter = thread_name_filter
self._grpc_debug_server_addresses = grpc_debug_server_addresses
self._watch_fn = watch_fn
self._log_usage = log_usage
def before_run(self, run_context):
"""Called right before a session is run.
Args:
run_context: A session_run_hook.SessionRunContext. Encapsulates
information on the run.
Returns:
A session_run_hook.SessionRunArgs object.
"""
if not self._grpc_debug_wrapper_session:
self._grpc_debug_wrapper_session = grpc_wrapper.GrpcDebugWrapperSession(
run_context.session,
self._grpc_debug_server_addresses,
watch_fn=self._watch_fn,
thread_name_filter=self._thread_name_filter,
log_usage=self._log_usage)
fetches = run_context.original_args.fetches
feed_dict = run_context.original_args.feed_dict
watch_options = self._watch_fn(fetches, feed_dict)
run_options = config_pb2.RunOptions()
debug_utils.watch_graph(
run_options,
run_context.session.graph,
debug_urls=self._grpc_debug_wrapper_session.prepare_run_debug_urls(
fetches, feed_dict),
debug_ops=watch_options.debug_ops,
node_name_regex_whitelist=watch_options.node_name_regex_whitelist,
op_type_regex_whitelist=watch_options.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=watch_options.tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=(
watch_options.tolerate_debug_op_creation_failures))
return session_run_hook.SessionRunArgs(
None, feed_dict=None, options=run_options)
| apache-2.0 | 6,383,763,221,191,575,000 | 5,131,177,833,486,679,000 | 39.162162 | 81 | 0.6802 | false |
KarelJakubec/pip | pip/_vendor/cachecontrol/adapter.py | 469 | 4196 | import functools
from pip._vendor.requests.adapters import HTTPAdapter
from .controller import CacheController
from .cache import DictCache
from .filewrapper import CallbackFileWrapper
class CacheControlAdapter(HTTPAdapter):
invalidating_methods = set(['PUT', 'DELETE'])
def __init__(self, cache=None,
cache_etags=True,
controller_class=None,
serializer=None,
heuristic=None,
*args, **kw):
super(CacheControlAdapter, self).__init__(*args, **kw)
self.cache = cache or DictCache()
self.heuristic = heuristic
controller_factory = controller_class or CacheController
self.controller = controller_factory(
self.cache,
cache_etags=cache_etags,
serializer=serializer,
)
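    # Illustrative usage (sketch): mount the adapter on a requests session so
    # cacheable GET responses are stored and replayed, e.g.
    #   sess = requests.Session()
    #   sess.mount('http://', CacheControlAdapter())
    #   sess.mount('https://', CacheControlAdapter())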
def send(self, request, **kw):
"""
Send a request. Use the request information to see if it
exists in the cache and cache the response if we need to and can.
"""
if request.method == 'GET':
cached_response = self.controller.cached_request(request)
if cached_response:
return self.build_response(request, cached_response,
from_cache=True)
# check for etags and add headers if appropriate
request.headers.update(
self.controller.conditional_headers(request)
)
resp = super(CacheControlAdapter, self).send(request, **kw)
return resp
def build_response(self, request, response, from_cache=False):
"""
Build a response by making a request or using the cache.
This will end up calling send and returning a potentially
cached response
"""
if not from_cache and request.method == 'GET':
# apply any expiration heuristics
if response.status == 304:
# We must have sent an ETag request. This could mean
# that we've been expired already or that we simply
# have an etag. In either case, we want to try and
# update the cache if that is the case.
cached_response = self.controller.update_cached_response(
request, response
)
if cached_response is not response:
from_cache = True
# We are done with the server response, read a
# possible response body (compliant servers will
# not return one, but we cannot be 100% sure) and
# release the connection back to the pool.
response.read(decode_content=False)
response.release_conn()
response = cached_response
# We always cache the 301 responses
elif response.status == 301:
self.controller.cache_response(request, response)
else:
# Check for any heuristics that might update headers
# before trying to cache.
if self.heuristic:
response = self.heuristic.apply(response)
# Wrap the response file with a wrapper that will cache the
# response when the stream has been consumed.
response._fp = CallbackFileWrapper(
response._fp,
functools.partial(
self.controller.cache_response,
request,
response,
)
)
resp = super(CacheControlAdapter, self).build_response(
request, response
)
# See if we should invalidate the cache.
if request.method in self.invalidating_methods and resp.ok:
cache_url = self.controller.cache_url(request.url)
self.cache.delete(cache_url)
# Give the request a from_cache attr to let people use it
resp.from_cache = from_cache
return resp
def close(self):
self.cache.close()
super(CacheControlAdapter, self).close()
| mit | 3,370,060,341,947,106,000 | -3,393,662,953,332,138,000 | 34.863248 | 75 | 0.561964 | false |
cribster/cribster.github.io | bower_components/bootstrap/test-infra/s3_cache.py | 1700 | 3523 | #!/usr/bin/env python2.7
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, remove as _delete_file
from os.path import isfile, dirname, basename, abspath
from hashlib import sha256
from subprocess import check_call as run
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
NEED_TO_UPLOAD_MARKER = '.need-to-upload'
BYTES_PER_MB = 1024 * 1024
try:
BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def _tarball_size(directory):
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
print("Creating tarball of {}...".format(directory))
run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
print("Extracting tarball of {}...".format(directory))
run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
try:
print("Downloading {} tarball from S3...".format(friendly_name))
key.get_contents_to_filename(_tarball_filename_for(directory))
except S3ResponseError as err:
open(NEED_TO_UPLOAD_MARKER, 'a').close()
print(err)
raise SystemExit("Cached {} download failed!".format(friendly_name))
print("Downloaded {}.".format(_tarball_size(directory)))
_extract_tarball(directory)
print("{} successfully installed from cache.".format(friendly_name))
def upload(directory):
_create_tarball(directory)
print("Uploading {} tarball to S3... ({})".format(friendly_name, _tarball_size(directory)))
key.set_contents_from_filename(_tarball_filename_for(directory))
print("{} cache successfully updated.".format(friendly_name))
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
if __name__ == '__main__':
# Uses environment variables:
# AWS_ACCESS_KEY_ID -- AWS Access Key ID
# AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
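    # Illustrative invocations (sketch; names and paths are examples only):
    #   ./s3_cache.py download "npm packages" package.json ./node_modules
    #   ./s3_cache.py upload "npm packages" package.json ./node_modules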
argv.pop(0)
if len(argv) != 4:
raise SystemExit("USAGE: s3_cache.py <download | upload> <friendly name> <dependencies file> <directory>")
mode, friendly_name, dependencies_file, directory = argv
conn = S3Connection()
bucket = conn.lookup(BUCKET_NAME, validate=False)
if bucket is None:
raise SystemExit("Could not access bucket!")
dependencies_file_hash = _sha256_of_file(dependencies_file)
key = Key(bucket, dependencies_file_hash)
key.storage_class = 'REDUCED_REDUNDANCY'
if mode == 'download':
download(directory)
elif mode == 'upload':
if isfile(NEED_TO_UPLOAD_MARKER): # FIXME
upload(directory)
else:
print("No need to upload anything.")
else:
raise SystemExit("Unrecognized mode {!r}".format(mode))
| mit | 7,724,435,001,093,069,000 | -6,299,608,114,044,575,000 | 31.925234 | 114 | 0.675844 | false |
ahmedjabbar/uor | tg/tg-test.py | 199 | 1450 | import tgl
import pprint
from functools import partial
our_id = 0
pp = pprint.PrettyPrinter(indent=4)
binlog_done = False;
def on_binlog_replay_end():
  global binlog_done
  binlog_done = True
def on_get_difference_end():
pass
def on_our_id(id):
  global our_id
  our_id = id
  return "Set ID: " + str(our_id)
def msg_cb(success, msg):
pp.pprint(success)
pp.pprint(msg)
HISTORY_QUERY_SIZE = 100
def history_cb(msg_list, peer, success, msgs):
print(len(msgs))
msg_list.extend(msgs)
print(len(msg_list))
if len(msgs) == HISTORY_QUERY_SIZE:
tgl.get_history(peer, len(msg_list), HISTORY_QUERY_SIZE, partial(history_cb, msg_list, peer));
def cb(success):
print(success)
def on_msg_receive(msg):
if msg.out and not binlog_done:
return;
if msg.dest.id == our_id: # direct message
peer = msg.src
else: # chatroom
peer = msg.dest
pp.pprint(msg)
if msg.text.startswith("!ping"):
peer.send_msg("PONG! google.com", preview=False, reply=msg.id)
def on_secret_chat_update(peer, types):
return "on_secret_chat_update"
def on_user_update():
pass
def on_chat_update():
pass
# Set callbacks
tgl.set_on_binlog_replay_end(on_binlog_replay_end)
tgl.set_on_get_difference_end(on_get_difference_end)
tgl.set_on_our_id(on_our_id)
tgl.set_on_msg_receive(on_msg_receive)
tgl.set_on_secret_chat_update(on_secret_chat_update)
tgl.set_on_user_update(on_user_update)
tgl.set_on_chat_update(on_chat_update)
| gpl-2.0 | 1,729,924,143,937,402,400 | 4,507,597,698,947,369,000 | 20.014493 | 98 | 0.676552 | false |
syci/OCB | addons/sale/tests/test_sale_order.py | 40 | 7670 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.exceptions import UserError, AccessError
from test_sale_common import TestSale
class TestSaleOrder(TestSale):
def test_sale_order(self):
""" Test the sale order flow (invoicing and quantity updates)
            - Invoice repeatedly while varying delivered quantities and check that invoices are always what we expect
"""
# DBO TODO: validate invoice and register payments
inv_obj = self.env['account.invoice']
so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
'pricelist_id': self.env.ref('product.list0').id,
})
self.assertEqual(so.amount_total, sum([2 * p.list_price for (k, p) in self.products.iteritems()]), 'Sale: total amount is wrong')
# send quotation
so.force_quotation_send()
self.assertTrue(so.state == 'sent', 'Sale: state after sending is wrong')
# confirm quotation
so.action_confirm()
self.assertTrue(so.state == 'sale')
self.assertTrue(so.invoice_status == 'to invoice')
# create invoice: only 'invoice on order' products are invoiced
inv_id = so.action_invoice_create()
inv = inv_obj.browse(inv_id)
self.assertEqual(len(inv.invoice_line_ids), 2, 'Sale: invoice is missing lines')
self.assertEqual(inv.amount_total, sum([2 * p.list_price if p.invoice_policy == 'order' else 0 for (k, p) in self.products.iteritems()]), 'Sale: invoice total amount is wrong')
self.assertTrue(so.invoice_status == 'no', 'Sale: SO status after invoicing should be "nothing to invoice"')
self.assertTrue(len(so.invoice_ids) == 1, 'Sale: invoice is missing')
# deliver lines except 'time and material' then invoice again
for line in so.order_line:
line.qty_delivered = 2 if line.product_id.invoice_policy in ['order', 'delivery'] else 0
self.assertTrue(so.invoice_status == 'to invoice', 'Sale: SO status after delivery should be "to invoice"')
inv_id = so.action_invoice_create()
inv = inv_obj.browse(inv_id)
self.assertEqual(len(inv.invoice_line_ids), 2, 'Sale: second invoice is missing lines')
self.assertEqual(inv.amount_total, sum([2 * p.list_price if p.invoice_policy == 'delivery' else 0 for (k, p) in self.products.iteritems()]), 'Sale: second invoice total amount is wrong')
self.assertTrue(so.invoice_status == 'invoiced', 'Sale: SO status after invoicing everything should be "invoiced"')
self.assertTrue(len(so.invoice_ids) == 2, 'Sale: invoice is missing')
# go over the sold quantity
for line in so.order_line:
if line.product_id == self.products['serv_order']:
line.qty_delivered = 10
self.assertTrue(so.invoice_status == 'upselling', 'Sale: SO status after increasing delivered qty higher than ordered qty should be "upselling"')
# upsell and invoice
for line in so.order_line:
if line.product_id == self.products['serv_order']:
line.product_uom_qty = 10
inv_id = so.action_invoice_create()
inv = inv_obj.browse(inv_id)
self.assertEqual(len(inv.invoice_line_ids), 1, 'Sale: third invoice is missing lines')
        self.assertEqual(inv.amount_total, 8 * self.products['serv_order'].list_price, 'Sale: third invoice total amount is wrong')
        self.assertTrue(so.invoice_status == 'invoiced', 'Sale: SO status after invoicing everything (including the upsell) should be "invoiced"')
def test_unlink_cancel(self):
""" Test deleting and cancelling sale orders depending on their state and on the user's rights """
so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
'pricelist_id': self.env.ref('product.list0').id,
})
# only quotations are deletable
with self.assertRaises(UserError):
so.action_confirm()
so.unlink()
so_copy = so.copy()
with self.assertRaises(AccessError):
so_copy.sudo(self.user).unlink()
self.assertTrue(so_copy.sudo(self.manager).unlink(), 'Sale: deleting a quotation should be possible')
# cancelling and setting to done, you should not be able to delete any SO ever
so.action_cancel()
self.assertTrue(so.state == 'cancel', 'Sale: cancelling SO should always be possible')
with self.assertRaises(UserError):
so.sudo(self.manager).unlink()
so.action_done()
self.assertTrue(so.state == 'done', 'Sale: SO not done')
def test_cost_invoicing(self):
""" Test confirming a vendor invoice to reinvoice cost on the so """
serv_cost = self.env.ref('product.product_product_1b')
prod_gap = self.env.ref('product.product_product_1')
so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': prod_gap.name, 'product_id': prod_gap.id, 'product_uom_qty': 2, 'product_uom': prod_gap.uom_id.id, 'price_unit': prod_gap.list_price})],
'pricelist_id': self.env.ref('product.list0').id,
})
so.action_confirm()
so._create_analytic_account()
inv_partner = self.env.ref('base.res_partner_2')
company = self.env.ref('base.main_company')
journal = self.env['account.journal'].create({'name': 'Purchase Journal - Test', 'code': 'STPJ', 'type': 'purchase', 'company_id': company.id})
account_payable = self.env['account.account'].create({'code': 'X1111', 'name': 'Sale - Test Payable Account', 'user_type_id': self.env.ref('account.data_account_type_payable').id, 'reconcile': True})
account_income = self.env['account.account'].create({'code': 'X1112', 'name': 'Sale - Test Account', 'user_type_id': self.env.ref('account.data_account_type_direct_costs').id})
invoice_vals = {
'name': '',
'type': 'in_invoice',
'partner_id': inv_partner.id,
'invoice_line_ids': [(0, 0, {'name': serv_cost.name, 'product_id': serv_cost.id, 'quantity': 2, 'uom_id': serv_cost.uom_id.id, 'price_unit': serv_cost.standard_price, 'account_analytic_id': so.project_id.id, 'account_id': account_income.id})],
'account_id': account_payable.id,
'journal_id': journal.id,
'currency_id': company.currency_id.id,
}
inv = self.env['account.invoice'].create(invoice_vals)
inv.signal_workflow('invoice_open')
sol = so.order_line.filtered(lambda l: l.product_id == serv_cost)
self.assertTrue(sol, 'Sale: cost invoicing does not add lines when confirming vendor invoice')
self.assertTrue(sol.price_unit == 160 and sol.qty_delivered == 2 and sol.product_uom_qty == sol.qty_invoiced == 0, 'Sale: line is wrong after confirming vendor invoice')
| agpl-3.0 | 1,167,336,234,610,697,200 | -2,247,351,374,927,323,100 | 60.854839 | 255 | 0.629465 | false |
kartikp1995/gnuradio | gnuradio-runtime/python/pmt/qa_pmt_to_python.py | 48 | 1554 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import unittest
import pmt
import pmt_to_python as pmt2py
class test_pmt_to_python(unittest.TestCase):
def test_pmt_from_double(self):
b = pmt.from_double(123765)
self.assertEqual(pmt.to_python(b), 123765)
t = pmt.to_pmt(range(5))
def test_numpy_to_uvector_and_reverse(self):
import numpy as np
N = 100
narr = np.ndarray(N, dtype=np.complex128)
narr.real[:] = np.random.uniform(size=N)
narr.imag[:] = np.random.uniform(size=N)
uvector = pmt2py.numpy_to_uvector(narr)
nparr = pmt2py.uvector_to_numpy(uvector)
self.assertTrue(nparr.dtype==narr.dtype)
self.assertTrue(np.alltrue(nparr == narr))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 1,502,471,604,461,833,200 | 9,182,250,713,583,102,000 | 31.375 | 70 | 0.690476 | false |
da1z/intellij-community | python/helpers/profiler/thriftpy/_compat.py | 20 | 3953 | # -*- coding: utf-8 -*-
"""
thriftpy._compat
~~~~~~~~~~~~~
py2/py3 compatibility support.
"""
from __future__ import absolute_import
import platform
import sys
import types
PY3 = sys.version_info[0] == 3
PYPY = "__pypy__" in sys.modules
UNIX = platform.system() in ("Linux", "Darwin")
CYTHON = False # Cython always disabled in pypy and windows
# only python2.7.9 and python 3.4 or above have true ssl context
MODERN_SSL = (2, 7, 9) <= sys.version_info < (3, 0, 0) or \
sys.version_info >= (3, 4)
if PY3:
text_type = str
string_types = (str,)
def u(s):
return s
else:
text_type = unicode # noqa
string_types = (str, unicode) # noqa
def u(s):
if not isinstance(s, text_type):
s = s.decode("utf-8")
return s
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass for py2 & py3
This code snippet is copied from six."""
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
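# Illustrative usage sketch for with_metaclass (not part of the original
# module; `BaseMeta` and `Base` are hypothetical names):
#
#     class BaseMeta(type):
#         pass
#
#     class Base(with_metaclass(BaseMeta, object)):
#         pass
#
#     assert type(Base) is BaseMeta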
def init_func_generator(spec):
"""Generate `__init__` function based on TPayload.default_spec
For example::
spec = [('name', 'Alice'), ('number', None)]
will generate::
def __init__(self, name='Alice', number=None):
kwargs = locals()
kwargs.pop('self')
self.__dict__.update(kwargs)
TODO: The `locals()` part may need refine.
"""
if not spec:
def __init__(self):
pass
return __init__
varnames, defaults = zip(*spec)
varnames = ('self', ) + varnames
def init(self):
self.__dict__ = locals().copy()
del self.__dict__['self']
code = init.__code__
if PY3:
new_code = types.CodeType(len(varnames),
0,
len(varnames),
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
varnames,
code.co_filename,
"__init__",
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars)
else:
new_code = types.CodeType(len(varnames),
len(varnames),
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
varnames,
code.co_filename,
"__init__",
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars)
return types.FunctionType(new_code,
{"__builtins__": __builtins__},
argdefs=defaults)
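# Illustrative usage sketch for init_func_generator (assumes a hypothetical
# TPayload-style class named `Person`):
#
#     spec = [('name', 'Alice'), ('number', None)]
#     class Person(object):
#         __init__ = init_func_generator(spec)
#
#     p = Person(number=42)   # p.name == 'Alice', p.number == 42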
| apache-2.0 | -6,574,284,570,774,916,000 | 3,674,596,520,783,339,500 | 30.373016 | 72 | 0.454086 | false |
gangadhar-kadam/sapphire_app | hr/report/monthly_salary_register/monthly_salary_register.py | 2 | 4044 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import flt, cstr
from webnotes import msgprint, _
def execute(filters=None):
if not filters: filters = {}
salary_slips = get_salary_slips(filters)
columns, earning_types, ded_types = get_columns(salary_slips)
ss_earning_map = get_ss_earning_map(salary_slips)
ss_ded_map = get_ss_ded_map(salary_slips)
data = []
for ss in salary_slips:
row = [ss.employee, ss.employee_name, ss.branch, ss.department, ss.designation,
ss.company, ss.month, ss.leave_withut_pay, ss.payment_days]
for e in earning_types:
row.append(ss_earning_map.get(ss.name, {}).get(e))
row += [ss.arrear_amount, ss.leave_encashment_amount, ss.gross_pay]
for d in ded_types:
row.append(ss_ded_map.get(ss.name, {}).get(d))
row += [ss.total_deduction, ss.net_pay]
data.append(row)
return columns, data
def get_columns(salary_slips):
columns = [
"Employee:Link/Employee:120", "Employee Name::140", "Branch:Link/Branch:120",
"Department:Link/Department:120", "Designation:Link/Designation:120",
"Company:Link/Company:120", "Month::80", "Leave Without pay:Float:130",
"Payment Days:Float:120"
]
earning_types = webnotes.conn.sql_list("""select distinct e_type from `tabSalary Slip Earning`
where ifnull(e_modified_amount, 0) != 0 and parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))
ded_types = webnotes.conn.sql_list("""select distinct d_type from `tabSalary Slip Deduction`
where ifnull(d_modified_amount, 0) != 0 and parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))
columns = columns + [(e + ":Link/Earning Type:120") for e in earning_types] + \
["Arrear Amount:Currency:120", "Leave Encashment Amount:Currency:150",
"Gross Pay:Currency:120"] + [(d + ":Link/Deduction Type:120") for d in ded_types] + \
["Total Deduction:Currency:120", "Net Pay:Currency:120"]
return columns, earning_types, ded_types
def get_salary_slips(filters):
conditions, filters = get_conditions(filters)
salary_slips = webnotes.conn.sql("""select * from `tabSalary Slip` where docstatus = 1 %s""" %
conditions, filters, as_dict=1)
if not salary_slips:
msgprint(_("No salary slip found for month: ") + cstr(filters.get("month")) +
_(" and year: ") + cstr(filters.get("fiscal_year")), raise_exception=1)
return salary_slips
def get_conditions(filters):
conditions = ""
if filters.get("month"):
month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov",
"Dec"].index(filters["month"]) + 1
filters["month"] = month
conditions += " and month = %(month)s"
if filters.get("fiscal_year"): conditions += " and fiscal_year = %(fiscal_year)s"
if filters.get("company"): conditions += " and company = %(company)s"
if filters.get("employee"): conditions += " and employee = %(employee)s"
return conditions, filters
def get_ss_earning_map(salary_slips):
ss_earnings = webnotes.conn.sql("""select parent, e_type, e_modified_amount
from `tabSalary Slip Earning` where parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
ss_earning_map = {}
for d in ss_earnings:
ss_earning_map.setdefault(d.parent, webnotes._dict()).setdefault(d.e_type, [])
ss_earning_map[d.parent][d.e_type] = flt(d.e_modified_amount)
return ss_earning_map
def get_ss_ded_map(salary_slips):
ss_deductions = webnotes.conn.sql("""select parent, d_type, d_modified_amount
from `tabSalary Slip Deduction` where parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
ss_ded_map = {}
for d in ss_deductions:
ss_ded_map.setdefault(d.parent, webnotes._dict()).setdefault(d.d_type, [])
		ss_ded_map[d.parent][d.d_type] = flt(d.d_modified_amount)
return ss_ded_map | agpl-3.0 | 7,610,514,151,434,082,000 | -237,220,355,417,152,350 | 36.803738 | 96 | 0.670623 | false |
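# Illustrative usage sketch (hypothetical filter values; `month` must be one of
# the three-letter abbreviations handled in get_conditions(), and `employee`
# may also be passed to restrict the rows to a single employee):
#
#     columns, data = execute({"month": "Mar", "fiscal_year": "2013-2014",
#                              "company": "My Company"})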
goldenbull/grpc | src/python/grpcio/tests/unit/framework/interfaces/face/_3069_test_constant.py | 25 | 1848 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A test constant working around issue 3069."""
# test_constants is referenced from specification in this module.
from tests.unit.framework.common import test_constants # pylint: disable=unused-import
# TODO(issue 3069): Replace uses of this constant with
# test_constants.SHORT_TIMEOUT.
REALLY_SHORT_TIMEOUT = 0.1
| bsd-3-clause | 9,215,038,664,742,357,000 | -3,845,084,666,558,133,000 | 48.945946 | 87 | 0.781385 | false |
rruebner/odoo | addons/hw_escpos/escpos/escpos.py | 66 | 31141 | # -*- coding: utf-8 -*-
'''
@author: Manuel F Martinez <[email protected]>
@organization: Bashlinux
@copyright: Copyright (c) 2012 Bashlinux
@license: GPL
'''
try:
import qrcode
except ImportError:
qrcode = None
import time
import copy
import io
import base64
import math
import md5
import re
import traceback
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
from PIL import Image
try:
import jcconv
except ImportError:
jcconv = None
print 'ESC/POS: please install jcconv for improved Japanese receipt printing:'
print ' # pip install jcconv'
from constants import *
from exceptions import *
def utfstr(stuff):
""" converts stuff to string and does without failing if stuff is a utf8 string """
if isinstance(stuff,basestring):
return stuff
else:
return str(stuff)
class StyleStack:
"""
The stylestack is used by the xml receipt serializer to compute the active styles along the xml
document. Styles are just xml attributes, there is no css mechanism. But the style applied by
the attributes are inherited by deeper nodes.
"""
def __init__(self):
self.stack = []
self.defaults = { # default style values
'align': 'left',
'underline': 'off',
'bold': 'off',
'size': 'normal',
'font' : 'a',
'width': 48,
'indent': 0,
'tabwidth': 2,
'bullet': ' - ',
'line-ratio':0.5,
'color': 'black',
'value-decimals': 2,
'value-symbol': '',
'value-symbol-position': 'after',
'value-autoint': 'off',
'value-decimals-separator': '.',
'value-thousands-separator': ',',
'value-width': 0,
}
self.types = { # attribute types, default is string and can be ommitted
'width': 'int',
'indent': 'int',
'tabwidth': 'int',
'line-ratio': 'float',
'value-decimals': 'int',
'value-width': 'int',
}
self.cmds = {
# translation from styles to escpos commands
            # some styles do not correspond to escpos commands and are only
            # used by the serializer instead
'align': {
'left': TXT_ALIGN_LT,
'right': TXT_ALIGN_RT,
'center': TXT_ALIGN_CT,
},
'underline': {
'off': TXT_UNDERL_OFF,
'on': TXT_UNDERL_ON,
'double': TXT_UNDERL2_ON,
},
'bold': {
'off': TXT_BOLD_OFF,
'on': TXT_BOLD_ON,
},
'font': {
'a': TXT_FONT_A,
'b': TXT_FONT_B,
},
'size': {
'normal': TXT_NORMAL,
'double-height': TXT_2HEIGHT,
'double-width': TXT_2WIDTH,
'double': TXT_DOUBLE,
},
'color': {
'black': TXT_COLOR_BLACK,
'red': TXT_COLOR_RED,
},
}
self.push(self.defaults)
def get(self,style):
""" what's the value of a style at the current stack level"""
level = len(self.stack) -1
while level >= 0:
if style in self.stack[level]:
return self.stack[level][style]
else:
level = level - 1
return None
def enforce_type(self, attr, val):
"""converts a value to the attribute's type"""
if not attr in self.types:
return utfstr(val)
elif self.types[attr] == 'int':
return int(float(val))
elif self.types[attr] == 'float':
return float(val)
else:
return utfstr(val)
def push(self, style={}):
"""push a new level on the stack with a style dictionnary containing style:value pairs"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
_style[attr] = self.enforce_type(attr, style[attr])
self.stack.append(_style)
def set(self, style={}):
"""overrides style values at the current stack level"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
self.stack[-1][attr] = self.enforce_type(attr, style[attr])
def pop(self):
""" pop a style stack level """
if len(self.stack) > 1 :
self.stack = self.stack[:-1]
def to_escpos(self):
""" converts the current style to an escpos command string """
cmd = ''
for style in self.cmds:
cmd += self.cmds[style][self.get(style)]
return cmd
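# Illustrative usage sketch for StyleStack (a minimal sketch, not part of the
# original driver):
#
#     stack = StyleStack()
#     stack.push({'bold': 'on', 'align': 'center'})
#     prefix = stack.to_escpos()   # escpos commands for the active style
#     stack.pop()                  # back to the enclosing style level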
class XmlSerializer:
"""
Converts the xml inline / block tree structure to a string,
keeping track of newlines and spacings.
The string is outputted asap to the provided escpos driver.
"""
def __init__(self,escpos):
self.escpos = escpos
self.stack = ['block']
self.dirty = False
def start_inline(self,stylestack=None):
""" starts an inline entity with an optional style definition """
self.stack.append('inline')
if self.dirty:
self.escpos._raw(' ')
if stylestack:
self.style(stylestack)
def start_block(self,stylestack=None):
""" starts a block entity with an optional style definition """
if self.dirty:
self.escpos._raw('\n')
self.dirty = False
self.stack.append('block')
if stylestack:
self.style(stylestack)
def end_entity(self):
""" ends the entity definition. (but does not cancel the active style!) """
if self.stack[-1] == 'block' and self.dirty:
self.escpos._raw('\n')
self.dirty = False
if len(self.stack) > 1:
self.stack = self.stack[:-1]
def pre(self,text):
""" puts a string of text in the entity keeping the whitespace intact """
if text:
self.escpos.text(text)
self.dirty = True
def text(self,text):
""" puts text in the entity. Whitespace and newlines are stripped to single spaces. """
if text:
text = utfstr(text)
text = text.strip()
text = re.sub('\s+',' ',text)
if text:
self.dirty = True
self.escpos.text(text)
def linebreak(self):
""" inserts a linebreak in the entity """
self.dirty = False
self.escpos._raw('\n')
def style(self,stylestack):
""" apply a style to the entity (only applies to content added after the definition) """
self.raw(stylestack.to_escpos())
def raw(self,raw):
""" puts raw text or escpos command in the entity without affecting the state of the serializer """
self.escpos._raw(raw)
class XmlLineSerializer:
"""
This is used to convert a xml tree into a single line, with a left and a right part.
    The content is not output to escpos directly, and is intended to be fed back to the
XmlSerializer as the content of a block entity.
"""
def __init__(self, indent=0, tabwidth=2, width=48, ratio=0.5):
self.tabwidth = tabwidth
self.indent = indent
self.width = max(0, width - int(tabwidth*indent))
self.lwidth = int(self.width*ratio)
self.rwidth = max(0, self.width - self.lwidth)
self.clwidth = 0
self.crwidth = 0
self.lbuffer = ''
self.rbuffer = ''
self.left = True
def _txt(self,txt):
if self.left:
if self.clwidth < self.lwidth:
txt = txt[:max(0, self.lwidth - self.clwidth)]
self.lbuffer += txt
self.clwidth += len(txt)
else:
if self.crwidth < self.rwidth:
txt = txt[:max(0, self.rwidth - self.crwidth)]
self.rbuffer += txt
self.crwidth += len(txt)
def start_inline(self,stylestack=None):
if (self.left and self.clwidth) or (not self.left and self.crwidth):
self._txt(' ')
def start_block(self,stylestack=None):
self.start_inline(stylestack)
def end_entity(self):
pass
def pre(self,text):
if text:
self._txt(text)
def text(self,text):
if text:
text = utfstr(text)
text = text.strip()
text = re.sub('\s+',' ',text)
if text:
self._txt(text)
def linebreak(self):
pass
def style(self,stylestack):
pass
def raw(self,raw):
pass
def start_right(self):
self.left = False
def get_line(self):
return ' ' * self.indent * self.tabwidth + self.lbuffer + ' ' * (self.width - self.clwidth - self.crwidth) + self.rbuffer
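# Illustrative usage sketch for XmlLineSerializer (hypothetical values):
#
#     line = XmlLineSerializer(indent=0, tabwidth=2, width=32, ratio=0.5)
#     line.text('Total')        # fills the left part
#     line.start_right()
#     line.text('12.00')        # fills the right part
#     receipt_line = line.get_line()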
class Escpos:
""" ESC/POS Printer object """
device = None
encoding = None
img_cache = {}
def _check_image_size(self, size):
""" Check and fix the size of the image to 32 bits """
if size % 32 == 0:
return (0, 0)
else:
image_border = 32 - (size % 32)
if (image_border % 2) == 0:
return (image_border / 2, image_border / 2)
else:
return (image_border / 2, (image_border / 2) + 1)
def _print_image(self, line, size):
""" Print formatted image """
i = 0
cont = 0
buffer = ""
self._raw(S_RASTER_N)
buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
self._raw(buffer.decode('hex'))
buffer = ""
while i < len(line):
hex_string = int(line[i:i+8],2)
buffer += "%02X" % hex_string
i += 8
cont += 1
if cont % 4 == 0:
self._raw(buffer.decode("hex"))
buffer = ""
cont = 0
def _raw_print_image(self, line, size, output=None ):
""" Print formatted image """
i = 0
cont = 0
buffer = ""
raw = ""
def __raw(string):
if output:
output(string)
else:
self._raw(string)
raw += S_RASTER_N
buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
raw += buffer.decode('hex')
buffer = ""
while i < len(line):
hex_string = int(line[i:i+8],2)
buffer += "%02X" % hex_string
i += 8
cont += 1
if cont % 4 == 0:
raw += buffer.decode("hex")
buffer = ""
cont = 0
return raw
def _convert_image(self, im):
""" Parse image and prepare it to a printable format """
pixels = []
pix_line = ""
im_left = ""
im_right = ""
switch = 0
img_size = [ 0, 0 ]
if im.size[0] > 512:
print "WARNING: Image is wider than 512 and could be truncated at print time "
if im.size[1] > 255:
raise ImageSizeError()
im_border = self._check_image_size(im.size[0])
for i in range(im_border[0]):
im_left += "0"
for i in range(im_border[1]):
im_right += "0"
for y in range(im.size[1]):
img_size[1] += 1
pix_line += im_left
img_size[0] += im_border[0]
for x in range(im.size[0]):
img_size[0] += 1
RGB = im.getpixel((x, y))
im_color = (RGB[0] + RGB[1] + RGB[2])
im_pattern = "1X0"
pattern_len = len(im_pattern)
switch = (switch - 1 ) * (-1)
for x in range(pattern_len):
if im_color <= (255 * 3 / pattern_len * (x+1)):
if im_pattern[x] == "X":
pix_line += "%d" % switch
else:
pix_line += im_pattern[x]
break
elif im_color > (255 * 3 / pattern_len * pattern_len) and im_color <= (255 * 3):
pix_line += im_pattern[-1]
break
pix_line += im_right
img_size[0] += im_border[1]
return (pix_line, img_size)
def image(self,path_img):
""" Open image file """
im_open = Image.open(path_img)
im = im_open.convert("RGB")
# Convert the RGB image in printable image
pix_line, img_size = self._convert_image(im)
self._print_image(pix_line, img_size)
def print_base64_image(self,img):
print 'print_b64_img'
id = md5.new(img).digest()
if id not in self.img_cache:
print 'not in cache'
img = img[img.find(',')+1:]
            f = io.BytesIO()
f.write(base64.decodestring(img))
f.seek(0)
img_rgba = Image.open(f)
img = Image.new('RGB', img_rgba.size, (255,255,255))
img.paste(img_rgba, mask=img_rgba.split()[3])
print 'convert image'
pix_line, img_size = self._convert_image(img)
print 'print image'
buffer = self._raw_print_image(pix_line, img_size)
self.img_cache[id] = buffer
print 'raw image'
self._raw(self.img_cache[id])
def qr(self,text):
""" Print QR Code for the provided string """
qr_code = qrcode.QRCode(version=4, box_size=4, border=1)
qr_code.add_data(text)
qr_code.make(fit=True)
qr_img = qr_code.make_image()
im = qr_img._img.convert("RGB")
# Convert the RGB image in printable image
self._convert_image(im)
def barcode(self, code, bc, width=255, height=2, pos='below', font='a'):
""" Print Barcode """
# Align Bar Code()
self._raw(TXT_ALIGN_CT)
# Height
        if 2 <= height <= 6:
self._raw(BARCODE_HEIGHT)
else:
raise BarcodeSizeError()
# Width
        if 1 <= width <= 255:
self._raw(BARCODE_WIDTH)
else:
raise BarcodeSizeError()
# Font
if font.upper() == "B":
self._raw(BARCODE_FONT_B)
else: # DEFAULT FONT: A
self._raw(BARCODE_FONT_A)
# Position
if pos.upper() == "OFF":
self._raw(BARCODE_TXT_OFF)
elif pos.upper() == "BOTH":
self._raw(BARCODE_TXT_BTH)
elif pos.upper() == "ABOVE":
self._raw(BARCODE_TXT_ABV)
else: # DEFAULT POSITION: BELOW
self._raw(BARCODE_TXT_BLW)
# Type
if bc.upper() == "UPC-A":
self._raw(BARCODE_UPC_A)
elif bc.upper() == "UPC-E":
self._raw(BARCODE_UPC_E)
elif bc.upper() == "EAN13":
self._raw(BARCODE_EAN13)
elif bc.upper() == "EAN8":
self._raw(BARCODE_EAN8)
elif bc.upper() == "CODE39":
self._raw(BARCODE_CODE39)
elif bc.upper() == "ITF":
self._raw(BARCODE_ITF)
elif bc.upper() == "NW7":
self._raw(BARCODE_NW7)
else:
raise BarcodeTypeError()
# Print Code
if code:
self._raw(code)
else:
            raise BarcodeCodeError()
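    # Illustrative receipt definition for receipt() below (a minimal sketch;
    # the supported tags are the ones handled by print_elem() inside receipt()):
    #
    #   <receipt>
    #     <h1>My Shop</h1>
    #     <line><left>1 x Coffee</left><right><value>2.50</value></right></line>
    #     <hr />
    #     <cut />
    #   </receipt>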
def receipt(self,xml):
"""
Prints an xml based receipt definition
"""
def strclean(string):
if not string:
string = ''
string = string.strip()
string = re.sub('\s+',' ',string)
return string
def format_value(value, decimals=3, width=0, decimals_separator='.', thousands_separator=',', autoint=False, symbol='', position='after'):
decimals = max(0,int(decimals))
width = max(0,int(width))
value = float(value)
if autoint and math.floor(value) == value:
decimals = 0
if width == 0:
width = ''
if thousands_separator:
formatstr = "{:"+str(width)+",."+str(decimals)+"f}"
else:
formatstr = "{:"+str(width)+"."+str(decimals)+"f}"
ret = formatstr.format(value)
ret = ret.replace(',','COMMA')
ret = ret.replace('.','DOT')
ret = ret.replace('COMMA',thousands_separator)
ret = ret.replace('DOT',decimals_separator)
if symbol:
if position == 'after':
ret = ret + symbol
else:
ret = symbol + ret
return ret
def print_elem(stylestack, serializer, elem, indent=0):
elem_styles = {
'h1': {'bold': 'on', 'size':'double'},
'h2': {'size':'double'},
'h3': {'bold': 'on', 'size':'double-height'},
'h4': {'size': 'double-height'},
'h5': {'bold': 'on'},
'em': {'font': 'b'},
'b': {'bold': 'on'},
}
stylestack.push()
if elem.tag in elem_styles:
stylestack.set(elem_styles[elem.tag])
stylestack.set(elem.attrib)
if elem.tag in ('p','div','section','article','receipt','header','footer','li','h1','h2','h3','h4','h5'):
serializer.start_block(stylestack)
serializer.text(elem.text)
for child in elem:
print_elem(stylestack,serializer,child)
serializer.start_inline(stylestack)
serializer.text(child.tail)
serializer.end_entity()
serializer.end_entity()
elif elem.tag in ('span','em','b','left','right'):
serializer.start_inline(stylestack)
serializer.text(elem.text)
for child in elem:
print_elem(stylestack,serializer,child)
serializer.start_inline(stylestack)
serializer.text(child.tail)
serializer.end_entity()
serializer.end_entity()
elif elem.tag == 'value':
serializer.start_inline(stylestack)
serializer.pre(format_value(
elem.text,
decimals=stylestack.get('value-decimals'),
width=stylestack.get('value-width'),
decimals_separator=stylestack.get('value-decimals-separator'),
thousands_separator=stylestack.get('value-thousands-separator'),
autoint=(stylestack.get('value-autoint') == 'on'),
symbol=stylestack.get('value-symbol'),
position=stylestack.get('value-symbol-position')
))
serializer.end_entity()
elif elem.tag == 'line':
width = stylestack.get('width')
if stylestack.get('size') in ('double', 'double-width'):
width = width / 2
lineserializer = XmlLineSerializer(stylestack.get('indent')+indent,stylestack.get('tabwidth'),width,stylestack.get('line-ratio'))
serializer.start_block(stylestack)
for child in elem:
if child.tag == 'left':
print_elem(stylestack,lineserializer,child,indent=indent)
elif child.tag == 'right':
lineserializer.start_right()
print_elem(stylestack,lineserializer,child,indent=indent)
serializer.pre(lineserializer.get_line())
serializer.end_entity()
elif elem.tag == 'ul':
serializer.start_block(stylestack)
bullet = stylestack.get('bullet')
for child in elem:
if child.tag == 'li':
serializer.style(stylestack)
serializer.raw(' ' * indent * stylestack.get('tabwidth') + bullet)
print_elem(stylestack,serializer,child,indent=indent+1)
serializer.end_entity()
elif elem.tag == 'ol':
cwidth = len(str(len(elem))) + 2
i = 1
serializer.start_block(stylestack)
for child in elem:
if child.tag == 'li':
serializer.style(stylestack)
serializer.raw(' ' * indent * stylestack.get('tabwidth') + ' ' + (str(i)+')').ljust(cwidth))
i = i + 1
print_elem(stylestack,serializer,child,indent=indent+1)
serializer.end_entity()
elif elem.tag == 'pre':
serializer.start_block(stylestack)
serializer.pre(elem.text)
serializer.end_entity()
elif elem.tag == 'hr':
width = stylestack.get('width')
if stylestack.get('size') in ('double', 'double-width'):
width = width / 2
serializer.start_block(stylestack)
serializer.text('-'*width)
serializer.end_entity()
elif elem.tag == 'br':
serializer.linebreak()
elif elem.tag == 'img':
if 'src' in elem.attrib and 'data:' in elem.attrib['src']:
self.print_base64_image(elem.attrib['src'])
elif elem.tag == 'barcode' and 'encoding' in elem.attrib:
serializer.start_block(stylestack)
self.barcode(strclean(elem.text),elem.attrib['encoding'])
serializer.end_entity()
elif elem.tag == 'cut':
self.cut()
elif elem.tag == 'partialcut':
self.cut(mode='part')
elif elem.tag == 'cashdraw':
self.cashdraw(2)
self.cashdraw(5)
stylestack.pop()
try:
stylestack = StyleStack()
serializer = XmlSerializer(self)
root = ET.fromstring(xml.encode('utf-8'))
self._raw(stylestack.to_escpos())
print_elem(stylestack,serializer,root)
if 'open-cashdrawer' in root.attrib and root.attrib['open-cashdrawer'] == 'true':
self.cashdraw(2)
self.cashdraw(5)
if not 'cut' in root.attrib or root.attrib['cut'] == 'true' :
self.cut()
except Exception as e:
errmsg = str(e)+'\n'+'-'*48+'\n'+traceback.format_exc() + '-'*48+'\n'
self.text(errmsg)
self.cut()
raise e
def text(self,txt):
""" Print Utf8 encoded alpha-numeric text """
if not txt:
return
try:
txt = txt.decode('utf-8')
except:
try:
txt = txt.decode('utf-16')
except:
pass
self.extra_chars = 0
def encode_char(char):
"""
Encodes a single utf-8 character into a sequence of
esc-pos code page change instructions and character declarations
"""
char_utf8 = char.encode('utf-8')
encoded = ''
encoding = self.encoding # we reuse the last encoding to prevent code page switches at every character
encodings = {
# TODO use ordering to prevent useless switches
# TODO Support other encodings not natively supported by python ( Thai, Khazakh, Kanjis )
'cp437': TXT_ENC_PC437,
'cp850': TXT_ENC_PC850,
'cp852': TXT_ENC_PC852,
'cp857': TXT_ENC_PC857,
'cp858': TXT_ENC_PC858,
'cp860': TXT_ENC_PC860,
'cp863': TXT_ENC_PC863,
'cp865': TXT_ENC_PC865,
'cp866': TXT_ENC_PC866,
'cp862': TXT_ENC_PC862,
'cp720': TXT_ENC_PC720,
'iso8859_2': TXT_ENC_8859_2,
'iso8859_7': TXT_ENC_8859_7,
'iso8859_9': TXT_ENC_8859_9,
'cp1254' : TXT_ENC_WPC1254,
'cp1255' : TXT_ENC_WPC1255,
'cp1256' : TXT_ENC_WPC1256,
'cp1257' : TXT_ENC_WPC1257,
'cp1258' : TXT_ENC_WPC1258,
'katakana' : TXT_ENC_KATAKANA,
}
remaining = copy.copy(encodings)
if not encoding :
encoding = 'cp437'
            while True: # Trying all encodings until one succeeds
try:
if encoding == 'katakana': # Japanese characters
if jcconv:
# try to convert japanese text to a half-katakanas
kata = jcconv.kata2half(jcconv.hira2kata(char_utf8))
if kata != char_utf8:
self.extra_chars += len(kata.decode('utf-8')) - 1
# the conversion may result in multiple characters
return encode_str(kata.decode('utf-8'))
else:
kata = char_utf8
if kata in TXT_ENC_KATAKANA_MAP:
encoded = TXT_ENC_KATAKANA_MAP[kata]
break
else:
raise ValueError()
else:
encoded = char.encode(encoding)
break
except ValueError: #the encoding failed, select another one and retry
if encoding in remaining:
del remaining[encoding]
if len(remaining) >= 1:
encoding = remaining.items()[0][0]
else:
encoding = 'cp437'
encoded = '\xb1' # could not encode, output error character
break;
if encoding != self.encoding:
# if the encoding changed, remember it and prefix the character with
# the esc-pos encoding change sequence
self.encoding = encoding
encoded = encodings[encoding] + encoded
return encoded
def encode_str(txt):
buffer = ''
for c in txt:
buffer += encode_char(c)
return buffer
txt = encode_str(txt)
# if the utf-8 -> codepage conversion inserted extra characters,
# remove double spaces to try to restore the original string length
# and prevent printing alignment issues
while self.extra_chars > 0:
dspace = txt.find(' ')
if dspace > 0:
txt = txt[:dspace] + txt[dspace+1:]
self.extra_chars -= 1
else:
break
self._raw(txt)
def set(self, align='left', font='a', type='normal', width=1, height=1):
""" Set text properties """
# Align
if align.upper() == "CENTER":
self._raw(TXT_ALIGN_CT)
elif align.upper() == "RIGHT":
self._raw(TXT_ALIGN_RT)
elif align.upper() == "LEFT":
self._raw(TXT_ALIGN_LT)
# Font
if font.upper() == "B":
self._raw(TXT_FONT_B)
else: # DEFAULT FONT: A
self._raw(TXT_FONT_A)
# Type
if type.upper() == "B":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_OFF)
elif type.upper() == "U":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "U2":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL2_ON)
elif type.upper() == "BU":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "BU2":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL2_ON)
elif type.upper == "NORMAL":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_OFF)
# Width
if width == 2 and height != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2WIDTH)
elif height == 2 and width != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2HEIGHT)
elif height == 2 and width == 2:
self._raw(TXT_2WIDTH)
self._raw(TXT_2HEIGHT)
else: # DEFAULT SIZE: NORMAL
self._raw(TXT_NORMAL)
def cut(self, mode=''):
""" Cut paper """
# Fix the size between last line and cut
# TODO: handle this with a line feed
self._raw("\n\n\n\n\n\n")
if mode.upper() == "PART":
self._raw(PAPER_PART_CUT)
else: # DEFAULT MODE: FULL CUT
self._raw(PAPER_FULL_CUT)
def cashdraw(self, pin):
""" Send pulse to kick the cash drawer """
if pin == 2:
self._raw(CD_KICK_2)
elif pin == 5:
self._raw(CD_KICK_5)
else:
raise CashDrawerError()
def hw(self, hw):
""" Hardware operations """
if hw.upper() == "INIT":
self._raw(HW_INIT)
elif hw.upper() == "SELECT":
self._raw(HW_SELECT)
elif hw.upper() == "RESET":
self._raw(HW_RESET)
else: # DEFAULT: DOES NOTHING
pass
def control(self, ctl):
""" Feed control sequences """
if ctl.upper() == "LF":
self._raw(CTL_LF)
elif ctl.upper() == "FF":
self._raw(CTL_FF)
elif ctl.upper() == "CR":
self._raw(CTL_CR)
elif ctl.upper() == "HT":
self._raw(CTL_HT)
elif ctl.upper() == "VT":
self._raw(CTL_VT)
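# Illustrative sketch (not part of the original driver): Escpos only formats
# the data; a concrete subclass is expected to provide _raw() to push the
# bytes to an actual device. Hypothetical example writing to a file-like device:
#
#     class FileDevice(Escpos):
#         def __init__(self, path):
#             self.device = open(path, 'wb')
#         def _raw(self, msg):
#             self.device.write(msg)
#
#     printer = FileDevice('/dev/usb/lp0')
#     printer.set(align='center', type='B')
#     printer.text('Hello\n')
#     printer.cut()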
| agpl-3.0 | -5,003,523,582,505,021,000 | -1,061,380,546,079,461,100 | 33.334068 | 146 | 0.47712 | false |
capriele/crazyflie-clients-python-move | lib/cflib/crtp/debugdriver.py | 9 | 35152 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Fake link driver used to debug the UI without using the Crazyflie.
The operation of this driver can be controlled in two ways, either by
connecting to different URIs or by sending messages to the DebugDriver port
through CRTP once connected.
For normal connections a console thread is also started that will send
generated console output via CRTP.
"""
__author__ = 'Bitcraze AB'
__all__ = ['DebugDriver']
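# Illustrative usage sketch (hypothetical callback functions; the recognized
# URIs are the ones listed by DebugDriver.scan_interface()):
#
#     driver = DebugDriver()
#     driver.connect("debug://0/0", quality_cb, error_cb)
#     pk = driver.receive_packet(-1)   # blocking read of the next fake packet
#     driver.close()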
from threading import Thread
from .crtpdriver import CRTPDriver
from .crtpstack import CRTPPacket, CRTPPort
from .exceptions import WrongUriType
import Queue
import re
import time
import struct
from datetime import datetime
from cflib.crazyflie.log import LogTocElement
from cflib.crazyflie.param import ParamTocElement
import random
import string
import errno
import logging
logger = logging.getLogger(__name__)
# This setup is used to debug raw memory logging
memlogging = {0x01: {"min": 0, "max": 255, "mod": 1, "vartype": 1},
0x02: {"min": 0, "max": 65000, "mod": 100, "vartype": 2},
0x03: {"min": 0, "max": 100000, "mod": 1000, "vartype": 3},
0x04: {"min":-100, "max": 100, "mod": 1, "vartype": 4},
0x05: {"min":-10000, "max": 10000, "mod": 2000, "vartype": 5},
0x06: {"min":-50000, "max": 50000, "mod": 1000, "vartype": 6},
0x07: {"min": 0, "max": 255, "mod": 1, "vartype": 1}}
class DebugDriver (CRTPDriver):
""" Debug driver used for debugging UI/communication without using a
Crazyflie"""
def __init__(self):
self.fakeLoggingThreads = []
# Fill up the fake logging TOC with values and data
self.fakeLogToc = []
self.fakeLogToc.append({"varid": 0, "vartype": 5, "vargroup": "imu",
"varname": "gyro_x", "min":-10000,
"max": 10000, "mod": 1000})
self.fakeLogToc.append({"varid": 1, "vartype": 5, "vargroup": "imu",
"varname": "gyro_y", "min":-10000,
"max": 10000, "mod": 150})
self.fakeLogToc.append({"varid": 2, "vartype": 5, "vargroup": "imu",
"varname": "gyro_z", "min":-10000,
"max": 10000, "mod": 200})
self.fakeLogToc.append({"varid": 3, "vartype": 5, "vargroup": "imu",
"varname": "acc_x", "min":-1000,
"max": 1000, "mod": 15})
self.fakeLogToc.append({"varid": 4, "vartype": 5, "vargroup": "imu",
"varname": "acc_y", "min":-1000,
"max": 1000, "mod": 10})
self.fakeLogToc.append({"varid": 5, "vartype": 5, "vargroup": "imu",
"varname": "acc_z", "min":-1000,
"max": 1000, "mod": 20})
self.fakeLogToc.append({"varid": 6, "vartype": 7,
"vargroup": "stabilizer", "varname": "roll",
"min":-90, "max": 90, "mod": 2})
self.fakeLogToc.append({"varid": 7, "vartype": 7,
"vargroup": "stabilizer", "varname": "pitch",
"min":-90, "max": 90, "mod": 1.5})
self.fakeLogToc.append({"varid": 8, "vartype": 7,
"vargroup": "stabilizer", "varname": "yaw",
"min":-90, "max": 90, "mod": 2.5})
self.fakeLogToc.append({"varid": 9, "vartype": 7, "vargroup": "pm",
"varname": "vbat", "min": 3.0,
"max": 4.2, "mod": 0.1})
self.fakeLogToc.append({"varid": 10, "vartype": 6, "vargroup": "motor",
"varname": "m1", "min": 0, "max": 65000,
"mod": 1000})
self.fakeLogToc.append({"varid": 11, "vartype": 6, "vargroup": "motor",
"varname": "m2", "min": 0, "max": 65000,
"mod": 1000})
self.fakeLogToc.append({"varid": 12, "vartype": 6, "vargroup": "motor",
"varname": "m3", "min": 0, "max": 65000,
"mod": 1000})
self.fakeLogToc.append({"varid": 13, "vartype": 6, "vargroup": "motor",
"varname": "m4", "min": 0, "max": 65000,
"mod": 1000})
self.fakeLogToc.append({"varid": 14, "vartype": 2,
"vargroup": "stabilizer", "varname": "thrust",
"min": 0, "max": 65000, "mod": 1000})
self.fakeLogToc.append({"varid": 15, "vartype": 7,
"vargroup": "baro", "varname": "asl",
"min": 540, "max": 545, "mod": 0.5})
self.fakeLogToc.append({"varid": 16, "vartype": 7,
"vargroup": "baro", "varname": "aslRaw",
"min": 540, "max": 545, "mod": 1.0})
self.fakeLogToc.append({"varid": 17, "vartype": 7,
"vargroup": "baro", "varname": "aslLong",
"min": 540, "max": 545, "mod": 0.5})
self.fakeLogToc.append({"varid": 18, "vartype": 7,
"vargroup": "baro", "varname": "temp",
"min": 26, "max": 38, "mod": 1.0})
self.fakeLogToc.append({"varid": 19, "vartype": 7,
"vargroup": "altHold", "varname": "target",
"min": 542, "max": 543, "mod": 0.1})
self.fakeLogToc.append({"varid": 20, "vartype": 6,
"vargroup": "gps", "varname": "lat",
"min": 556112190, "max": 556112790,
"mod": 10})
self.fakeLogToc.append({"varid": 21, "vartype": 6,
"vargroup": "gps", "varname": "lon",
"min": 129945110, "max": 129945710,
"mod": 10})
self.fakeLogToc.append({"varid": 22, "vartype": 6,
"vargroup": "gps", "varname": "hMSL",
"min": 0, "max": 100000,
"mod": 1000})
self.fakeLogToc.append({"varid": 23, "vartype": 6,
"vargroup": "gps", "varname": "heading",
"min": -10000000, "max": 10000000,
"mod": 100000})
self.fakeLogToc.append({"varid": 24, "vartype": 6,
"vargroup": "gps", "varname": "gSpeed",
"min": 0, "max": 1000,
"mod": 100})
self.fakeLogToc.append({"varid": 25, "vartype": 3,
"vargroup": "gps", "varname": "hAcc",
"min": 0, "max": 5000,
"mod": 100})
self.fakeLogToc.append({"varid": 26, "vartype": 1,
"vargroup": "gps", "varname": "fixType",
"min": 0, "max": 5,
"mod": 1})
# Fill up the fake logging TOC with values and data
self.fakeParamToc = []
self.fakeParamToc.append({"varid": 0, "vartype": 0x08,
"vargroup": "blah", "varname": "p",
"writable": True, "value": 100})
self.fakeParamToc.append({"varid": 1, "vartype": 0x0A,
"vargroup": "info", "varname": "cid",
"writable": False, "value": 1234})
self.fakeParamToc.append({"varid": 2, "vartype": 0x06,
"vargroup": "rpid", "varname": "prp",
"writable": True, "value": 1.5})
self.fakeParamToc.append({"varid": 3, "vartype": 0x06,
"vargroup": "rpid", "varname": "pyaw",
"writable": True, "value": 2.5})
self.fakeParamToc.append({"varid": 4, "vartype": 0x06,
"vargroup": "rpid", "varname": "irp",
"writable": True, "value": 3.5})
self.fakeParamToc.append({"varid": 5, "vartype": 0x06,
"vargroup": "rpid", "varname": "iyaw",
"writable": True, "value": 4.5})
self.fakeParamToc.append({"varid": 6, "vartype": 0x06,
"vargroup": "rpid", "varname": "drp",
"writable": True, "value": 5.5})
self.fakeParamToc.append({"varid": 7, "vartype": 0x06,
"vargroup": "rpid", "varname": "dyaw",
"writable": True, "value": 6.5})
self.fakeParamToc.append({"varid": 8, "vartype": 0x06,
"vargroup": "apid", "varname": "prp",
"writable": True, "value": 7.5})
self.fakeParamToc.append({"varid": 9, "vartype": 0x06,
"vargroup": "apid", "varname": "pyaw",
"writable": True, "value": 8.5})
self.fakeParamToc.append({"varid": 10, "vartype": 0x06,
"vargroup": "apid", "varname": "irp",
"writable": True, "value": 9.5})
self.fakeParamToc.append({"varid": 11, "vartype": 0x06,
"vargroup": "apid", "varname": "iyaw",
"writable": True, "value": 10.5})
self.fakeParamToc.append({"varid": 12, "vartype": 0x06,
"vargroup": "apid", "varname": "drp",
"writable": True, "value": 11.5})
self.fakeParamToc.append({"varid": 13, "vartype": 0x06,
"vargroup": "apid", "varname": "dyaw",
"writable": True, "value": 12.5})
self.fakeParamToc.append({"varid": 14, "vartype": 0x08,
"vargroup": "flightctrl",
"varname": "xmode", "writable": True,
"value": 1})
self.fakeParamToc.append({"varid": 15, "vartype": 0x08,
"vargroup": "flightctrl",
"varname": "ratepid", "writable": True,
"value": 1})
self.fakeParamToc.append({"varid": 16, "vartype": 0x08,
"vargroup": "imu_sensors",
"varname": "HMC5883L", "writable": False,
"value": 1})
self.fakeParamToc.append({"varid": 17, "vartype": 0x08,
"vargroup": "imu_sensors",
"varname": "MS5611", "writable": False,
"value": 1})
self.fakeParamToc.append({"varid": 18, "vartype": 0x0A,
"vargroup": "firmware",
"varname": "revision0", "writable": False,
"value": 0xdeb})
self.fakeParamToc.append({"varid": 19, "vartype": 0x09,
"vargroup": "firmware",
"varname": "revision1", "writable": False,
"value": 0x99})
self.fakeParamToc.append({"varid": 20, "vartype": 0x08,
"vargroup": "firmware",
"varname": "modified", "writable": False,
"value": 1})
self.fakeParamToc.append({"varid": 21, "vartype": 0x08,
"vargroup": "imu_tests",
"varname": "MPU6050", "writable": False,
"value": 1})
self.fakeParamToc.append({"varid": 22, "vartype": 0x08,
"vargroup": "imu_tests",
"varname": "HMC5883L", "writable": False,
"value": 1})
self.fakeParamToc.append({"varid": 23, "vartype": 0x08,
"vargroup": "imu_tests",
"varname": "MS5611", "writable": False,
"value": 1})
self.fakeflash = {}
self._random_answer_delay = True
self.queue = Queue.Queue()
self._packet_handler = _PacketHandlingThread(self.queue,
self.fakeLogToc,
self.fakeParamToc)
self._packet_handler.start()
def scan_interface(self):
return [["debug://0/0", "Normal connection"],
["debug://0/1", "Fail to connect"],
["debug://0/2", "Incomplete log TOC download"],
["debug://0/3", "Insert random delays on replies"],
["debug://0/4", "Insert random delays on replies and random TOC CRCs"],
["debug://0/5", "Normal but random TOC CRCs"]]
def get_status(self):
return "Ok"
def get_name(self):
return "debug"
def connect(self, uri, linkQualityCallback, linkErrorCallback):
if not re.search("^debug://", uri):
raise WrongUriType("Not a debug URI")
self._packet_handler.linkErrorCallback = linkErrorCallback
self._packet_handler.linkQualityCallback = linkQualityCallback
# Debug-options for this driver that
# is set by using different connection URIs
self._packet_handler.inhibitAnswers = False
self._packet_handler.doIncompleteLogTOC = False
self._packet_handler.bootloader = False
self._packet_handler._random_answer_delay = False
self._packet_handler._random_toc_crcs = False
if (re.search("^debug://.*/1\Z", uri)):
self._packet_handler.inhibitAnswers = True
if (re.search("^debug://.*/110\Z", uri)):
self._packet_handler.bootloader = True
if (re.search("^debug://.*/2\Z", uri)):
self._packet_handler.doIncompleteLogTOC = True
if (re.search("^debug://.*/3\Z", uri)):
self._packet_handler._random_answer_delay = True
if (re.search("^debug://.*/4\Z", uri)):
self._packet_handler._random_answer_delay = True
self._packet_handler._random_toc_crcs = True
if (re.search("^debug://.*/5\Z", uri)):
self._packet_handler._random_toc_crcs = True
self.fakeConsoleThread = None
if (not self._packet_handler.inhibitAnswers and not self._packet_handler.bootloader):
self.fakeConsoleThread = FakeConsoleThread(self.queue)
self.fakeConsoleThread.start()
if (self._packet_handler.linkQualityCallback is not None):
self._packet_handler.linkQualityCallback(0)
def receive_packet(self, time=0):
if time == 0:
try:
return self.queue.get(False)
except Queue.Empty:
return None
elif time < 0:
try:
return self.queue.get(True)
except Queue.Empty:
return None
else:
try:
return self.queue.get(True, time)
except Queue.Empty:
return None
def send_packet(self, pk):
self._packet_handler.handle_packet(pk)
def close(self):
logger.info("Closing debugdriver")
for f in self._packet_handler.fakeLoggingThreads:
f.stop()
if self.fakeConsoleThread:
self.fakeConsoleThread.stop()
class _PacketHandlingThread(Thread):
"""Thread for handling packets asynchronously"""
def __init__(self, out_queue, fake_log_toc, fake_param_toc):
Thread.__init__(self)
self.setDaemon(True)
self.queue = out_queue
self.fakeLogToc = fake_log_toc
self.fakeParamToc = fake_param_toc
self._in_queue = Queue.Queue()
self.inhibitAnswers = False
self.doIncompleteLogTOC = False
self.bootloader = False
self._random_answer_delay = False
self._random_toc_crcs = False
self.linkErrorCallback = None
self.linkQualityCallback = None
random.seed(None)
self.fakeLoggingThreads = []
self._added_blocks = []
self.nowAnswerCounter = 4
def handle_packet(self, pk):
self._in_queue.put(pk)
def run(self):
while (True):
pk = self._in_queue.get(True)
if (self.inhibitAnswers):
self.nowAnswerCounter = self.nowAnswerCounter - 1
logger.debug("Not answering with any data, will send link errori"
" in %d retries", self.nowAnswerCounter)
if (self.nowAnswerCounter == 0):
self.linkErrorCallback("Nothing is answering, and it"
" shouldn't")
else:
if (pk.port == 0xFF):
self._handle_bootloader(pk)
elif (pk.port == CRTPPort.DEBUGDRIVER):
self._handle_debugmessage(pk)
elif (pk.port == CRTPPort.COMMANDER):
pass
elif (pk.port == CRTPPort.LOGGING):
self._handle_logging(pk)
elif (pk.port == CRTPPort.PARAM):
self.handleParam(pk)
else:
logger.warning("Not handling incomming packets on port [%d]",
pk.port)
def _handle_bootloader(self, pk):
cmd = pk.datal[1]
if (cmd == 0x10): # Request info about copter
p = CRTPPacket()
p.set_header(0xFF, 0xFF)
pageSize = 1024
buffPages = 10
flashPages = 100
flashStart = 1
p.data = struct.pack('<BBHHHH', 0xFF, 0x10, pageSize, buffPages,
flashPages, flashStart)
p.data += struct.pack('B' * 12, 0xA0A1A2A3A4A5)
self._send_packet(p)
logging.info("Bootloader: Sending info back info")
elif (cmd == 0x14): # Upload buffer
[page, addr] = struct.unpack('<HH', p.data[0:4])
elif (cmd == 0x18): # Flash page
p = CRTPPacket()
p.set_header(0xFF, 0xFF)
p.data = struct.pack('<BBH', 0xFF, 0x18, 1)
self._send_packet(p)
elif (cmd == 0xFF): # Reset to firmware
logger.info("Bootloader: Got reset command")
else:
logger.warning("Bootloader: Unknown command 0x%02X", cmd)
def _handle_debugmessage(self, pk):
if (pk.channel == 0):
cmd = struct.unpack("B", pk.data[0])[0]
if (cmd == 0): # Fake link quality
newLinkQuality = struct.unpack("B", pk.data[1])[0]
self.linkQualityCallback(newLinkQuality)
elif (cmd == 1):
self.linkErrorCallback("DebugDriver was forced to disconnect!")
else:
logger.warning("Debug port: Not handling cmd=%d on channel 0",
cmd)
else:
logger.warning("Debug port: Not handling channel=%d",
pk.channel)
def _handle_toc_access(self, pk):
chan = pk.channel
cmd = struct.unpack("B", pk.data[0])[0]
logger.info("TOC access on port %d", pk.port)
if (chan == 0): # TOC Access
cmd = struct.unpack("B", pk.data[0])[0]
if (cmd == 0): # Reqest variable info
p = CRTPPacket()
p.set_header(pk.port, 0)
varIndex = 0
if (len(pk.data) > 1):
varIndex = struct.unpack("B", pk.data[1])[0]
logger.debug("TOC[%d]: Requesting ID=%d", pk.port,
varIndex)
else:
logger.debug("TOC[%d]: Requesting first index..surprise,"
" it 0 !", pk.port)
if (pk.port == CRTPPort.LOGGING):
l = self.fakeLogToc[varIndex]
if (pk.port == CRTPPort.PARAM):
l = self.fakeParamToc[varIndex]
vartype = l["vartype"]
if (pk.port == CRTPPort.PARAM and l["writable"] is True):
vartype = vartype | (0x10)
p.data = struct.pack("<BBB", cmd, l["varid"], vartype)
for ch in l["vargroup"]:
p.data += ch
p.data += '\0'
for ch in l["varname"]:
p.data += ch
p.data += '\0'
if (self.doIncompleteLogTOC is False):
self._send_packet(p)
elif (varIndex < 5):
self._send_packet(p)
else:
logger.info("TOC: Doing incomplete TOC, stopping after"
" varIndex => 5")
if (cmd == 1): # TOC CRC32 request
fakecrc = 0
if (pk.port == CRTPPort.LOGGING):
tocLen = len(self.fakeLogToc)
fakecrc = 0xAAAAAAAA
if (pk.port == CRTPPort.PARAM):
tocLen = len(self.fakeParamToc)
fakecrc = 0xBBBBBBBB
if self._random_toc_crcs:
fakecrc = int(''.join(random.choice("ABCDEF" + string.digits) for x in range(8)), 16)
logger.debug("Generated random TOC CRC: 0x%x", fakecrc)
logger.info("TOC[%d]: Requesting TOC CRC, sending back fake"
" stuff: %d", pk.port, len(self.fakeLogToc))
p = CRTPPacket()
p.set_header(pk.port, 0)
p.data = struct.pack('<BBIBB', 1, tocLen, fakecrc, 16, 24)
self._send_packet(p)
def handleParam(self, pk):
chan = pk.channel
cmd = struct.unpack("B", pk.data[0])[0]
logger.debug("PARAM: Port=%d, Chan=%d, cmd=%d", pk.port,
chan, cmd)
if (chan == 0): # TOC Access
self._handle_toc_access(pk)
elif (chan == 2): # Settings access
varId = pk.datal[0]
formatStr = ParamTocElement.types[self.fakeParamToc
[varId]["vartype"]][1]
newvalue = struct.unpack(formatStr, pk.data[1:])[0]
self.fakeParamToc[varId]["value"] = newvalue
logger.info("PARAM: New value [%s] for param [%d]", newvalue,
varId)
# Send back the new value
p = CRTPPacket()
p.set_header(pk.port, 2)
p.data += struct.pack("<B", varId)
p.data += struct.pack(formatStr, self.fakeParamToc[varId]["value"])
self._send_packet(p)
elif (chan == 1):
p = CRTPPacket()
p.set_header(pk.port, 1)
varId = cmd
p.data += struct.pack("<B", varId)
formatStr = ParamTocElement.types[self.fakeParamToc
[varId]["vartype"]][1]
p.data += struct.pack(formatStr, self.fakeParamToc[varId]["value"])
logger.info("PARAM: Getting value for %d", varId)
self._send_packet(p)
def _handle_logging(self, pk):
chan = pk.channel
cmd = struct.unpack("B", pk.data[0])[0]
logger.debug("LOG: Chan=%d, cmd=%d", chan, cmd)
if (chan == 0): # TOC Access
self._handle_toc_access(pk)
elif (chan == 1): # Settings access
if (cmd == 0):
blockId = ord(pk.data[1])
if blockId not in self._added_blocks:
self._added_blocks.append(blockId)
logger.info("LOG:Adding block id=%d", blockId)
listofvars = pk.data[3:]
fakeThread = _FakeLoggingDataThread(self.queue, blockId,
listofvars,
self.fakeLogToc)
self.fakeLoggingThreads.append(fakeThread)
fakeThread.start()
                    # Answer that everything is ok
p = CRTPPacket()
p.set_header(5, 1)
p.data = struct.pack('<BBB', 0, blockId, 0x00)
self._send_packet(p)
else:
p = CRTPPacket()
p.set_header(5, 1)
p.data = struct.pack('<BBB', 0, blockId, errno.EEXIST)
self._send_packet(p)
if (cmd == 1):
logger.warning("LOG: Appending block not implemented!")
if (cmd == 2):
blockId = ord(pk.data[1])
logger.info("LOG: Should delete block %d", blockId)
success = False
for fb in self.fakeLoggingThreads:
if (fb.blockId == blockId):
fb._disable_logging()
fb.stop()
p = CRTPPacket()
p.set_header(5, 1)
p.data = struct.pack('<BBB', cmd, blockId, 0x00)
self._send_packet(p)
logger.info("LOG: Deleted block=%d", blockId)
success = True
if (success is False):
logger.warning("LOG: Could not delete block=%d, not found",
blockId)
# TODO: Send back error code
if (cmd == 3):
blockId = ord(pk.data[1])
period = ord(pk.data[2]) * 10 # Sent as multiple of 10 ms
logger.info("LOG:Starting block %d", blockId)
success = False
for fb in self.fakeLoggingThreads:
if (fb.blockId == blockId):
fb._enable_logging()
fb.period = period
p = CRTPPacket()
p.set_header(5, 1)
p.data = struct.pack('<BBB', cmd, blockId, 0x00)
self._send_packet(p)
logger.info("LOG:Started block=%d", blockId)
success = True
if (success is False):
logger.info("LOG:Could not start block=%d, not found",
blockId)
# TODO: Send back error code
if (cmd == 4):
blockId = ord(pk.data[1])
logger.info("LOG:Pausing block %d", blockId)
success = False
for fb in self.fakeLoggingThreads:
if (fb.blockId == blockId):
fb._disable_logging()
p = CRTPPacket()
p.set_header(5, 1)
p.data = struct.pack('<BBB', cmd, blockId, 0x00)
self._send_packet(p)
logger.info("LOG:Pause block=%d", blockId)
success = True
if (success is False):
logger.warning("LOG:Could not pause block=%d, not found",
blockId)
# TODO: Send back error code
if (cmd == 5):
logger.info("LOG: Reset logging, but doing nothing")
p = CRTPPacket()
p.set_header(5, 1)
p.data = struct.pack('<BBB', cmd, 0x00, 0x00)
self._send_packet(p)
import traceback
logger.info(traceback.format_exc())
elif (chan > 1):
logger.warning("LOG: Uplink packets with channes > 1 not"
" supported!")
def _send_packet(self, pk):
# Do not delay log data
if self._random_answer_delay and pk.port != 0x05 and pk.channel != 0x02:
# Calculate a delay between 0ms and 250ms
delay = random.randint(0, 250)/1000.0
logger.debug("Delaying answer %.2fms", delay*1000)
time.sleep(delay)
self.queue.put(pk)
class _FakeLoggingDataThread (Thread):
"""Thread that will send back fake logging data via CRTP"""
def __init__(self, outQueue, blockId, listofvars, fakeLogToc):
Thread.__init__(self)
self.starttime = datetime.now()
self.outQueue = outQueue
self.setDaemon(True)
self.mod = 0
self.blockId = blockId
self.period = 0
self.listofvars = listofvars
self.shouldLog = False
self.fakeLogToc = fakeLogToc
self.fakeLoggingData = []
self.setName("Fakelog block=%d" % blockId)
self.shouldQuit = False
logging.info("FakeDataLoggingThread created for blockid=%d", blockId)
i = 0
while (i < len(listofvars)):
varType = ord(listofvars[i])
var_stored_as = (varType >> 8)
var_fetch_as = (varType & 0xFF)
if (var_stored_as > 0):
addr = struct.unpack("<I", listofvars[i + 1:i + 5])
logger.debug("FakeLoggingThread: We should log a memory addr"
" 0x%04X", addr)
self.fakeLoggingData.append([memlogging[var_fetch_as],
memlogging[var_fetch_as]["min"],
1])
i = i + 5
else:
varId = ord(listofvars[i])
logger.debug("FakeLoggingThread: We sould log variable from"
" TOC: id=%d, type=0x%02X", varId, varType)
for t in self.fakeLogToc:
if (varId == t["varid"]):
# Each touple will have var data and current fake value
self.fakeLoggingData.append([t, t["min"], 1])
i = i + 2
def _enable_logging(self):
self.shouldLog = True
logging.info("_FakeLoggingDataThread: Enable thread [%s] at period %d",
self.getName(), self.period)
def _disable_logging(self):
self.shouldLog = False
logging.info("_FakeLoggingDataThread: Disable thread [%s]",
self.getName())
def stop(self):
self.shouldQuit = True
def run(self):
while(self.shouldQuit is False):
if (self.shouldLog is True):
p = CRTPPacket()
p.set_header(5, 2)
p.data = struct.pack('<B', self.blockId)
timestamp = int((datetime.now()-self.starttime).total_seconds()*1000)
p.data += struct.pack('BBB', timestamp&0xff, (timestamp>>8)&0x0ff, (timestamp>>16)&0x0ff) # Timestamp
for d in self.fakeLoggingData:
# Set new value
d[1] = d[1] + d[0]["mod"] * d[2]
                    # Obey the limitations
if (d[1] > d[0]["max"]):
d[1] = d[0]["max"] # Limit value
d[2] = -1 # Switch direction
if (d[1] < d[0]["min"]):
d[1] = d[0]["min"] # Limit value
d[2] = 1 # Switch direction
# Pack value
formatStr = LogTocElement.types[d[0]["vartype"]][1]
p.data += struct.pack(formatStr, d[1])
self.outQueue.put(p)
time.sleep(self.period / 1000.0) # Period in ms here
class FakeConsoleThread (Thread):
"""Thread that will send back fake console data via CRTP"""
def __init__(self, outQueue):
Thread.__init__(self)
self.outQueue = outQueue
self.setDaemon(True)
self._should_run = True
def stop(self):
        self._should_run = False
def run(self):
# Temporary hack to test GPS from firmware by sending NMEA string on
# console
long_val = 0
lat_val = 0
alt_val = 0
while(self._should_run):
long_val += 1
lat_val += 1
alt_val += 1.0
long_string = "5536.677%d" % (long_val % 99)
lat_string = "01259.645%d" % (lat_val % 99)
alt_string = "%.1f" % (alt_val % 100.0)
# Copy of what is sent from the module, but note that only
# the GPGGA message is being simulated, the others are fixed...
self._send_text("Time is now %s\n" % datetime.now())
self._send_text("$GPVTG,,T,,M,0.386,N,0.716,K,A*2E\n")
self._send_text("$GPGGA,135544.0")
self._send_text("0,%s,N,%s,E,1,04,2.62,3.6,M,%s,M,,*58\n" % (long_string, lat_string, alt_string))
self._send_text("$GPGSA,A,3,31,20,23,07,,,,,,,,,3.02,2.62,1.52*05\n")
self._send_text("$GPGSV,2,1,07,07,09,181,15,13,63,219,26,16,02,097,,17,05,233,20*7E\n")
self._send_text("$GPGSV,2,2,07,20,42,119,35,23,77,097,27,31,12,032,19*47\n")
self._send_text("$GPGLL,5536.67734,N,01259.64578,E,135544.00,A,A*68\n")
time.sleep(2)
def _send_text(self, message):
p = CRTPPacket()
p.set_header(0, 0)
us = "%is" % len(message)
# This might be done prettier ;-)
p.data = struct.pack(us, message)
self.outQueue.put(p)
| gpl-2.0 | -3,420,640,992,246,351,000 | -4,709,938,057,984,391,000 | 44.770833 | 118 | 0.463217 | false |
ehashman/oh-mainline | vendor/packages/gdata/samples/apps/adminsettings_example.py | 41 | 5677 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a Sample for Google Apps Admin Settings.
AdminSettingsSample: shows everything you ever wanted to know about
your Google Apps Domain but were afraid to ask.
"""
__author__ = '[email protected]'
import getopt
import getpass
import sys
import time
import gdata.apps.service
import gdata.apps.adminsettings.service
class AdminSettingsSample(object):
"""AdminSettingsSample object demos Admin Settings API."""
def __init__(self, email, password, domain):
"""Constructor for the AdminSettingsSample object.
    Takes an email and password corresponding to a Google Apps admin
    account to demo the Admin Settings API.
Args:
email: [string] The e-mail address of the account to use for the sample.
password: [string] The password corresponding to the account specified by
the email parameter.
domain: [string] The domain for the Profiles feed
"""
self.gd_client = gdata.apps.adminsettings.service.AdminSettingsService()
self.gd_client.domain = domain
self.gd_client.email = email
self.gd_client.password = password
self.gd_client.source = 'GoogleInc-AdminSettingsPythonSample-1'
self.gd_client.ProgrammaticLogin()
def Run(self):
    # Pause 1 sec in between calls to prevent quota warnings
print 'Google Apps Domain: ', self.gd_client.domain
time.sleep(1)
print 'Default Language: ', self.gd_client.GetDefaultLanguage()
time.sleep(1)
print 'Organization Name: ', self.gd_client.GetOrganizationName()
time.sleep(1)
print 'Maximum Users: ', self.gd_client.GetMaximumNumberOfUsers()
time.sleep(1)
print 'Current Users: ', self.gd_client.GetCurrentNumberOfUsers()
time.sleep(1)
print 'Domain is Verified: ',self.gd_client.IsDomainVerified()
time.sleep(1)
print 'Support PIN: ',self.gd_client.GetSupportPIN()
time.sleep(1)
print 'Domain Edition: ', self.gd_client.GetEdition()
time.sleep(1)
print 'Customer PIN: ', self.gd_client.GetCustomerPIN()
time.sleep(1)
print 'Domain Creation Time: ', self.gd_client.GetCreationTime()
time.sleep(1)
print 'Domain Country Code: ', self.gd_client.GetCountryCode()
time.sleep(1)
print 'Admin Secondary Email: ', self.gd_client.GetAdminSecondaryEmail()
time.sleep(1)
cnameverificationstatus = self.gd_client.GetCNAMEVerificationStatus()
print 'CNAME Verification Record Name: ', cnameverificationstatus['recordName']
print 'CNAME Verification Verified: ', cnameverificationstatus['verified']
print 'CNAME Verification Method: ', cnameverificationstatus['verificationMethod']
time.sleep(1)
mxverificationstatus = self.gd_client.GetMXVerificationStatus()
print 'MX Verification Verified: ', mxverificationstatus['verified']
print 'MX Verification Method: ', mxverificationstatus['verificationMethod']
time.sleep(1)
ssosettings = self.gd_client.GetSSOSettings()
print 'SSO Enabled: ', ssosettings['enableSSO']
print 'SSO Signon Page: ', ssosettings['samlSignonUri']
print 'SSO Logout Page: ', ssosettings['samlLogoutUri']
print 'SSO Password Page: ', ssosettings['changePasswordUri']
print 'SSO Whitelist IPs: ', ssosettings['ssoWhitelist']
print 'SSO Use Domain Specific Issuer: ', ssosettings['useDomainSpecificIssuer']
time.sleep(1)
ssokey = self.gd_client.GetSSOKey()
print 'SSO Key Modulus: ', ssokey['modulus']
print 'SSO Key Exponent: ', ssokey['exponent']
print 'SSO Key Algorithm: ', ssokey['algorithm']
print 'SSO Key Format: ', ssokey['format']
print 'User Migration Enabled: ', self.gd_client.IsUserMigrationEnabled()
time.sleep(1)
outboundgatewaysettings = self.gd_client.GetOutboundGatewaySettings()
print 'Outbound Gateway Smart Host: ', outboundgatewaysettings['smartHost']
print 'Outbound Gateway Mode: ', outboundgatewaysettings['smtpMode']
def main():
"""Demonstrates use of the Admin Settings API using the AdminSettingsSample object."""
# Parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], '', ['user=', 'pw=', 'domain='])
except getopt.error, msg:
print 'python adminsettings_example.py --user [username] --pw [password]'
print ' --domain [domain]'
sys.exit(2)
user = ''
pw = ''
domain = ''
# Process options
for option, arg in opts:
if option == '--user':
user = arg
elif option == '--pw':
pw = arg
elif option == '--domain':
domain = arg
while not domain:
print 'NOTE: Please run these tests only with a test account.'
domain = raw_input('Please enter your apps domain: ')
while not user:
    user = raw_input('Please enter an administrator account: ')+'@'+domain
while not pw:
pw = getpass.getpass('Please enter password: ')
if not pw:
print 'Password cannot be blank.'
try:
sample = AdminSettingsSample(user, pw, domain)
except gdata.service.BadAuthentication:
print 'Invalid user credentials given.'
return
sample.Run()
if __name__ == '__main__':
main()
| agpl-3.0 | 3,154,682,050,310,571,000 | 1,739,991,808,630,697,500 | 36.596026 | 88 | 0.703012 | false |
waynesun09/virt-test | virttest/remote.py | 1 | 44950 | """
Functions and classes used for logging into guests and transferring files.
"""
import logging
import time
import re
import os
import shutil
import tempfile
import aexpect
import utils_misc
import rss_client
import base64
from remote_commander import remote_master
from remote_commander import messenger
from autotest.client.shared import error
from autotest.client import utils
import data_dir
class LoginError(Exception):
def __init__(self, msg, output):
Exception.__init__(self, msg, output)
self.msg = msg
self.output = output
def __str__(self):
return "%s (output: %r)" % (self.msg, self.output)
class LoginAuthenticationError(LoginError):
pass
class LoginTimeoutError(LoginError):
def __init__(self, output):
LoginError.__init__(self, "Login timeout expired", output)
class LoginProcessTerminatedError(LoginError):
def __init__(self, status, output):
LoginError.__init__(self, None, output)
self.status = status
def __str__(self):
return ("Client process terminated (status: %s, output: %r)" %
(self.status, self.output))
class LoginBadClientError(LoginError):
def __init__(self, client):
LoginError.__init__(self, None, None)
self.client = client
def __str__(self):
return "Unknown remote shell client: %r" % self.client
class SCPError(Exception):
def __init__(self, msg, output):
Exception.__init__(self, msg, output)
self.msg = msg
self.output = output
def __str__(self):
return "%s (output: %r)" % (self.msg, self.output)
class SCPAuthenticationError(SCPError):
pass
class SCPAuthenticationTimeoutError(SCPAuthenticationError):
def __init__(self, output):
SCPAuthenticationError.__init__(self, "Authentication timeout expired",
output)
class SCPTransferTimeoutError(SCPError):
def __init__(self, output):
SCPError.__init__(self, "Transfer timeout expired", output)
class SCPTransferFailedError(SCPError):
def __init__(self, status, output):
SCPError.__init__(self, None, output)
self.status = status
def __str__(self):
return ("SCP transfer failed (status: %s, output: %r)" %
(self.status, self.output))
def handle_prompts(session, username, password, prompt, timeout=10,
debug=False):
"""
Connect to a remote host (guest) using SSH or Telnet or else.
Wait for questions and provide answers. If timeout expires while
waiting for output from the child (e.g. a password prompt or
a shell prompt) -- fail.
:param session: An Expect or ShellSession instance to operate on
:param username: The username to send in reply to a login prompt
:param password: The password to send in reply to a password prompt
:param prompt: The shell prompt that indicates a successful login
:param timeout: The maximal time duration (in seconds) to wait for each
step of the login procedure (i.e. the "Are you sure" prompt, the
password prompt, the shell prompt, etc)
:raise LoginTimeoutError: If timeout expires
:raise LoginAuthenticationError: If authentication fails
:raise LoginProcessTerminatedError: If the client terminates during login
:raise LoginError: If some other error occurs
"""
password_prompt_count = 0
login_prompt_count = 0
while True:
try:
match, text = session.read_until_last_line_matches(
[r"[Aa]re you sure", r"[Pp]assword:\s*",
r"\(or (press|type) Control-D to continue\):\s*$", # Prompt of rescue mode for Red Hat.
r"[Gg]ive.*[Ll]ogin:\s*$", # Prompt of rescue mode for SUSE.
r"(?<![Ll]ast).*[Ll]ogin:\s*$", # Don't match "Last Login:"
r"[Cc]onnection.*closed", r"[Cc]onnection.*refused",
r"[Pp]lease wait", r"[Ww]arning", r"[Ee]nter.*username",
r"[Ee]nter.*password", prompt],
timeout=timeout, internal_timeout=0.5)
if match == 0: # "Are you sure you want to continue connecting"
if debug:
logging.debug("Got 'Are you sure...', sending 'yes'")
session.sendline("yes")
continue
elif match in [1, 2, 3, 10]: # "password:"
if password_prompt_count == 0:
if debug:
logging.debug("Got password prompt, sending '%s'",
password)
session.sendline(password)
password_prompt_count += 1
continue
else:
raise LoginAuthenticationError("Got password prompt twice",
text)
elif match == 4 or match == 9: # "login:"
if login_prompt_count == 0 and password_prompt_count == 0:
if debug:
logging.debug("Got username prompt; sending '%s'",
username)
session.sendline(username)
login_prompt_count += 1
continue
else:
if login_prompt_count > 0:
msg = "Got username prompt twice"
else:
msg = "Got username prompt after password prompt"
raise LoginAuthenticationError(msg, text)
elif match == 5: # "Connection closed"
raise LoginError("Client said 'connection closed'", text)
elif match == 6: # "Connection refused"
raise LoginError("Client said 'connection refused'", text)
elif match == 7: # "Please wait"
if debug:
logging.debug("Got 'Please wait'")
timeout = 30
continue
elif match == 8: # "Warning added RSA"
if debug:
logging.debug("Got 'Warning added RSA to known host list")
continue
elif match == 11: # prompt
if debug:
logging.debug("Got shell prompt -- logged in")
break
except aexpect.ExpectTimeoutError, e:
raise LoginTimeoutError(e.output)
except aexpect.ExpectProcessTerminatedError, e:
raise LoginProcessTerminatedError(e.status, e.output)
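# Illustrative use of handle_prompts() with a hand-built aexpect session
# (not part of the original module; the address, credentials and prompt
# below are placeholders):
#
#     session = aexpect.ShellSession("ssh root@192.168.122.10",
#                                    prompt=r"[\#\$]\s*$")
#     handle_prompts(session, "root", "123456", r"[\#\$]\s*$", timeout=30)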
def remote_login(client, host, port, username, password, prompt, linesep="\n",
log_filename=None, timeout=10, interface=None):
"""
Log into a remote host (guest) using SSH/Telnet/Netcat.
:param client: The client to use ('ssh', 'telnet' or 'nc')
:param host: Hostname or IP address
:param port: Port to connect to
:param username: Username (if required)
:param password: Password (if required)
:param prompt: Shell prompt (regular expression)
:param linesep: The line separator to use when sending lines
(e.g. '\\n' or '\\r\\n')
:param log_filename: If specified, log all output to this file
:param timeout: The maximal time duration (in seconds) to wait for
each step of the login procedure (i.e. the "Are you sure" prompt
or the password prompt)
    :param interface: The interface the neighbour attaches to (only needed
                when using an IPv6 link-local address).
    :raise LoginError: If an IPv6 link-local address is used without
                assigning the interface the neighbour attaches to
:raise LoginBadClientError: If an unknown client is requested
:raise: Whatever handle_prompts() raises
:return: A ShellSession object.
"""
if host and host.lower().startswith("fe80"):
if not interface:
            raise LoginError("When using an IPv6 link-local address an "
                             "interface must be assigned", None)
host = "%s%%%s" % (host, interface)
if client == "ssh":
cmd = ("ssh -o UserKnownHostsFile=/dev/null "
"-o StrictHostKeyChecking=no "
"-o PreferredAuthentications=password -p %s %s@%s" %
(port, username, host))
elif client == "telnet":
cmd = "telnet -l %s %s %s" % (username, host, port)
elif client == "nc":
cmd = "nc %s %s" % (host, port)
else:
raise LoginBadClientError(client)
logging.debug("Login command: '%s'", cmd)
session = aexpect.ShellSession(cmd, linesep=linesep, prompt=prompt)
try:
handle_prompts(session, username, password, prompt, timeout)
except Exception:
session.close()
raise
if log_filename:
session.set_output_func(utils_misc.log_line)
session.set_output_params((log_filename,))
session.set_log_file(log_filename)
return session
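# Illustrative use of remote_login() (not part of the original module; the
# guest address and credentials are placeholders):
#
#     session = remote_login("ssh", "192.168.122.10", 22, "root", "123456",
#                            prompt=r"[\#\$]\s*$", timeout=30)
#     session.cmd("uname -r")
#     session.close()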
class AexpectIOWrapperOut(messenger.StdIOWrapperOutBase64):
"""
Basic implementation of IOWrapper for stdout
"""
def close(self):
self._obj.close()
def fileno(self):
return os.open(self._obj, os.O_RDWR)
def write(self, data):
self._obj.send(data)
def remote_commander(client, host, port, username, password, prompt,
linesep="\n", log_filename=None, timeout=10, path=None):
"""
Log into a remote host (guest) using SSH/Telnet/Netcat.
:param client: The client to use ('ssh', 'telnet' or 'nc')
:param host: Hostname or IP address
:param port: Port to connect to
:param username: Username (if required)
:param password: Password (if required)
:param prompt: Shell prompt (regular expression)
:param linesep: The line separator to use when sending lines
(e.g. '\\n' or '\\r\\n')
:param log_filename: If specified, log all output to this file
:param timeout: The maximal time duration (in seconds) to wait for
each step of the login procedure (i.e. the "Are you sure" prompt
or the password prompt)
:param path: The path to place where remote_runner.py is placed.
:raise LoginBadClientError: If an unknown client is requested
:raise: Whatever handle_prompts() raises
:return: A ShellSession object.
"""
if path is None:
path = "/tmp"
if client == "ssh":
cmd = ("ssh -o UserKnownHostsFile=/dev/null "
"-o PreferredAuthentications=password "
"-p %s %s@%s %s agent_base64" %
(port, username, host, os.path.join(path, "remote_runner.py")))
elif client == "telnet":
cmd = "telnet -l %s %s %s" % (username, host, port)
elif client == "nc":
cmd = "nc %s %s" % (host, port)
else:
raise LoginBadClientError(client)
logging.debug("Login command: '%s'", cmd)
session = aexpect.Expect(cmd, linesep=linesep)
try:
handle_prompts(session, username, password, prompt, timeout)
except Exception:
session.close()
raise
if log_filename:
session.set_output_func(utils_misc.log_line)
session.set_output_params((log_filename,))
session.set_log_file(log_filename)
session.send_ctrl("raw")
# Wrap io interfaces.
inw = messenger.StdIOWrapperInBase64(session._get_fd("tail"))
outw = AexpectIOWrapperOut(session)
# Create commander
cmd = remote_master.CommanderMaster(inw, outw, False)
return cmd
def wait_for_login(client, host, port, username, password, prompt,
linesep="\n", log_filename=None, timeout=240,
internal_timeout=10, interface=None):
"""
Make multiple attempts to log into a guest until one succeeds or timeouts.
:param timeout: Total time duration to wait for a successful login
:param internal_timeout: The maximum time duration (in seconds) to wait for
each step of the login procedure (e.g. the
"Are you sure" prompt or the password prompt)
    :param interface: The interface the neighbour attaches to (only needed
                when using an IPv6 link-local address).
:see: remote_login()
:raise: Whatever remote_login() raises
:return: A ShellSession object.
"""
logging.debug("Attempting to log into %s:%s using %s (timeout %ds)",
host, port, client, timeout)
end_time = time.time() + timeout
while time.time() < end_time:
try:
return remote_login(client, host, port, username, password, prompt,
linesep, log_filename, internal_timeout,
interface)
except LoginError, e:
logging.debug(e)
time.sleep(2)
# Timeout expired; try one more time but don't catch exceptions
return remote_login(client, host, port, username, password, prompt,
linesep, log_filename, internal_timeout, interface)
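# Illustrative use of wait_for_login(), which keeps retrying remote_login()
# until the guest answers or the total timeout expires (placeholder values):
#
#     session = wait_for_login("ssh", "192.168.122.10", 22, "root", "123456",
#                              prompt=r"[\#\$]\s*$", timeout=240)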
def _remote_scp(session, password_list, transfer_timeout=600, login_timeout=20):
"""
Transfer files using SCP, given a command line.
Transfer file(s) to a remote host (guest) using SCP. Wait for questions
and provide answers. If login_timeout expires while waiting for output
from the child (e.g. a password prompt), fail. If transfer_timeout expires
while waiting for the transfer to complete, fail.
:param session: An Expect or ShellSession instance to operate on
:param password_list: Password list to send in reply to the password prompt
:param transfer_timeout: The time duration (in seconds) to wait for the
transfer to complete.
:param login_timeout: The maximal time duration (in seconds) to wait for
each step of the login procedure (i.e. the "Are you sure" prompt or
the password prompt)
:raise SCPAuthenticationError: If authentication fails
:raise SCPTransferTimeoutError: If the transfer fails to complete in time
:raise SCPTransferFailedError: If the process terminates with a nonzero
exit code
:raise SCPError: If some other error occurs
"""
password_prompt_count = 0
timeout = login_timeout
authentication_done = False
scp_type = len(password_list)
while True:
try:
match, text = session.read_until_last_line_matches(
[r"[Aa]re you sure", r"[Pp]assword:\s*$", r"lost connection"],
timeout=timeout, internal_timeout=0.5)
if match == 0: # "Are you sure you want to continue connecting"
logging.debug("Got 'Are you sure...', sending 'yes'")
session.sendline("yes")
continue
elif match == 1: # "password:"
if password_prompt_count == 0:
logging.debug("Got password prompt, sending '%s'" %
password_list[password_prompt_count])
session.sendline(password_list[password_prompt_count])
password_prompt_count += 1
timeout = transfer_timeout
if scp_type == 1:
authentication_done = True
continue
elif password_prompt_count == 1 and scp_type == 2:
logging.debug("Got password prompt, sending '%s'" %
password_list[password_prompt_count])
session.sendline(password_list[password_prompt_count])
password_prompt_count += 1
timeout = transfer_timeout
authentication_done = True
continue
else:
raise SCPAuthenticationError("Got password prompt twice",
text)
elif match == 2: # "lost connection"
raise SCPError("SCP client said 'lost connection'", text)
except aexpect.ExpectTimeoutError, e:
if authentication_done:
raise SCPTransferTimeoutError(e.output)
else:
raise SCPAuthenticationTimeoutError(e.output)
except aexpect.ExpectProcessTerminatedError, e:
if e.status == 0:
logging.debug("SCP process terminated with status 0")
break
else:
raise SCPTransferFailedError(e.status, e.output)
def remote_scp(command, password_list, log_filename=None, transfer_timeout=600,
login_timeout=20):
"""
Transfer files using SCP, given a command line.
:param command: The command to execute
(e.g. "scp -r foobar root@localhost:/tmp/").
:param password_list: Password list to send in reply to a password prompt.
:param log_filename: If specified, log all output to this file
:param transfer_timeout: The time duration (in seconds) to wait for the
transfer to complete.
:param login_timeout: The maximal time duration (in seconds) to wait for
each step of the login procedure (i.e. the "Are you sure" prompt
or the password prompt)
:raise: Whatever _remote_scp() raises
"""
logging.debug("Trying to SCP with command '%s', timeout %ss",
command, transfer_timeout)
if log_filename:
output_func = utils_misc.log_line
output_params = (log_filename,)
else:
output_func = None
output_params = ()
session = aexpect.Expect(command,
output_func=output_func,
output_params=output_params)
try:
_remote_scp(session, password_list, transfer_timeout, login_timeout)
finally:
session.close()
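# Illustrative use of remote_scp() with a hand-built command line; the
# password list holds one entry because a plain scp invocation shows a
# single password prompt (placeholder paths and address):
#
#     remote_scp("scp -r /tmp/results root@192.168.122.10:/tmp/",
#                ["123456"], transfer_timeout=300)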
def scp_to_remote(host, port, username, password, local_path, remote_path,
limit="", log_filename=None, timeout=600, interface=None):
"""
Copy files to a remote host (guest) through scp.
:param host: Hostname or IP address
:param username: Username (if required)
:param password: Password (if required)
:param local_path: Path on the local machine where we are copying from
:param remote_path: Path on the remote machine where we are copying to
:param limit: Speed limit of file transfer.
:param log_filename: If specified, log all output to this file
:param timeout: The time duration (in seconds) to wait for the transfer
to complete.
    :param interface: The interface the neighbour attaches to (only needed
                when using an IPv6 link-local address).
:raise: Whatever remote_scp() raises
"""
if (limit):
limit = "-l %s" % (limit)
if host and host.lower().startswith("fe80"):
if not interface:
            raise SCPError("When using an IPv6 link-local address the "
                           "interface the neighbour attaches to must be "
                           "assigned", None)
host = "%s%%%s" % (host, interface)
command = ("scp -v -o UserKnownHostsFile=/dev/null "
"-o StrictHostKeyChecking=no "
"-o PreferredAuthentications=password -r %s "
"-P %s %s %s@\[%s\]:%s" %
(limit, port, local_path, username, host, remote_path))
password_list = []
password_list.append(password)
return remote_scp(command, password_list, log_filename, timeout)
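# Illustrative use of scp_to_remote() (placeholder address, credentials and
# paths):
#
#     scp_to_remote("192.168.122.10", 22, "root", "123456",
#                   "/tmp/local_file", "/tmp/", timeout=300)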
def scp_from_remote(host, port, username, password, remote_path, local_path,
limit="", log_filename=None, timeout=600, interface=None):
"""
Copy files from a remote host (guest).
:param host: Hostname or IP address
:param username: Username (if required)
:param password: Password (if required)
    :param remote_path: Path on the remote machine where we are copying from
    :param local_path: Path on the local machine where we are copying to
:param limit: Speed limit of file transfer.
:param log_filename: If specified, log all output to this file
:param timeout: The time duration (in seconds) to wait for the transfer
to complete.
    :param interface: The interface the neighbour attaches to (only needed
                when using an IPv6 link-local address).
:raise: Whatever remote_scp() raises
"""
if (limit):
limit = "-l %s" % (limit)
if host and host.lower().startswith("fe80"):
if not interface:
            raise SCPError("When using an IPv6 link-local address the "
                           "interface the neighbour attaches to must be "
                           "assigned", None)
host = "%s%%%s" % (host, interface)
command = ("scp -v -o UserKnownHostsFile=/dev/null "
"-o StrictHostKeyChecking=no "
"-o PreferredAuthentications=password -r %s "
"-P %s %s@\[%s\]:%s %s" %
(limit, port, username, host, remote_path, local_path))
password_list = []
password_list.append(password)
remote_scp(command, password_list, log_filename, timeout)
def scp_between_remotes(src, dst, port, s_passwd, d_passwd, s_name, d_name,
s_path, d_path, limit="", log_filename=None,
timeout=600, src_inter=None, dst_inter=None):
"""
Copy files from a remote host (guest) to another remote host (guest).
:param src/dst: Hostname or IP address of src and dst
:param s_name/d_name: Username (if required)
:param s_passwd/d_passwd: Password (if required)
:param s_path/d_path: Path on the remote machine where we are copying
from/to
:param limit: Speed limit of file transfer.
:param log_filename: If specified, log all output to this file
:param timeout: The time duration (in seconds) to wait for the transfer
to complete.
    :param src_inter: The interface on the local host that the src neighbour
                attaches to
    :param dst_inter: The interface on src that the dst neighbour attaches to
:return: True on success and False on failure.
"""
if (limit):
limit = "-l %s" % (limit)
if src and src.lower().startswith("fe80"):
if not src_inter:
            raise SCPError("When using an IPv6 link-local address the "
                           "interface the neighbour attaches to must be "
                           "assigned", None)
src = "%s%%%s" % (src, src_inter)
if dst and dst.lower().startswith("fe80"):
if not dst_inter:
            raise SCPError("When using an IPv6 link-local address the "
                           "interface the neighbour attaches to must be "
                           "assigned", None)
dst = "%s%%%s" % (dst, dst_inter)
command = ("scp -v -o UserKnownHostsFile=/dev/null "
"-o StrictHostKeyChecking=no "
"-o PreferredAuthentications=password -r %s -P %s"
" %s@\[%s\]:%s %s@\[%s\]:%s" %
(limit, port, s_name, src, s_path, d_name, dst, d_path))
password_list = []
password_list.append(s_passwd)
password_list.append(d_passwd)
return remote_scp(command, password_list, log_filename, timeout)
def nc_copy_between_remotes(src, dst, s_port, s_passwd, d_passwd,
s_name, d_name, s_path, d_path,
c_type="ssh", c_prompt="\n",
d_port="8888", d_protocol="udp", timeout=10,
check_sum=True):
"""
Copy files from guest to guest using netcat.
This method only supports linux guest OS.
:param src/dst: Hostname or IP address of src and dst
:param s_name/d_name: Username (if required)
:param s_passwd/d_passwd: Password (if required)
:param s_path/d_path: Path on the remote machine where we are copying
:param c_type: Login method to remote host(guest).
:param c_prompt: command line prompt of remote host(guest)
:param d_port: the port data transfer
:param d_protocol: nc protocol use (tcp or udp)
:param timeout: If a connection and stdin are idle for more than timeout
seconds, then the connection is silently closed.
:return: True on success and False on failure.
"""
s_session = remote_login(c_type, src, s_port, s_name, s_passwd, c_prompt)
d_session = remote_login(c_type, dst, s_port, d_name, d_passwd, c_prompt)
s_session.cmd("iptables -I INPUT -p %s -j ACCEPT" % d_protocol)
d_session.cmd("iptables -I OUTPUT -p %s -j ACCEPT" % d_protocol)
logging.info("Transfer data using netcat from %s to %s" % (src, dst))
cmd = "nc"
if d_protocol == "udp":
cmd += " -u"
cmd += " -w %s" % timeout
s_session.sendline("%s -l %s < %s" % (cmd, d_port, s_path))
d_session.sendline("echo a | %s %s %s > %s" % (cmd, src, d_port, d_path))
if check_sum:
if (s_session.cmd("md5sum %s" % s_path).split()[0] !=
d_session.cmd("md5sum %s" % d_path).split()[0]):
return False
return True
def udp_copy_between_remotes(src, dst, s_port, s_passwd, d_passwd,
s_name, d_name, s_path, d_path,
c_type="ssh", c_prompt="\n",
d_port="9000", timeout=600):
"""
Copy files from guest to guest using udp.
:param src/dst: Hostname or IP address of src and dst
:param s_name/d_name: Username (if required)
:param s_passwd/d_passwd: Password (if required)
:param s_path/d_path: Path on the remote machine where we are copying
:param c_type: Login method to remote host(guest).
:param c_prompt: command line prompt of remote host(guest)
:param d_port: the port data transfer
:param timeout: data transfer timeout
"""
s_session = remote_login(c_type, src, s_port, s_name, s_passwd, c_prompt)
d_session = remote_login(c_type, dst, s_port, d_name, d_passwd, c_prompt)
def get_abs_path(session, filename, extension):
"""
return file path drive+path
"""
cmd_tmp = "wmic datafile where \"Filename='%s' and "
cmd_tmp += "extension='%s'\" get drive^,path"
cmd = cmd_tmp % (filename, extension)
info = session.cmd_output(cmd, timeout=360).strip()
drive_path = re.search(r'(\w):\s+(\S+)', info, re.M)
if not drive_path:
raise error.TestError("Not found file %s.%s in your guest"
% (filename, extension))
return ":".join(drive_path.groups())
def get_file_md5(session, file_path):
"""
Get files md5sums
"""
if c_type == "ssh":
md5_cmd = "md5sum %s" % file_path
md5_reg = r"(\w+)\s+%s.*" % file_path
else:
drive_path = get_abs_path(session, "md5sums", "exe")
filename = file_path.split("\\")[-1]
md5_reg = r"%s\s+(\w+)" % filename
md5_cmd = '%smd5sums.exe %s | find "%s"' % (drive_path, file_path,
filename)
o = session.cmd_output(md5_cmd)
file_md5 = re.findall(md5_reg, o)
if not o:
raise error.TestError("Get file %s md5sum error" % file_path)
return file_md5
def server_alive(session):
if c_type == "ssh":
check_cmd = "ps aux"
else:
check_cmd = "tasklist"
o = session.cmd_output(check_cmd)
if not o:
raise error.TestError("Can not get the server status")
if "sendfile" in o.lower():
return True
return False
def start_server(session):
if c_type == "ssh":
start_cmd = "sendfile %s &" % d_port
else:
drive_path = get_abs_path(session, "sendfile", "exe")
start_cmd = "start /b %ssendfile.exe %s" % (drive_path,
d_port)
session.cmd_output_safe(start_cmd)
if not server_alive(session):
raise error.TestError("Start udt server failed")
def start_client(session):
if c_type == "ssh":
client_cmd = "recvfile %s %s %s %s" % (src, d_port,
s_path, d_path)
else:
drive_path = get_abs_path(session, "recvfile", "exe")
client_cmd_tmp = "%srecvfile.exe %s %s %s %s"
client_cmd = client_cmd_tmp % (drive_path, src, d_port,
s_path.split("\\")[-1],
d_path.split("\\")[-1])
session.cmd_output_safe(client_cmd, timeout)
def stop_server(session):
if c_type == "ssh":
stop_cmd = "killall sendfile"
else:
stop_cmd = "taskkill /F /IM sendfile.exe"
if server_alive(session):
session.cmd_output_safe(stop_cmd)
try:
src_md5 = get_file_md5(s_session, s_path)
if not server_alive(s_session):
start_server(s_session)
start_client(d_session)
dst_md5 = get_file_md5(d_session, d_path)
if src_md5 != dst_md5:
err_msg = "Files md5sum mismatch, file %s md5sum is '%s', "
err_msg = "but the file %s md5sum is %s"
raise error.TestError(err_msg % (s_path, src_md5,
d_path, dst_md5))
finally:
stop_server(s_session)
s_session.close()
d_session.close()
def copy_files_to(address, client, username, password, port, local_path,
remote_path, limit="", log_filename=None,
verbose=False, timeout=600, interface=None):
"""
Copy files to a remote host (guest) using the selected client.
:param client: Type of transfer client
:param username: Username (if required)
    :param password: Password (if required)
:param local_path: Path on the local machine where we are copying from
:param remote_path: Path on the remote machine where we are copying to
:param address: Address of remote host(guest)
:param limit: Speed limit of file transfer.
:param log_filename: If specified, log all output to this file (SCP only)
:param verbose: If True, log some stats using logging.debug (RSS only)
:param timeout: The time duration (in seconds) to wait for the transfer to
complete.
    :param interface: The interface the neighbour attaches to (only needed
                when using an IPv6 link-local address).
:raise: Whatever remote_scp() raises
"""
if client == "scp":
scp_to_remote(address, port, username, password, local_path,
remote_path, limit, log_filename, timeout,
interface=interface)
elif client == "rss":
log_func = None
if verbose:
log_func = logging.debug
c = rss_client.FileUploadClient(address, port, log_func)
c.upload(local_path, remote_path, timeout)
c.close()
else:
raise error.TestError("No such file copy client: '%s', valid values"
"are scp and rss" % client)
def copy_files_from(address, client, username, password, port, remote_path,
local_path, limit="", log_filename=None,
verbose=False, timeout=600, interface=None):
"""
Copy files from a remote host (guest) using the selected client.
:param client: Type of transfer client
:param username: Username (if required)
    :param password: Password (if required)
:param remote_path: Path on the remote machine where we are copying from
:param local_path: Path on the local machine where we are copying to
:param address: Address of remote host(guest)
:param limit: Speed limit of file transfer.
:param log_filename: If specified, log all output to this file (SCP only)
:param verbose: If True, log some stats using ``logging.debug`` (RSS only)
:param timeout: The time duration (in seconds) to wait for the transfer to
complete.
    :param interface: The interface the neighbour attaches to (only needed
                when using an IPv6 link-local address).
:raise: Whatever ``remote_scp()`` raises
"""
if client == "scp":
scp_from_remote(address, port, username, password, remote_path,
local_path, limit, log_filename, timeout,
interface=interface)
elif client == "rss":
log_func = None
if verbose:
log_func = logging.debug
c = rss_client.FileDownloadClient(address, port, log_func)
c.download(remote_path, local_path, timeout)
c.close()
else:
raise error.TestError("No such file copy client: '%s', valid values"
"are scp and rss" % client)
class Remote_Package(object):
def __init__(self, address, client, username, password, port, remote_path):
"""
Initialization of Remote Package class.
:param address: Address of remote host(guest)
:param client: The client to use ('ssh', 'telnet' or 'nc')
:param username: Username (if required)
        :param password: Password (if required)
        :param port: Port to connect to
        :param remote_path: Remote package path
"""
self.address = address
self.client = client
self.port = port
self.username = username
self.password = password
self.remote_path = remote_path
if self.client == "nc":
self.cp_client = "rss"
self.cp_port = 10023
elif self.client == "ssh":
self.cp_client = "scp"
self.cp_port = 22
else:
raise LoginBadClientError(client)
def pull_file(self, local_path, timeout=600):
"""
Copy file from remote to local.
"""
logging.debug("Pull remote: '%s' to local: '%s'." % (self.remote_path,
local_path))
copy_files_from(self.address, self.cp_client, self.username,
self.password, self.cp_port, self.remote_path,
local_path, timeout=timeout)
def push_file(self, local_path, timeout=600):
"""
Copy file from local to remote.
"""
logging.debug("Push local: '%s' to remote: '%s'." % (local_path,
self.remote_path))
copy_files_to(self.address, self.cp_client, self.username,
self.password, self.cp_port, local_path,
self.remote_path, timeout=timeout)
class RemoteFile(object):
"""
Class to handle the operations of file on remote host or guest.
"""
def __init__(self, address, client, username, password, port,
remote_path, limit="", log_filename=None,
verbose=False, timeout=600):
"""
Initialization of RemoteFile class.
:param address: Address of remote host(guest)
:param client: Type of transfer client
:param username: Username (if required)
        :param password: Password (if required)
        :param remote_path: Path of the file which we want to edit on remote.
        :param limit: Speed limit of file transfer.
        :param log_filename: If specified, log all output to this file (SCP only)
        :param verbose: If True, log some stats using logging.debug (RSS only)
        :param timeout: The time duration (in seconds) to wait for the
                        transfer to complete.
"""
self.address = address
self.client = client
self.username = username
self.password = password
self.port = port
self.remote_path = remote_path
self.limit = limit
self.log_filename = log_filename
self.verbose = verbose
self.timeout = timeout
# Get a local_path and all actions is taken on it.
filename = os.path.basename(self.remote_path)
# Get a local_path.
tmp_dir = data_dir.get_tmp_dir()
local_file = tempfile.NamedTemporaryFile(prefix=("%s_" % filename),
dir=tmp_dir)
self.local_path = local_file.name
local_file.close()
# Get a backup_path.
backup_file = tempfile.NamedTemporaryFile(prefix=("%s_" % filename),
dir=tmp_dir)
self.backup_path = backup_file.name
backup_file.close()
# Get file from remote.
try:
self._pull_file()
except SCPTransferFailedError:
# Remote file doesn't exist, create empty file on local
self._write_local([])
# Save a backup.
shutil.copy(self.local_path, self.backup_path)
def __del__(self):
"""
Called when the instance is about to be destroyed.
"""
self._reset_file()
if os.path.exists(self.backup_path):
os.remove(self.backup_path)
if os.path.exists(self.local_path):
os.remove(self.local_path)
def _pull_file(self):
"""
Copy file from remote to local.
"""
if self.client == "test":
shutil.copy(self.remote_path, self.local_path)
else:
copy_files_from(self.address, self.client, self.username,
self.password, self.port, self.remote_path,
self.local_path, self.limit, self.log_filename,
self.verbose, self.timeout)
def _push_file(self):
"""
Copy file from local to remote.
"""
if self.client == "test":
shutil.copy(self.local_path, self.remote_path)
else:
copy_files_to(self.address, self.client, self.username,
self.password, self.port, self.local_path,
self.remote_path, self.limit, self.log_filename,
self.verbose, self.timeout)
def _reset_file(self):
"""
Copy backup from local to remote.
"""
if self.client == "test":
shutil.copy(self.backup_path, self.remote_path)
else:
copy_files_to(self.address, self.client, self.username,
self.password, self.port, self.backup_path,
self.remote_path, self.limit, self.log_filename,
self.verbose, self.timeout)
def _read_local(self):
"""
Read file on local_path.
:return: string list got from readlines().
"""
local_file = open(self.local_path, "r")
lines = local_file.readlines()
local_file.close()
return lines
def _write_local(self, lines):
"""
Write file on local_path. Call writelines method of File.
"""
local_file = open(self.local_path, "w")
local_file.writelines(lines)
local_file.close()
def add(self, line_list):
"""
Append lines in line_list into file on remote.
"""
lines = self._read_local()
for line in line_list:
lines.append("\n%s" % line)
self._write_local(lines)
self._push_file()
def sub(self, pattern2repl_dict):
"""
Replace the string which match the pattern
to the value contained in pattern2repl_dict.
"""
lines = self._read_local()
for pattern, repl in pattern2repl_dict.items():
for index in range(len(lines)):
line = lines[index]
lines[index] = re.sub(pattern, repl, line)
self._write_local(lines)
self._push_file()
def truncate(self, length=0):
"""
Truncate the detail of remote file to assigned length
Content before
line 1
line 2
line 3
remote_file.truncate(length=1)
Content after
line 1
:param length: how many lines you want to keep
"""
lines = self._read_local()
lines = lines[0: length]
self._write_local(lines)
self._push_file()
def remove(self, pattern_list):
"""
Remove the lines in remote file which matchs a pattern
in pattern_list.
"""
lines = self._read_local()
for pattern in pattern_list:
for index in range(len(lines)):
line = lines[index]
if re.match(pattern, line):
lines.remove(line)
# Check this line is the last one or not.
if (not line.endswith('\n') and (index > 0)):
lines[index - 1] = lines[index - 1].rstrip("\n")
self._write_local(lines)
self._push_file()
def sub_else_add(self, pattern2repl_dict):
"""
Replace the string which match the pattern.
If no match in the all lines, append the value
to the end of file.
"""
lines = self._read_local()
for pattern, repl in pattern2repl_dict.items():
no_line_match = True
for index in range(len(lines)):
line = lines[index]
if re.match(pattern, line):
no_line_match = False
lines[index] = re.sub(pattern, repl, line)
if no_line_match:
lines.append("\n%s" % repl)
self._write_local(lines)
self._push_file()
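# Illustrative use of RemoteFile to edit a file on a guest; the backup taken
# at construction time is pushed back when the object is destroyed
# (placeholder address, credentials and path):
#
#     remote_file = RemoteFile("192.168.122.10", "scp", "root", "123456",
#                              22, "/etc/sysctl.conf")
#     remote_file.sub_else_add({r"^net\.ipv4\.ip_forward.*":
#                               "net.ipv4.ip_forward = 1"})
#     del remote_file    # restores the original file on the guest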
class RemoteRunner(object):
"""
Class to provide a utils.run-like method to execute command on
remote host or guest. Provide a similar interface with utils.run
on local.
"""
def __init__(self, client="ssh", host=None, port="22", username="root",
password=None, prompt=r"[\#\$]\s*$", linesep="\n",
log_filename=None, timeout=240, internal_timeout=10,
session=None):
"""
Initialization of RemoteRunner. Init a session login to remote host or
guest.
:param client: The client to use ('ssh', 'telnet' or 'nc')
:param host: Hostname or IP address
:param port: Port to connect to
:param username: Username (if required)
:param password: Password (if required)
:param prompt: Shell prompt (regular expression)
:param linesep: The line separator to use when sending lines
(e.g. '\\n' or '\\r\\n')
:param log_filename: If specified, log all output to this file
:param timeout: Total time duration to wait for a successful login
:param internal_timeout: The maximal time duration (in seconds) to wait
for each step of the login procedure (e.g. the "Are you sure"
prompt or the password prompt)
:param session: An existing session
:see: wait_for_login()
:raise: Whatever wait_for_login() raises
"""
if session is None:
if host is None:
raise error.TestError("Neither host, nor session was defined!")
self.session = wait_for_login(client, host, port, username,
password, prompt, linesep,
log_filename, timeout,
internal_timeout)
else:
self.session = session
# Init stdout pipe and stderr pipe.
self.stdout_pipe = tempfile.mktemp()
self.stderr_pipe = tempfile.mktemp()
def run(self, command, timeout=60, ignore_status=False):
"""
Method to provide a utils.run-like interface to execute command on
remote host or guest.
:param timeout: Total time duration to wait for command return.
:param ignore_status: If ignore_status=True, do not raise an exception,
no matter what the exit code of the command is.
Else, raise CmdError if exit code of command is not
zero.
"""
        # Redirect stdout and stderr to files, dividing the error messages
        # from the output and stripping any color codes, so that the result
        # matches what the utils.run() function returns.
command = "%s 1>%s 2>%s" % (command, self.stdout_pipe, self.stderr_pipe)
status, _ = self.session.cmd_status_output(command, timeout=timeout)
output = self.session.cmd_output("cat %s;rm -f %s" %
(self.stdout_pipe, self.stdout_pipe))
errput = self.session.cmd_output("cat %s;rm -f %s" %
(self.stderr_pipe, self.stderr_pipe))
cmd_result = utils.CmdResult(command=command, exit_status=status,
stdout=output, stderr=errput)
if (status and (not ignore_status)):
raise error.CmdError(command, cmd_result)
return cmd_result
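# Illustrative use of RemoteRunner, which mirrors the utils.run() interface
# for a remote host (placeholder address and credentials):
#
#     runner = RemoteRunner(client="ssh", host="192.168.122.10", port="22",
#                           username="root", password="123456")
#     result = runner.run("rpm -q kernel")
#     logging.info(result.stdout)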
| gpl-2.0 | 2,365,160,773,720,525,300 | 3,613,446,052,075,506,700 | 38.778761 | 105 | 0.573593 | false |
jgoclawski/django | django/test/utils.py | 14 | 20900 | import logging
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from unittest import skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from django.apps import apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.signals import request_started
from django.core.urlresolvers import get_script_prefix, set_script_prefix
from django.db import reset_queries
from django.http import request
from django.template import Template
from django.test.signals import setting_changed, template_rendered
from django.utils import six
from django.utils.decorators import ContextDecorator
from django.utils.encoding import force_str
from django.utils.translation import deactivate
try:
import jinja2
except ImportError:
jinja2 = None
__all__ = (
'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner',
'modify_settings', 'override_settings',
'requires_tz_support',
'setup_test_environment', 'teardown_test_environment',
)
TZ_SUPPORT = hasattr(time, 'tzset')
class Approximate(object):
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
if self.val == other:
return True
return round(abs(self.val - other), self.places) == 0
class ContextList(list):
"""A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, six.string_types):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super(ContextList, self).__getitem__(key)
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
return True
def keys(self):
"""
Flattened keys of subcontexts.
"""
keys = set()
for subcontext in self:
for dict in subcontext:
keys |= set(dict.keys())
return keys
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal
that can be intercepted by the test system Client
"""
template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
def setup_test_environment():
"""Perform any global pre-test setup. This involves:
- Installing the instrumented test renderer
- Set the email backend to the locmem email backend.
- Setting the active locale to match the LANGUAGE_CODE setting.
"""
Template._original_render = Template._render
Template._render = instrumented_test_render
# Storing previous values in the settings module itself is problematic.
# Store them in arbitrary (but related) modules instead. See #20636.
mail._original_email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
request._original_allowed_hosts = settings.ALLOWED_HOSTS
settings.ALLOWED_HOSTS = ['*']
mail.outbox = []
deactivate()
def teardown_test_environment():
"""Perform any global post-test teardown. This involves:
- Restoring the original test renderer
- Restoring the email sending functions
"""
Template._render = Template._original_render
del Template._original_render
settings.EMAIL_BACKEND = mail._original_email_backend
del mail._original_email_backend
settings.ALLOWED_HOSTS = request._original_allowed_hosts
del request._original_allowed_hosts
del mail.outbox
def get_runner(settings, test_runner_class=None):
if not test_runner_class:
test_runner_class = settings.TEST_RUNNER
test_path = test_runner_class.split('.')
# Allow for Python 2.5 relative paths
if len(test_path) > 1:
test_module_name = '.'.join(test_path[:-1])
else:
test_module_name = '.'
test_module = __import__(test_module_name, {}, {}, force_str(test_path[-1]))
test_runner = getattr(test_module, test_path[-1])
return test_runner
class override_settings(object):
"""
    Acts as either a decorator or a context manager. If it's a decorator, it
    takes a function and returns a wrapped function. If it's a context
    manager, it's used with the ``with`` statement. In either case,
    entering/exiting are called before and after, respectively, the
    function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, test_func):
from django.test import SimpleTestCase
if isinstance(test_func, type):
if not issubclass(test_func, SimpleTestCase):
raise Exception(
"Only subclasses of Django SimpleTestCase can be decorated "
"with override_settings")
self.save_options(test_func)
return test_func
else:
@wraps(test_func)
def inner(*args, **kwargs):
with self:
return test_func(*args, **kwargs)
return inner
def save_options(self, test_func):
if test_func._overridden_settings is None:
test_func._overridden_settings = self.options
else:
# Duplicate dict to prevent subclasses from altering their parent.
test_func._overridden_settings = dict(
test_func._overridden_settings, **self.options)
def enable(self):
# Keep this code at the beginning to leave the settings unchanged
# in case it raises an exception because INSTALLED_APPS is invalid.
if 'INSTALLED_APPS' in self.options:
try:
apps.set_installed_apps(self.options['INSTALLED_APPS'])
except Exception:
apps.unset_installed_apps()
raise
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
self.wrapped = settings._wrapped
settings._wrapped = override
for key, new_value in self.options.items():
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=True)
def disable(self):
if 'INSTALLED_APPS' in self.options:
apps.unset_installed_apps()
settings._wrapped = self.wrapped
del self.wrapped
for key in self.options:
new_value = getattr(settings, key, None)
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=False)
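# Illustrative use of override_settings as a decorator and as a context
# manager (the setting values below are only examples):
#
#     @override_settings(USE_TZ=True)
#     class TimezoneTests(SimpleTestCase):
#         def test_paris(self):
#             with override_settings(TIME_ZONE='Europe/Paris'):
#                 ...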
class modify_settings(override_settings):
"""
Like override_settings, but makes it possible to append, prepend or remove
items instead of redefining the entire list.
"""
def __init__(self, *args, **kwargs):
if args:
# Hack used when instantiating from SimpleTestCase.setUpClass.
assert not kwargs
self.operations = args[0]
else:
assert not args
self.operations = list(kwargs.items())
def save_options(self, test_func):
if test_func._modified_settings is None:
test_func._modified_settings = self.operations
else:
# Duplicate list to prevent subclasses from altering their parent.
test_func._modified_settings = list(
test_func._modified_settings) + self.operations
def enable(self):
self.options = {}
for name, operations in self.operations:
try:
# When called from SimpleTestCase.setUpClass, values may be
# overridden several times; cumulate changes.
value = self.options[name]
except KeyError:
value = list(getattr(settings, name, []))
for action, items in operations.items():
                # items may be a single value or an iterable.
if isinstance(items, six.string_types):
items = [items]
if action == 'append':
value = value + [item for item in items if item not in value]
elif action == 'prepend':
value = [item for item in items if item not in value] + value
elif action == 'remove':
value = [item for item in value if item not in items]
else:
raise ValueError("Unsupported action: %s" % action)
self.options[name] = value
super(modify_settings, self).enable()
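# Illustrative use of modify_settings to adjust a list setting instead of
# redefining it completely (the app name is only an example):
#
#     @modify_settings(INSTALLED_APPS={'append': 'my_extra_app'})
#     class ExtraAppTests(SimpleTestCase):
#         ...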
def override_system_checks(new_checks, deployment_checks=None):
""" Acts as a decorator. Overrides list of registered system checks.
Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
you also need to exclude its system checks. """
from django.core.checks.registry import registry
def outer(test_func):
@wraps(test_func)
def inner(*args, **kwargs):
old_checks = registry.registered_checks
registry.registered_checks = new_checks
old_deployment_checks = registry.deployment_checks
if deployment_checks is not None:
registry.deployment_checks = deployment_checks
try:
return test_func(*args, **kwargs)
finally:
registry.registered_checks = old_checks
registry.deployment_checks = old_deployment_checks
return inner
return outer
def compare_xml(want, got):
"""Tries to do a 'xml-comparison' of want and got. Plain string
comparison doesn't always work because, for example, attribute
ordering should not be important. Comment nodes are not considered in the
comparison.
Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join(c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE)
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
for want, got in zip(want_children, got_children):
if not check_element(want, got):
return False
return True
def first_node(document):
for node in document.childNodes:
if node.nodeType != Node.COMMENT_NODE:
return node
want, got = strip_quotes(want, got)
want = want.replace('\\n', '\n')
got = got.replace('\\n', '\n')
# If the string is not a complete xml document, we may need to add a
    # root element. This allows us to compare fragments, like "<foo/><bar/>"
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root)
def strip_quotes(want, got):
"""
Strip quotes of doctests output values:
>>> strip_quotes("'foo'")
"foo"
>>> strip_quotes('"foo"')
"foo"
"""
def is_quoted_string(s):
s = s.strip()
return (len(s) >= 2
and s[0] == s[-1]
and s[0] in ('"', "'"))
def is_quoted_unicode(s):
s = s.strip()
return (len(s) >= 3
and s[0] == 'u'
and s[1] == s[-1]
and s[1] in ('"', "'"))
if is_quoted_string(want) and is_quoted_string(got):
want = want.strip()[1:-1]
got = got.strip()[1:-1]
elif is_quoted_unicode(want) and is_quoted_unicode(got):
want = want.strip()[2:-1]
got = got.strip()[2:-1]
return want, got
def str_prefix(s):
return s % {'_': '' if six.PY3 else 'u'}
class CaptureQueriesContext(object):
"""
Context manager that captures queries executed by the specified connection.
"""
def __init__(self, connection):
self.connection = connection
def __iter__(self):
return iter(self.captured_queries)
def __getitem__(self, index):
return self.captured_queries[index]
def __len__(self):
return len(self.captured_queries)
@property
def captured_queries(self):
return self.connection.queries[self.initial_queries:self.final_queries]
def __enter__(self):
self.force_debug_cursor = self.connection.force_debug_cursor
self.connection.force_debug_cursor = True
self.initial_queries = len(self.connection.queries_log)
self.final_queries = None
request_started.disconnect(reset_queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.connection.force_debug_cursor = self.force_debug_cursor
request_started.connect(reset_queries)
if exc_type is not None:
return
self.final_queries = len(self.connection.queries_log)
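# Illustrative use of CaptureQueriesContext to inspect the SQL executed
# inside a block (the model is only an example; any queryset works):
#
#     from django.db import connection
#     with CaptureQueriesContext(connection) as ctx:
#         list(Author.objects.all())
#     assert len(ctx.captured_queries) == 1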
class ignore_warnings(object):
def __init__(self, **kwargs):
self.ignore_kwargs = kwargs
if 'message' in self.ignore_kwargs or 'module' in self.ignore_kwargs:
self.filter_func = warnings.filterwarnings
else:
self.filter_func = warnings.simplefilter
def __call__(self, decorated):
if isinstance(decorated, type):
# A class is decorated
saved_setUp = decorated.setUp
saved_tearDown = decorated.tearDown
def setUp(inner_self):
self.catch_warnings = warnings.catch_warnings()
self.catch_warnings.__enter__()
self.filter_func('ignore', **self.ignore_kwargs)
saved_setUp(inner_self)
def tearDown(inner_self):
saved_tearDown(inner_self)
self.catch_warnings.__exit__(*sys.exc_info())
decorated.setUp = setUp
decorated.tearDown = tearDown
return decorated
else:
@wraps(decorated)
def inner(*args, **kwargs):
with warnings.catch_warnings():
self.filter_func('ignore', **self.ignore_kwargs)
return decorated(*args, **kwargs)
return inner
@contextmanager
def patch_logger(logger_name, log_level):
"""
Context manager that takes a named logger and the logging level
and provides a simple mock-like list of messages received
"""
calls = []
def replacement(msg, *args, **kwargs):
calls.append(msg % args)
logger = logging.getLogger(logger_name)
orig = getattr(logger, log_level)
setattr(logger, log_level, replacement)
try:
yield calls
finally:
setattr(logger, log_level, orig)
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(TZ_SUPPORT,
"This test relies on the ability to run a program in an arbitrary "
"time zone, but your operating system isn't able to do that.")
@contextmanager
def extend_sys_path(*paths):
"""Context manager to temporarily add paths to sys.path."""
_orig_sys_path = sys.path[:]
sys.path.extend(paths)
try:
yield
finally:
sys.path = _orig_sys_path
@contextmanager
def isolate_lru_cache(lru_cache_object):
"""Clear the cache of an LRU cache object on entering and exiting."""
lru_cache_object.cache_clear()
try:
yield
finally:
lru_cache_object.cache_clear()
@contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Note: This function and the following ``captured_std*`` are copied
from CPython's ``test.support`` module."""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, six.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
def reset_warning_registry():
"""
Clear warning registry for all modules. This is required in some tests
because of a bug in Python that prevents warnings.simplefilter("always")
from always making warnings appear: http://bugs.python.org/issue4180
The bug was fixed in Python 3.4.2.
"""
key = "__warningregistry__"
for mod in sys.modules.values():
if hasattr(mod, key):
getattr(mod, key).clear()
@contextmanager
def freeze_time(t):
"""
Context manager to temporarily freeze time.time(). This temporarily
modifies the time function of the time module. Modules which import the
time function directly (e.g. `from time import time`) won't be affected
This isn't meant as a public API, but helps reduce some repetitive code in
Django's test suite.
"""
_real_time = time.time
time.time = lambda: t
try:
yield
finally:
time.time = _real_time
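# Illustrative use of freeze_time to pin time.time() inside a test (the
# timestamp is arbitrary):
#
#     with freeze_time(123456.0):
#         self.assertEqual(time.time(), 123456.0)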
def require_jinja2(test_func):
"""
Decorator to enable a Jinja2 template engine in addition to the regular
Django template engine for a test or skip it if Jinja2 isn't available.
"""
test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
test_func = override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}, {
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'APP_DIRS': True,
'OPTIONS': {'keep_trailing_newline': True},
}])(test_func)
return test_func
class ScriptPrefix(ContextDecorator):
def __enter__(self):
set_script_prefix(self.prefix)
def __exit__(self, exc_type, exc_val, traceback):
set_script_prefix(self.old_prefix)
def __init__(self, prefix):
self.prefix = prefix
self.old_prefix = get_script_prefix()
def override_script_prefix(prefix):
"""
Decorator or context manager to temporary override the script prefix.
"""
return ScriptPrefix(prefix)
class LoggingCaptureMixin(object):
"""
Capture the output from the 'django' logger and store it on the class's
logger_output attribute.
"""
def setUp(self):
self.logger = logging.getLogger('django')
self.old_stream = self.logger.handlers[0].stream
self.logger_output = six.StringIO()
self.logger.handlers[0].stream = self.logger_output
def tearDown(self):
self.logger.handlers[0].stream = self.old_stream
| bsd-3-clause | -4,823,348,276,163,738,000 | -5,168,635,972,314,605,000 | 31.503888 | 81 | 0.620287 | false |
culots/meld | meld/undo.py | 5 | 8280 | # Copyright (C) 2002-2006 Stephen Kennedy <[email protected]>
# Copyright (C) 2010-2011 Kai Willadsen <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Module to help implement undo functionality.
Usage:
t = TextWidget()
s = UndoSequence()
def on_textwidget_text_inserted():
s.begin_group()
if not t.is_modified():
s.add_action( TextWidgetModifiedAction() )
s.add_action( InsertionAction() )
s.end_group()
def on_undo_button_pressed():
s.undo()
"""
from gi.repository import GObject
class GroupAction(object):
"""A group action combines several actions into one logical action.
"""
def __init__(self, seq):
self.seq = seq
# TODO: If a GroupAction affects more than one sequence, our logic
# breaks. Currently, this isn't a problem.
self.buffer = seq.actions[0].buffer
def undo(self):
while self.seq.can_undo():
self.seq.undo()
def redo(self):
while self.seq.can_redo():
self.seq.redo()
class UndoSequence(GObject.GObject):
"""A manager class for operations which can be undone/redone.
"""
__gsignals__ = {
'can-undo': (GObject.SignalFlags.RUN_FIRST, None, (GObject.TYPE_BOOLEAN,)),
'can-redo': (GObject.SignalFlags.RUN_FIRST, None, (GObject.TYPE_BOOLEAN,)),
'checkpointed': (GObject.SignalFlags.RUN_FIRST, None, (GObject.TYPE_OBJECT, GObject.TYPE_BOOLEAN,)),
}
def __init__(self):
"""Create an empty UndoSequence.
"""
GObject.GObject.__init__(self)
self.actions = []
self.next_redo = 0
self.checkpoints = {}
self.group = None
self.busy = False
def clear(self):
"""Remove all undo and redo actions from this sequence
If the sequence was previously able to undo and/or redo, the
'can-undo' and 'can-redo' signals are emitted.
Raises an AssertionError if a group is in progress.
"""
assert self.group is None
if self.can_undo():
self.emit('can-undo', 0)
if self.can_redo():
self.emit('can-redo', 0)
self.actions = []
self.next_redo = 0
self.checkpoints = {}
def can_undo(self):
"""Return if an undo is possible.
"""
return self.next_redo > 0
def can_redo(self):
"""Return if a redo is possible.
"""
return self.next_redo < len(self.actions)
def add_action(self, action):
"""Add an action to the undo list.
Arguments:
action -- A class with two callable attributes: 'undo' and 'redo'
which are called by this sequence during an undo or redo.
"""
if self.busy:
return
if self.group is None:
if self.checkpointed(action.buffer):
self.checkpoints[action.buffer][1] = self.next_redo
self.emit('checkpointed', action.buffer, False)
else:
# If we go back in the undo stack before the checkpoint starts,
# and then modify the buffer, we lose the checkpoint altogether
start, end = self.checkpoints.get(action.buffer, (None, None))
if start is not None and start > self.next_redo:
self.checkpoints[action.buffer] = (None, None)
could_undo = self.can_undo()
could_redo = self.can_redo()
self.actions[self.next_redo:] = []
self.actions.append(action)
self.next_redo += 1
if not could_undo:
self.emit('can-undo', 1)
if could_redo:
self.emit('can-redo', 0)
else:
self.group.add_action(action)
def undo(self):
"""Undo an action.
Raises an AssertionError if the sequence is not undoable.
"""
assert self.next_redo > 0
self.busy = True
buf = self.actions[self.next_redo - 1].buffer
if self.checkpointed(buf):
self.emit('checkpointed', buf, False)
could_redo = self.can_redo()
self.next_redo -= 1
self.actions[self.next_redo].undo()
self.busy = False
if not self.can_undo():
self.emit('can-undo', 0)
if not could_redo:
self.emit('can-redo', 1)
if self.checkpointed(buf):
self.emit('checkpointed', buf, True)
def redo(self):
"""Redo an action.
        Raises an AssertionError if the sequence is not redoable.
"""
assert self.next_redo < len(self.actions)
self.busy = True
buf = self.actions[self.next_redo].buffer
if self.checkpointed(buf):
self.emit('checkpointed', buf, False)
could_undo = self.can_undo()
a = self.actions[self.next_redo]
self.next_redo += 1
a.redo()
self.busy = False
if not could_undo:
self.emit('can-undo', 1)
if not self.can_redo():
self.emit('can-redo', 0)
if self.checkpointed(buf):
self.emit('checkpointed', buf, True)
def checkpoint(self, buf):
start = self.next_redo
while start > 0 and self.actions[start - 1].buffer != buf:
start -= 1
end = self.next_redo
while (end < len(self.actions) - 1 and
self.actions[end + 1].buffer != buf):
end += 1
if end == len(self.actions):
end = None
self.checkpoints[buf] = [start, end]
self.emit('checkpointed', buf, True)
def checkpointed(self, buf):
# While the main undo sequence should always have checkpoints
# recorded, grouped subsequences won't.
start, end = self.checkpoints.get(buf, (None, None))
if start is None:
return False
if end is None:
end = len(self.actions)
return start <= self.next_redo <= end
def begin_group(self):
"""Group several actions into a single logical action.
When you wrap several calls to add_action() inside begin_group()
and end_group(), all the intervening actions are considered
one logical action. For instance a 'replace' action may be
implemented as a pair of 'delete' and 'create' actions, but
undoing should undo both of them.
"""
if self.busy:
return
if self.group:
self.group.begin_group()
else:
self.group = UndoSequence()
def end_group(self):
"""End a logical group action. See also begin_group().
Raises an AssertionError if there was not a matching call to
begin_group().
"""
if self.busy:
return
assert self.group is not None
if self.group.group is not None:
self.group.end_group()
else:
group = self.group
self.group = None
# Collapse single action groups
if len(group.actions) == 1:
self.add_action(group.actions[0])
elif len(group.actions) > 1:
self.add_action(GroupAction(group))
def abort_group(self):
"""Revert the sequence to the state before begin_group() was called.
        Raises an AssertionError if there was not a matching call to begin_group().
"""
if self.busy:
return
assert self.group is not None
if self.group.group is not None:
self.group.abort_group()
else:
self.group = None
def in_grouped_action(self):
return self.group is not None
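# Minimal usage sketch (illustrative only; _DemoAction is hypothetical and not
# part of meld): any object with callable `undo`/`redo` attributes and a
# `buffer` attribute can be managed by an UndoSequence.
class _DemoAction(object):
    def __init__(self, buf):
        self.buffer = buf
    def undo(self):
        pass
    def redo(self):
        pass
def _demo_grouped_undo():
    seq = UndoSequence()
    buf = object()
    seq.begin_group()
    seq.add_action(_DemoAction(buf))
    seq.add_action(_DemoAction(buf))
    seq.end_group()   # the two actions collapse into one GroupAction
    assert seq.can_undo()
    seq.undo()        # undoes both grouped actions in one step
    assert seq.can_redo()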
| gpl-2.0 | -8,324,860,515,095,718,000 | -4,131,637,595,912,994,300 | 31.598425 | 108 | 0.57971 | false |
hehongliang/tensorflow | tensorflow/tools/common/public_api.py | 24 | 4920 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Visitor restricting traversal to only the public tensorflow API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.util import tf_inspect
class PublicAPIVisitor(object):
"""Visitor to use with `traverse` to visit exactly the public TF API."""
def __init__(self, visitor):
"""Constructor.
`visitor` should be a callable suitable as a visitor for `traverse`. It will
be called only for members of the public TensorFlow API.
Args:
visitor: A visitor to call for the public API.
"""
self._visitor = visitor
self._root_name = 'tf'
# Modules/classes we want to suppress entirely.
self._private_map = {
# Some implementations have this internal module that we shouldn't
# expose.
'tf.flags': ['cpp_flags'],
}
# Modules/classes we do not want to descend into if we hit them. Usually,
# system modules exposed through platforms for compatibility reasons.
# Each entry maps a module path to a name to ignore in traversal.
self._do_not_descend_map = {
'tf': [
'compiler',
'core',
'examples',
'flags', # Don't add flags
# TODO(drpng): This can be removed once sealed off.
'platform',
# TODO(drpng): This can be removed once sealed.
'pywrap_tensorflow',
# TODO(drpng): This can be removed once sealed.
'user_ops',
'python',
'tools',
'tensorboard',
],
## Everything below here is legitimate.
# It'll stay, but it's not officially part of the API.
'tf.app': ['flags'],
# Imported for compatibility between py2/3.
'tf.test': ['mock'],
# Externalized modules of the Keras API.
'tf.keras': ['applications', 'preprocessing']
}
@property
def private_map(self):
"""A map from parents to symbols that should not be included at all.
This map can be edited, but it should not be edited once traversal has
begun.
Returns:
The map marking symbols to not include.
"""
return self._private_map
@property
def do_not_descend_map(self):
"""A map from parents to symbols that should not be descended into.
This map can be edited, but it should not be edited once traversal has
begun.
Returns:
The map marking symbols to not explore.
"""
return self._do_not_descend_map
def set_root_name(self, root_name):
"""Override the default root name of 'tf'."""
self._root_name = root_name
def _is_private(self, path, name, obj=None):
"""Return whether a name is private."""
# TODO(wicke): Find out what names to exclude.
del obj # Unused.
return ((path in self._private_map and
name in self._private_map[path]) or
(name.startswith('_') and not re.match('__.*__$', name) or
name in ['__base__', '__class__']))
def _do_not_descend(self, path, name):
"""Safely queries if a specific fully qualified name should be excluded."""
return (path in self._do_not_descend_map and
name in self._do_not_descend_map[path])
def __call__(self, path, parent, children):
"""Visitor interface, see `traverse` for details."""
# Avoid long waits in cases of pretty unambiguous failure.
if tf_inspect.ismodule(parent) and len(path.split('.')) > 10:
raise RuntimeError('Modules nested too deep:\n%s.%s\n\nThis is likely a '
'problem with an accidental public import.' %
(self._root_name, path))
# Includes self._root_name
full_path = '.'.join([self._root_name, path]) if path else self._root_name
# Remove things that are not visible.
for name, child in list(children):
if self._is_private(full_path, name, child):
children.remove((name, child))
self._visitor(path, parent, children)
# Remove things that are visible, but which should not be descended into.
for name, child in list(children):
if self._do_not_descend(full_path, name):
children.remove((name, child))
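# Illustrative sketch (not part of the original module): PublicAPIVisitor is
# normally driven by the sibling `traverse` module; the import and the
# `traverse.traverse(root, visitor)` call below assume that module's API.
def _example_print_public_api():
  import tensorflow as tf
  from tensorflow.tools.common import traverse
  def _print_visitor(path, parent, children):
    for name, _ in children:
      print('%s.%s' % (path, name) if path else name)
  traverse.traverse(tf, PublicAPIVisitor(_print_visitor))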
| apache-2.0 | -6,971,027,085,989,868,000 | -6,157,174,978,881,740,000 | 33.893617 | 80 | 0.625813 | false |
c-o-m-m-a-n-d-e-r/CouchPotatoServer | libs/pyutil/hashexpand.py | 106 | 2890 | # Copyright (c) 2002-2012 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import warnings
"""
Cryptographically strong pseudo-random number generator based on SHA256.
"""
class SHA256Expander:
"""
Provide a cryptographically strong pseudo-random number generator based on
SHA256. Hopefully this means that no attacker will be able to predict any
bit of output that he hasn't seen, given that he doesn't know anything about
the seed and given that he can see as many bits of output as he desires
except for the bit that he is trying to predict. Further it is hoped that
an attacker will not even be able to determine whether a given stream of
random bytes was generated by this PRNG or by flipping a coin repeatedly.
The safety of this technique has not been verified by a Real Cryptographer.
... but it is similar to the PRNG in FIPS-186...
The seed and counter are encoded in DJB's netstring format so that I
don't have to think about the possibility of ambiguity.
Note: I've since learned more about the theory of secure hash functions
and the above is a strong assumption about a secure hash function. Use
of this class should be considered deprecated and you should use a more
well-analyzed KDF (such as the nascent standard HKDF) or stream cipher or
whatever it is that you need.
"""
def __init__(self, seed=None):
warnings.warn("deprecated", DeprecationWarning)
if seed is not None:
self.seed(seed)
def seed(self, seed):
import hashlib
self.starth = hashlib.sha256('24:pyutil hash expansion v2,10:algorithm:,6:SHA256,6:value:,')
seedlen = len(seed)
seedlenstr = str(seedlen)
self.starth.update(seedlenstr)
self.starth.update(':')
self.starth.update(seed)
self.starth.update(',')
self.avail = ""
self.counter = 0
def get(self, bytes):
bytesleft = bytes
res = []
while bytesleft > 0:
if len(self.avail) == 0:
h = self.starth.copy()
counterstr = str(self.counter)
counterstrlen = len(counterstr)
counterstrlenstr = str(counterstrlen)
h.update(counterstrlenstr)
h.update(':')
h.update(counterstr)
h.update(',')
self.avail = h.digest()
self.counter += 1
numb = min(len(self.avail), bytesleft)
(chunk, self.avail,) = (self.avail[:numb], self.avail[numb:],)
res.append(chunk)
bytesleft = bytesleft - numb
resstr = ''.join(res)
assert len(resstr) == bytes
return resstr
def sha256expand(inpstr, expbytes):
return SHA256Expander(inpstr).get(expbytes)
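# Illustrative sketch (not part of the original module): expanding a short
# seed into longer pseudo-random byte strings. The class is deprecated per its
# docstring, so this is for demonstration only.
def _example_expand():
    expander = SHA256Expander('some seed value')
    first = expander.get(16)
    rest = expander.get(64)
    assert len(first) == 16 and len(rest) == 64
    # The one-shot helper replays the same deterministic stream for this seed:
    assert sha256expand('some seed value', 16) == first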
| gpl-3.0 | -881,090,428,053,830,400 | -8,819,129,239,363,992,000 | 34.679012 | 100 | 0.630796 | false |
BartoszCichecki/onlinepython | onlinepython/pypy-2.4.0-win32/lib-python/2.7/test/test_code.py | 8 | 3307 | """This module includes tests of the code object representation.
>>> def f(x):
... def g(y):
... return x + y
... return g
...
>>> dump(f.func_code)
name: f
argcount: 1
names: ()
varnames: ('x', 'g')
cellvars: ('x',)
freevars: ()
nlocals: 2
flags: 3
consts: ('None', '<code object g>')
>>> dump(f(4).func_code)
name: g
argcount: 1
names: ()
varnames: ('y',)
cellvars: ()
freevars: ('x',)
nlocals: 1
flags: 19
consts: ('None',)
>>> def h(x, y):
... a = x + y
... b = x - y
... c = a * b
... return c
...
>>> dump(h.func_code)
name: h
argcount: 2
names: ()
varnames: ('x', 'y', 'a', 'b', 'c')
cellvars: ()
freevars: ()
nlocals: 5
flags: 67
consts: ('None',)
>>> def attrs(obj):
... print obj.attr1
... print obj.attr2
... print obj.attr3
>>> dump(attrs.func_code)
name: attrs
argcount: 1
names: ('attr1', 'attr2', 'attr3')
varnames: ('obj',)
cellvars: ()
freevars: ()
nlocals: 1
flags: 67
consts: ('None',)
>>> def optimize_away():
... 'doc string'
... 'not a docstring'
... 53
... 53L
>>> dump(optimize_away.func_code)
name: optimize_away
argcount: 0
names: ()
varnames: ()
cellvars: ()
freevars: ()
nlocals: 0
flags: 1048643
consts: ("'doc string'", 'None')
"""
import unittest
import weakref
from test.test_support import run_doctest, run_unittest, cpython_only
from test.test_support import gc_collect
def consts(t):
"""Yield a doctest-safe sequence of object reprs."""
for elt in t:
r = repr(elt)
if r.startswith("<code object"):
yield "<code object %s>" % elt.co_name
else:
yield r
def dump(co):
"""Print out a text representation of a code object."""
for attr in ["name", "argcount", "names", "varnames", "cellvars",
"freevars", "nlocals", "flags"]:
print "%s: %s" % (attr, getattr(co, "co_" + attr))
print "consts:", tuple(consts(co.co_consts))
class CodeTest(unittest.TestCase):
@cpython_only
def test_newempty(self):
import _testcapi
co = _testcapi.code_newempty("filename", "funcname", 15)
self.assertEqual(co.co_filename, "filename")
self.assertEqual(co.co_name, "funcname")
self.assertEqual(co.co_firstlineno, 15)
class CodeWeakRefTest(unittest.TestCase):
def test_basic(self):
# Create a code object in a clean environment so that we know we have
# the only reference to it left.
namespace = {}
exec "def f(): pass" in globals(), namespace
f = namespace["f"]
del namespace
self.called = False
def callback(code):
self.called = True
# f is now the last reference to the function, and through it, the code
# object. While we hold it, check that we can create a weakref and
# deref it. Then delete it, and check that the callback gets called and
# the reference dies.
coderef = weakref.ref(f.__code__, callback)
self.assertTrue(bool(coderef()))
del f
gc_collect()
self.assertFalse(bool(coderef()))
self.assertTrue(self.called)
def test_main(verbose=None):
from test import test_code
run_doctest(test_code, verbose)
run_unittest(CodeTest, CodeWeakRefTest)
if __name__ == "__main__":
test_main()
| gpl-2.0 | -4,975,771,455,494,990,000 | -7,605,061,521,135,432,000 | 21.046667 | 80 | 0.589658 | false |
fdecourcelle/meanfde | node_modules/meanio/node_modules/mean-cli/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/common.py | 497 | 17406 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
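# Illustrative sketch (not part of gyp): memoize caches results keyed by the
# (hashable) argument tuple, so a second call with the same args never re-runs
# the wrapped function.
def _memoize_example():
  calls = []
  @memoize
  def _double(x):
    calls.append(x)
    return x * 2
  assert _double(2) == 4
  assert _double(2) == 4  # served from the cache
  assert calls == [2]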
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd)
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
@memoize
def RelativePath(path, relative_to):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# Convert to normalized (and therefore absolute paths).
path = os.path.realpath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
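# Illustrative sketch (not part of gyp): arguments containing shell
# metacharacters are double-quoted, embedded quotes/backslashes/backticks are
# backslash-escaped, and $VAR expansions are deliberately left intact.
def _encode_posix_shell_example():
  assert EncodePOSIXShellArgument('simple') == 'simple'
  assert EncodePOSIXShellArgument('two words') == '"two words"'
  assert EncodePOSIXShellArgument('say "hi"') == '"say \\"hi\\""'
  assert EncodePOSIXShellList(['echo', '$HOME dir']) == 'echo "$HOME dir"'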
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer:
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
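# Illustrative sketch (not part of gyp): the object returned by WriteOnDiff
# only replaces the target file when the generated contents actually change,
# which avoids needlessly dirtying mtimes for incremental builds.
def _write_on_diff_example(path, text):
  out = WriteOnDiff(path)
  out.write(text)
  out.close()  # compares with any existing file, then renames or discards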
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('aix'):
return 'aix'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
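# Illustrative sketch (not part of gyp): uniquer keeps the first occurrence of
# each item in order; idfun deduplicates on a derived key.
def _uniquer_example():
  assert uniquer([3, 1, 3, 2, 1]) == [3, 1, 2]
  assert uniquer(['a', 'A', 'b'], idfun=lambda s: s.lower()) == ['a', 'b']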
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the node in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
      return re.findall(r'\$\(([^)]*)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
    ['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
| mit | -2,148,576,990,384,219,600 | 1,956,750,138,096,751,600 | 32.408829 | 83 | 0.672297 | false |
GoogleCloudPlatform/dataflow-sample-applications | timeseries-streaming/timeseries-python-applications/ml_pipeline/timeseries/encoder_decoder/transforms/process_encdec_inf_rtn.py | 1 | 9368 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from datetime import datetime
from typing import Dict, Text, Any
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow_serving.apis import prediction_log_pb2
import apache_beam as beam
from apache_beam.utils.windowed_value import WindowedValue
from ml_pipeline.timeseries.utils import timeseries_transform_utils
class ProcessReturn(beam.DoFn):
"""
We need to match the input to the output to compare the example to the encoded-decoded value.
    The Transform component's preprocessing_fn puts the features in scope for the model into lexical order.
This function mimics the preprocessing_fn structure.
"""
def __init__(self, config: Dict[Text, Any], batching_size: int = 1000):
beam.DoFn.__init__(self)
self.tf_transform_graph_dir = config['tf_transform_graph_dir']
self.model_config = config['model_config']
self.batching_size = batching_size
def setup(self):
self.transform_output = tft.TFTransformOutput(self.tf_transform_graph_dir)
self.tft_layer = self.transform_output.transform_features_layer()
def start_bundle(self):
self.batch: [WindowedValue] = []
def finish_bundle(self):
for prediction in self.process_result(self.batch):
yield prediction
def process(
self,
element: prediction_log_pb2.PredictionLog,
window=beam.DoFn.WindowParam,
timestamp=beam.DoFn.TimestampParam):
if len(element.predict_log.request.inputs['examples'].string_val) > 1:
raise Exception("Only support single input string.")
if len(self.batch) > self.batching_size:
for k in self.process_result(self.batch):
yield k
self.batch.clear()
else:
self.batch.append(WindowedValue(element, timestamp, [window]))
def process_result(self, element: [WindowedValue]):
"""
        An input example has shape [timesteps, all_features]; all_features is
        not always equal to the set of features used in the model.
        An output example has shape [timesteps, model_features].
        In order to compare these we need to match each (timestep, feature) from
        (timestep, all_features) to (timestep, model_features).
        There are also metadata fields which provide context.
"""
element_value = [k.value for k in element]
processed_inputs = []
request_inputs = []
request_outputs = []
for k in element_value:
request_inputs.append(
k.predict_log.request.inputs['examples'].string_val[0])
request_outputs.append(k.predict_log.response.outputs['output_0'])
# The output of tf.io.parse_example is a set of feature tensors which
# have shape for non Metadata of [batch,
# timestep]
batched_example = tf.io.parse_example(
request_inputs, self.transform_output.raw_feature_spec())
# The tft layer gives us two labels 'FLOAT32' and 'LABEL' which have
# shape [batch, timestep, model_features]
inputs = self.tft_layer(batched_example)
# Determine which of the features was used in the model
feature_labels = timeseries_transform_utils.create_feature_list_from_list(
features=batched_example.keys(), config=self.model_config)
# The outer loop gives us the batch label which has
# shape [timestep, model_features]
# For the metadata the shape is [timestep, 1]
metadata_span_start_timestamp = tf.sparse.to_dense(
batched_example['METADATA_SPAN_START_TS']).numpy()
metadata_span_end_timestamp = tf.sparse.to_dense(
batched_example['METADATA_SPAN_END_TS']).numpy()
batch_pos = 0
for batch_input in inputs['LABEL'].numpy():
# Get the Metadata from the original request
span_start_timestamp = datetime.fromtimestamp(
metadata_span_start_timestamp[batch_pos][0] / 1000)
span_end_timestamp = datetime.fromtimestamp(
metadata_span_end_timestamp[batch_pos][0] / 1000)
# Add the metadata to the result
result = {
'span_start_timestamp': span_start_timestamp,
'span_end_timestamp': span_end_timestamp
}
# In this loop we need to compare the last timestep
# [timestep , model_feature] for the input and the output.
# Get the output that matches this input
results = tf.io.parse_tensor(
request_outputs[batch_pos].SerializeToString(),
tf.float32).numpy()[0]
# The last time step is the last value in the input batch,
# ordinal pos starts from 0
last_timestep_pos = len(batch_input) - 1
# From the input batch get the last time step
last_timestep_input = batch_input[last_timestep_pos]
# Get the last timestep from the results
last_timestep_output = results[last_timestep_pos]
feature_results = {}
for model_feature_pos in range(len(last_timestep_output)):
label = (feature_labels[model_feature_pos])
# The num of features should == number of results
if len(feature_labels) != len(last_timestep_input):
raise ValueError(f'Features list {feature_labels} in config is '
f'len {len(feature_labels)} which '
f'does not match output length '
f'{len(last_timestep_output)} '
f' This normally is a result of using a configuration '
f'file that does not match '
f'tf_transform dir / saved model dir.')
# The num of features should == number of results
if len(last_timestep_output) != len(last_timestep_input):
raise ValueError(f'Input len {len(last_timestep_input)} does not '
f'match output length {len(last_timestep_output)} '
                                     f'This normally is a result of mismatched tf_transform dir and saved model dir.')
feature_results[label] = {
'input_value': last_timestep_input[model_feature_pos],
'output_value': last_timestep_output[model_feature_pos]
}
if not str(label).endswith('-TIMESTAMP'):
feature_results[label].update({
                        # Outliers will affect the head of their array, so we need to keep the array
# to show in the outlier detection.
'raw_data_array': str(
tf.sparse.to_dense(
batched_example[label]).numpy()
[batch_pos])
})
result.update({'feature_results': feature_results})
processed_inputs.append(result)
batch_pos += 1
# Add back windows
windowed_value = []
for input_pos in range(len(processed_inputs) - 1):
windowed_value.append(
element[input_pos].with_value(processed_inputs[input_pos]))
return windowed_value
class CheckAnomalous(beam.DoFn):
"""
    Naive anomaly check based entirely on an absolute difference cutoff value.
"""
# TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
def __init__(self, threshold: float = 0.05):
beam.DoFn.__init__(self)
self.threshold = threshold
def process(self, element: Dict[Text, Any], *unused_args, **unused_kwargs):
result = {
'span_start_timestamp': element['span_start_timestamp'],
'span_end_timestamp': element['span_end_timestamp']
}
for key, value in element['feature_results'].items():
input_value = value['input_value']
output_value = value['output_value']
diff = abs(input_value - output_value)
value.update({'diff': diff})
if not key.endswith('-TIMESTAMP'):
value.update({'anomaly': diff > self.threshold})
result.update({key: value})
yield result
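# Illustrative pipeline sketch (not part of the original module): wiring the
# two DoFns together downstream of an inference step. The upstream collection
# of PredictionLog protos and the contents of `config` are assumed here.
def _build_postprocessing(prediction_logs, config):
    return (
        prediction_logs
        | 'ProcessReturn' >> beam.ParDo(ProcessReturn(config=config))
        | 'CheckAnomalous' >> beam.ParDo(CheckAnomalous(threshold=0.05)))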
| apache-2.0 | -8,400,041,486,761,252,000 | 8,208,160,866,984,687,000 | 41.776256 | 120 | 0.596392 | false |
jonyachen/hearboi | record_2.py | 1 | 1234 | """
PyAudio example: Record a few seconds of audio and save to a WAVE
file.
"""
import pyaudio
import wave
import sys
from dejavu import Dejavu
CHUNK = 8192
FORMAT = pyaudio.paInt16
CHANNELS = 1L
RATE = 48000
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "/sounds/output.wav"
if sys.platform == 'darwin':
CHANNELS = 1
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=1,
frames_per_buffer=CHUNK,
rate=RATE,
input=True,
input_device_index= 4)
print("* recording")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
config = {
"database": {
"host": "127.0.0.1",
"user": "root",
"passwd": "rasp",
"db": "sound_db",
}
}
# create a Dejavu instance
djv = Dejavu(config)
# Fingerprint all the mp3's in the directory we give it
djv.fingerprint_directory("sounds", [".wav"])
| mit | 1,786,416,886,583,202,000 | 5,864,558,651,270,104,000 | 18.587302 | 64 | 0.62966 | false |
benoitc/dj-revproxy | revproxy/store.py | 1 | 4409 | # -*- coding: utf-8 -
#
# This file is part of dj-revproxy released under the MIT license.
# See the NOTICE for more information.
import os
import types
import uuid
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from restkit.tee import TeeInput, ResponseTeeInput
from restkit.client import USER_AGENT
from .filters import Filter
from .models import RequestSession
class RequestBodyWrapper(TeeInput):
def __init__(self, request, fobject):
self.fobject = fobject
super(RequestBodyWrapper, self).__init__(request.body)
def _tee(self, length):
data = super(RequestBodyWrapper, self)._tee(length)
if not data:
return data
self.fobject.write(data)
return data
def _finalize(self):
self.fobject.close()
return super(RequestBodyWrapper, self)._finalize()
class ResponseBodyWrapper(ResponseTeeInput):
def __init__(self, response, fobject):
self.fobject = fobject
super(ResponseBodyWrapper, self).__init__(response,
response.connection, response.should_close)
def _tee(self, length):
data = super(ResponseBodyWrapper, self)._tee(length)
if not data:
return data
self.fobject.write(data)
return data
def _finalize(self):
self.fobject.close()
return super(ResponseBodyWrapper, self)._finalize()
class RequestStore(Filter):
def __init__(self, request, **kwargs):
proxy_sid = kwargs.get("proxy_sid")
store_path = kwargs.get("store_path", "/tmp")
request_id = uuid.uuid4().hex
dirs = os.path.join(*request_id[0:8])
fdir = os.path.join(store_path, dirs)
self.fprefix = os.path.join(fdir, request_id[8:])
if not os.path.exists(fdir):
os.makedirs(fdir)
self.freq = None
self.frep = None
# save session
session = RequestSession(
sid = proxy_sid,
request_id = request_id,
store_path = store_path)
session.save()
super(RequestStore, self).__init__(request, **kwargs)
def on_request(self, request):
self.freq = open("%s.req" % self.fprefix, "w+")
headers_str = headers_request_str(request)
self.freq.write(headers_str)
if request.body is None:
self.freq.close()
self.freq = None
else:
request.body = RequestBodyWrapper(request,
self.freq)
def on_response(self, response, request):
if self.freq is not None:
try:
self.freq.close()
except OSError:
pass
self.frep = open("%s.rep" % self.fprefix, "w+")
headers_str = headers_response_str(response)
self.frep.write(headers_str)
if request.method == "HEAD":
self.frep.close()
else:
response._body = ResponseBodyWrapper(response,
self.frep)
def headers_request_str(request, extra_headers=None):
""" create final header string """
headers = request.headers.copy()
if extra_headers is not None:
for k, v in extra_headers:
headers[k] = v
if not request.body and request.method in ('POST', 'PUT',):
headers['Content-Length'] = 0
httpver = "HTTP/1.1"
ua = headers.iget('user_agent')
if not ua:
ua = USER_AGENT
host = request.host
accept_encoding = headers.iget('accept-encoding')
if not accept_encoding:
accept_encoding = 'identity'
lheaders = [
"%s %s %s\r\n" % (request.method, request.path, httpver),
"Host: %s\r\n" % host,
"User-Agent: %s\r\n" % ua,
"Accept-Encoding: %s\r\n" % accept_encoding
]
lheaders.extend(["%s: %s\r\n" % (k, str(v)) for k, v in \
headers.items() if k.lower() not in \
('user-agent', 'host', 'accept-encoding',)])
return "%s\r\n" % "".join(lheaders)
def headers_response_str(response):
version_str = "HTTP/%s.%s" % response.version
headers = ["%s %s\r\n" % (version_str, response.status)]
headers.extend(["%s: %s\r\n" % (k, str(v)) for k, v in \
response.headers.items()])
return "%s\r\n" % "".join(headers)
| mit | 4,523,958,915,021,091,300 | -1,086,113,775,662,412,900 | 27.62987 | 67 | 0.57927 | false |
addition-it-solutions/project-all | addons/mrp/wizard/change_production_qty.py | 7 | 4789 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.exceptions import UserError
class change_production_qty(osv.osv_memory):
_name = 'change.production.qty'
_description = 'Change Quantity of Products'
_columns = {
'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
}
def default_get(self, cr, uid, fields, context=None):
""" To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
        @return: A dictionary of fields with their default values.
"""
if context is None:
context = {}
res = super(change_production_qty, self).default_get(cr, uid, fields, context=context)
prod_obj = self.pool.get('mrp.production')
prod = prod_obj.browse(cr, uid, context.get('active_id'), context=context)
if 'product_qty' in fields:
res.update({'product_qty': prod.product_qty})
return res
def _update_product_to_produce(self, cr, uid, prod, qty, context=None):
move_lines_obj = self.pool.get('stock.move')
for m in prod.move_created_ids:
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': qty})
def change_prod_qty(self, cr, uid, ids, context=None):
"""
Changes the Quantity of Product.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return:
"""
record_id = context and context.get('active_id',False)
assert record_id, _('Active Id not found')
prod_obj = self.pool.get('mrp.production')
bom_obj = self.pool.get('mrp.bom')
move_obj = self.pool.get('stock.move')
for wiz_qty in self.browse(cr, uid, ids, context=context):
prod = prod_obj.browse(cr, uid, record_id, context=context)
prod_obj.write(cr, uid, [prod.id], {'product_qty': wiz_qty.product_qty})
prod_obj.action_compute(cr, uid, [prod.id])
for move in prod.move_lines:
bom_point = prod.bom_id
bom_id = prod.bom_id.id
if not bom_point:
bom_id = bom_obj._bom_find(cr, uid, product_id=prod.product_id.id, context=context)
if not bom_id:
raise UserError(_("Cannot find bill of material for this product."))
prod_obj.write(cr, uid, [prod.id], {'bom_id': bom_id})
bom_point = bom_obj.browse(cr, uid, [bom_id])[0]
if not bom_id:
raise UserError(_("Cannot find bill of material for this product."))
factor = prod.product_qty * prod.product_uom.factor / bom_point.product_uom.factor
product_details, workcenter_details = \
bom_obj._bom_explode(cr, uid, bom_point, prod.product_id, factor / bom_point.product_qty, [], context=context)
for r in product_details:
if r['product_id'] == move.product_id.id:
move_obj.write(cr, uid, [move.id], {'product_uom_qty': r['product_qty']})
if prod.move_prod_id:
move_obj.write(cr, uid, [prod.move_prod_id.id], {'product_uom_qty' : wiz_qty.product_qty})
self._update_product_to_produce(cr, uid, prod, wiz_qty.product_qty, context=context)
return {}
| agpl-3.0 | -714,708,424,747,585,200 | 7,635,980,751,788,355,000 | 46.89 | 130 | 0.591564 | false |
rohitwaghchaure/erpnext_develop | erpnext/shopping_cart/test_shopping_cart.py | 2 | 6744 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import nowdate, add_months
from erpnext.shopping_cart.cart import _get_cart_quotation, update_cart, get_party
from erpnext.tests.utils import create_test_contact_and_address
test_dependencies = ['Payment Terms Template']
class TestShoppingCart(unittest.TestCase):
"""
Note:
Shopping Cart == Quotation
"""
def setUp(self):
frappe.set_user("Administrator")
create_test_contact_and_address()
self.enable_shopping_cart()
def tearDown(self):
frappe.set_user("Administrator")
self.disable_shopping_cart()
def test_get_cart_new_user(self):
self.login_as_new_user()
# test if lead is created and quotation with new lead is fetched
quotation = _get_cart_quotation()
self.assertEquals(quotation.quotation_to, "Customer")
self.assertEquals(quotation.contact_person,
frappe.db.get_value("Contact", dict(email_id="[email protected]")))
self.assertEquals(quotation.lead, None)
self.assertEquals(quotation.contact_email, frappe.session.user)
return quotation
def test_get_cart_customer(self):
self.login_as_customer()
# test if quotation with customer is fetched
quotation = _get_cart_quotation()
self.assertEquals(quotation.quotation_to, "Customer")
self.assertEquals(quotation.customer, "_Test Customer")
self.assertEquals(quotation.lead, None)
self.assertEquals(quotation.contact_email, frappe.session.user)
return quotation
def test_add_to_cart(self):
self.login_as_customer()
# remove from cart
self.remove_all_items_from_cart()
# add first item
update_cart("_Test Item", 1)
quotation = self.test_get_cart_customer()
self.assertEquals(quotation.get("items")[0].item_code, "_Test Item")
self.assertEquals(quotation.get("items")[0].qty, 1)
self.assertEquals(quotation.get("items")[0].amount, 10)
# add second item
update_cart("_Test Item 2", 1)
quotation = self.test_get_cart_customer()
self.assertEquals(quotation.get("items")[1].item_code, "_Test Item 2")
self.assertEquals(quotation.get("items")[1].qty, 1)
self.assertEquals(quotation.get("items")[1].amount, 20)
self.assertEquals(len(quotation.get("items")), 2)
def test_update_cart(self):
# first, add to cart
self.test_add_to_cart()
# update first item
update_cart("_Test Item", 5)
quotation = self.test_get_cart_customer()
self.assertEquals(quotation.get("items")[0].item_code, "_Test Item")
self.assertEquals(quotation.get("items")[0].qty, 5)
self.assertEquals(quotation.get("items")[0].amount, 50)
self.assertEquals(quotation.net_total, 70)
self.assertEquals(len(quotation.get("items")), 2)
def test_remove_from_cart(self):
# first, add to cart
self.test_add_to_cart()
# remove first item
update_cart("_Test Item", 0)
quotation = self.test_get_cart_customer()
self.assertEquals(quotation.get("items")[0].item_code, "_Test Item 2")
self.assertEquals(quotation.get("items")[0].qty, 1)
self.assertEquals(quotation.get("items")[0].amount, 20)
self.assertEquals(quotation.net_total, 20)
self.assertEquals(len(quotation.get("items")), 1)
def test_tax_rule(self):
self.login_as_customer()
quotation = self.create_quotation()
from erpnext.accounts.party import set_taxes
tax_rule_master = set_taxes(quotation.customer, "Customer", \
quotation.transaction_date, quotation.company, None, None, \
quotation.customer_address, quotation.shipping_address_name, 1)
self.assertEquals(quotation.taxes_and_charges, tax_rule_master)
self.assertEquals(quotation.total_taxes_and_charges, 1000.0)
self.remove_test_quotation(quotation)
def create_quotation(self):
quotation = frappe.new_doc("Quotation")
values = {
"doctype": "Quotation",
"quotation_to": "Customer",
"order_type": "Shopping Cart",
"customer": get_party(frappe.session.user).name,
"docstatus": 0,
"contact_email": frappe.session.user,
"selling_price_list": "_Test Price List Rest of the World",
"currency": "USD",
"taxes_and_charges" : "_Test Tax 1",
"conversion_rate":1,
"transaction_date" : nowdate(),
"valid_till" : add_months(nowdate(), 1),
"items": [{
"item_code": "_Test Item",
"qty": 1
}],
"taxes": frappe.get_doc("Sales Taxes and Charges Template", "_Test Tax 1").taxes,
"company": "_Test Company"
}
quotation.update(values)
quotation.insert(ignore_permissions=True)
return quotation
def remove_test_quotation(self, quotation):
frappe.set_user("Administrator")
quotation.delete()
# helper functions
def enable_shopping_cart(self):
settings = frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings")
settings.update({
"enabled": 1,
"company": "_Test Company",
"default_customer_group": "_Test Customer Group",
"quotation_series": "_T-Quotation-",
"price_list": "_Test Price List India"
})
# insert item price
if not frappe.db.get_value("Item Price", {"price_list": "_Test Price List India",
"item_code": "_Test Item"}):
frappe.get_doc({
"doctype": "Item Price",
"price_list": "_Test Price List India",
"item_code": "_Test Item",
"price_list_rate": 10
}).insert()
frappe.get_doc({
"doctype": "Item Price",
"price_list": "_Test Price List India",
"item_code": "_Test Item 2",
"price_list_rate": 20
}).insert()
settings.save()
frappe.local.shopping_cart_settings = None
def disable_shopping_cart(self):
settings = frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings")
settings.enabled = 0
settings.save()
frappe.local.shopping_cart_settings = None
def login_as_new_user(self):
self.create_user_if_not_exists("[email protected]")
frappe.set_user("[email protected]")
def login_as_customer(self):
self.create_user_if_not_exists("[email protected]",
"_Test Contact For _Test Customer")
frappe.set_user("[email protected]")
def remove_all_items_from_cart(self):
quotation = _get_cart_quotation()
quotation.flags.ignore_permissions=True
quotation.delete()
def create_user_if_not_exists(self, email, first_name = None):
if frappe.db.exists("User", email):
return
frappe.get_doc({
"doctype": "User",
"user_type": "Website User",
"email": email,
"send_welcome_email": 0,
"first_name": first_name or email.split("@")[0]
}).insert(ignore_permissions=True)
test_dependencies = ["Sales Taxes and Charges Template", "Price List", "Item Price", "Shipping Rule", "Currency Exchange",
"Customer Group", "Lead", "Customer", "Contact", "Address", "Item", "Tax Rule"]
| gpl-3.0 | -8,538,668,681,391,708,000 | 8,317,289,063,927,343,000 | 30.078341 | 122 | 0.700623 | false |
mavit/ansible | lib/ansible/modules/network/avi/avi_vrfcontext.py | 20 | 4674 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.2
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_vrfcontext
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of VrfContext Avi RESTful Object
description:
- This module is used to configure VrfContext object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
bgp_profile:
description:
- Bgp local and peer info.
cloud_ref:
description:
- It is a reference to an object of type cloud.
debugvrfcontext:
description:
- Configure debug flags for vrf.
- Field introduced in 17.1.1.
description:
description:
- User defined description for the object.
gateway_mon:
description:
- Configure ping based heartbeat check for gateway in service engines of vrf.
internal_gateway_monitor:
description:
- Configure ping based heartbeat check for all default gateways in service engines of vrf.
- Field introduced in 17.1.1.
name:
description:
- Name of the object.
required: true
static_routes:
description:
- List of staticroute.
system_default:
description:
- Boolean flag to set system_default.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create VrfContext object
avi_vrfcontext:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_vrfcontext
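# Illustrative only (not part of the original module docs): the same
# object can be removed again by setting state to absent.
- name: Example to delete VrfContext object
  avi_vrfcontext:
    controller: 10.10.25.42
    username: admin
    password: something
    state: absent
    name: sample_vrfcontext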
"""
RETURN = '''
obj:
description: VrfContext (api/vrfcontext) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
bgp_profile=dict(type='dict',),
cloud_ref=dict(type='str',),
debugvrfcontext=dict(type='dict',),
description=dict(type='str',),
gateway_mon=dict(type='list',),
internal_gateway_monitor=dict(type='dict',),
name=dict(type='str', required=True),
static_routes=dict(type='list',),
system_default=dict(type='bool',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'vrfcontext',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 | 5,903,870,172,083,286,000 | 1,932,714,323,562,439,400 | 30.795918 | 106 | 0.606761 | false |
ZhuangER/robot_path_planning | gui/pyqtgraph/python2_3.py | 4 | 1727 | """
Helper functions which smooth out the differences between python 2 and 3.
"""
import sys
def asUnicode(x):
if sys.version_info[0] == 2:
if isinstance(x, unicode):
return x
elif isinstance(x, str):
return x.decode('UTF-8')
else:
return unicode(x)
else:
return str(x)
def cmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
def sortList(l, cmpFunc):
if sys.version_info[0] == 2:
l.sort(cmpFunc)
else:
l.sort(key=cmpToKey(cmpFunc))
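# Example usage (illustrative, not part of the original module):
#   data = [3, 1, 2]
#   sortList(data, lambda a, b: cmp(a, b))  # same call works on Python 2 and 3
#   # data is now [1, 2, 3]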
if sys.version_info[0] == 3:
import builtins
builtins.basestring = str
#builtins.asUnicode = asUnicode
#builtins.sortList = sortList
basestring = str
def cmp(a,b):
if a>b:
return 1
elif b > a:
return -1
else:
return 0
builtins.cmp = cmp
builtins.xrange = range
#else: ## don't use __builtin__ -- this confuses things like pyshell and ActiveState's lazy import recipe
#import __builtin__
#__builtin__.asUnicode = asUnicode
#__builtin__.sortList = sortList
| mit | 6,549,783,947,260,222,000 | -5,961,736,775,915,065,000 | 27.783333 | 109 | 0.547771 | false |
jmighion/ansible | lib/ansible/modules/cloud/amazon/elasticache.py | 26 | 20767 | #!/usr/bin/python
#
# Copyright (c) 2017 Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: elasticache
short_description: Manage cache clusters in Amazon Elasticache.
description:
- Manage cache clusters in Amazon Elasticache.
- Returns information about the specified cache cluster.
version_added: "1.4"
author: "Jim Dalton (@jsdalton)"
options:
state:
description:
- C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. C(rebooted) will reboot the cluster,
resulting in a momentary outage.
choices: ['present', 'absent', 'rebooted']
required: true
name:
description:
- The cache cluster identifier
required: true
engine:
description:
- Name of the cache engine to be used.
required: false
default: memcached
choices: ['redis', 'memcached']
cache_engine_version:
description:
- The version number of the cache engine
required: false
default: None
node_type:
description:
- The compute and memory capacity of the nodes in the cache cluster
required: false
    default: cache.t2.small
num_nodes:
description:
- The initial number of cache nodes that the cache cluster will have. Required when state=present.
required: false
cache_port:
description:
- The port number on which each of the cache nodes will accept connections
required: false
default: None
cache_parameter_group:
description:
- The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group
for the specified engine will be used.
required: false
default: None
version_added: "2.0"
aliases: [ 'parameter_group' ]
cache_subnet_group:
description:
- The subnet group name to associate with. Only use if inside a vpc. Required if inside a vpc
required: false
default: None
version_added: "2.0"
security_group_ids:
description:
- A list of vpc security group names to associate with this cache cluster. Only use if inside a vpc
required: false
default: None
version_added: "1.6"
cache_security_groups:
description:
- A list of cache security group names to associate with this cache cluster. Must be an empty list if inside a vpc
required: false
default: None
zone:
description:
- The EC2 Availability Zone in which the cache cluster will be created
required: false
default: None
wait:
description:
- Wait for cache cluster result before returning
required: false
default: yes
choices: [ "yes", "no" ]
hard_modify:
description:
- Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state
required: false
default: no
choices: [ "yes", "no" ]
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic example
- elasticache:
name: "test-please-delete"
state: present
engine: memcached
cache_engine_version: 1.4.14
node_type: cache.m1.small
num_nodes: 1
cache_port: 11211
cache_security_groups:
- default
zone: us-east-1d
# Ensure cache cluster is gone
- elasticache:
name: "test-please-delete"
state: absent
# Reboot cache cluster
- elasticache:
name: "test-please-delete"
state: rebooted
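# Illustrative only (not from the original module docs): grow an existing
# memcached cluster to two nodes, permitting destructive changes if required
- elasticache:
    name: "test-please-delete"
    state: present
    engine: memcached
    num_nodes: 2
    wait: yes
    hard_modify: yes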
"""
from time import sleep
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, HAS_BOTO3, camel_dict_to_snake_dict
try:
import boto3
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
class ElastiCacheManager(object):
"""Handles elasticache creation and destruction"""
EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
def __init__(self, module, name, engine, cache_engine_version, node_type,
num_nodes, cache_port, cache_parameter_group, cache_subnet_group,
cache_security_groups, security_group_ids, zone, wait,
hard_modify, region, **aws_connect_kwargs):
self.module = module
self.name = name
self.engine = engine.lower()
self.cache_engine_version = cache_engine_version
self.node_type = node_type
self.num_nodes = num_nodes
self.cache_port = cache_port
self.cache_parameter_group = cache_parameter_group
self.cache_subnet_group = cache_subnet_group
self.cache_security_groups = cache_security_groups
self.security_group_ids = security_group_ids
self.zone = zone
self.wait = wait
self.hard_modify = hard_modify
self.region = region
self.aws_connect_kwargs = aws_connect_kwargs
self.changed = False
self.data = None
self.status = 'gone'
self.conn = self._get_elasticache_connection()
self._refresh_data()
def ensure_present(self):
"""Ensure cache cluster exists or create it if not"""
if self.exists():
self.sync()
else:
self.create()
def ensure_absent(self):
"""Ensure cache cluster is gone or delete it if not"""
self.delete()
def ensure_rebooted(self):
"""Ensure cache cluster is gone or delete it if not"""
self.reboot()
def exists(self):
"""Check if cache cluster exists"""
return self.status in self.EXIST_STATUSES
def create(self):
"""Create an ElastiCache cluster"""
if self.status == 'available':
return
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
return
if self.status == 'deleting':
if self.wait:
self._wait_for_status('gone')
else:
msg = "'%s' is currently deleting. Cannot create."
self.module.fail_json(msg=msg % self.name)
kwargs = dict(CacheClusterId=self.name,
NumCacheNodes=self.num_nodes,
CacheNodeType=self.node_type,
Engine=self.engine,
EngineVersion=self.cache_engine_version,
CacheSecurityGroupNames=self.cache_security_groups,
SecurityGroupIds=self.security_group_ids,
CacheParameterGroupName=self.cache_parameter_group,
CacheSubnetGroupName=self.cache_subnet_group,
PreferredAvailabilityZone=self.zone)
if self.cache_port is not None:
kwargs['Port'] = self.cache_port
try:
self.conn.create_cache_cluster(**kwargs)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
self._refresh_data()
self.changed = True
if self.wait:
self._wait_for_status('available')
return True
def delete(self):
"""Destroy an ElastiCache cluster"""
if self.status == 'gone':
return
if self.status == 'deleting':
if self.wait:
self._wait_for_status('gone')
return
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
msg = "'%s' is currently %s. Cannot delete."
self.module.fail_json(msg=msg % (self.name, self.status))
try:
response = self.conn.delete_cache_cluster(CacheClusterId=self.name)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
cache_cluster_data = response['CacheCluster']
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
self._wait_for_status('gone')
def sync(self):
"""Sync settings to cluster if required"""
if not self.exists():
msg = "'%s' is %s. Cannot sync."
self.module.fail_json(msg=msg % (self.name, self.status))
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
# Cluster can only be synced if available. If we can't wait
# for this, then just be done.
return
if self._requires_destroy_and_create():
if not self.hard_modify:
msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed."
self.module.fail_json(msg=msg % self.name)
if not self.wait:
msg = "'%s' requires destructive modification. 'wait' must be set to true."
self.module.fail_json(msg=msg % self.name)
self.delete()
self.create()
return
if self._requires_modification():
self.modify()
def modify(self):
"""Modify the cache cluster. Note it's only possible to modify a few select options."""
nodes_to_remove = self._get_nodes_to_remove()
try:
self.conn.modify_cache_cluster(CacheClusterId=self.name,
NumCacheNodes=self.num_nodes,
CacheNodeIdsToRemove=nodes_to_remove,
CacheSecurityGroupNames=self.cache_security_groups,
CacheParameterGroupName=self.cache_parameter_group,
SecurityGroupIds=self.security_group_ids,
ApplyImmediately=True,
EngineVersion=self.cache_engine_version)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
self._refresh_data()
self.changed = True
if self.wait:
self._wait_for_status('available')
def reboot(self):
"""Reboot the cache cluster"""
if not self.exists():
msg = "'%s' is %s. Cannot reboot."
self.module.fail_json(msg=msg % (self.name, self.status))
if self.status == 'rebooting':
return
if self.status in ['creating', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
msg = "'%s' is currently %s. Cannot reboot."
self.module.fail_json(msg=msg % (self.name, self.status))
# Collect ALL nodes for reboot
cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
try:
self.conn.reboot_cache_cluster(CacheClusterId=self.name,
CacheNodeIdsToReboot=cache_node_ids)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
self._refresh_data()
self.changed = True
if self.wait:
self._wait_for_status('available')
def get_info(self):
"""Return basic info about the cache cluster"""
info = {
'name': self.name,
'status': self.status
}
if self.data:
info['data'] = self.data
return info
def _wait_for_status(self, awaited_status):
"""Wait for status to change from present status to awaited_status"""
status_map = {
'creating': 'available',
'rebooting': 'available',
'modifying': 'available',
'deleting': 'gone'
}
if self.status == awaited_status:
# No need to wait, we're already done
return
if status_map[self.status] != awaited_status:
msg = "Invalid awaited status. '%s' cannot transition to '%s'"
self.module.fail_json(msg=msg % (self.status, awaited_status))
if awaited_status not in set(status_map.values()):
msg = "'%s' is not a valid awaited status."
self.module.fail_json(msg=msg % awaited_status)
while True:
sleep(1)
self._refresh_data()
if self.status == awaited_status:
break
def _requires_modification(self):
"""Check if cluster requires (nondestructive) modification"""
# Check modifiable data attributes
modifiable_data = {
'NumCacheNodes': self.num_nodes,
'EngineVersion': self.cache_engine_version
}
for key, value in modifiable_data.items():
if value is not None and self.data[key] != value:
return True
# Check cache security groups
cache_security_groups = []
for sg in self.data['CacheSecurityGroups']:
cache_security_groups.append(sg['CacheSecurityGroupName'])
if set(cache_security_groups) != set(self.cache_security_groups):
return True
# check vpc security groups
if self.security_group_ids:
vpc_security_groups = []
security_groups = self.data['SecurityGroups'] or []
for sg in security_groups:
vpc_security_groups.append(sg['SecurityGroupId'])
if set(vpc_security_groups) != set(self.security_group_ids):
return True
return False
def _requires_destroy_and_create(self):
"""
Check whether a destroy and create is required to synchronize cluster.
"""
unmodifiable_data = {
'node_type': self.data['CacheNodeType'],
'engine': self.data['Engine'],
'cache_port': self._get_port()
}
# Only check for modifications if zone is specified
if self.zone is not None:
unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone']
for key, value in unmodifiable_data.items():
if getattr(self, key) is not None and getattr(self, key) != value:
return True
return False
def _get_elasticache_connection(self):
"""Get an elasticache connection"""
region, ec2_url, aws_connect_params = get_aws_connection_info(self.module, boto3=True)
if region:
return boto3_conn(self.module, conn_type='client', resource='elasticache',
region=region, endpoint=ec2_url, **aws_connect_params)
else:
self.module.fail_json(msg="region must be specified")
def _get_port(self):
"""Get the port. Where this information is retrieved from is engine dependent."""
if self.data['Engine'] == 'memcached':
return self.data['ConfigurationEndpoint']['Port']
elif self.data['Engine'] == 'redis':
# Redis only supports a single node (presently) so just use
# the first and only
return self.data['CacheNodes'][0]['Endpoint']['Port']
def _refresh_data(self, cache_cluster_data=None):
"""Refresh data about this cache cluster"""
if cache_cluster_data is None:
try:
response = self.conn.describe_cache_clusters(CacheClusterId=self.name, ShowCacheNodeInfo=True)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'CacheClusterNotFound':
self.data = None
self.status = 'gone'
return
else:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
cache_cluster_data = response['CacheClusters'][0]
self.data = cache_cluster_data
self.status = self.data['CacheClusterStatus']
# The documentation for elasticache lies -- status on rebooting is set
# to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it
# here to make status checks etc. more sane.
if self.status == 'rebooting cache cluster nodes':
self.status = 'rebooting'
def _get_nodes_to_remove(self):
"""If there are nodes to remove, it figures out which need to be removed"""
num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes
if num_nodes_to_remove <= 0:
return []
if not self.hard_modify:
msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed."
self.module.fail_json(msg=msg % self.name)
cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
return cache_node_ids[-num_nodes_to_remove:]
def main():
""" elasticache ansible module """
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent', 'rebooted']),
name=dict(required=True),
engine=dict(default='memcached'),
cache_engine_version=dict(default=""),
node_type=dict(default='cache.t2.small'),
num_nodes=dict(default=1, type='int'),
# alias for compat with the original PR 1950
cache_parameter_group=dict(default="", aliases=['parameter_group']),
cache_port=dict(type='int'),
cache_subnet_group=dict(default=""),
cache_security_groups=dict(default=[], type='list'),
security_group_ids=dict(default=[], type='list'),
zone=dict(default=""),
wait=dict(default=True, type='bool'),
hard_modify=dict(type='bool')
))
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
name = module.params['name']
state = module.params['state']
engine = module.params['engine']
cache_engine_version = module.params['cache_engine_version']
node_type = module.params['node_type']
num_nodes = module.params['num_nodes']
cache_port = module.params['cache_port']
cache_subnet_group = module.params['cache_subnet_group']
cache_security_groups = module.params['cache_security_groups']
security_group_ids = module.params['security_group_ids']
zone = module.params['zone']
wait = module.params['wait']
hard_modify = module.params['hard_modify']
cache_parameter_group = module.params['cache_parameter_group']
if cache_subnet_group and cache_security_groups:
module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups")
if state == 'present' and not num_nodes:
module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0")
elasticache_manager = ElastiCacheManager(module, name, engine,
cache_engine_version, node_type,
num_nodes, cache_port,
cache_parameter_group,
cache_subnet_group,
cache_security_groups,
security_group_ids, zone, wait,
hard_modify, region, **aws_connect_kwargs)
if state == 'present':
elasticache_manager.ensure_present()
elif state == 'absent':
elasticache_manager.ensure_absent()
elif state == 'rebooted':
elasticache_manager.ensure_rebooted()
facts_result = dict(changed=elasticache_manager.changed,
elasticache=elasticache_manager.get_info())
module.exit_json(**facts_result)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,789,822,694,232,306,000 | -9,124,420,706,542,281,000 | 36.350719 | 146 | 0.588145 | false |
t3dev/odoo | addons/website_sale_digital/controllers/main.py | 5 | 4569 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import io
from werkzeug.utils import redirect
from odoo import http
from odoo.http import request
from odoo.addons.sale.controllers.portal import CustomerPortal
from odoo.addons.website_sale.controllers.main import WebsiteSale
class WebsiteSaleDigitalConfirmation(WebsiteSale):
@http.route([
'/shop/confirmation',
], type='http', auth="public", website=True)
def payment_confirmation(self, **post):
response = super(WebsiteSaleDigitalConfirmation, self).payment_confirmation(**post)
order_lines = response.qcontext['order'].order_line
digital_content = any(x.product_id.type == 'digital' for x in order_lines)
response.qcontext.update(digital=digital_content)
return response
class WebsiteSaleDigital(CustomerPortal):
orders_page = '/my/orders'
@http.route([
'/my/orders/<int:order_id>',
], type='http', auth='public', website=True)
def portal_order_page(self, order_id=None, **post):
response = super(WebsiteSaleDigital, self).portal_order_page(order_id=order_id, **post)
        if 'sale_order' not in response.qcontext:
return response
order = response.qcontext['sale_order']
invoiced_lines = request.env['account.invoice.line'].sudo().search([('invoice_id', 'in', order.invoice_ids.ids), ('invoice_id.state', '=', 'paid')])
products = invoiced_lines.mapped('product_id') | order.order_line.filtered(lambda r: not r.price_subtotal).mapped('product_id')
if not order.amount_total:
# in that case, we should add all download links to the products
# since there is nothing to pay, so we shouldn't wait for an invoice
products = order.order_line.mapped('product_id')
purchased_products_attachments = {}
for product in products:
# Search for product attachments
Attachment = request.env['ir.attachment']
product_id = product.id
template = product.product_tmpl_id
att = Attachment.search_read(
domain=['|', '&', ('res_model', '=', product._name), ('res_id', '=', product_id), '&', ('res_model', '=', template._name), ('res_id', '=', template.id), ('product_downloadable', '=', True)],
fields=['name', 'write_date'],
order='write_date desc',
)
# Ignore products with no attachments
if not att:
continue
purchased_products_attachments[product_id] = att
response.qcontext.update({
'digital_attachments': purchased_products_attachments,
})
return response
@http.route([
'/my/download',
], type='http', auth='public')
def download_attachment(self, attachment_id):
# Check if this is a valid attachment id
attachment = request.env['ir.attachment'].sudo().search_read(
[('id', '=', int(attachment_id))],
["name", "datas", "file_type", "res_model", "res_id", "type", "url"]
)
if attachment:
attachment = attachment[0]
else:
return redirect(self.orders_page)
# Check if the user has bought the associated product
res_model = attachment['res_model']
res_id = attachment['res_id']
purchased_products = request.env['account.invoice.line'].get_digital_purchases()
if res_model == 'product.product':
if res_id not in purchased_products:
return redirect(self.orders_page)
# Also check for attachments in the product templates
elif res_model == 'product.template':
template_ids = request.env['product.product'].sudo().browse(purchased_products).mapped('product_tmpl_id').ids
if res_id not in template_ids:
return redirect(self.orders_page)
else:
return redirect(self.orders_page)
# The client has bought the product, otherwise it would have been blocked by now
if attachment["type"] == "url":
if attachment["url"]:
return redirect(attachment["url"])
else:
return request.not_found()
elif attachment["datas"]:
data = io.BytesIO(base64.standard_b64decode(attachment["datas"]))
return http.send_file(data, filename=attachment['name'], as_attachment=True)
else:
return request.not_found()
| gpl-3.0 | -7,171,628,004,683,059,000 | 669,826,218,252,332,500 | 40.536364 | 206 | 0.612607 | false |
doheekim/chuizonetest | lib/sqlalchemy/dialects/sqlite/pysqlite.py | 10 | 13249 | # sqlite/pysqlite.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sqlite+pysqlite
:name: pysqlite
:dbapi: sqlite3
:connectstring: sqlite+pysqlite:///file_path
:url: http://docs.python.org/library/sqlite3.html
Note that ``pysqlite`` is the same driver as the ``sqlite3``
module included with the Python distribution.
Driver
------
When using Python 2.5 and above, the built in ``sqlite3`` driver is
already installed and no additional installation is needed. Otherwise,
the ``pysqlite2`` driver needs to be present. This is the same driver as
``sqlite3``, just with a different name.
The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3``
is loaded. This allows an explicitly installed pysqlite driver to take
precedence over the built in one. As with all dialects, a specific
DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control
this explicitly::
from sqlite3 import dbapi2 as sqlite
e = create_engine('sqlite+pysqlite:///file.db', module=sqlite)
Connect Strings
---------------
The file specification for the SQLite database is taken as the "database"
portion of the URL. Note that the format of a SQLAlchemy url is::
driver://user:pass@host/database
This means that the actual filename to be used starts with the characters to
the **right** of the third slash. So connecting to a relative filepath
looks like::
# relative path
e = create_engine('sqlite:///path/to/database.db')
An absolute path, which is denoted by starting with a slash, means you
need **four** slashes::
# absolute path
e = create_engine('sqlite:////path/to/database.db')
To use a Windows path, regular drive specifications and backslashes can be
used. Double backslashes are probably needed::
# absolute path on Windows
e = create_engine('sqlite:///C:\\\\path\\\\to\\\\database.db')
The sqlite ``:memory:`` identifier is the default if no filepath is
present. Specify ``sqlite://`` and nothing else::
# in-memory database
e = create_engine('sqlite://')
Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------
The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect of any column
or expression explicitly cast as "date" or "timestamp" will be converted
to a Python date or datetime object. The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not. Additionally, SQLAlchemy does not at
this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
datetime/date types natively. Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.
Keeping in mind that pysqlite's parsing option is not recommended,
nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine('sqlite://',
connect_args={'detect_types':
sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
native_datetime=True
)
With this flag enabled, the DATE and TIMESTAMP types (but note - not the
DATETIME or TIME types...confused yet ?) will not perform any bind parameter
or result processing. Execution of "func.current_date()" will return a string.
"func.current_timestamp()" is registered as returning a DATETIME type in
SQLAlchemy, so this function still receives SQLAlchemy-level result
processing.
.. _pysqlite_threading_pooling:
Threading/Pooling Behavior
---------------------------
Pysqlite's default behavior is to prohibit the usage of a single connection
in more than one thread. This is originally intended to work with older
versions of SQLite that did not support multithreaded operation under
various circumstances. In particular, older SQLite versions
did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.
Pysqlite does include a now-undocumented flag known as
``check_same_thread`` which will disable this check, however note that
pysqlite connections are still not safe to use concurrently in multiple
threads. In particular, any statement execution calls would need to be
externally mutexed, as Pysqlite does not provide for thread-safe propagation
of error messages among other things. So while even ``:memory:`` databases
can be shared among threads in modern SQLite, Pysqlite doesn't provide enough
thread-safety to make this usage worth it.
SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
* When a ``:memory:`` SQLite database is specified, the dialect by default
will use :class:`.SingletonThreadPool`. This pool maintains a single
connection per thread, so that all access to the engine within the current
thread use the same ``:memory:`` database - other threads would access a
different ``:memory:`` database.
* When a file-based database is specified, the dialect will use
:class:`.NullPool` as the source of connections. This pool closes and
discards connections which are returned to the pool immediately. SQLite
file-based connections have extremely low overhead, so pooling is not
necessary. The scheme also prevents a connection from being used again in
a different thread and works best with SQLite's coarse-grained file locking.
.. versionchanged:: 0.7
Default selection of :class:`.NullPool` for SQLite file-based databases.
Previous versions select :class:`.SingletonThreadPool` by
default for all SQLite databases.
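As a minimal illustration (assuming the defaults described above are not
wanted), a pool class may also be chosen explicitly::
    from sqlalchemy.pool import NullPool
    engine = create_engine('sqlite:///myfile.db', poolclass=NullPool)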
Using a Memory Database in Multiple Threads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use a ``:memory:`` database in a multithreaded scenario, the same
connection object must be shared among threads, since the database exists
only within the scope of that connection. The
:class:`.StaticPool` implementation will maintain a single connection
globally, and the ``check_same_thread`` flag can be passed to Pysqlite
as ``False``::
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
connect_args={'check_same_thread':False},
poolclass=StaticPool)
Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.
Using Temporary Tables with SQLite
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Due to the way SQLite deals with temporary tables, if you wish to use a
temporary table in a file-based SQLite database across multiple checkouts
from the connection pool, such as when using an ORM :class:`.Session` where
the temporary table should continue to remain after :meth:`.Session.commit` or
:meth:`.Session.rollback` is called, a pool which maintains a single
connection must be used. Use :class:`.SingletonThreadPool` if the scope is
only needed within the current thread, or :class:`.StaticPool` if scope is
needed within multiple threads for this case::
# maintain the same connection per thread
from sqlalchemy.pool import SingletonThreadPool
engine = create_engine('sqlite:///mydb.db',
poolclass=SingletonThreadPool)
# maintain the same connection across all threads
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite:///mydb.db',
poolclass=StaticPool)
Note that :class:`.SingletonThreadPool` should be configured for the number
of threads that are to be used; beyond that number, connections will be
closed out in a non deterministic way.
Unicode
-------
The pysqlite driver only returns Python ``unicode`` objects in result sets,
never plain strings, and accommodates ``unicode`` objects within bound
parameter values in all cases. Regardless of the SQLAlchemy string type in
use, string-based result values will be Python ``unicode`` in Python 2.
The :class:`.Unicode` type should still be used to indicate those columns that
require unicode, however, so that non-``unicode`` values passed inadvertently
will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
is passed containing non-ASCII characters.
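A minimal sketch (not from the original documentation), assuming a table
holding non-ASCII text in one column::
    from sqlalchemy import Table, Column, Integer, Unicode, MetaData
    metadata = MetaData()
    users = Table('users', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('name', Unicode(50)))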
.. _pysqlite_serializable:
Serializable Transaction Isolation
----------------------------------
The pysqlite DBAPI driver has a long-standing bug in which transactional
state is not begun until the first DML statement, that is INSERT, UPDATE
or DELETE, is emitted. A SELECT statement will not cause transactional
state to begin. While this mode of usage is fine for typical situations
and has the advantage that the SQLite database file is not prematurely
locked, it breaks serializable transaction isolation, which requires
that the database file be locked upon any SQL being emitted.
To work around this issue, the ``BEGIN`` keyword can be emitted
at the start of each transaction. The following recipe establishes
a :meth:`.ConnectionEvents.begin` handler to achieve this::
from sqlalchemy import create_engine, event
engine = create_engine("sqlite:///myfile.db",
isolation_level='SERIALIZABLE')
@event.listens_for(engine, "begin")
def do_begin(conn):
conn.execute("BEGIN")
"""
from sqlalchemy.dialects.sqlite.base import SQLiteDialect, DATETIME, DATE
from sqlalchemy import exc, pool
from sqlalchemy import types as sqltypes
from sqlalchemy import util
import os
class _SQLite_pysqliteTimeStamp(DATETIME):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATETIME.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATETIME.result_processor(self, dialect, coltype)
class _SQLite_pysqliteDate(DATE):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATE.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATE.result_processor(self, dialect, coltype)
class SQLiteDialect_pysqlite(SQLiteDialect):
default_paramstyle = 'qmark'
colspecs = util.update_copy(
SQLiteDialect.colspecs,
{
sqltypes.Date: _SQLite_pysqliteDate,
sqltypes.TIMESTAMP: _SQLite_pysqliteTimeStamp,
}
)
if not util.py2k:
description_encoding = None
driver = 'pysqlite'
def __init__(self, **kwargs):
SQLiteDialect.__init__(self, **kwargs)
if self.dbapi is not None:
sqlite_ver = self.dbapi.version_info
if sqlite_ver < (2, 1, 3):
util.warn(
("The installed version of pysqlite2 (%s) is out-dated "
"and will cause errors in some cases. Version 2.1.3 "
"or greater is recommended.") %
'.'.join([str(subver) for subver in sqlite_ver]))
@classmethod
def dbapi(cls):
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError as e:
try:
from sqlite3 import dbapi2 as sqlite # try 2.5+ stdlib name.
except ImportError:
raise e
return sqlite
@classmethod
def get_pool_class(cls, url):
if url.database and url.database != ':memory:':
return pool.NullPool
else:
return pool.SingletonThreadPool
def _get_server_version_info(self, connection):
return self.dbapi.sqlite_version_info
def create_connect_args(self, url):
if url.username or url.password or url.host or url.port:
raise exc.ArgumentError(
"Invalid SQLite URL: %s\n"
"Valid SQLite URL forms are:\n"
" sqlite:///:memory: (or, sqlite://)\n"
" sqlite:///relative/path/to/file.db\n"
" sqlite:////absolute/path/to/file.db" % (url,))
filename = url.database or ':memory:'
if filename != ':memory:':
filename = os.path.abspath(filename)
opts = url.query.copy()
util.coerce_kw_type(opts, 'timeout', float)
util.coerce_kw_type(opts, 'isolation_level', str)
util.coerce_kw_type(opts, 'detect_types', int)
util.coerce_kw_type(opts, 'check_same_thread', bool)
util.coerce_kw_type(opts, 'cached_statements', int)
return ([filename], opts)
def is_disconnect(self, e, connection, cursor):
return isinstance(e, self.dbapi.ProgrammingError) and \
"Cannot operate on a closed database." in str(e)
dialect = SQLiteDialect_pysqlite
| apache-2.0 | 1,480,339,473,138,478,600 | 2,178,213,402,795,432,000 | 38.082596 | 78 | 0.698392 | false |
q1ang/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause | 2,433,239,449,210,166,000 | 5,953,286,903,838,166,000 | 30 | 77 | 0.664911 | false |
chilleo/ALPHA | module/CalculateGeneralizedDStatisticClass.py | 1 | 11045 | from PyQt4 import QtCore
from sys import platform
import os
import subprocess
# if platform == 'win32':
# path.insert(0, "../CommandLineFiles")
# elif platform == 'darwin':
# path.insert(0, "..\\CommandLineFiles")
# import CalculateGeneralizedDStatistic
import sys
from CommandLineFiles.RunDGEN import run_saved_dgen, Create_Network_Helper
sys.path.append('..\\')
from CommandLineFiles import CalculateGeneralizedDStatistic
"""
Functions:
~
Chabrielle Allen
Travis Benedict
Peter Dulworth
"""
class CalculateGeneralizedDStatisticClass(QtCore.QThread):
def __init__(self, parent=None):
super(CalculateGeneralizedDStatisticClass, self).__init__(parent)
def calculate_generalized(self, alignments, species_tree=None, reticulations=None, outgroup=None, window_size=100000000000,
window_offset=100000000000, verbose=False, alpha=0.01, use_inv=False, useDir=False,
directory="", statistic=False, save=False, f="DGenStatistic_", plot=False, meta=False, useAlreadyGeneratedStat=True):
self.emit(QtCore.SIGNAL('GEN_D_10'))
if(useAlreadyGeneratedStat == False): #generate a dgen stat
# run the java jar lines goes here
# FOR REFERENCE, THE ARGS AND JAVA COMMAND
# String treeString = args[0];
# String networkString = args[1];
# String outGroupName = args[2];
# String saveStatHere = args[3];
# int numberOfRandomRuns = Integer.parseInt(args[4]);
# GenerateDgenStatistic(treeString, networkString, outGroupName, saveStatHere, numberOfRandomRuns);
# Get the global path name to the jar file
dir_path = os.path.dirname(os.path.realpath(__file__))
jarPath = os.path.join(dir_path, "DGEN2.jar")
# Run PhyloNet dgen maker jar file
numberRandomRuns = 100
networkString = Create_Network_Helper(species_tree, reticulations, 0.9)
#species tree and network string need 's to work properly
species_tree = "'"+species_tree+"'"
networkString = "'"+networkString+"'"
jarRunOutput = subprocess.Popen("java -jar {0} {1} {2} {3} {4} {5}".format(jarPath, species_tree, networkString, outgroup, statistic, numberRandomRuns), stdout=subprocess.PIPE,
shell=True)
# Read output and convert to float
#pgtst = float(p.stdout.readline())
self.emit(QtCore.SIGNAL('GEN_D_50'))
            # and then always run the statistic on the data (done this way for now to save time; could be changed later to be slightly more user friendly, but most users will want to analyze their data anyway)
            # run the D-statistic; temporary variables are used to keep the naming clear (some names are being changed without rewriting the surrounding code)
runInVerboseMode = use_inv
saveResultsHere = f
resultsString = run_saved_dgen(statistic, alignments, window_size=window_size, window_offset=window_offset, verbose=runInVerboseMode, alpha=alpha)
#line here to save results string to file saveResultsHere (do i want to do this or just output to screen?
# If users want to save the statistic and speed up future runs
if len(saveResultsHere) > 0:
num = 0
file_name = saveResultsHere + ".txt"
while os.path.exists(file_name):
file_name = "DGenResults_{0}.txt".format(num)
num += 1
with open(file_name, "w") as text_file:
#output_str = "Taxa: {0}\n".format(taxa)
#text_file.write(output_str)
#output_str = "Statistic: {0}\n".format(generate_statistic_string((increase_resized, decrease_resized)))
#text_file.write(output_str)
text_file.write(resultsString)
text_file.close()
#put a line to either print results or to save em to a file. printing to screen done here
#self.emit(QtCore.SIGNAL('GEN_D_COMPLETE'))
self.emit(QtCore.SIGNAL('GEN_D_100'))
self.emit(QtCore.SIGNAL('DGEN2_FINISHED'), resultsString)
debugHere = 0
#run_saved_dgen(?,
# ['/Users/leo/rice/res/data/cichlid/alignment/cichlid6tax.phylip-sequential.txt'],
# verbose=True, plot='/Users/leo/rice/res/data/dgen/tmp/figC/plot_figCVerbose', meta='Dgen')
# OLD WAY
# alignments_to_d_resized, alignments_to_windows_to_d, standard_o, verbose_o = CalculateGeneralizedDStatistic.calculate_generalized\
# (alignments, species_tree, reticulations, outgroup, window_size, window_offset, verbose, alpha, use_inv,
# useDir, directory, statistic, save, f, plot, meta)
# self.emit(QtCore.SIGNAL("L_FINISHED"), alignments_to_d_resized, alignments_to_windows_to_d, standard_o, verbose_o)
def run(self):
"""
Starts PyQt Thread. Called with "start()".
"""
# try:
# self.window_splitter(self.inputFilename, self.windowSize, self.windowOffset)
# except IOError:
# self.emit(QtCore.SIGNAL('INVALID_ALIGNMENT_FILE'), self.inputFilename)
# return
self.calculate_generalized(self.alignments,
species_tree=self.species_tree,
reticulations=self.r,
outgroup=self.o,
window_size=self.window_size,
window_offset=self.window_offset,
verbose=self.verbose,
alpha=self.alpha,
use_inv=self.use_inv,
useDir=self.useDir,
directory=self.directory,
statistic=self.statistic,
save=self.save,
f=self.save_location,
plot=self.plot,
meta=self.meta,
useAlreadyGeneratedStat=self.useAlreadyGeneratedStat)
#self.emit(QtCore.SIGNAL('GEN_D_COMPLETE'), None)
if __name__ == '__main__':
gd = CalculateGeneralizedDStatisticClass()
species_tree = '((P1,P2),(P3,O));'
# species_tree = '(((P1,P2),(P3,(P4,P5))),O);'
r = [('P3', 'P1')]
alignments = ["exampleFiles/seqfile.txt"]
if platform == "darwin":
alignments = ["/Users/Peter/PycharmProjects/ALPHA/exampleFiles/seqfile.txt"]
else:
alignments = ["C:\\Users\\travi\Desktop\\dFoilStdPlusOneFar50kbp\\dFoilStdPlusOneFar50kbp\\sim2\\seqfile.txt"]
# gd.calculate_generalized(alignments, species_tree, r, window_size=50000, window_offset=50000, verbose=True, alpha=0.01, save=True)
    gd.calculate_generalized(alignments, species_tree, r, outgroup="O", window_size=50000, window_offset=50000, verbose=True, alpha=0.01, save=True)
# save_file = "C:\\Users\\travi\\Documents\\ALPHA\\CommandLineFiles\\DGenStatistic_35.txt"
# plot_formatting(calculate_generalized(alignments, statistic=save_file))
# print calculate_generalized(alignments, statistic="C:\\Users\\travi\\Documents\\ALPHA\\CommandLineFiles\\DGenStatistic_10.txt", verbose=True)
# calculate_generalized(alignments, statistic="C:\\Users\\travi\\Documents\\ALPHA\\CommandLineFiles\\DGenStatistic_35.txt")
# python - c "from CalculateGeneralizedDStatistic import *; calculate_generalized(['C:\\Users\\travi\\Documents\\PhyloVis\\exampleFiles\\ExampleDFOIL.phylip'], statistic='C:\\Users\\travi\\Documents\\ALPHA\\CommandLineFiles\\DGenStatistic_35.txt')"
# species_tree, r = '(((P1,P2),(P3,(P4,P5))),O);', [('P1', 'P3')]
# alignments = ["C:\\Users\\travi\\Documents\\PhyloVis\\exampleFiles\\ExampleDFOIL.phylip"]
# alignments = ["C:\\Users\\travi\\Desktop\\sixtaxa.txt"]
# i = calculate_generalized(alignments, species_tree, r, 100000, 100000, True, save=True)
# for j in range(10):
# k = calculate_generalized(alignments, species_tree, r, 100000, 100000, True, save=True)
# if i != k:
# print "FAIL"
# print i
# print k
# print j
# print pattern_string_generator(['A', 'A', 'A', 'A', 'A'])
# Inputs for paper
# file = "C:\\Users\\travi\\Desktop\\concatFile.phylip.txt"
# species_tree = '((C,G),(((A,Q),L),R));'
#
# window_size, window_offset = 10000, 1000
# r = [('L', 'R')]
# plot_formatting(calculate_generalized(file, species_tree, r, window_size, window_offset, True))
# window_size, window_offset = 100000, 10000
# plot_formatting(calculate_generalized(file, species_tree, r, window_size, window_offset, True))
#
# window_size, window_offset = 10000, 1000
# r = [('Q', 'R')]
# plot_formatting(calculate_generalized(file, species_tree, r, window_size, window_offset, True))
# window_size, window_offset = 100000, 10000
# plot_formatting(calculate_generalized(file, species_tree, r, window_size, window_offset, True))
#
# window_size, window_offset = 10000, 1000
# r = [('Q', 'G')]
# plot_formatting(calculate_generalized(file, species_tree, r, window_size, window_offset, True))
# window_size, window_offset = 100000, 10000
# plot_formatting(calculate_generalized(file, species_tree, r, window_size, window_offset, True))
# concat_directory("/Users/Peter/PycharmProjects/ALPHA/test_phylip_dir")
# print calculate_generalized('/Users/Peter/PycharmProjects/ALPHA/CLFILE', '(((P1,P2),(P3,P4)),O);', [('P1', 'P3')], 50000, 50000, True)
# file = 'C:\\Users\\travi\\Desktop\\clphylipseq.txt'
# # r = [('L', 'R')]
# r = [('Q', 'R')]
# # r = [('Q', 'G')]
# print calculate_generalized(file , '((C,G),(((A,Q),L),R));', r, 100000, 100000, True)
# concat_directory("/Users/Peter/PycharmProjects/ALPHA/travy_test")
# print calculate_generalized('/Users/Peter/PycharmProjects/ALPHA/CLFILE', '(((P1,P2),(P3,P4)),O);', [('P1', 'P3')], 50000, 50000, True)
# plot_formatting(calculate_generalized(alignments, species_tree, r, 1000, 1000, True))
# # lstat, signif, windows_to_l = calculate_generalized(alignment, species_tree, r, 1000, 1000, True, 0.05)
# # plot_formatting((lstat, signif, windows_to_l))
# plot_formatting(calculate_generalized('C:\\Users\\travi\\Desktop\\seqfileNamed', '(((P1,P2),(P3,P4)),O);', [('P3', 'P1')], 1000, 1000, False, 0.99), False)
# print calculate_generalized('C:\\Users\\travi\\Desktop\\seqfileNamed', '(((P1,P2),(P3,P4)),O);', [('P1', 'P3')], 50000, 50000, True)
# python -c"from CalculateGeneralizedDStatistic import *; plot_formatting(calculate_generalized('C:\\Users\\travi\\Desktop\\seqfileNamed', '(((P1,P2),(P3,P4)),O);', [('P1', 'P3')], 100000, 100000, True, 0.01), True)"
| mit | -4,306,757,253,497,213,400 | 8,073,091,099,892,512,000 | 48.308036 | 252 | 0.612404 | false |
yoki/phantomjs | src/breakpad/src/tools/gyp/test/sibling/gyptest-relocate.py | 151 | 1144 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('build/all.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('build/all.gyp', test.ALL, chdir='relocate/src')
chdir = 'relocate/src/build'
# The top-level Makefile is in the directory where gyp was run.
# TODO(mmoss) Should the Makefile go in the directory of the passed in .gyp
# file? What about when passing in multiple .gyp files? Would sub-project
# Makefiles (see http://codereview.chromium.org/340008 comments) solve this?
if test.format == 'make':
chdir = 'relocate/src'
if test.format == 'xcode':
chdir = 'relocate/src/prog1'
test.run_built_executable('prog1',
chdir=chdir,
stdout="Hello from prog1.c\n")
if test.format == 'xcode':
chdir = 'relocate/src/prog2'
test.run_built_executable('prog2',
chdir=chdir,
stdout="Hello from prog2.c\n")
test.pass_test()
| bsd-3-clause | 8,254,033,439,657,449,000 | 5,034,024,984,852,384,000 | 26.902439 | 76 | 0.65035 | false |
pli3/enigma2-pli | lib/python/Plugins/Extensions/DVDBurn/Process.py | 14 | 36761 | from Components.Task import Task, Job, DiskspacePrecondition, Condition, ToolExistsPrecondition
from Components.Harddisk import harddiskmanager
from Screens.MessageBox import MessageBox
import os
class png2yuvTask(Task):
def __init__(self, job, inputfile, outputfile):
Task.__init__(self, job, "Creating menu video")
self.setTool("png2yuv")
self.args += ["-n1", "-Ip", "-f25", "-j", inputfile]
self.dumpFile = outputfile
self.weighting = 15
def run(self, callback):
Task.run(self, callback)
self.container.stdoutAvail.remove(self.processStdout)
self.container.dumpToFile(self.dumpFile)
def processStderr(self, data):
print "[png2yuvTask]", data[:-1]
class mpeg2encTask(Task):
def __init__(self, job, inputfile, outputfile):
Task.__init__(self, job, "Encoding menu video")
self.setTool("mpeg2enc")
self.args += ["-f8", "-np", "-a2", "-o", outputfile]
self.inputFile = inputfile
self.weighting = 25
def run(self, callback):
Task.run(self, callback)
self.container.readFromFile(self.inputFile)
def processOutputLine(self, line):
print "[mpeg2encTask]", line[:-1]
class spumuxTask(Task):
def __init__(self, job, xmlfile, inputfile, outputfile):
Task.__init__(self, job, "Muxing buttons into menu")
self.setTool("spumux")
self.args += [xmlfile]
self.inputFile = inputfile
self.dumpFile = outputfile
self.weighting = 15
def run(self, callback):
Task.run(self, callback)
self.container.stdoutAvail.remove(self.processStdout)
self.container.dumpToFile(self.dumpFile)
self.container.readFromFile(self.inputFile)
def processStderr(self, data):
print "[spumuxTask]", data[:-1]
class MakeFifoNode(Task):
def __init__(self, job, number):
Task.__init__(self, job, "Make FIFO nodes")
self.setTool("mknod")
nodename = self.job.workspace + "/dvd_title_%d" % number + ".mpg"
self.args += [nodename, "p"]
self.weighting = 10
class LinkTS(Task):
def __init__(self, job, sourcefile, link_name):
Task.__init__(self, job, "Creating symlink for source titles")
self.setTool("ln")
self.args += ["-s", sourcefile, link_name]
self.weighting = 10
class CopyMeta(Task):
def __init__(self, job, sourcefile):
Task.__init__(self, job, "Copy title meta files")
self.setTool("cp")
from os import listdir
path, filename = sourcefile.rstrip("/").rsplit("/",1)
tsfiles = listdir(path)
for file in tsfiles:
if file.startswith(filename+"."):
self.args += [path+'/'+file]
self.args += [self.job.workspace]
self.weighting = 15
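# DemuxTask drives ProjectX: it splits the recorded transport stream into
# elementary video/audio streams, applying the title's cutlist on the way.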
class DemuxTask(Task):
def __init__(self, job, inputfile):
Task.__init__(self, job, "Demux video into ES")
title = job.project.titles[job.i]
self.global_preconditions.append(DiskspacePrecondition(title.estimatedDiskspace))
self.setTool("projectx")
self.args += [inputfile, "-demux", "-set", "ExportPanel.Streamtype.Subpicture=0", "-set", "ExportPanel.Streamtype.Teletext=0", "-out", self.job.workspace ]
self.end = 300
self.prog_state = 0
self.weighting = 1000
self.cutfile = self.job.workspace + "/cut_%d.Xcl" % (job.i+1)
self.cutlist = title.cutlist
self.currentPID = None
self.relevantAudioPIDs = [ ]
self.getRelevantAudioPIDs(title)
self.generated_files = [ ]
self.mplex_audiofiles = { }
self.mplex_videofile = ""
self.mplex_streamfiles = [ ]
if len(self.cutlist) > 1:
self.args += [ "-cut", self.cutfile ]
def prepare(self):
self.writeCutfile()
def getRelevantAudioPIDs(self, title):
for audiotrack in title.properties.audiotracks:
if audiotrack.active.getValue():
self.relevantAudioPIDs.append(audiotrack.pid.getValue())
def processOutputLine(self, line):
line = line[:-1]
#print "[DemuxTask]", line
MSG_NEW_FILE = "---> new File: "
MSG_PROGRESS = "[PROGRESS] "
MSG_NEW_MP2 = "++> Mpg Audio: PID 0x"
MSG_NEW_AC3 = "++> AC3/DTS Audio: PID 0x"
if line.startswith(MSG_NEW_FILE):
file = line[len(MSG_NEW_FILE):]
if file[0] == "'":
file = file[1:-1]
self.haveNewFile(file)
elif line.startswith(MSG_PROGRESS):
progress = line[len(MSG_PROGRESS):]
self.haveProgress(progress)
elif line.startswith(MSG_NEW_MP2) or line.startswith(MSG_NEW_AC3):
try:
self.currentPID = str(int(line.split(': PID 0x',1)[1].split(' ',1)[0],16))
except ValueError:
print "[DemuxTask] ERROR: couldn't detect Audio PID (projectx too old?)"
def haveNewFile(self, file):
print "[DemuxTask] produced file:", file, self.currentPID
self.generated_files.append(file)
if self.currentPID in self.relevantAudioPIDs:
self.mplex_audiofiles[self.currentPID] = file
elif file.endswith("m2v"):
self.mplex_videofile = file
def haveProgress(self, progress):
#print "PROGRESS [%s]" % progress
MSG_CHECK = "check & synchronize audio file"
MSG_DONE = "done..."
if progress == "preparing collection(s)...":
self.prog_state = 0
elif progress[:len(MSG_CHECK)] == MSG_CHECK:
self.prog_state += 1
else:
try:
p = int(progress)
p = p - 1 + self.prog_state * 100
if p > self.progress:
self.progress = p
except ValueError:
pass
def writeCutfile(self):
f = open(self.cutfile, "w")
f.write("CollectionPanel.CutMode=4\n")
for p in self.cutlist:
s = p / 90000
m = s / 60
h = m / 60
m %= 60
s %= 60
f.write("%02d:%02d:%02d\n" % (h, m, s))
f.close()
def cleanup(self, failed):
print "[DemuxTask::cleanup]"
self.mplex_streamfiles = [ self.mplex_videofile ]
for pid in self.relevantAudioPIDs:
if pid in self.mplex_audiofiles:
self.mplex_streamfiles.append(self.mplex_audiofiles[pid])
print self.mplex_streamfiles
if failed:
import os
for file in self.generated_files:
try:
os.remove(file)
except OSError:
pass
class MplexTaskPostcondition(Condition):
def check(self, task):
if task.error == task.ERROR_UNDERRUN:
return True
return task.error is None
def getErrorMessage(self, task):
return {
task.ERROR_UNDERRUN: ("Can't multiplex source video!"),
task.ERROR_UNKNOWN: ("An unknown error occurred!")
}[task.error]
class MplexTask(Task):
ERROR_UNDERRUN, ERROR_UNKNOWN = range(2)
def __init__(self, job, outputfile, inputfiles=None, demux_task=None, weighting = 500):
Task.__init__(self, job, "Mux ES into PS")
self.weighting = weighting
self.demux_task = demux_task
self.postconditions.append(MplexTaskPostcondition())
self.setTool("mplex")
self.args += ["-f8", "-o", outputfile, "-v1"]
if inputfiles:
self.args += inputfiles
def setTool(self, tool):
self.cmd = tool
self.args = [tool]
self.global_preconditions.append(ToolExistsPrecondition())
# we don't want the ReturncodePostcondition in this case because for right now we're just gonna ignore the fact that mplex fails with a buffer underrun error on some streams (this always at the very end)
def prepare(self):
self.error = None
if self.demux_task:
self.args += self.demux_task.mplex_streamfiles
def processOutputLine(self, line):
print "[MplexTask] ", line[:-1]
if line.startswith("**ERROR:"):
if line.find("Frame data under-runs detected") != -1:
self.error = self.ERROR_UNDERRUN
else:
self.error = self.ERROR_UNKNOWN
class RemoveESFiles(Task):
def __init__(self, job, demux_task):
Task.__init__(self, job, "Remove temp. files")
self.demux_task = demux_task
self.setTool("rm")
self.weighting = 10
def prepare(self):
self.args += ["-f"]
self.args += self.demux_task.generated_files
self.args += [self.demux_task.cutfile]
class ReplexTask(Task):
def __init__(self, job, outputfile, inputfile):
Task.__init__(self, job, "ReMux TS into PS")
self.weighting = 1000
self.setTool("replex")
self.args += ["-t", "DVD", "-j", "-o", outputfile, inputfile]
def processOutputLine(self, line):
print "[ReplexTask] ", line[:-1]
class DVDAuthorTask(Task):
def __init__(self, job):
Task.__init__(self, job, "Authoring DVD")
self.weighting = 20
self.setTool("dvdauthor")
self.CWD = self.job.workspace
self.args += ["-x", self.job.workspace+"/dvdauthor.xml"]
self.menupreview = job.menupreview
def processOutputLine(self, line):
print "[DVDAuthorTask] ", line[:-1]
if not self.menupreview and line.startswith("STAT: Processing"):
self.callback(self, [], stay_resident=True)
elif line.startswith("STAT: VOBU"):
try:
progress = int(line.split("MB")[0].split(" ")[-1])
if progress:
self.job.mplextask.progress = progress
print "[DVDAuthorTask] update mplextask progress:", self.job.mplextask.progress, "of", self.job.mplextask.end
except:
print "couldn't set mux progress"
class DVDAuthorFinalTask(Task):
def __init__(self, job):
Task.__init__(self, job, "dvdauthor finalize")
self.setTool("dvdauthor")
self.args += ["-T", "-o", self.job.workspace + "/dvd"]
class WaitForResidentTasks(Task):
def __init__(self, job):
Task.__init__(self, job, "waiting for dvdauthor to finalize")
def run(self, callback):
print "waiting for %d resident task(s) %s to finish..." % (len(self.job.resident_tasks),str(self.job.resident_tasks))
self.callback = callback
if self.job.resident_tasks == 0:
callback(self, [])
class BurnTaskPostcondition(Condition):
RECOVERABLE = True
def check(self, task):
if task.returncode == 0:
return True
elif task.error is None or task.error is task.ERROR_MINUSRWBUG:
return True
return False
def getErrorMessage(self, task):
return {
task.ERROR_NOTWRITEABLE: _("Medium is not a writeable DVD!"),
task.ERROR_LOAD: _("Could not load medium! No disc inserted?"),
task.ERROR_SIZE: _("Content does not fit on DVD!"),
task.ERROR_WRITE_FAILED: _("Write failed!"),
task.ERROR_DVDROM: _("No (supported) DVDROM found!"),
task.ERROR_ISOFS: _("Medium is not empty!"),
task.ERROR_FILETOOLARGE: _("TS file is too large for ISO9660 level 1!"),
task.ERROR_ISOTOOLARGE: _("ISO file is too large for this filesystem!"),
task.ERROR_UNKNOWN: _("An unknown error occurred!")
}[task.error]
class BurnTask(Task):
ERROR_NOTWRITEABLE, ERROR_LOAD, ERROR_SIZE, ERROR_WRITE_FAILED, ERROR_DVDROM, ERROR_ISOFS, ERROR_FILETOOLARGE, ERROR_ISOTOOLARGE, ERROR_MINUSRWBUG, ERROR_UNKNOWN = range(10)
def __init__(self, job, extra_args=[], tool="growisofs"):
Task.__init__(self, job, job.name)
self.weighting = 500
self.end = 120 # 100 for writing, 10 for buffer flush, 10 for closing disc
self.postconditions.append(BurnTaskPostcondition())
self.setTool(tool)
self.args += extra_args
def prepare(self):
self.error = None
def processOutputLine(self, line):
line = line[:-1]
print "[GROWISOFS] %s" % line
if line[8:14] == "done, ":
self.progress = float(line[:6])
print "progress:", self.progress
elif line.find("flushing cache") != -1:
self.progress = 100
elif line.find("closing disc") != -1:
self.progress = 110
elif line.startswith(":-["):
if line.find("ASC=30h") != -1:
self.error = self.ERROR_NOTWRITEABLE
elif line.find("ASC=24h") != -1:
self.error = self.ERROR_LOAD
elif line.find("SK=5h/ASC=A8h/ACQ=04h") != -1:
self.error = self.ERROR_MINUSRWBUG
else:
self.error = self.ERROR_UNKNOWN
print "BurnTask: unknown error %s" % line
elif line.startswith(":-("):
if line.find("No space left on device") != -1:
self.error = self.ERROR_SIZE
elif self.error == self.ERROR_MINUSRWBUG:
print "*sigh* this is a known bug. we're simply gonna assume everything is fine."
self.postconditions = []
elif line.find("write failed") != -1:
self.error = self.ERROR_WRITE_FAILED
elif line.find("unable to open64(") != -1 and line.find(",O_RDONLY): No such file or directory") != -1:
self.error = self.ERROR_DVDROM
elif line.find("media is not recognized as recordable DVD") != -1:
self.error = self.ERROR_NOTWRITEABLE
else:
self.error = self.ERROR_UNKNOWN
print "BurnTask: unknown error %s" % line
elif line.startswith("FATAL:"):
if line.find("already carries isofs!"):
self.error = self.ERROR_ISOFS
else:
self.error = self.ERROR_UNKNOWN
print "BurnTask: unknown error %s" % line
elif line.find("-allow-limited-size was not specified. There is no way do represent this file size. Aborting.") != -1:
self.error = self.ERROR_FILETOOLARGE
elif line.startswith("genisoimage: File too large."):
self.error = self.ERROR_ISOTOOLARGE
def setTool(self, tool):
self.cmd = tool
self.args = [tool]
self.global_preconditions.append(ToolExistsPrecondition())
class RemoveDVDFolder(Task):
def __init__(self, job):
Task.__init__(self, job, "Remove temp. files")
self.setTool("rm")
self.args += ["-rf", self.job.workspace]
self.weighting = 10
class CheckDiskspaceTask(Task):
def __init__(self, job):
Task.__init__(self, job, "Checking free space")
		totalsize = 0
maxsize = 0
for title in job.project.titles:
titlesize = title.estimatedDiskspace
if titlesize > maxsize: maxsize = titlesize
totalsize += titlesize
diskSpaceNeeded = totalsize + maxsize
job.estimateddvdsize = totalsize / 1024 / 1024
totalsize += 50*1024*1024 # require an extra safety 50 MB
self.global_preconditions.append(DiskspacePrecondition(diskSpaceNeeded))
self.weighting = 5
def abort(self):
self.finish(aborted = True)
def run(self, callback):
self.callback = callback
failed_preconditions = self.checkPreconditions(True) + self.checkPreconditions(False)
if len(failed_preconditions):
callback(self, failed_preconditions)
return
Task.processFinished(self, 0)
class PreviewTask(Task):
def __init__(self, job, path):
Task.__init__(self, job, "Preview")
self.postconditions.append(PreviewTaskPostcondition())
self.job = job
self.path = path
self.weighting = 10
def run(self, callback):
self.callback = callback
if self.job.menupreview:
self.previewProject()
else:
import Screens.Standby
if Screens.Standby.inStandby:
self.previewCB(False)
else:
from Tools import Notifications
Notifications.AddNotificationWithCallback(self.previewCB, MessageBox, _("Do you want to preview this DVD before burning?"), timeout = 60, default = False)
def abort(self):
self.finish(aborted = True)
def previewCB(self, answer):
if answer == True:
self.previewProject()
else:
self.closedCB(True)
def playerClosed(self):
if self.job.menupreview:
self.closedCB(True)
else:
from Tools import Notifications
Notifications.AddNotificationWithCallback(self.closedCB, MessageBox, _("Do you want to burn this collection to DVD medium?") )
def closedCB(self, answer):
if answer == True:
Task.processFinished(self, 0)
else:
Task.processFinished(self, 1)
def previewProject(self):
from Screens.DVD import DVDPlayer
self.job.project.session.openWithCallback(self.playerClosed, DVDPlayer, dvd_filelist= [ self.path ])
class PreviewTaskPostcondition(Condition):
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
return "Cancel"
class ImagingPostcondition(Condition):
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
return _("Failed") + ": python-imaging"
class ImagePrepareTask(Task):
def __init__(self, job):
Task.__init__(self, job, _("please wait, loading picture..."))
self.postconditions.append(ImagingPostcondition())
self.weighting = 20
self.job = job
self.Menus = job.Menus
def run(self, callback):
self.callback = callback
# we are doing it this weird way so that the TaskView Screen actually pops up before the spinner comes
from enigma import eTimer
self.delayTimer = eTimer()
self.delayTimer.callback.append(self.conduct)
self.delayTimer.start(10,1)
def conduct(self):
try:
from ImageFont import truetype
from Image import open as Image_open
s = self.job.project.menutemplate.settings
(width, height) = s.dimensions.getValue()
self.Menus.im_bg_orig = Image_open(s.menubg.getValue())
if self.Menus.im_bg_orig.size != (width, height):
self.Menus.im_bg_orig = self.Menus.im_bg_orig.resize((width, height))
self.Menus.fontsizes = [s.fontsize_headline.getValue(), s.fontsize_title.getValue(), s.fontsize_subtitle.getValue()]
self.Menus.fonts = [(truetype(s.fontface_headline.getValue(), self.Menus.fontsizes[0])), (truetype(s.fontface_title.getValue(), self.Menus.fontsizes[1])),(truetype(s.fontface_subtitle.getValue(), self.Menus.fontsizes[2]))]
Task.processFinished(self, 0)
except:
Task.processFinished(self, 1)
class MenuImageTask(Task):
def __init__(self, job, menu_count, spuxmlfilename, menubgpngfilename, highlightpngfilename):
Task.__init__(self, job, "Create Menu %d Image" % menu_count)
self.postconditions.append(ImagingPostcondition())
self.weighting = 10
self.job = job
self.Menus = job.Menus
self.menu_count = menu_count
self.spuxmlfilename = spuxmlfilename
self.menubgpngfilename = menubgpngfilename
self.highlightpngfilename = highlightpngfilename
def run(self, callback):
self.callback = callback
#try:
import ImageDraw, Image, os
s = self.job.project.menutemplate.settings
s_top = s.margin_top.getValue()
s_bottom = s.margin_bottom.getValue()
s_left = s.margin_left.getValue()
s_right = s.margin_right.getValue()
s_rows = s.space_rows.getValue()
s_cols = s.space_cols.getValue()
nr_cols = s.cols.getValue()
nr_rows = s.rows.getValue()
thumb_size = s.thumb_size.getValue()
if thumb_size[0]:
from Image import open as Image_open
(s_width, s_height) = s.dimensions.getValue()
fonts = self.Menus.fonts
im_bg = self.Menus.im_bg_orig.copy()
im_high = Image.new("P", (s_width, s_height), 0)
im_high.putpalette(self.Menus.spu_palette)
draw_bg = ImageDraw.Draw(im_bg)
draw_high = ImageDraw.Draw(im_high)
if self.menu_count == 1:
headlineText = self.job.project.settings.name.getValue().decode("utf-8")
headlinePos = self.getPosition(s.offset_headline.getValue(), 0, 0, s_width, s_top, draw_bg.textsize(headlineText, font=fonts[0]))
draw_bg.text(headlinePos, headlineText, fill=self.Menus.color_headline, font=fonts[0])
spuxml = """<?xml version="1.0" encoding="utf-8"?>
<subpictures>
<stream>
<spu
highlight="%s"
transparent="%02x%02x%02x"
start="00:00:00.00"
force="yes" >""" % (self.highlightpngfilename, self.Menus.spu_palette[0], self.Menus.spu_palette[1], self.Menus.spu_palette[2])
#rowheight = (self.Menus.fontsizes[1]+self.Menus.fontsizes[2]+thumb_size[1]+s_rows)
menu_start_title = (self.menu_count-1)*self.job.titles_per_menu + 1
menu_end_title = (self.menu_count)*self.job.titles_per_menu + 1
nr_titles = len(self.job.project.titles)
if menu_end_title > nr_titles:
menu_end_title = nr_titles+1
col = 1
row = 1
for title_no in range( menu_start_title , menu_end_title ):
title = self.job.project.titles[title_no-1]
col_width = ( s_width - s_left - s_right ) / nr_cols
row_height = ( s_height - s_top - s_bottom ) / nr_rows
left = s_left + ( (col-1) * col_width ) + s_cols/2
right = left + col_width - s_cols
top = s_top + ( (row-1) * row_height) + s_rows/2
bottom = top + row_height - s_rows
width = right - left
height = bottom - top
if bottom > s_height:
bottom = s_height
#draw_bg.rectangle((left, top, right, bottom), outline=(255,0,0))
im_cell_bg = Image.new("RGBA", (width, height),(0,0,0,0))
draw_cell_bg = ImageDraw.Draw(im_cell_bg)
im_cell_high = Image.new("P", (width, height), 0)
im_cell_high.putpalette(self.Menus.spu_palette)
draw_cell_high = ImageDraw.Draw(im_cell_high)
if thumb_size[0]:
thumbPos = self.getPosition(s.offset_thumb.getValue(), 0, 0, width, height, thumb_size)
box = (thumbPos[0], thumbPos[1], thumbPos[0]+thumb_size[0], thumbPos[1]+thumb_size[1])
try:
thumbIm = Image_open(title.inputfile.rsplit('.',1)[0] + ".png")
im_cell_bg.paste(thumbIm,thumbPos)
except:
draw_cell_bg.rectangle(box, fill=(64,127,127,127))
border = s.thumb_border.getValue()
if border:
draw_cell_high.rectangle(box, fill=1)
draw_cell_high.rectangle((box[0]+border, box[1]+border, box[2]-border, box[3]-border), fill=0)
titleText = title.formatDVDmenuText(s.titleformat.getValue(), title_no).decode("utf-8")
titlePos = self.getPosition(s.offset_title.getValue(), 0, 0, width, height, draw_bg.textsize(titleText, font=fonts[1]))
draw_cell_bg.text(titlePos, titleText, fill=self.Menus.color_button, font=fonts[1])
draw_cell_high.text(titlePos, titleText, fill=1, font=self.Menus.fonts[1])
subtitleText = title.formatDVDmenuText(s.subtitleformat.getValue(), title_no).decode("utf-8")
subtitlePos = self.getPosition(s.offset_subtitle.getValue(), 0, 0, width, height, draw_cell_bg.textsize(subtitleText, font=fonts[2]))
draw_cell_bg.text(subtitlePos, subtitleText, fill=self.Menus.color_button, font=fonts[2])
del draw_cell_bg
del draw_cell_high
im_bg.paste(im_cell_bg,(left, top, right, bottom), mask=im_cell_bg)
im_high.paste(im_cell_high,(left, top, right, bottom))
spuxml += """
<button name="button%s" x0="%d" x1="%d" y0="%d" y1="%d"/>""" % (str(title_no).zfill(2),left,right,top,bottom )
if col < nr_cols:
col += 1
else:
col = 1
row += 1
top = s_height - s_bottom - s_rows/2
if self.menu_count < self.job.nr_menus:
next_page_text = s.next_page_text.getValue().decode("utf-8")
textsize = draw_bg.textsize(next_page_text, font=fonts[1])
pos = ( s_width-textsize[0]-s_right, top )
draw_bg.text(pos, next_page_text, fill=self.Menus.color_button, font=fonts[1])
draw_high.text(pos, next_page_text, fill=1, font=fonts[1])
spuxml += """
<button name="button_next" x0="%d" x1="%d" y0="%d" y1="%d"/>""" % (pos[0],pos[0]+textsize[0],pos[1],pos[1]+textsize[1])
if self.menu_count > 1:
prev_page_text = s.prev_page_text.getValue().decode("utf-8")
textsize = draw_bg.textsize(prev_page_text, font=fonts[1])
pos = ( (s_left+s_cols/2), top )
draw_bg.text(pos, prev_page_text, fill=self.Menus.color_button, font=fonts[1])
draw_high.text(pos, prev_page_text, fill=1, font=fonts[1])
spuxml += """
<button name="button_prev" x0="%d" x1="%d" y0="%d" y1="%d"/>""" % (pos[0],pos[0]+textsize[0],pos[1],pos[1]+textsize[1])
del draw_bg
del draw_high
fd=open(self.menubgpngfilename,"w")
im_bg.save(fd,"PNG")
fd.close()
fd=open(self.highlightpngfilename,"w")
im_high.save(fd,"PNG")
fd.close()
spuxml += """
</spu>
</stream>
</subpictures>"""
f = open(self.spuxmlfilename, "w")
f.write(spuxml)
f.close()
Task.processFinished(self, 0)
#except:
#Task.processFinished(self, 1)
def getPosition(self, offset, left, top, right, bottom, size):
pos = [left, top]
if offset[0] != -1:
pos[0] += offset[0]
else:
pos[0] += ( (right-left) - size[0] ) / 2
if offset[1] != -1:
pos[1] += offset[1]
else:
pos[1] += ( (bottom-top) - size[1] ) / 2
return tuple(pos)
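	# Worked example (numbers assumed, not taken from any real template): with offset (-1, -1)
	# the box is centred, e.g. getPosition((-1, -1), 0, 0, 720, 576, (100, 50)) -> (310, 263).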
class Menus:
def __init__(self, job):
self.job = job
job.Menus = self
s = self.job.project.menutemplate.settings
self.color_headline = tuple(s.color_headline.getValue())
self.color_button = tuple(s.color_button.getValue())
self.color_highlight = tuple(s.color_highlight.getValue())
self.spu_palette = [ 0x60, 0x60, 0x60 ] + s.color_highlight.getValue()
ImagePrepareTask(job)
nr_titles = len(job.project.titles)
job.titles_per_menu = s.cols.getValue()*s.rows.getValue()
job.nr_menus = ((nr_titles+job.titles_per_menu-1)/job.titles_per_menu)
#a new menu_count every 4 titles (1,2,3,4->1 ; 5,6,7,8->2 etc.)
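		# e.g. (illustrative) with titles_per_menu = 4, 9 titles give (9+4-1)/4 = 3 menus.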
for menu_count in range(1 , job.nr_menus+1):
num = str(menu_count)
spuxmlfilename = job.workspace+"/spumux"+num+".xml"
menubgpngfilename = job.workspace+"/dvd_menubg"+num+".png"
highlightpngfilename = job.workspace+"/dvd_highlight"+num+".png"
MenuImageTask(job, menu_count, spuxmlfilename, menubgpngfilename, highlightpngfilename)
png2yuvTask(job, menubgpngfilename, job.workspace+"/dvdmenubg"+num+".yuv")
menubgm2vfilename = job.workspace+"/dvdmenubg"+num+".mv2"
mpeg2encTask(job, job.workspace+"/dvdmenubg"+num+".yuv", menubgm2vfilename)
menubgmpgfilename = job.workspace+"/dvdmenubg"+num+".mpg"
menuaudiofilename = s.menuaudio.getValue()
MplexTask(job, outputfile=menubgmpgfilename, inputfiles = [menubgm2vfilename, menuaudiofilename], weighting = 20)
menuoutputfilename = job.workspace+"/dvdmenu"+num+".mpg"
spumuxTask(job, spuxmlfilename, menubgmpgfilename, menuoutputfilename)
def CreateAuthoringXML_singleset(job):
nr_titles = len(job.project.titles)
mode = job.project.settings.authormode.getValue()
authorxml = []
authorxml.append('<?xml version="1.0" encoding="utf-8"?>\n')
authorxml.append(' <dvdauthor dest="' + (job.workspace+"/dvd") + '">\n')
authorxml.append(' <vmgm>\n')
authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + job.project.settings.vmgm.getValue() + '" />\n', )
if mode.startswith("menu"):
authorxml.append(' <post> jump titleset 1 menu; </post>\n')
else:
authorxml.append(' <post> jump title 1; </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' </vmgm>\n')
authorxml.append(' <titleset>\n')
if mode.startswith("menu"):
authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
authorxml.append(' <video aspect="4:3"/>\n')
for menu_count in range(1 , job.nr_menus+1):
if menu_count == 1:
authorxml.append(' <pgc entry="root">\n')
else:
authorxml.append(' <pgc>\n')
menu_start_title = (menu_count-1)*job.titles_per_menu + 1
menu_end_title = (menu_count)*job.titles_per_menu + 1
if menu_end_title > nr_titles:
menu_end_title = nr_titles+1
for i in range( menu_start_title , menu_end_title ):
authorxml.append(' <button name="button' + (str(i).zfill(2)) + '"> jump title ' + str(i) +'; </button>\n')
if menu_count > 1:
authorxml.append(' <button name="button_prev"> jump menu ' + str(menu_count-1) + '; </button>\n')
if menu_count < job.nr_menus:
authorxml.append(' <button name="button_next"> jump menu ' + str(menu_count+1) + '; </button>\n')
menuoutputfilename = job.workspace+"/dvdmenu"+str(menu_count)+".mpg"
authorxml.append(' <vob file="' + menuoutputfilename + '" pause="inf"/>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' <titles>\n')
for i in range( nr_titles ):
chapters = ','.join(job.project.titles[i].getChapterMarks())
title_no = i+1
title_filename = job.workspace + "/dvd_title_%d.mpg" % (title_no)
if job.menupreview:
LinkTS(job, job.project.settings.vmgm.getValue(), title_filename)
else:
MakeFifoNode(job, title_no)
if mode.endswith("linked") and title_no < nr_titles:
post_tag = "jump title %d;" % ( title_no+1 )
elif mode.startswith("menu"):
post_tag = "call vmgm menu 1;"
else: post_tag = ""
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + title_filename + '" chapters="' + chapters + '" />\n')
authorxml.append(' <post> ' + post_tag + ' </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </titles>\n')
authorxml.append(' </titleset>\n')
authorxml.append(' </dvdauthor>\n')
f = open(job.workspace+"/dvdauthor.xml", "w")
for x in authorxml:
f.write(x)
f.close()
def CreateAuthoringXML_multiset(job):
nr_titles = len(job.project.titles)
mode = job.project.settings.authormode.getValue()
authorxml = []
authorxml.append('<?xml version="1.0" encoding="utf-8"?>\n')
authorxml.append(' <dvdauthor dest="' + (job.workspace+"/dvd") + '" jumppad="yes">\n')
authorxml.append(' <vmgm>\n')
authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
authorxml.append(' <video aspect="4:3"/>\n')
if mode.startswith("menu"):
for menu_count in range(1 , job.nr_menus+1):
if menu_count == 1:
authorxml.append(' <pgc>\n')
else:
authorxml.append(' <pgc>\n')
menu_start_title = (menu_count-1)*job.titles_per_menu + 1
menu_end_title = (menu_count)*job.titles_per_menu + 1
if menu_end_title > nr_titles:
menu_end_title = nr_titles+1
for i in range( menu_start_title , menu_end_title ):
authorxml.append(' <button name="button' + (str(i).zfill(2)) + '"> jump titleset ' + str(i) +' title 1; </button>\n')
if menu_count > 1:
authorxml.append(' <button name="button_prev"> jump menu ' + str(menu_count-1) + '; </button>\n')
if menu_count < job.nr_menus:
authorxml.append(' <button name="button_next"> jump menu ' + str(menu_count+1) + '; </button>\n')
menuoutputfilename = job.workspace+"/dvdmenu"+str(menu_count)+".mpg"
authorxml.append(' <vob file="' + menuoutputfilename + '" pause="inf"/>\n')
authorxml.append(' </pgc>\n')
else:
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + job.project.settings.vmgm.getValue() + '" />\n' )
authorxml.append(' <post> jump titleset 1 title 1; </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' </vmgm>\n')
for i in range( nr_titles ):
title = job.project.titles[i]
authorxml.append(' <titleset>\n')
authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
authorxml.append(' <pgc entry="root">\n')
authorxml.append(' <pre>\n')
authorxml.append(' jump vmgm menu entry title;\n')
authorxml.append(' </pre>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </menus>\n')
authorxml.append(' <titles>\n')
for audiotrack in title.properties.audiotracks:
active = audiotrack.active.getValue()
if active:
format = audiotrack.format.getValue()
language = audiotrack.language.getValue()
audio_tag = ' <audio format="%s"' % format
if language != "nolang":
audio_tag += ' lang="%s"' % language
audio_tag += ' />\n'
authorxml.append(audio_tag)
aspect = title.properties.aspect.getValue()
video_tag = ' <video aspect="'+aspect+'"'
if title.properties.widescreen.getValue() == "4:3":
video_tag += ' widescreen="'+title.properties.widescreen.getValue()+'"'
video_tag += ' />\n'
authorxml.append(video_tag)
chapters = ','.join(title.getChapterMarks())
title_no = i+1
title_filename = job.workspace + "/dvd_title_%d.mpg" % (title_no)
if job.menupreview:
LinkTS(job, job.project.settings.vmgm.getValue(), title_filename)
else:
MakeFifoNode(job, title_no)
if mode.endswith("linked") and title_no < nr_titles:
post_tag = "jump titleset %d title 1;" % ( title_no+1 )
elif mode.startswith("menu"):
post_tag = "call vmgm menu 1;"
else: post_tag = ""
authorxml.append(' <pgc>\n')
authorxml.append(' <vob file="' + title_filename + '" chapters="' + chapters + '" />\n')
authorxml.append(' <post> ' + post_tag + ' </post>\n')
authorxml.append(' </pgc>\n')
authorxml.append(' </titles>\n')
authorxml.append(' </titleset>\n')
authorxml.append(' </dvdauthor>\n')
f = open(job.workspace+"/dvdauthor.xml", "w")
for x in authorxml:
f.write(x)
f.close()
def getISOfilename(isopath, volName):
from Tools.Directories import fileExists
i = 0
filename = isopath+'/'+volName+".iso"
while fileExists(filename):
i = i+1
filename = isopath+'/'+volName + str(i).zfill(3) + ".iso"
return filename
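# Illustrative naming (hypothetical paths): if "/hdd/MyDVD.iso" already exists, the next
# candidates tried are "/hdd/MyDVD001.iso", "/hdd/MyDVD002.iso", and so on.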
class DVDJob(Job):
def __init__(self, project, menupreview=False):
Job.__init__(self, "DVDBurn Job")
self.project = project
from time import strftime
from Tools.Directories import SCOPE_HDD, resolveFilename, createDir
new_workspace = resolveFilename(SCOPE_HDD) + "tmp/" + strftime("%Y%m%d%H%M%S")
createDir(new_workspace, True)
self.workspace = new_workspace
self.project.workspace = self.workspace
self.menupreview = menupreview
self.conduct()
def conduct(self):
CheckDiskspaceTask(self)
if self.project.settings.authormode.getValue().startswith("menu") or self.menupreview:
Menus(self)
if self.project.settings.titlesetmode.getValue() == "multi":
CreateAuthoringXML_multiset(self)
else:
CreateAuthoringXML_singleset(self)
DVDAuthorTask(self)
nr_titles = len(self.project.titles)
if self.menupreview:
PreviewTask(self, self.workspace + "/dvd/VIDEO_TS/")
else:
hasProjectX = os.path.exists('/usr/bin/projectx')
print "[DVDJob] hasProjectX=", hasProjectX
for self.i in range(nr_titles):
self.title = self.project.titles[self.i]
link_name = self.workspace + "/source_title_%d.ts" % (self.i+1)
title_filename = self.workspace + "/dvd_title_%d.mpg" % (self.i+1)
LinkTS(self, self.title.inputfile, link_name)
if not hasProjectX:
ReplexTask(self, outputfile=title_filename, inputfile=link_name).end = self.estimateddvdsize
else:
demux = DemuxTask(self, link_name)
self.mplextask = MplexTask(self, outputfile=title_filename, demux_task=demux)
self.mplextask.end = self.estimateddvdsize
RemoveESFiles(self, demux)
WaitForResidentTasks(self)
PreviewTask(self, self.workspace + "/dvd/VIDEO_TS/")
output = self.project.settings.output.getValue()
volName = self.project.settings.name.getValue()
if output == "dvd":
self.name = _("Burn DVD")
tool = "growisofs"
burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD(), "-dvd-compat" ]
if self.project.size/(1024*1024) > self.project.MAX_SL:
burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
elif output == "iso":
self.name = _("Create DVD-ISO")
tool = "genisoimage"
isopathfile = getISOfilename(self.project.settings.isopath.getValue(), volName)
burnargs = [ "-o", isopathfile ]
burnargs += [ "-dvd-video", "-publisher", "Dreambox", "-V", volName, self.workspace + "/dvd" ]
BurnTask(self, burnargs, tool)
RemoveDVDFolder(self)
class DVDdataJob(Job):
def __init__(self, project):
Job.__init__(self, "Data DVD Burn")
self.project = project
from time import strftime
from Tools.Directories import SCOPE_HDD, resolveFilename, createDir
new_workspace = resolveFilename(SCOPE_HDD) + "tmp/" + strftime("%Y%m%d%H%M%S") + "/dvd/"
createDir(new_workspace, True)
self.workspace = new_workspace
self.project.workspace = self.workspace
self.conduct()
def conduct(self):
if self.project.settings.output.getValue() == "iso":
CheckDiskspaceTask(self)
nr_titles = len(self.project.titles)
for self.i in range(nr_titles):
title = self.project.titles[self.i]
filename = title.inputfile.rstrip("/").rsplit("/",1)[1]
link_name = self.workspace + filename
LinkTS(self, title.inputfile, link_name)
CopyMeta(self, title.inputfile)
output = self.project.settings.output.getValue()
volName = self.project.settings.name.getValue()
tool = "growisofs"
if output == "dvd":
self.name = _("Burn DVD")
burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD(), "-dvd-compat" ]
if self.project.size/(1024*1024) > self.project.MAX_SL:
burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
elif output == "iso":
tool = "genisoimage"
self.name = _("Create DVD-ISO")
isopathfile = getISOfilename(self.project.settings.isopath.getValue(), volName)
burnargs = [ "-o", isopathfile ]
if self.project.settings.dataformat.getValue() == "iso9660_1":
burnargs += ["-iso-level", "1" ]
elif self.project.settings.dataformat.getValue() == "iso9660_4":
burnargs += ["-iso-level", "4", "-allow-limited-size" ]
elif self.project.settings.dataformat.getValue() == "udf":
burnargs += ["-udf", "-allow-limited-size" ]
burnargs += [ "-publisher", "Dreambox", "-V", volName, "-follow-links", self.workspace ]
BurnTask(self, burnargs, tool)
RemoveDVDFolder(self)
class DVDisoJob(Job):
def __init__(self, project, imagepath):
Job.__init__(self, _("Burn DVD"))
self.project = project
self.menupreview = False
from Tools.Directories import getSize
if imagepath.endswith(".iso"):
PreviewTask(self, imagepath)
burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD() + '='+imagepath, "-dvd-compat" ]
if getSize(imagepath)/(1024*1024) > self.project.MAX_SL:
burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
else:
PreviewTask(self, imagepath + "/VIDEO_TS/")
volName = self.project.settings.name.getValue()
burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD(), "-dvd-compat" ]
if getSize(imagepath)/(1024*1024) > self.project.MAX_SL:
burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
burnargs += [ "-dvd-video", "-publisher", "Dreambox", "-V", volName, imagepath ]
tool = "growisofs"
BurnTask(self, burnargs, tool)
| gpl-2.0 | 7,395,267,085,810,911,000 | 57,300,064,805,164,960 | 36.132323 | 225 | 0.671146 | false |
wolverineav/neutron | neutron/tests/fullstack/base.py | 1 | 3133 | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_db.sqlalchemy import test_base
from neutron.db.migration import cli as migration
from neutron.tests.common import base
from neutron.tests.fullstack.resources import client as client_resource
from neutron.tests import tools
class BaseFullStackTestCase(base.MySQLTestCase):
"""Base test class for full-stack tests."""
def setUp(self, environment):
super(BaseFullStackTestCase, self).setUp()
# NOTE(ihrachys): seed should be reset before environment fixture below
# since the latter starts services that may rely on generated port
# numbers
tools.reset_random_seed()
self.create_db_tables()
self.environment = environment
self.environment.test_name = self.get_name()
self.useFixture(self.environment)
self.client = self.environment.neutron_server.client
self.safe_client = self.useFixture(
client_resource.ClientFixture(self.client))
def get_name(self):
class_name, test_name = self.id().split(".")[-2:]
return "%s.%s" % (class_name, test_name)
def create_db_tables(self):
"""Populate the new database.
MySQLTestCase creates a new database for each test, but these need to
be populated with the appropriate tables. Before we can do that, we
must change the 'connection' option which the Neutron code knows to
look at.
Currently, the username and password options are hard-coded by
oslo.db and neutron/tests/functional/contrib/gate_hook.sh. Also,
we only support MySQL for now, but the groundwork for adding Postgres
is already laid.
"""
conn = ("mysql+pymysql://%(username)s:%(password)s"
"@127.0.0.1/%(db_name)s" % {
'username': test_base.DbFixture.USERNAME,
'password': test_base.DbFixture.PASSWORD,
'db_name': self.engine.url.database})
alembic_config = migration.get_neutron_config()
alembic_config.neutron_config = cfg.CONF
self.original_conn = cfg.CONF.database.connection
self.addCleanup(self._revert_connection_address)
cfg.CONF.set_override('connection', conn, group='database')
migration.do_alembic_command(alembic_config, 'upgrade', 'heads')
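        # Illustration only (credentials come from oslo.db's test fixture): the resulting
        # URL has the form mysql+pymysql://<user>:<password>@127.0.0.1/<per-test database>.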
def _revert_connection_address(self):
cfg.CONF.set_override('connection',
self.original_conn,
group='database')
| apache-2.0 | 2,131,294,345,193,305,600 | -2,466,418,474,685,378,600 | 40.773333 | 79 | 0.662624 | false |
amenonsen/ansible | lib/ansible/modules/network/fortios/fortios_router_rip.py | 14 | 25268 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_router_rip
short_description: Configure RIP in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify router feature and rip category.
      Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
router_rip:
description:
- Configure RIP.
default: null
type: dict
suboptions:
default_information_originate:
description:
- Enable/disable generation of default route.
type: str
choices:
- enable
- disable
default_metric:
description:
- Default metric.
type: int
distance:
description:
- distance
type: list
suboptions:
access_list:
description:
- Access list for route destination. Source router.access-list.name.
type: str
distance:
description:
- Distance (1 - 255).
type: int
id:
description:
- Distance ID.
required: true
type: int
prefix:
description:
- Distance prefix.
type: str
distribute_list:
description:
- Distribute list.
type: list
suboptions:
direction:
description:
- Distribute list direction.
type: str
choices:
- in
- out
id:
description:
- Distribute list ID.
required: true
type: int
interface:
description:
- Distribute list interface name. Source system.interface.name.
type: str
listname:
description:
- Distribute access/prefix list name. Source router.access-list.name router.prefix-list.name.
type: str
status:
description:
- status
type: str
choices:
- enable
- disable
garbage_timer:
description:
- Garbage timer in seconds.
type: int
interface:
description:
- RIP interface configuration.
type: list
suboptions:
auth_keychain:
description:
- Authentication key-chain name. Source router.key-chain.name.
type: str
auth_mode:
description:
- Authentication mode.
type: str
choices:
- none
- text
- md5
auth_string:
description:
- Authentication string/password.
type: str
flags:
description:
- flags
type: int
name:
description:
- Interface name. Source system.interface.name.
required: true
type: str
receive_version:
description:
- Receive version.
type: str
choices:
- 1
- 2
send_version:
description:
- Send version.
type: str
choices:
- 1
- 2
send_version2_broadcast:
description:
- Enable/disable broadcast version 1 compatible packets.
type: str
choices:
- disable
- enable
split_horizon:
description:
- Enable/disable split horizon.
type: str
choices:
- poisoned
- regular
split_horizon_status:
description:
- Enable/disable split horizon.
type: str
choices:
- enable
- disable
max_out_metric:
description:
- Maximum metric allowed to output(0 means 'not set').
type: int
neighbor:
description:
- neighbor
type: list
suboptions:
id:
description:
- Neighbor entry ID.
required: true
type: int
ip:
description:
- IP address.
type: str
network:
description:
- network
type: list
suboptions:
id:
description:
- Network entry ID.
required: true
type: int
prefix:
description:
- Network prefix.
type: str
offset_list:
description:
- Offset list.
type: list
suboptions:
access_list:
description:
- Access list name. Source router.access-list.name.
type: str
direction:
description:
- Offset list direction.
type: str
choices:
- in
- out
id:
description:
- Offset-list ID.
required: true
type: int
interface:
description:
- Interface name. Source system.interface.name.
type: str
offset:
description:
- offset
type: int
status:
description:
- status
type: str
choices:
- enable
- disable
passive_interface:
description:
- Passive interface configuration.
type: list
suboptions:
name:
description:
- Passive interface name. Source system.interface.name.
required: true
type: str
recv_buffer_size:
description:
- Receiving buffer size.
type: int
redistribute:
description:
- Redistribute configuration.
type: list
suboptions:
metric:
description:
- Redistribute metric setting.
type: int
name:
description:
- Redistribute name.
required: true
type: str
routemap:
description:
- Route map name. Source router.route-map.name.
type: str
status:
description:
- status
type: str
choices:
- enable
- disable
timeout_timer:
description:
- Timeout timer in seconds.
type: int
update_timer:
description:
- Update timer in seconds.
type: int
version:
description:
- RIP version.
type: str
choices:
- 1
- 2
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure RIP.
fortios_router_rip:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
router_rip:
default_information_originate: "enable"
default_metric: "4"
distance:
-
access_list: "<your_own_value> (source router.access-list.name)"
distance: "7"
id: "8"
prefix: "<your_own_value>"
distribute_list:
-
direction: "in"
id: "12"
interface: "<your_own_value> (source system.interface.name)"
listname: "<your_own_value> (source router.access-list.name router.prefix-list.name)"
status: "enable"
garbage_timer: "16"
interface:
-
auth_keychain: "<your_own_value> (source router.key-chain.name)"
auth_mode: "none"
auth_string: "<your_own_value>"
flags: "21"
name: "default_name_22 (source system.interface.name)"
receive_version: "1"
send_version: "1"
send_version2_broadcast: "disable"
split_horizon: "poisoned"
split_horizon_status: "enable"
max_out_metric: "28"
neighbor:
-
id: "30"
ip: "<your_own_value>"
network:
-
id: "33"
prefix: "<your_own_value>"
offset_list:
-
access_list: "<your_own_value> (source router.access-list.name)"
direction: "in"
id: "38"
interface: "<your_own_value> (source system.interface.name)"
offset: "40"
status: "enable"
passive_interface:
-
name: "default_name_43 (source system.interface.name)"
recv_buffer_size: "44"
redistribute:
-
metric: "46"
name: "default_name_47"
routemap: "<your_own_value> (source router.route-map.name)"
status: "enable"
timeout_timer: "50"
update_timer: "51"
version: "1"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_router_rip_data(json):
option_list = ['default_information_originate', 'default_metric', 'distance',
'distribute_list', 'garbage_timer', 'interface',
'max_out_metric', 'neighbor', 'network',
'offset_list', 'passive_interface', 'recv_buffer_size',
'redistribute', 'timeout_timer', 'update_timer',
'version']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
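# Illustrative behaviour with made-up input (not from the module): passing
# {'default_metric': 4, 'version': None, 'bogus': 1} returns {'default_metric': 4},
# since only known, non-None options are kept.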
def underscore_to_hyphen(data):
if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
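# Illustrative behaviour (made-up data): {'default_metric': 4, 'garbage_timer': 16}
# becomes {'default-metric': 4, 'garbage-timer': 16}.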
def router_rip(data, fos):
vdom = data['vdom']
router_rip_data = data['router_rip']
filtered_data = underscore_to_hyphen(filter_router_rip_data(router_rip_data))
return fos.set('router',
'rip',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_router(data, fos):
if data['router_rip']:
resp = router_rip(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"router_rip": {
"required": False, "type": "dict", "default": None,
"options": {
"default_information_originate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"default_metric": {"required": False, "type": "int"},
"distance": {"required": False, "type": "list",
"options": {
"access_list": {"required": False, "type": "str"},
"distance": {"required": False, "type": "int"},
"id": {"required": True, "type": "int"},
"prefix": {"required": False, "type": "str"}
}},
"distribute_list": {"required": False, "type": "list",
"options": {
"direction": {"required": False, "type": "str",
"choices": ["in", "out"]},
"id": {"required": True, "type": "int"},
"interface": {"required": False, "type": "str"},
"listname": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"garbage_timer": {"required": False, "type": "int"},
"interface": {"required": False, "type": "list",
"options": {
"auth_keychain": {"required": False, "type": "str"},
"auth_mode": {"required": False, "type": "str",
"choices": ["none", "text", "md5"]},
"auth_string": {"required": False, "type": "str"},
"flags": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"receive_version": {"required": False, "type": "str",
"choices": ["1", "2"]},
"send_version": {"required": False, "type": "str",
"choices": ["1", "2"]},
"send_version2_broadcast": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"split_horizon": {"required": False, "type": "str",
"choices": ["poisoned", "regular"]},
"split_horizon_status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"max_out_metric": {"required": False, "type": "int"},
"neighbor": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"},
"ip": {"required": False, "type": "str"}
}},
"network": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"},
"prefix": {"required": False, "type": "str"}
}},
"offset_list": {"required": False, "type": "list",
"options": {
"access_list": {"required": False, "type": "str"},
"direction": {"required": False, "type": "str",
"choices": ["in", "out"]},
"id": {"required": True, "type": "int"},
"interface": {"required": False, "type": "str"},
"offset": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"passive_interface": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"recv_buffer_size": {"required": False, "type": "int"},
"redistribute": {"required": False, "type": "list",
"options": {
"metric": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"routemap": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"timeout_timer": {"required": False, "type": "int"},
"update_timer": {"required": False, "type": "int"},
"version": {"required": False, "type": "str",
"choices": ["1", "2"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_router(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_router(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,354,939,234,972,852,000 | 403,571,408,760,903,230 | 36.049853 | 121 | 0.425004 | false |
rspavel/spack | var/spack/repos/builtin.mock/packages/garply/package.py | 3 | 4269 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
import sys
class Garply(Package):
"""Toy package for testing dependencies"""
homepage = "https://www.example.com"
url = "https://github.com/gartung/garply/archive/v3.0.0.tar.gz"
version('3.0.0',
sha256='534ac8ba7a6fed7e8bbb543bd43ca04999e65337445a531bd296939f5ac2f33d')
def install(self, spec, prefix):
        garply_h = '''#ifndef GARPLY_H_
#define GARPLY_H_
class Garply
{
private:
static const int version_major;
static const int version_minor;
public:
Garply();
int get_version() const;
int garplinate() const;
};
#endif // GARPLY_H_
'''
garply_cc = '''#include "garply.h"
#include "garply_version.h"
#include <iostream>
const int Garply::version_major = garply_version_major;
const int Garply::version_minor = garply_version_minor;
Garply::Garply() {}
int
Garply::get_version() const
{
return 10 * version_major + version_minor;
}
int
Garply::garplinate() const
{
std::cout << "Garply::garplinate version " << get_version()
<< " invoked" << std::endl;
std::cout << "Garply config dir = %s" << std::endl;
return get_version();
}
'''
garplinator_cc = '''#include "garply.h"
#include <iostream>
int
main()
{
Garply garply;
garply.garplinate();
return 0;
}
'''
garply_version_h = '''const int garply_version_major = %s;
const int garply_version_minor = %s;
'''
mkdirp('%s/garply' % prefix.include)
mkdirp('%s/garply' % self.stage.source_path)
with open('%s/garply_version.h' % self.stage.source_path, 'w') as f:
f.write(garply_version_h % (self.version[0], self.version[1:]))
with open('%s/garply/garply.h' % self.stage.source_path, 'w') as f:
f.write(garply_h)
with open('%s/garply/garply.cc' % self.stage.source_path, 'w') as f:
f.write(garply_cc % prefix.config)
with open('%s/garply/garplinator.cc' %
self.stage.source_path, 'w') as f:
f.write(garplinator_cc)
gpp = which('/usr/bin/g++')
if sys.platform == 'darwin':
gpp = which('/usr/bin/clang++')
gpp('-Dgarply_EXPORTS',
'-I%s' % self.stage.source_path,
'-O2', '-g', '-DNDEBUG', '-fPIC',
'-o', 'garply.cc.o',
'-c', '%s/garply/garply.cc' % self.stage.source_path)
gpp('-Dgarply_EXPORTS',
'-I%s' % self.stage.source_path,
'-O2', '-g', '-DNDEBUG', '-fPIC',
'-o', 'garplinator.cc.o',
'-c', '%s/garply/garplinator.cc' % self.stage.source_path)
if sys.platform == 'darwin':
gpp('-fPIC', '-O2', '-g', '-DNDEBUG', '-dynamiclib',
'-Wl,-headerpad_max_install_names', '-o', 'libgarply.dylib',
'-install_name', '@rpath/libgarply.dylib',
'garply.cc.o')
gpp('-O2', '-g', '-DNDEBUG', '-Wl,-search_paths_first',
'-Wl,-headerpad_max_install_names',
'garplinator.cc.o', '-o', 'garplinator',
'-Wl,-rpath,%s' % prefix.lib64,
'libgarply.dylib')
mkdirp(prefix.lib64)
copy('libgarply.dylib', '%s/libgarply.dylib' % prefix.lib64)
else:
gpp('-fPIC', '-O2', '-g', '-DNDEBUG', '-shared',
'-Wl,-soname,libgarply.so',
'-o', 'libgarply.so', 'garply.cc.o')
gpp('-O2', '-g', '-DNDEBUG', '-rdynamic',
'garplinator.cc.o', '-o', 'garplinator',
'-Wl,-rpath,%s' % prefix.lib64,
'libgarply.so')
mkdirp(prefix.lib64)
copy('libgarply.so', '%s/libgarply.so' % prefix.lib64)
copy('garplinator', '%s/garplinator' % prefix.lib64)
copy('%s/garply/garply.h' % self.stage.source_path,
'%s/garply/garply.h' % prefix.include)
mkdirp(prefix.bin)
copy('garply_version.h', '%s/garply_version.h' % prefix.bin)
os.symlink('%s/garplinator' % prefix.lib64,
'%s/garplinator' % prefix.bin)
| lgpl-2.1 | 8,176,925,950,174,025,000 | -5,615,342,186,903,008,000 | 32.093023 | 86 | 0.555399 | false |
foursquare/pants | tests/python/pants_test/engine/legacy/test_graph_integration.py | 1 | 5480 | # coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.option.scope import GLOBAL_SCOPE_CONFIG_SECTION
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class GraphIntegrationTest(PantsRunIntegrationTest):
_SOURCES_TARGET_BASE = 'testprojects/src/python/sources'
_SOURCES_ERR_MSGS = {
'missing-globs': ("globs('*.a')", ['*.a']),
'missing-rglobs': ("rglobs('*.a')", ['**/*.a']),
'missing-zglobs': ("zglobs('**/*.a')", ['**/*.a']),
'missing-literal-files': (
"['nonexistent_test_file.txt', 'another_nonexistent_file.txt']", [
'nonexistent_test_file.txt',
'another_nonexistent_file.txt',
]),
'some-missing-some-not': ("globs('*.txt', '*.rs')", ['*.rs']),
'overlapping-globs': ("globs('sources.txt', '*.txt')", ['*.txt']),
}
_WARN_FMT = "WARN] In target {base}:{name} with {desc}={glob}: glob pattern '{as_zsh_glob}' did not match any files."
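  # Rendered example, derived from the sample data above: "WARN] In target
  # testprojects/src/python/sources:missing-globs with sources=globs('*.a'):
  # glob pattern '*.a' did not match any files."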
def _list_target_check_warnings_sources(self, target_name):
target_full = '{}:{}'.format(self._SOURCES_TARGET_BASE, target_name)
glob_str, expected_globs = self._SOURCES_ERR_MSGS[target_name]
pants_run = self.run_pants(['list', target_full], config={
GLOBAL_SCOPE_CONFIG_SECTION: {
'glob_expansion_failure': 'warn',
},
})
self.assert_success(pants_run)
for as_zsh_glob in expected_globs:
warning_msg = self._WARN_FMT.format(
base=self._SOURCES_TARGET_BASE,
name=target_name,
desc='sources',
glob=glob_str,
as_zsh_glob=as_zsh_glob)
self.assertIn(warning_msg, pants_run.stderr_data)
_ERR_TARGETS = {
'testprojects/src/python/sources:some-missing-some-not': [
"globs('*.txt', '*.rs')",
"Snapshot(PathGlobs(include=(u\'testprojects/src/python/sources/*.txt\', u\'testprojects/src/python/sources/*.rs\'), exclude=(), glob_match_error_behavior<=GlobMatchErrorBehavior>=GlobMatchErrorBehavior(failure_behavior=error)))",
"Globs did not match. Excludes were: []. Unmatched globs were: [\"testprojects/src/python/sources/*.rs\"].",
],
'testprojects/src/java/org/pantsbuild/testproject/bundle:missing-bundle-fileset': [
"['a/b/file1.txt']",
"RGlobs('*.aaaa', '*.bbbb')",
"Globs('*.aaaa')",
"ZGlobs('**/*.abab')",
"['file1.aaaa', 'file2.aaaa']",
"Snapshot(PathGlobs(include=(u\'testprojects/src/java/org/pantsbuild/testproject/bundle/*.aaaa\',), exclude=(), glob_match_error_behavior<=GlobMatchErrorBehavior>=GlobMatchErrorBehavior(failure_behavior=error)))",
"Globs did not match. Excludes were: []. Unmatched globs were: [\"testprojects/src/java/org/pantsbuild/testproject/bundle/*.aaaa\"].",
]
}
def _list_target_check_error(self, target_name):
expected_excerpts = self._ERR_TARGETS[target_name]
pants_run = self.run_pants(['list', target_name], config={
GLOBAL_SCOPE_CONFIG_SECTION: {
'glob_expansion_failure': 'error',
},
})
self.assert_failure(pants_run)
self.assertIn(AddressLookupError.__name__, pants_run.stderr_data)
for excerpt in expected_excerpts:
self.assertIn(excerpt, pants_run.stderr_data)
@unittest.skip('Skipped to expedite landing #5769: see #5863')
def test_missing_sources_warnings(self):
for target_name in self._SOURCES_ERR_MSGS.keys():
self._list_target_check_warnings_sources(target_name)
@unittest.skip('Skipped to expedite landing #5769: see #5863')
def test_existing_sources(self):
target_full = '{}:text'.format(self._SOURCES_TARGET_BASE)
pants_run = self.run_pants(['list', target_full], config={
GLOBAL_SCOPE_CONFIG_SECTION: {
'glob_expansion_failure': 'warn',
},
})
self.assert_success(pants_run)
self.assertNotIn("WARN]", pants_run.stderr_data)
@unittest.skip('Skipped to expedite landing #5769: see #5863')
def test_missing_bundles_warnings(self):
target_full = '{}:{}'.format(self._BUNDLE_TARGET_BASE, self._BUNDLE_TARGET_NAME)
pants_run = self.run_pants(['list', target_full], config={
GLOBAL_SCOPE_CONFIG_SECTION: {
'glob_expansion_failure': 'warn',
},
})
self.assert_success(pants_run)
for glob_str, expected_globs in self._BUNDLE_ERR_MSGS:
for as_zsh_glob in expected_globs:
warning_msg = self._WARN_FMT.format(
base=self._BUNDLE_TARGET_BASE,
name=self._BUNDLE_TARGET_NAME,
desc='fileset',
glob=glob_str,
as_zsh_glob=as_zsh_glob)
self.assertIn(warning_msg, pants_run.stderr_data)
@unittest.skip('Skipped to expedite landing #5769: see #5863')
def test_existing_bundles(self):
target_full = '{}:mapper'.format(self._BUNDLE_TARGET_BASE)
pants_run = self.run_pants(['list', target_full], config={
GLOBAL_SCOPE_CONFIG_SECTION: {
'glob_expansion_failure': 'warn',
},
})
self.assert_success(pants_run)
self.assertNotIn("WARN]", pants_run.stderr_data)
def test_error_message(self):
self._list_target_check_error('testprojects/src/python/sources:some-missing-some-not')
self._list_target_check_error(
'testprojects/src/java/org/pantsbuild/testproject/bundle:missing-bundle-fileset')
| apache-2.0 | -3,169,150,927,020,087,000 | -593,830,312,705,931,000 | 39 | 236 | 0.657117 | false |
maK-/weevely3 | testsuite/test_file_enum.py | 14 | 4630 | from testfixtures import log_capture
from testsuite.base_fs import BaseFilesystem
from testsuite import config
from core.sessions import SessionURL
from core import modules
import utils
from core import messages
import subprocess
import tempfile
import os
class FileEnum(BaseFilesystem):
def setUp(self):
self.session = SessionURL(
self.url,
self.password,
volatile = True
)
modules.load_modules(self.session)
# Create the folder tree
self.folders_abs, self.folders_rel = self.populate_folders()
self.files_abs, self.files_rel = self.populate_files(
self.folders_abs,
[ 'executable', 'writable', 'write-executable', 'readable' ]
)
# Change mode of the first file to ---x--x--x 0111 execute
self.check_call(
config.cmd_env_chmod_s_s % ('0111', self.files_abs[0]),
shell=True)
# Change mode of the second file to --w--w--w- 0222 write
self.check_call(
config.cmd_env_chmod_s_s % ('0222', self.files_abs[1]),
shell=True)
# Change mode of the third file to 0000
self.check_call(
config.cmd_env_chmod_s_s % ('0000', self.files_abs[2]),
shell=True)
self.run_argv = modules.loaded['file_enum'].run_argv
def tearDown(self):
# Reset recursively all the permissions to 0777
self.check_call(
config.cmd_env_chmod_s_s % ('-R 0777', self.folders_abs[0]),
shell=True)
for folder in reversed(self.folders_abs):
self.check_call(
config.cmd_env_remove_s % (self.files_abs.pop()),
shell=True)
self.check_call(
config.cmd_env_rmdir_s % (folder),
shell=True)
def test_file_enum(self):
        # Enum self.files_rel[:3] passed with arguments
self.assertItemsEqual(self.run_argv( self.files_rel[:3] ), {
self.files_rel[0] : [ 'ex' ],
self.files_rel[1] : [ 'ew' ],
self.files_rel[2] : [ 'e' ]
})
        # Enum self.files_rel[:3] + bogus passed with arguments
self.assertItemsEqual(self.run_argv( self.files_rel[:3] + [ 'bogus' ] ), {
self.files_rel[0] : [ 'ex' ],
self.files_rel[1] : [ 'ew' ],
self.files_rel[2] : [ 'e' ]
})
        # Enum self.files_rel[:3] + bogus passed with arguments and -print
self.assertItemsEqual(self.run_argv( self.files_rel[:3] + [ 'bogus', '-print' ] ), {
self.files_rel[0] : [ 'ex' ],
self.files_rel[1] : [ 'ew' ],
self.files_rel[2] : [ 'e' ],
'bogus' : []
})
def test_file_enum_lpath(self):
        # Enum self.files_rel[:3] passed with lfile
temp_file = tempfile.NamedTemporaryFile()
temp_file.write('\n'.join(self.files_rel[:3]))
temp_file.flush()
self.assertItemsEqual(self.run_argv( [ '-lpath-list', temp_file.name ] ), {
self.files_rel[0] : [ 'ex' ],
self.files_rel[1] : [ 'ew' ],
self.files_rel[2] : [ 'e' ]
})
temp_file.close()
        # Enum self.files_rel[:3] + bogus passed with lfile
temp_file = tempfile.NamedTemporaryFile()
temp_file.write('\n'.join(self.files_rel[:3] + [ 'bogus' ]))
temp_file.flush()
self.assertItemsEqual(self.run_argv( [ '-lpath-list', temp_file.name ] ), {
self.files_rel[0] : [ 'ex' ],
self.files_rel[1] : [ 'ew' ],
self.files_rel[2] : [ 'e' ]
})
temp_file.close()
        # Enum self.files_rel[:3] + bogus passed with lfile and -print
temp_file = tempfile.NamedTemporaryFile()
temp_file.write('\n'.join(self.files_rel[:3] + [ 'bogus' ]))
temp_file.flush()
self.assertItemsEqual(self.run_argv( [ '-lpath-list', temp_file.name, '-print' ] ), {
self.files_rel[0] : [ 'ex' ],
self.files_rel[1] : [ 'ew' ],
self.files_rel[2] : [ 'e' ],
'bogus' : []
})
temp_file.close()
@log_capture()
def test_err(self, log_captured):
self.assertIsNone(self.run_argv( [ '-lpath-list', 'bogus' ] ))
self.assertEqual(messages.generic.error_loading_file_s_s[:19],
log_captured.records[-1].msg[:19])
| gpl-3.0 | -5,039,515,699,618,924,000 | -7,338,914,672,083,018,000 | 34.891473 | 93 | 0.512311 | false |
yao-matrix/mLearning | kaggle/distracted_driver/app/train.py | 2 | 3468 | #!/usr/bin/env python
# coding=utf-8
import os
import cv2
import glob
import datetime
import logging
import numpy as np
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from ml_utils import split_cv, save_model
current_dir = os.path.dirname(os.path.abspath(__file__))
log_path = os.path.join(current_dir, os.path.pardir, 'log', datetime.date.today().strftime('%Y%m%d') + '.log')
logger = logging.getLogger('train')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_path)
ch = logging.StreamHandler()
fh.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s][%(name)s][%(levelname)s]: %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
def load_img(img_path, img_rows, img_cols):
# read image to a grayscale buffer
# print img_path
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
rows, cols = img.shape
# print rows, cols
# print img_rows, img_cols
    resized = cv2.resize(img, (img_cols, img_rows), interpolation = cv2.INTER_CUBIC)
return resized
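# --- Hedged usage sketch (not part of the original script) ---
# Assuming a grayscale driver image exists at the hypothetical path below,
# load_img returns an (img_rows, img_cols) uint8 array ready to append to X_train:
#   sample = load_img('/workshop2/data/driver-distraction/train/c0/img_1.jpg', 96, 128)
#   sample.shape   # -> (96, 128)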
def load_train(img_rows, img_cols):
X_train = []
Y_train = []
i = 0
for j in range(10):
path = os.path.join('/workshop2/data/driver-distraction', 'train', 'c' + str(j), '*.jpg')
files = glob.glob(path)
for fl in files:
i += 1
# print fl
img = load_img(fl, img_rows, img_cols)
X_train.append(img)
Y_train.append(j)
logger.info("%d samples in total" % (i))
return X_train, Y_train
img_rows = 96
img_cols = 128
batch_size = 64
nb_classes = 10
nb_epoch = 2
nb_filters = 32
nb_pool = 2
nb_conv = 3
if __name__ == "__main__":
logger.info("start training")
# read training data
train_data, train_labels = load_train(img_rows, img_cols)
train_data = np.array(train_data, dtype = np.uint8)
train_labels = np.array(train_labels, dtype = np.uint8)
train_data = train_data.reshape(train_data.shape[0], 1, img_rows, img_cols)
train_labels = np_utils.to_categorical(train_labels, nb_classes)
train_data = train_data.astype('float32')
train_data /= 127.5
train_data -= 1.0
logger.info("read training data complete")
# split for cross validation
train, train_label, validation, validation_label = split_cv(train_data, train_labels)
logger.info("data split complete")
# build stacking layers
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode = 'valid', input_shape = (1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer = 'adadelta')
model.fit(train, train_label, batch_size = batch_size, nb_epoch = nb_epoch, verbose = 1, validation_data = (validation, validation_label))
logger.info("model training complete")
score = model.evaluate(validation, validation_label, verbose = 0)
logger.info("validation score: %f" % (score))
save_model(model)
logger.info("model saved")
| apache-2.0 | -6,639,471,650,530,185,000 | -2,720,981,249,538,491,000 | 27.661157 | 140 | 0.693772 | false |
hidenori-t/chainer | chainer/optimizer.py | 7 | 8978 | import math
import numpy
from chainer import cuda
# TODO(delta2323): Make it public function and move it to common directory.
def _sqnorm(x):
with cuda.get_device(x):
x = x.ravel()
return float(x.dot(x))
class Optimizer(object):
"""Base class of all numerical optimizers.
Optimizer is set up with references to parameters and gradients, and
then on every call of :meth:`update`, it updates parameters based on
corresponding gradients. Optimizer implementations must override
:meth:`update_one` method, which updates one parameter array using the
corresponding gradient array.
Optimizer can optionally use state for each parameter/gradient pair. It is
initialized by :meth:`init_state` method at set up.
Attributes:
t (int): Number of update steps. It can be used in :meth:`update_one`
implementation, where :attr:`t` is incremented beforehand.
"""
def setup(self, params_grads):
"""Prepares states for all given parameter/gradient pairs.
Args:
params_grads: FunctionSet or tuple (pair) of two tuples.
For tuple, the first element is a tuple of parameter arrays,
and the second is a tuple of corresponding gradient arrays.
"""
if hasattr(params_grads, 'parameters') and \
hasattr(params_grads, 'gradients'):
params = getattr(params_grads, 'parameters')
grads = getattr(params_grads, 'gradients')
elif isinstance(params_grads, tuple):
params = params_grads[0]
grads = params_grads[1]
else:
msg = ("'params_grads' must have 'parameters' and 'gradients'"
" attributes or tuples, {0} is given")
            raise ValueError(msg.format(type(params_grads)))
self.t = 0
self.tuples = []
for p, g in zip(params, grads):
with cuda.get_device(p):
state = self.init_state(p, g)
self.tuples.append((p, g, state))
def init_state(self, param, grad):
"""Returns the initial state for given parameter and gradient.
Default implementation delegates the procedure to
:meth:`init_state_cpu` or :meth:`init_state_gpu` depending on the type
of ``param``.
Args:
param: Parameter array.
grad: Gradient array corresponding to ``param``.
Returns:
Initial state value.
.. warning::
Note that, on every call of :meth:`update_one`, the state value
is passed by value and then the method updates its content, so
            the state must be a reference. Especially, one cannot use a
value of built-in numeric type. If the state is one scalar
value, it is recommended to use a zero-dimensional array, i.e.
:class:`numpy.ndarray` with shape ``()``.
"""
if isinstance(param, cuda.ndarray):
return self.init_state_gpu(param, grad)
return self.init_state_cpu(param, grad)
def init_state_cpu(self, param, grad):
"""Returns the initial state for given parameter and gradient on GPU.
Args:
param (numpy.ndarray): Parameter array.
grad (numpy.ndarray): Gradient array.
Returns:
Initial state value.
.. seealso:: :meth:`init_state`, :meth:`init_state_gpu`
"""
return None
def init_state_gpu(self, param, grad):
"""Returns the initial state for given parameter and gradient on CPU.
Args:
param (cupy.ndarray): Parameter array.
grad (cupy.ndarray): Gradient array.
Returns:
Initial state value.
        .. seealso:: :meth:`init_state`, :meth:`init_state_cpu`
"""
return None
def zero_grads(self):
"""Fills all gradient arrays by zeros.
This method should be call before backprop takes place, since
gradients are accumulated on backprop.
"""
for _, g, _ in self.tuples:
if isinstance(g, cuda.ndarray):
with cuda.get_device(g):
g.fill(0)
else:
g.fill(0)
def compute_grads_norm(self):
"""Computes the norm of whole gradients.
Returns:
float: L2 norm of whole gradients, i.e. square root of sum of
square of all gradient elements.
.. warning::
This method returns a CPU-computed value, which means that this
method synchronizes between CPU and GPU if at least one of the
gradients reside on the GPU.
"""
# TODO(beam2d): Make it asynchronous to CPU when gradients exist on GPU
sqnorm = 0
for _, g, _ in self.tuples:
sqnorm += _sqnorm(g)
return math.sqrt(sqnorm)
def clip_grads(self, maxnorm):
"""Clips the norm of whole gradients up to given threshold.
Args:
maxnorm (float): Threshold of gradient L2 norm.
.. seealso::
:meth:`compute_grads_norm`
It uses this method to compute the gradient norm to be clipped.
"""
norm = self.compute_grads_norm()
if norm > maxnorm:
ratio = maxnorm / norm
for _, g, _ in self.tuples:
with cuda.get_device(g):
g *= ratio
def weight_decay(self, decay):
"""Applies weight decay to the parameter/gradient pairs.
Args:
decay (float): Coefficient of weight decay
"""
for p, g, _ in self.tuples:
if isinstance(p, cuda.ndarray):
with cuda.get_device(p):
cuda.elementwise('T p, T decay', 'T g',
'g += decay * p',
'weight_decay')(p, decay, g)
else:
g += decay * p
def accumulate_grads(self, grads):
"""Accumulates gradients from other source.
This method just adds given gradient arrays to gradients that this
optimizer holds. It is typically used in data-parallel optimization,
where gradients for different shards are computed in parallel and
aggregated by this method. This method correctly treats multiple GPU
devices.
Args:
grads (Iterable): Iterable of gradient arrays to be accumulated.
"""
for (_, g_dst, _), g_src in zip(self.tuples, grads):
if isinstance(g_dst, numpy.ndarray):
g_dst += cuda.to_cpu(g_src)
continue
with cuda.get_device(g_dst):
if (isinstance(g_src, cuda.ndarray) and
g_dst.gpudata.device != g_src.gpudata.device):
g_dst += cuda.copy(g_src, out_device=g_dst.gpudata.device)
else:
g_dst += cuda.to_gpu(g_src)
def update(self):
"""Updates all parameters and states using corresponding gradients.
This method iteratively calls :meth:`update_one` for each parameter/
gradient/state tuple. Beforehand, :attr:`t` attribute is incremented.
"""
self.t += 1
for p, g, s in self.tuples:
with cuda.get_device(p):
self.update_one(p, g, s)
def update_one(self, param, grad, state):
"""Updates a parameter array and its state using given gradient.
The default implementation delegates the procedure to
:meth:`update_one_cpu` or :meth:`update_one_gpu` depending on the type
        of the parameter array. Optimizer implementation must override these
type-specific methods or this :meth:`update_one` method directly.
Args:
param: Parameter array.
grad: Gradient array.
state: State value.
.. seealso:: :meth:`update_one_cpu`, :meth:`update_one_gpu`
"""
if isinstance(param, cuda.ndarray):
self.update_one_gpu(param, grad, state)
else:
self.update_one_cpu(param, grad, state)
def update_one_cpu(self, param, grad, state):
"""Updates a parameter array and its state using given gradient on CPU.
Args:
param (numpy.ndarray): Parameter array.
grad (numpy.ndarray): Gradient array.
state: State value.
.. seealso:: :meth:`update_one`, :meth:`update_one_gpu`
"""
raise NotImplementedError()
def update_one_gpu(self, param, grad, state):
"""Updates a parameter array and its state using given gradient on GPU.
Args:
param (cupy.ndarray): Parameter array.
grad (cupy.ndarray): Gradient array.
state: State value.
.. seealso:: :meth:`update_one`, :meth:`update_one_cpu`
"""
raise NotImplementedError()
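# --- Hedged usage sketch (not part of the original Chainer source) ---
# A minimal CPU-only subclass illustrating the contract described above:
# ``setup()`` accepts a (params, grads) pair of tuples and ``update()``
# dispatches to ``update_one_cpu()`` for every parameter/gradient pair.
# The class name and the learning-rate value are assumptions for illustration.
class _PlainSGD(Optimizer):
    def __init__(self, lr=0.01):
        self.lr = lr
    def update_one_cpu(self, param, grad, state):
        # in-place vanilla gradient descent step
        param -= self.lr * grad
if __name__ == '__main__':
    params = (numpy.zeros(3), numpy.ones((2, 2)))
    grads = (numpy.ones(3), numpy.full((2, 2), 0.5))
    opt = _PlainSGD(lr=0.1)
    opt.setup((params, grads))
    opt.update()    # each parameter is updated in place: p -= 0.1 * g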
| mit | -8,983,853,737,194,627,000 | -6,125,096,293,401,667,000 | 32.007353 | 79 | 0.57407 | false |
jayme-github/headphones | lib/unidecode/x1d6.py | 248 | 3974 | data = (
's', # 0x00
't', # 0x01
'u', # 0x02
'v', # 0x03
'w', # 0x04
'x', # 0x05
'y', # 0x06
'z', # 0x07
'A', # 0x08
'B', # 0x09
'C', # 0x0a
'D', # 0x0b
'E', # 0x0c
'F', # 0x0d
'G', # 0x0e
'H', # 0x0f
'I', # 0x10
'J', # 0x11
'K', # 0x12
'L', # 0x13
'M', # 0x14
'N', # 0x15
'O', # 0x16
'P', # 0x17
'Q', # 0x18
'R', # 0x19
'S', # 0x1a
'T', # 0x1b
'U', # 0x1c
'V', # 0x1d
'W', # 0x1e
'X', # 0x1f
'Y', # 0x20
'Z', # 0x21
'a', # 0x22
'b', # 0x23
'c', # 0x24
'd', # 0x25
'e', # 0x26
'f', # 0x27
'g', # 0x28
'h', # 0x29
'i', # 0x2a
'j', # 0x2b
'k', # 0x2c
'l', # 0x2d
'm', # 0x2e
'n', # 0x2f
'o', # 0x30
'p', # 0x31
'q', # 0x32
'r', # 0x33
's', # 0x34
't', # 0x35
'u', # 0x36
'v', # 0x37
'w', # 0x38
'x', # 0x39
'y', # 0x3a
'z', # 0x3b
'A', # 0x3c
'B', # 0x3d
'C', # 0x3e
'D', # 0x3f
'E', # 0x40
'F', # 0x41
'G', # 0x42
'H', # 0x43
'I', # 0x44
'J', # 0x45
'K', # 0x46
'L', # 0x47
'M', # 0x48
'N', # 0x49
'O', # 0x4a
'P', # 0x4b
'Q', # 0x4c
'R', # 0x4d
'S', # 0x4e
'T', # 0x4f
'U', # 0x50
'V', # 0x51
'W', # 0x52
'X', # 0x53
'Y', # 0x54
'Z', # 0x55
'a', # 0x56
'b', # 0x57
'c', # 0x58
'd', # 0x59
'e', # 0x5a
'f', # 0x5b
'g', # 0x5c
'h', # 0x5d
'i', # 0x5e
'j', # 0x5f
'k', # 0x60
'l', # 0x61
'm', # 0x62
'n', # 0x63
'o', # 0x64
'p', # 0x65
'q', # 0x66
'r', # 0x67
's', # 0x68
't', # 0x69
'u', # 0x6a
'v', # 0x6b
'w', # 0x6c
'x', # 0x6d
'y', # 0x6e
'z', # 0x6f
'A', # 0x70
'B', # 0x71
'C', # 0x72
'D', # 0x73
'E', # 0x74
'F', # 0x75
'G', # 0x76
'H', # 0x77
'I', # 0x78
'J', # 0x79
'K', # 0x7a
'L', # 0x7b
'M', # 0x7c
'N', # 0x7d
'O', # 0x7e
'P', # 0x7f
'Q', # 0x80
'R', # 0x81
'S', # 0x82
'T', # 0x83
'U', # 0x84
'V', # 0x85
'W', # 0x86
'X', # 0x87
'Y', # 0x88
'Z', # 0x89
'a', # 0x8a
'b', # 0x8b
'c', # 0x8c
'd', # 0x8d
'e', # 0x8e
'f', # 0x8f
'g', # 0x90
'h', # 0x91
'i', # 0x92
'j', # 0x93
'k', # 0x94
'l', # 0x95
'm', # 0x96
'n', # 0x97
'o', # 0x98
'p', # 0x99
'q', # 0x9a
'r', # 0x9b
's', # 0x9c
't', # 0x9d
'u', # 0x9e
'v', # 0x9f
'w', # 0xa0
'x', # 0xa1
'y', # 0xa2
'z', # 0xa3
'i', # 0xa4
'j', # 0xa5
'', # 0xa6
'', # 0xa7
'Alpha', # 0xa8
'Beta', # 0xa9
'Gamma', # 0xaa
'Delta', # 0xab
'Epsilon', # 0xac
'Zeta', # 0xad
'Eta', # 0xae
'Theta', # 0xaf
'Iota', # 0xb0
'Kappa', # 0xb1
'Lamda', # 0xb2
'Mu', # 0xb3
'Nu', # 0xb4
'Xi', # 0xb5
'Omicron', # 0xb6
'Pi', # 0xb7
'Rho', # 0xb8
'Theta', # 0xb9
'Sigma', # 0xba
'Tau', # 0xbb
'Upsilon', # 0xbc
'Phi', # 0xbd
'Chi', # 0xbe
'Psi', # 0xbf
'Omega', # 0xc0
'nabla', # 0xc1
'alpha', # 0xc2
'beta', # 0xc3
'gamma', # 0xc4
'delta', # 0xc5
'epsilon', # 0xc6
'zeta', # 0xc7
'eta', # 0xc8
'theta', # 0xc9
'iota', # 0xca
'kappa', # 0xcb
'lamda', # 0xcc
'mu', # 0xcd
'nu', # 0xce
'xi', # 0xcf
'omicron', # 0xd0
'pi', # 0xd1
'rho', # 0xd2
'sigma', # 0xd3
'sigma', # 0xd4
'tau', # 0xd5
'upsilon', # 0xd6
'phi', # 0xd7
'chi', # 0xd8
'psi', # 0xd9
'omega', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
'', # 0xff
)
| gpl-3.0 | 4,030,084,914,697,049,000 | 7,615,175,469,902,297,000 | 14.403101 | 20 | 0.354051 | false |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/numpy/lib/nanfunctions.py | 11 | 50827 | """
Functions that ignore NaN.
Functions
---------
- `nanmin` -- minimum non-NaN value
- `nanmax` -- maximum non-NaN value
- `nanargmin` -- index of minimum non-NaN value
- `nanargmax` -- index of maximum non-NaN value
- `nansum` -- sum of non-NaN values
- `nanprod` -- product of non-NaN values
- `nancumsum` -- cumulative sum of non-NaN values
- `nancumprod` -- cumulative product of non-NaN values
- `nanmean` -- mean of non-NaN values
- `nanvar` -- variance of non-NaN values
- `nanstd` -- standard deviation of non-NaN values
- `nanmedian` -- median of non-NaN values
- `nanpercentile` -- qth percentile of non-NaN values
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.lib.function_base import _ureduce as _ureduce
__all__ = [
'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
'nancumsum', 'nancumprod'
]
def _replace_nan(a, val):
"""
If `a` is of inexact type, make a copy of `a`, replace NaNs with
the `val` value, and return the copy together with a boolean mask
marking the locations where NaNs were present. If `a` is not of
inexact type, do nothing and return `a` together with a mask of None.
Note that scalars will end up as array scalars, which is important
for using the result as the value of the out argument in some
operations.
Parameters
----------
a : array-like
Input array.
val : float
NaN values are set to val before doing the operation.
Returns
-------
y : ndarray
If `a` is of inexact type, return a copy of `a` with the NaNs
replaced by the fill value, otherwise return `a`.
mask: {bool, None}
If `a` is of inexact type, return a boolean mask marking locations of
NaNs, otherwise return None.
"""
a = np.array(a, subok=True, copy=True)
if a.dtype == np.object_:
# object arrays do not support `isnan` (gh-9009), so make a guess
mask = a != a
elif issubclass(a.dtype.type, np.inexact):
mask = np.isnan(a)
else:
mask = None
if mask is not None:
np.copyto(a, val, where=mask)
return a, mask
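# Hedged illustration (not part of the original source): for inexact input,
# _replace_nan returns a patched copy plus the NaN mask, e.g.
#   >>> a, mask = _replace_nan(np.array([1., np.nan, 3.]), 0)
#   >>> a                      # NaN patched with the fill value
#   array([ 1.,  0.,  3.])
#   >>> mask                   # marks where the NaN used to be
#   array([False,  True, False])
# whereas integer input comes back unchanged with mask None.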
def _copyto(a, val, mask):
"""
Replace values in `a` with NaN where `mask` is True. This differs from
copyto in that it will deal with the case where `a` is a numpy scalar.
Parameters
----------
a : ndarray or numpy scalar
Array or numpy scalar some of whose values are to be replaced
by val.
val : numpy scalar
Value used a replacement.
mask : ndarray, scalar
Boolean array. Where True the corresponding element of `a` is
replaced by `val`. Broadcasts.
Returns
-------
res : ndarray, scalar
Array with elements replaced or scalar `val`.
"""
if isinstance(a, np.ndarray):
np.copyto(a, val, where=mask, casting='unsafe')
else:
a = a.dtype.type(val)
return a
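# Hedged sketch (not part of the original source): _copyto handles both
# ndarrays and numpy scalars, e.g.
#   >>> _copyto(np.array([1., 2.]), np.nan, np.array([False, True]))
#   array([  1.,  nan])
#   >>> _copyto(np.float64(2.0), np.nan, True)    # scalar in -> scalar out
#   nan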
def _divide_by_count(a, b, out=None):
"""
Compute a/b ignoring invalid results. If `a` is an array the division
is done in place. If `a` is a scalar, then its type is preserved in the
    output. If out is None, then `a` is used instead so that the
division is in place. Note that this is only called with `a` an inexact
type.
Parameters
----------
a : {ndarray, numpy scalar}
Numerator. Expected to be of inexact type but not checked.
b : {ndarray, numpy scalar}
Denominator.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
Returns
-------
ret : {ndarray, numpy scalar}
The return value is a/b. If `a` was an ndarray the division is done
in place. If `a` is a numpy scalar, the division preserves its type.
"""
with np.errstate(invalid='ignore', divide='ignore'):
if isinstance(a, np.ndarray):
if out is None:
return np.divide(a, b, out=a, casting='unsafe')
else:
return np.divide(a, b, out=out, casting='unsafe')
else:
if out is None:
return a.dtype.type(a / b)
else:
# This is questionable, but currently a numpy scalar can
# be output to a zero dimensional array.
return np.divide(a, b, out=out, casting='unsafe')
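# Hedged sketch (not part of the original source): _divide_by_count is what
# turns a NaN-ignoring sum into a mean, dividing the array argument in place:
#   >>> tot = np.array([3., 4.]); cnt = np.array([2, 1])
#   >>> _divide_by_count(tot, cnt)
#   array([ 1.5,  4. ])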
def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return minimum of an array or minimum along an axis, ignoring any NaNs.
When all-NaN slices are encountered a ``RuntimeWarning`` is raised and
Nan is returned for that slice.
Parameters
----------
a : array_like
Array containing numbers whose minimum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the minimum is computed. The default is to compute
the minimum of the flattened array.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `min` method
of sub-classes of `ndarray`. If the sub-classes methods
does not implement `keepdims` any exceptions will be raised.
.. versionadded:: 1.8.0
Returns
-------
nanmin : ndarray
An array with the same shape as `a`, with the specified axis
removed. If `a` is a 0-d array, or if axis is None, an ndarray
scalar is returned. The same dtype as `a` is returned.
See Also
--------
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
amin :
The minimum value of an array along a given axis, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
isnan :
Shows which elements are Not a Number (NaN).
isfinite:
Shows which elements are neither NaN nor infinity.
amax, fmax, maximum
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative
infinity is treated as a very small (i.e. negative) number.
    If the input has an integer type the function is equivalent to np.min.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
array([ 1., 2.])
>>> np.nanmin(a, axis=1)
array([ 1., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmin([1, 2, np.nan, np.inf])
1.0
>>> np.nanmin([1, 2, np.nan, np.NINF])
-inf
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is np.ndarray and a.dtype != np.object_:
# Fast, but not safe for subclasses of ndarray, or object arrays,
# which do not implement isnan (gh-9009), or fmin correctly (gh-8975)
res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, +np.inf)
res = np.amin(a, axis=axis, out=out, **kwargs)
if mask is None:
return res
# Check for all-NaN axis
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
return res
def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the maximum of an array or maximum along an axis, ignoring any
NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is
raised and NaN is returned for that slice.
Parameters
----------
a : array_like
Array containing numbers whose maximum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the maximum is computed. The default is to compute
the maximum of the flattened array.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `max` method
of sub-classes of `ndarray`. If the sub-classes methods
does not implement `keepdims` any exceptions will be raised.
.. versionadded:: 1.8.0
Returns
-------
nanmax : ndarray
An array with the same shape as `a`, with the specified axis removed.
If `a` is a 0-d array, or if axis is None, an ndarray scalar is
returned. The same dtype as `a` is returned.
See Also
--------
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
amax :
The maximum value of an array along a given axis, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
isnan :
Shows which elements are Not a Number (NaN).
isfinite:
Shows which elements are neither NaN nor infinity.
amin, fmin, minimum
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative
infinity is treated as a very small (i.e. negative) number.
    If the input has an integer type the function is equivalent to np.max.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmax(a)
3.0
>>> np.nanmax(a, axis=0)
array([ 3., 2.])
>>> np.nanmax(a, axis=1)
array([ 2., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmax([1, 2, np.nan, np.NINF])
2.0
>>> np.nanmax([1, 2, np.nan, np.inf])
inf
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is np.ndarray and a.dtype != np.object_:
# Fast, but not safe for subclasses of ndarray, or object arrays,
# which do not implement isnan (gh-9009), or fmax correctly (gh-8975)
res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, -np.inf)
res = np.amax(a, axis=axis, out=out, **kwargs)
if mask is None:
return res
# Check for all-NaN axis
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
return res
def nanargmin(a, axis=None):
"""
Return the indices of the minimum values in the specified axis ignoring
NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results
cannot be trusted if a slice contains only NaNs and Infs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmin, nanargmax
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmin(a)
0
>>> np.nanargmin(a)
2
>>> np.nanargmin(a, axis=0)
array([1, 1])
>>> np.nanargmin(a, axis=1)
array([1, 0])
"""
a, mask = _replace_nan(a, np.inf)
res = np.argmin(a, axis=axis)
if mask is not None:
mask = np.all(mask, axis=axis)
if np.any(mask):
raise ValueError("All-NaN slice encountered")
return res
def nanargmax(a, axis=None):
"""
Return the indices of the maximum values in the specified axis ignoring
NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the
results cannot be trusted if a slice contains only NaNs and -Infs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmax, nanargmin
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmax(a)
0
>>> np.nanargmax(a)
1
>>> np.nanargmax(a, axis=0)
array([1, 0])
>>> np.nanargmax(a, axis=1)
array([1, 1])
"""
a, mask = _replace_nan(a, -np.inf)
res = np.argmax(a, axis=axis)
if mask is not None:
mask = np.all(mask, axis=axis)
if np.any(mask):
raise ValueError("All-NaN slice encountered")
return res
def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the sum of array elements over a given axis treating Not a
Numbers (NaNs) as zero.
In NumPy versions <= 1.8.0 Nan is returned for slices that are all-NaN or
empty. In later versions zero is returned.
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the sum is computed. The default is to compute the
sum of the flattened array.
dtype : data-type, optional
The type of the returned array and of the accumulator in which the
elements are summed. By default, the dtype of `a` is used. An
exception is when `a` has an integer type with less precision than
the platform (u)intp. In that case, the default will be either
(u)int32 or (u)int64 depending on whether the platform is 32 or 64
bits. For inexact inputs, dtype must be inexact.
.. versionadded:: 1.8.0
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``. If provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details. The casting of NaN to integer can yield
unexpected results.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `mean` or `sum` methods
of sub-classes of `ndarray`. If the sub-classes methods
does not implement `keepdims` any exceptions will be raised.
.. versionadded:: 1.8.0
Returns
-------
nansum : ndarray.
A new array holding the result is returned unless `out` is
        specified, in which case it is returned. The result has the same
size as `a`, and the same shape as `a` if `axis` is not None
or `a` is a 1-d array.
See Also
--------
numpy.sum : Sum across array propagating NaNs.
isnan : Show which elements are NaN.
isfinite: Show which elements are not NaN or +/-inf.
Notes
-----
If both positive and negative infinity are present, the sum will be Not
A Number (NaN).
Examples
--------
>>> np.nansum(1)
1
>>> np.nansum([1])
1
>>> np.nansum([1, np.nan])
1.0
>>> a = np.array([[1, 1], [1, np.nan]])
>>> np.nansum(a)
3.0
>>> np.nansum(a, axis=0)
array([ 2., 1.])
>>> np.nansum([1, np.nan, np.inf])
inf
>>> np.nansum([1, np.nan, np.NINF])
-inf
>>> np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
nan
"""
a, mask = _replace_nan(a, 0)
return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of array elements over a given axis treating Not a
Numbers (NaNs) as ones.
One is returned for slices that are all-NaN or empty.
.. versionadded:: 1.10.0
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the product is computed. The default is to compute
the product of the flattened array.
dtype : data-type, optional
The type of the returned array and of the accumulator in which the
elements are summed. By default, the dtype of `a` is used. An
exception is when `a` has an integer type with less precision than
the platform (u)intp. In that case, the default will be either
(u)int32 or (u)int64 depending on whether the platform is 32 or 64
bits. For inexact inputs, dtype must be inexact.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``. If provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details. The casting of NaN to integer can yield
unexpected results.
keepdims : bool, optional
If True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will
broadcast correctly against the original `arr`.
Returns
-------
nanprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case it is returned.
See Also
--------
numpy.prod : Product across array propagating NaNs.
isnan : Show which elements are NaN.
Examples
--------
>>> np.nanprod(1)
1
>>> np.nanprod([1])
1
>>> np.nanprod([1, np.nan])
1.0
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanprod(a)
6.0
>>> np.nanprod(a, axis=0)
array([ 3., 2.])
"""
a, mask = _replace_nan(a, 1)
return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def nancumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of array elements over a given axis treating Not a
Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are
encountered and leading NaNs are replaced by zeros.
Zeros are returned for slices that are all-NaN or empty.
.. versionadded:: 1.12.0
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
nancumsum : ndarray.
A new array holding the result is returned unless `out` is
        specified, in which case it is returned. The result has the same
size as `a`, and the same shape as `a` if `axis` is not None
or `a` is a 1-d array.
See Also
--------
numpy.cumsum : Cumulative sum across array propagating NaNs.
isnan : Show which elements are NaN.
Examples
--------
>>> np.nancumsum(1)
array([1])
>>> np.nancumsum([1])
array([1])
>>> np.nancumsum([1, np.nan])
array([ 1., 1.])
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nancumsum(a)
array([ 1., 3., 6., 6.])
>>> np.nancumsum(a, axis=0)
array([[ 1., 2.],
[ 4., 2.]])
>>> np.nancumsum(a, axis=1)
array([[ 1., 3.],
[ 3., 3.]])
"""
a, mask = _replace_nan(a, 0)
return np.cumsum(a, axis=axis, dtype=dtype, out=out)
def nancumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of array elements over a given axis treating Not a
Numbers (NaNs) as one. The cumulative product does not change when NaNs are
encountered and leading NaNs are replaced by ones.
Ones are returned for slices that are all-NaN or empty.
.. versionadded:: 1.12.0
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
nancumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case it is returned.
See Also
--------
numpy.cumprod : Cumulative product across array propagating NaNs.
isnan : Show which elements are NaN.
Examples
--------
>>> np.nancumprod(1)
array([1])
>>> np.nancumprod([1])
array([1])
>>> np.nancumprod([1, np.nan])
array([ 1., 1.])
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nancumprod(a)
array([ 1., 2., 6., 6.])
>>> np.nancumprod(a, axis=0)
array([[ 1., 2.],
[ 3., 2.]])
>>> np.nancumprod(a, axis=1)
array([[ 1., 2.],
[ 3., 3.]])
"""
a, mask = _replace_nan(a, 1)
return np.cumprod(a, axis=axis, dtype=dtype, out=out)
def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Compute the arithmetic mean along the specified axis, ignoring NaNs.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the means are computed. The default is to compute
the mean of the flattened array.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for inexact inputs, it is the same as the input
dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If the value is anything but the default, then
`keepdims` will be passed through to the `mean` or `sum` methods
of sub-classes of `ndarray`. If the sub-classes methods
does not implement `keepdims` any exceptions will be raised.
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned. Nan is
returned for slices that contain only NaNs.
See Also
--------
average : Weighted average
mean : Arithmetic mean taken while not ignoring NaNs
var, nanvar
Notes
-----
The arithmetic mean is the sum of the non-NaN elements along the axis
divided by the number of non-NaN elements.
Note that for floating-point input, the mean is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32`. Specifying a
higher-precision accumulator using the `dtype` keyword can alleviate
this issue.
Examples
--------
>>> a = np.array([[1, np.nan], [3, 4]])
>>> np.nanmean(a)
2.6666666666666665
>>> np.nanmean(a, axis=0)
array([ 2., 4.])
>>> np.nanmean(a, axis=1)
array([ 1., 3.5])
"""
arr, mask = _replace_nan(a, 0)
if mask is None:
return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
if dtype is not None:
dtype = np.dtype(dtype)
if dtype is not None and not issubclass(dtype.type, np.inexact):
raise TypeError("If a is inexact, then dtype must be inexact")
if out is not None and not issubclass(out.dtype.type, np.inexact):
raise TypeError("If a is inexact, then out must be inexact")
cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims)
tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
avg = _divide_by_count(tot, cnt, out=out)
isbad = (cnt == 0)
if isbad.any():
warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2)
# NaN is the only possible bad value, so no further
# action is needed to handle bad results.
return avg
def _nanmedian1d(arr1d, overwrite_input=False):
"""
Private function for rank 1 arrays. Compute the median ignoring NaNs.
See nanmedian for parameter usage
"""
c = np.isnan(arr1d)
s = np.where(c)[0]
if s.size == arr1d.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
return np.nan
elif s.size == 0:
return np.median(arr1d, overwrite_input=overwrite_input)
else:
if overwrite_input:
x = arr1d
else:
x = arr1d.copy()
# select non-nans at end of array
enonan = arr1d[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
return np.median(x[:-s.size], overwrite_input=True)
def _nanmedian(a, axis=None, out=None, overwrite_input=False):
"""
Private function that doesn't support extended axis or keepdims.
These methods are extended to this function using _ureduce
See nanmedian for parameter usage
"""
if axis is None or a.ndim == 1:
part = a.ravel()
if out is None:
return _nanmedian1d(part, overwrite_input)
else:
out[...] = _nanmedian1d(part, overwrite_input)
return out
else:
# for small medians use sort + indexing which is still faster than
# apply_along_axis
# benchmarked with shuffled (50, 50, x) containing a few NaN
if a.shape[axis] < 600:
return _nanmedian_small(a, axis, out, overwrite_input)
result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)
if out is not None:
out[...] = result
return result
def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
"""
sort + indexing median, faster for small medians along multiple
dimensions due to the high overhead of apply_along_axis
see nanmedian for parameter usage
"""
a = np.ma.masked_array(a, np.isnan(a))
m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
for i in range(np.count_nonzero(m.mask.ravel())):
warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
if out is not None:
out[...] = m.filled(np.nan)
return out
return m.filled(np.nan)
def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
"""
Compute the median along the specified axis, while ignoring NaNs.
Returns the median of the array elements.
.. versionadded:: 1.9.0
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis or axes along which the medians are computed. The default
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
`median`. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. If `overwrite_input` is ``True`` and `a` is not already an
`ndarray`, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If this is anything but the default value it will be passed
through (in the special case of an empty array) to the
`mean` function of the underlying array. If the array is
a sub-class and `mean` does not have the kwarg `keepdims` this
will raise a RuntimeError.
Returns
-------
median : ndarray
A new array holding the result. If the input contains integers
or floats smaller than ``float64``, then the output data-type is
``np.float64``. Otherwise, the data-type of the output is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, median, percentile
Notes
-----
Given a vector ``V`` of length ``N``, the median of ``V`` is the
middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two
middle values of ``V_sorted`` when ``N`` is even.
Examples
--------
>>> a = np.array([[10.0, 7, 4], [3, 2, 1]])
>>> a[0, 1] = np.nan
>>> a
array([[ 10., nan, 4.],
[ 3., 2., 1.]])
>>> np.median(a)
nan
>>> np.nanmedian(a)
3.0
>>> np.nanmedian(a, axis=0)
array([ 6.5, 2., 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> b = a.copy()
>>> np.nanmedian(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.nanmedian(b, axis=None, overwrite_input=True)
3.0
>>> assert not np.all(a==b)
"""
a = np.asanyarray(a)
# apply_along_axis in _nanmedian doesn't handle empty arrays well,
    # so deal with them upfront
if a.size == 0:
return np.nanmean(a, axis, out=out, keepdims=keepdims)
r, k = _ureduce(a, func=_nanmedian, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims and keepdims is not np._NoValue:
return r.reshape(k)
else:
return r
def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=np._NoValue):
"""
Compute the qth percentile of the data along the specified axis,
while ignoring nan values.
Returns the qth percentile(s) of the array elements.
.. versionadded:: 1.9.0
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute, which must be between 0 and 100
inclusive.
axis : {int, sequence of int, None}, optional
Axis or axes along which the percentiles are computed. The
default is to compute the percentile(s) along a flattened
version of the array. A sequence of axes is supported since
version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
`percentile`. This will save memory when you do not need to
preserve the contents of the input array. In this case you
should not make any assumptions about the contents of the input
`a` after this function completes -- treat it as undefined.
Default is False. If `a` is not already an array, this parameter
will have no effect as `a` will be converted to an array
internally regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
result will broadcast correctly against the original array `a`.
If this is anything but the default value it will be passed
through (in the special case of an empty array) to the
`mean` function of the underlying array. If the array is
a sub-class and `mean` does not have the kwarg `keepdims` this
will raise a RuntimeError.
Returns
-------
percentile : scalar or ndarray
If `q` is a single percentile and `axis=None`, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the percentiles. The other axes are
the axes that remain after the reduction of `a`. If the input
contains integers or floats smaller than ``float64``, the output
data-type is ``float64``. Otherwise, the output data-type is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
nanmean, nanmedian, percentile, median, mean
Notes
-----
Given a vector ``V`` of length ``N``, the ``q``-th percentile of
``V`` is the value ``q/100`` of the way from the minimum to the
maximum in a sorted copy of ``V``. The values and distances of
the two nearest neighbors as well as the `interpolation` parameter
will determine the percentile if the normalized ranking does not
match the location of ``q`` exactly. This function is the same as
the median if ``q=50``, the same as the minimum if ``q=0`` and the
same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
>>> a[0][1] = np.nan
>>> a
array([[ 10., nan, 4.],
[ 3., 2., 1.]])
>>> np.percentile(a, 50)
nan
>>> np.nanpercentile(a, 50)
3.5
>>> np.nanpercentile(a, 50, axis=0)
array([ 6.5, 2., 2.5])
>>> np.nanpercentile(a, 50, axis=1, keepdims=True)
array([[ 7.],
[ 2.]])
>>> m = np.nanpercentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.nanpercentile(a, 50, axis=0, out=out)
array([ 6.5, 2., 2.5])
>>> m
array([ 6.5, 2. , 2.5])
>>> b = a.copy()
>>> np.nanpercentile(b, 50, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
"""
a = np.asanyarray(a)
q = np.asanyarray(q)
# apply_along_axis in _nanpercentile doesn't handle empty arrays well,
    # so deal with them upfront
if a.size == 0:
return np.nanmean(a, axis, out=out, keepdims=keepdims)
r, k = _ureduce(a, func=_nanpercentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims and keepdims is not np._NoValue:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear'):
"""
Private function that doesn't support extended axis or keepdims.
These methods are extended to this function using _ureduce
See nanpercentile for parameter usage
"""
if axis is None or a.ndim == 1:
part = a.ravel()
result = _nanpercentile1d(part, q, overwrite_input, interpolation)
else:
result = np.apply_along_axis(_nanpercentile1d, axis, a, q,
overwrite_input, interpolation)
# apply_along_axis fills in collapsed axis with results.
# Move that axis to the beginning to match percentile's
# convention.
if q.ndim != 0:
result = np.rollaxis(result, axis)
if out is not None:
out[...] = result
return result
def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation='linear'):
"""
Private function for rank 1 arrays. Compute percentile ignoring
NaNs.
See nanpercentile for parameter usage
"""
c = np.isnan(arr1d)
s = np.where(c)[0]
if s.size == arr1d.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
if q.ndim == 0:
return np.nan
else:
return np.nan * np.ones((len(q),))
elif s.size == 0:
return np.percentile(arr1d, q, overwrite_input=overwrite_input,
interpolation=interpolation)
else:
if overwrite_input:
x = arr1d
else:
x = arr1d.copy()
# select non-nans at end of array
enonan = arr1d[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
return np.percentile(x[:-s.size], q, overwrite_input=True,
interpolation=interpolation)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the variance along the specified axis, while ignoring NaNs.
Returns the variance of the array elements, a measure of the spread of
a distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
For all-NaN slices or slices with zero degrees of freedom, NaN is
returned and a `RuntimeWarning` is raised.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the variance is computed. The default is to compute
the variance of the flattened array.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
        the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of non-NaN
elements. By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
Returns
-------
variance : ndarray, see dtype parameter above
If `out` is None, return a new array containing the variance,
otherwise return a reference to the output array. If ddof is >= the
number of non-NaN elements in a slice or the slice contains only
NaNs, then the result for that slice is NaN.
See Also
--------
std : Standard deviation
mean : Average
var : Variance while not ignoring NaNs
nanstd, nanmean
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(abs(x - x.mean())**2)``.
The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite
population. ``ddof=0`` provides a maximum likelihood estimate of the
variance for normally distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32` (see example
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
For this function to work on sub-classes of ndarray, they must define
`sum` with the kwarg `keepdims`
Examples
--------
>>> a = np.array([[1, np.nan], [3, 4]])
>>> np.var(a)
1.5555555555555554
>>> np.nanvar(a, axis=0)
array([ 1., 0.])
>>> np.nanvar(a, axis=1)
array([ 0., 0.25])
"""
arr, mask = _replace_nan(a, 0)
if mask is None:
return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if dtype is not None:
dtype = np.dtype(dtype)
if dtype is not None and not issubclass(dtype.type, np.inexact):
raise TypeError("If a is inexact, then dtype must be inexact")
if out is not None and not issubclass(out.dtype.type, np.inexact):
raise TypeError("If a is inexact, then out must be inexact")
# Compute mean
if type(arr) is np.matrix:
_keepdims = np._NoValue
else:
_keepdims = True
# we need to special case matrix for reverse compatibility
# in order for this to work, these sums need to be called with
# keepdims=True, however matrix now raises an error in this case, but
# the reason that it drops the keepdims kwarg is to force keepdims=True
# so this used to work by serendipity.
cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims)
avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims)
avg = _divide_by_count(avg, cnt)
# Compute squared deviation from mean.
np.subtract(arr, avg, out=arr, casting='unsafe')
arr = _copyto(arr, 0, mask)
if issubclass(arr.dtype.type, np.complexfloating):
sqr = np.multiply(arr, arr.conj(), out=arr).real
else:
sqr = np.multiply(arr, arr, out=arr)
# Compute variance.
var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
if var.ndim < cnt.ndim:
# Subclasses of ndarray may ignore keepdims, so check here.
cnt = cnt.squeeze(axis)
dof = cnt - ddof
var = _divide_by_count(var, dof)
isbad = (dof <= 0)
if np.any(isbad):
warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, stacklevel=2)
# NaN, inf, or negative numbers are all possible bad
# values, so explicitly replace them with NaN.
var = _copyto(var, np.nan, isbad)
return var
def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the standard deviation along the specified axis, while
ignoring NaNs.
Returns the standard deviation, a measure of the spread of a
distribution, of the non-NaN array elements. The standard deviation is
computed for the flattened array by default, otherwise over the
specified axis.
For all-NaN slices or slices with zero degrees of freedom, NaN is
returned and a `RuntimeWarning` is raised.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Calculate the standard deviation of the non-NaN values.
axis : int, optional
Axis along which the standard deviation is computed. The default is
to compute the standard deviation of the flattened array.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it
is the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the
calculated values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of non-NaN
elements. By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `a`.
If this value is anything but the default it is passed through
as-is to the relevant functions of the sub-classes. If these
functions do not have a `keepdims` kwarg, a RuntimeError will
be raised.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard
deviation, otherwise return a reference to the output array. If
ddof is >= the number of non-NaN elements in a slice or the slice
contains only NaNs, then the result for that slice is NaN.
See Also
--------
var, mean, std
nanvar, nanmean
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``.
The average squared deviation is normally calculated as
``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is
specified, the divisor ``N - ddof`` is used instead. In standard
statistical practice, ``ddof=1`` provides an unbiased estimator of the
variance of the infinite population. ``ddof=0`` provides a maximum
likelihood estimate of the variance for normally distributed variables.
The standard deviation computed in this function is the square root of
the estimated variance, so even with ``ddof=1``, it will not be an
unbiased estimate of the standard deviation per se.
Note that, for complex numbers, `std` takes the absolute value before
squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example
below). Specifying a higher-accuracy accumulator using the `dtype`
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, np.nan], [3, 4]])
>>> np.nanstd(a)
1.247219128924647
>>> np.nanstd(a, axis=0)
array([ 1., 0.])
>>> np.nanstd(a, axis=1)
array([ 0., 0.5])
"""
var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if isinstance(var, np.ndarray):
std = np.sqrt(var, out=var)
else:
std = var.dtype.type(np.sqrt(var))
return std
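# --- Illustrative sketch (not part of the NumPy source above) ----------------
# A minimal check of the relationship described in the nanstd docstring:
# nanstd is the square root of nanvar over the non-NaN elements. The input
# mirrors the docstring example; this helper is not called anywhere here.
def _nanstd_sketch():
    a = np.array([[1, np.nan], [3, 4]])
    var = nanvar(a)                      # variance of the three non-NaN values
    std = nanstd(a)                      # ~1.2472, as in the docstring example
    assert np.isclose(std, np.sqrt(var))
    return std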
| apache-2.0 | 7,898,143,847,881,121,000 | -3,400,211,085,921,399,300 | 34.493715 | 89 | 0.622602 | false |
kevinrigney/PlaylistDatabase | ReplaceVideoUrl.py | 1 | 4628 | #!/usr/bin/env python3
# Find videos with duplicates: SELECT Track.track_name, Artist.artist_name,Track.youtube_link FROM Track JOIN Artist WHERE Track.artist_id = Artist.id GROUP BY Track.youtube_link HAVING count(*) >=2
from PlaylistDatabase import PlaylistDatabase
db = PlaylistDatabase(config_file='PlaylistDatabaseConfig.ini')
video = input('Enter the video ID: ')
if video.startswith('https://youtu.be/'):
pass
elif video.startswith('https://www.youtube.com/watch?v='):
    video = video.replace('https://www.youtube.com/watch?v=', 'https://youtu.be/')
else:
video = 'https://youtu.be/'+video
db._cur.execute('''SELECT Track.id, Track.track_name, Artist.artist_name, Album.album_name, Track.artist_id, Track.album_id, Track.youtube_link from Track JOIN Artist JOIN Album WHERE Track.youtube_link=%s AND Track.album_id=Album.id AND Track.artist_id=Artist.id''',(video,))
track = db._cur.fetchall()
if len(track) > 1:
print('\nWARNING: More than one track has the same video.\n')
for ii,t in enumerate(track):
        print()
track_id,track_name,artist_name,album_name,artist_id,album_id,youtube_link = track[ii]
print('Track '+str(ii)+' is: ',track_id,track_name,artist_name,album_name,artist_id,album_id, youtube_link)
ii=int(input('\nWhat track do you want to use? '))
else:
ii=0
track_id,track_name,artist_name,album_name,artist_id,album_id,youtube_link = track[ii]
print('Track '+str(ii)+' is: ',track_id,track_name,artist_name,album_name,artist_id,album_id, youtube_link)
#yesorno = input('Do you want to delete this track and add it to the ignore lists? (yes/no): ')
yesorno='no'
if yesorno.lower()=='yes':
db._cur.execute('''SELECT Playlist.*,Station.* FROM Playlist JOIN Station WHERE Playlist.track_id=%s AND Playlist.station_id=Station.id''',(track_id,))
stations = db._cur.fetchall()
unique_station = {}
for s in stations:
playlist_entry_id, track_id, pl_station_id,playtime,station_id,station_name,station_url,ignore_artists,ignore_titles,playlist_url = s
unique_station[station_id] = (station_name,station_url,ignore_artists,ignore_titles,playlist_url)
print(unique_station)
for id in unique_station:
exec('ignore_artists = ' + unique_station[id][2])
exec('ignore_titles = ' + unique_station[id][3])
if artist_name not in ignore_artists:
ignore_artists.append(artist_name)
if track_name not in ignore_titles:
ignore_titles.append(track_name)
unique_station[id] = unique_station[id][0],unique_station[id][1],str(ignore_artists),str(ignore_titles),unique_station[id][4]
db._cur.execute('''
UPDATE Station
SET ignore_artists=%s, ignore_titles=%s
WHERE Station.id=%s
''',(str(ignore_artists),str(ignore_titles), id))
db._conn.commit()
print(unique_station)
# Get all tracks with the matching artist id and album id
all_tracks = []
db._cur.execute('''SELECT Track.id FROM Track WHERE Track.album_id=%s AND Track.artist_id=%s''',(album_id,artist_id))
for id in db._cur.fetchall():
if id not in all_tracks:
all_tracks.append(id[0])
for id in all_tracks:
# Remove the station entries
db._cur.execute('''DELETE FROM Playlist WHERE Playlist.track_id=%s''',(id,))
# Remove the track entries
db._cur.execute('''DELETE FROM Track WHERE Track.id=%s''',(id,))
# Remove the album entries
db._cur.execute('''DELETE FROM Album WHERE Album.id=%s''',(album_id,))
# Remove the artist entries
db._cur.execute('''DELETE FROM Artist WHERE Artist.id=%s''',(artist_id,))
db._conn.commit()
#Tracks = db._cur.fetchall()
else:
#yesorno = input('Do you want to update the youtube URL for this track? (yes/no): ')
yesorno='yes'
if yesorno.lower() == 'yes':
url = input('Enter the new youtube url: ')
if url == '':
print('No URL Specified... Exiting.')
else:
if url.startswith('https://youtu.be/'):
pass
elif url.startswith('https://www.youtube.com/watch?v='):
            url = url.replace('https://www.youtube.com/watch?v=', 'https://youtu.be/')
else:
url = 'https://youtu.be/'+url
db._cur.execute('''
UPDATE Track
SET youtube_link=%s
WHERE Track.id=%s
''',(url,track_id))
db._conn.commit()
else:
print('Not modifying database.')
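# --- Illustrative sketch (not part of the original script) -------------------
# The URL normalization performed twice above could be factored into a helper;
# a minimal version, covering only the two forms this script handles:
def _normalize_youtube_url(value):
    prefix = 'https://www.youtube.com/watch?v='
    if value.startswith('https://youtu.be/'):
        return value
    if value.startswith(prefix):
        return 'https://youtu.be/' + value[len(prefix):]
    return 'https://youtu.be/' + value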
| mit | -4,542,897,999,102,990,300 | 6,303,612,461,071,692,000 | 37.566667 | 276 | 0.624028 | false |
hwu25/AppPkg | Applications/Python/Python-2.7.2/Lib/textwrap.py | 53 | 17265 | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <[email protected]>
__revision__ = "$Id$"
import string, re
# Do the right thing with boolean values for all known Python versions
# (so this module can be copied to projects that don't depend on Python
# 2.3, e.g. Optik and Docutils) by uncommenting the block of code below.
#try:
# True, False
#except NameError:
# (True, False) = (1, 0)
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 1 .. 8 spaces, depending on its position in
its line. If false, each tab is treated as a single character.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
"""
whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))
unicode_whitespace_trans = {}
uspace = ord(u' ')
for x in map(ord, _whitespace):
unicode_whitespace_trans[x] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# This less funky little regex just split on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(\s+)')
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[%s]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
r'\Z' # end of chunk
% string.lowercase)
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True,
drop_whitespace=True,
break_on_hyphens=True):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
self.drop_whitespace = drop_whitespace
self.break_on_hyphens = break_on_hyphens
# recompile the regexes for Unicode mode -- done in this clumsy way for
# backwards compatibility because it's rather common to monkey-patch
# the TextWrapper class' wordsep_re attribute.
self.wordsep_re_uni = re.compile(self.wordsep_re.pattern, re.U)
self.wordsep_simple_re_uni = re.compile(
self.wordsep_simple_re.pattern, re.U)
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs()
if self.replace_whitespace:
if isinstance(text, str):
text = text.translate(self.whitespace_trans)
elif isinstance(text, unicode):
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', option!'
otherwise.
"""
if isinstance(text, unicode):
if self.break_on_hyphens:
pat = self.wordsep_re_uni
else:
pat = self.wordsep_simple_re_uni
else:
if self.break_on_hyphens:
pat = self.wordsep_re
else:
pat = self.wordsep_simple_re
chunks = pat.split(text)
chunks = filter(None, chunks) # remove empty chunks
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
patsearch = self.sentence_end_re.search
while i < len(chunks)-1:
if chunks[i+1] == " " and patsearch(chunks[i]):
chunks[i+1] = " "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
# from a stack of chucks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
# -- Loosely related functionality -------------------------------------
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
"""Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left
edge of the display, while still presenting them in the source code
in indented form.
Note that tabs and spaces are both treated as whitespace, but they
are not equal: the lines " hello" and "\thello" are
considered to have no common leading whitespace. (This behaviour is
new in Python 2.5; older versions of this module incorrectly
expanded tabs before searching for common leading whitespace.)
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Current line and previous winner have no common whitespace:
# there is no margin.
else:
margin = ""
break
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split("\n"):
assert not line or line.startswith(margin), \
"line = %r, margin = %r" % (line, margin)
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text
if __name__ == "__main__":
#print dedent("\tfoo\n\tbar")
#print dedent(" \thello there\n \t how are you?")
print dedent("Hello there.\n This is indented.")
| bsd-2-clause | 1,772,721,885,560,043,300 | 531,490,101,032,689,800 | 39.402878 | 80 | 0.578511 | false |
1st/django | django/contrib/flatpages/templatetags/flatpages.py | 472 | 3632 | from django import template
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.shortcuts import get_current_site
register = template.Library()
class FlatpageNode(template.Node):
def __init__(self, context_name, starts_with=None, user=None):
self.context_name = context_name
if starts_with:
self.starts_with = template.Variable(starts_with)
else:
self.starts_with = None
if user:
self.user = template.Variable(user)
else:
self.user = None
def render(self, context):
if 'request' in context:
site_pk = get_current_site(context['request']).pk
else:
site_pk = settings.SITE_ID
flatpages = FlatPage.objects.filter(sites__id=site_pk)
# If a prefix was specified, add a filter
if self.starts_with:
flatpages = flatpages.filter(
url__startswith=self.starts_with.resolve(context))
# If the provided user is not authenticated, or no user
# was provided, filter the list to only public flatpages.
if self.user:
user = self.user.resolve(context)
if not user.is_authenticated():
flatpages = flatpages.filter(registration_required=False)
else:
flatpages = flatpages.filter(registration_required=False)
context[self.context_name] = flatpages
return ''
@register.tag
def get_flatpages(parser, token):
"""
Retrieves all flatpage objects available for the current site and
visible to the specific user (or visible to all users if no user is
specified). Populates the template context with them in a variable
whose name is defined by the ``as`` clause.
An optional ``for`` clause can be used to control the user whose
permissions are to be used in determining which flatpages are visible.
An optional argument, ``starts_with``, can be applied to limit the
returned flatpages to those beginning with a particular base URL.
This argument can be passed as a variable or a string, as it resolves
from the template context.
Syntax::
{% get_flatpages ['url_starts_with'] [for user] as context_name %}
Example usage::
{% get_flatpages as flatpages %}
{% get_flatpages for someuser as flatpages %}
{% get_flatpages '/about/' as about_pages %}
{% get_flatpages prefix as about_pages %}
{% get_flatpages '/about/' for someuser as about_pages %}
"""
bits = token.split_contents()
syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
"['url_starts_with'] [for user] as context_name" %
dict(tag_name=bits[0]))
# Must have at 3-6 bits in the tag
if len(bits) >= 3 and len(bits) <= 6:
# If there's an even number of bits, there's no prefix
if len(bits) % 2 == 0:
prefix = bits[1]
else:
prefix = None
# The very last bit must be the context name
if bits[-2] != 'as':
raise template.TemplateSyntaxError(syntax_message)
context_name = bits[-1]
# If there are 5 or 6 bits, there is a user defined
if len(bits) >= 5:
if bits[-4] != 'for':
raise template.TemplateSyntaxError(syntax_message)
user = bits[-3]
else:
user = None
return FlatpageNode(context_name, starts_with=prefix, user=user)
else:
raise template.TemplateSyntaxError(syntax_message)
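# --- Illustrative sketch (not part of the original module) -------------------
# Using the tag above from a template requires loading this tag library first.
# A minimal template snippet (Django template syntax, shown as a comment):
#
#   {% load flatpages %}
#   {% get_flatpages as flatpages %}
#   <ul>
#   {% for page in flatpages %}
#     <li><a href="{{ page.url }}">{{ page.title }}</a></li>
#   {% endfor %}
#   </ul>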
| bsd-3-clause | 5,075,388,904,206,985,000 | -8,598,300,684,205,964,000 | 34.960396 | 74 | 0.617291 | false |
araisrobo/machinekit | src/hal/user_comps/hal_temp_ads7828.py | 12 | 3854 | #!/usr/bin/python
# encoding: utf-8
"""
Temperature.py
Created by Alexander Rössler on 2014-03-24.
"""
from drivers.ADS7828 import ADS7828
from fdm.r2temp import R2Temp
import argparse
import time
import sys
import hal
class Pin:
def __init__(self):
self.pin = 0
self.r2temp = None
self.halValuePin = 0
self.halRawPin = 0
self.filterSamples = []
self.filterSize = 10
self.rawValue = 0.0
def addSample(self, value):
self.filterSamples.append(value)
if (len(self.filterSamples) > self.filterSize):
self.filterSamples.pop(0)
sampleSum = 0.0
for sample in self.filterSamples:
sampleSum += sample
self.rawValue = sampleSum / len(self.filterSamples)
def getHalName(pin):
return "ch-" + '{0:02d}'.format(pin.pin)
def adc2Temp(pin):
R1 = 4700.0
R2 = R1 / max(4095.0 / pin.rawValue - 1.0, 0.000001)
return round(pin.r2temp.r2t(R2) * 10.0) / 10.0
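# --- Illustrative note (not part of the original component) ------------------
# adc2Temp() assumes a 12-bit ADC reading across a voltage divider with a fixed
# R1 = 4.7k resistor: R2 = R1 / (4095/raw - 1). For example, a raw reading of
# 2048 (roughly mid-scale) gives R2 = 4700 / (4095/2048 - 1) ~= 4702 ohms,
# which the channel's R2Temp table then maps to a temperature.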
parser = argparse.ArgumentParser(description='HAL component to read Temperature values over I2C')
parser.add_argument('-n', '--name', help='HAL component name', required=True)
parser.add_argument('-b', '--bus_id', help='I2C bus id', default=2)
parser.add_argument('-a', '--address', help='I2C device address', default=0x20)
parser.add_argument('-i', '--interval', help='I2C update interval', default=0.05)
parser.add_argument('-c', '--channels', help='Komma separated list of channels and thermistors to use e.g. 01:semitec_103GT_2,02:epcos_B57560G1104', required=True)
parser.add_argument('-f', '--filter_size', help='Size of the low pass filter to use', default=10)
parser.add_argument('-d', '--delay', help='Delay before the i2c should be updated', default=0.0)
args = parser.parse_args()
updateInterval = float(args.interval)
delayInterval = float(args.delay)
filterSize = int(args.filter_size)
error = True
watchdog = True
adc = ADS7828(busId=int(args.bus_id),
address=int(args.address))
# Create pins
pins = []
if (args.channels != ""):
channelsRaw = args.channels.split(',')
for channel in channelsRaw:
pinRaw = channel.split(':')
if (len(pinRaw) != 2):
print(("wrong input"))
sys.exit(1)
pin = Pin()
pin.pin = int(pinRaw[0])
if ((pin.pin > 7) or (pin.pin < 0)):
print(("Pin not available"))
sys.exit(1)
if (pinRaw[1] != "none"):
pin.r2temp = R2Temp(pinRaw[1])
pin.filterSize = filterSize
pins.append(pin)
# Initialize HAL
h = hal.component(args.name)
for pin in pins:
pin.halRawPin = h.newpin(getHalName(pin) + ".raw", hal.HAL_FLOAT, hal.HAL_OUT)
if (pin.r2temp is not None):
pin.halValuePin = h.newpin(getHalName(pin) + ".value", hal.HAL_FLOAT, hal.HAL_OUT)
halErrorPin = h.newpin("error", hal.HAL_BIT, hal.HAL_OUT)
halNoErrorPin = h.newpin("no-error", hal.HAL_BIT, hal.HAL_OUT)
halWatchdogPin = h.newpin("watchdog", hal.HAL_BIT, hal.HAL_OUT)
h.ready()
halErrorPin.value = error
halNoErrorPin.value = not error
halWatchdogPin.value = watchdog
try:
time.sleep(delayInterval)
while (True):
try:
for pin in pins:
value = float(adc.readChannel(pin.pin))
pin.addSample(value)
pin.halRawPin.value = pin.rawValue
if (pin.r2temp is not None):
pin.halValuePin.value = adc2Temp(pin)
error = False
except IOError as e:
error = True
halErrorPin.value = error
halNoErrorPin.value = not error
watchdog = not watchdog
halWatchdogPin.value = watchdog
time.sleep(updateInterval)
except:
print(("exiting HAL component " + args.name))
h.exit()
| lgpl-2.1 | 2,995,110,629,024,636,000 | -4,571,003,402,347,404,000 | 29.338583 | 163 | 0.622891 | false |
tensorflow/tensorflow | tensorflow/python/keras/tests/model_subclassing_compiled_test.py | 6 | 14360 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for compiled Model subclassing."""
import os
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.tests import model_subclassing_test_util as model_util
from tensorflow.python.platform import test
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
@keras_parameterized.run_all_keras_modes
class ModelSubclassCompiledTest(keras_parameterized.TestCase):
def test_single_io_workflow_with_np_arrays(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = testing_utils.SmallSubclassMLP(
num_hidden=32, num_classes=num_classes, use_dp=True, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc', keras.metrics.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
def test_multi_io_workflow_with_np_arrays(self):
num_classes = (2, 3)
num_samples = 1000
input_dim = 50
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_dp=True, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
_ = model.evaluate([x1, x2], [y1, y2], verbose=0)
def test_single_io_workflow_with_datasets(self):
num_classes = 2
num_samples = 10
input_dim = 50
with self.cached_session():
model = testing_utils.SmallSubclassMLP(
num_hidden=32, num_classes=num_classes, use_dp=True, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim), dtype=np.float32)
y = np.zeros((num_samples, num_classes), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(dataset, steps=10, verbose=0)
def test_attributes(self):
# layers, weights, trainable_weights, non_trainable_weights, inputs, outputs
num_classes = (2, 3)
num_samples = 100
input_dim = 50
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
self.assertEqual(model.name, 'test_model')
self.assertEqual(model.built, False)
self.assertEqual(len(model.weights), 0)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch([x1, x2], [y1, y2])
self.assertEqual(model.built, True)
self.assertEqual(len(model.layers), 4)
self.assertEqual(len(model.weights), 10)
self.assertEqual(len(model.trainable_weights), 8)
self.assertEqual(len(model.non_trainable_weights), 2)
def test_updates(self):
# test that updates get run during training
num_samples = 100
input_dim = 50
class BNNet(keras.Model):
def __init__(self):
super(BNNet, self).__init__()
self.bn = keras.layers.BatchNormalization(beta_initializer='ones',
gamma_initializer='ones')
def call(self, inputs):
return self.bn(inputs)
x = np.ones((num_samples, input_dim))
y = np.ones((num_samples, input_dim))
model = BNNet()
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
y_ref = model.predict(x)
model.train_on_batch(x, y)
y_new = model.predict(x)
self.assertGreater(np.sum(np.abs(y_ref - y_new)), 0.1)
def test_training_and_inference_behavior(self):
# test that dropout is applied in training and not inference
num_samples = 100
input_dim = 50
class DPNet(keras.Model):
def __init__(self):
super(DPNet, self).__init__()
self.dp = keras.layers.Dropout(0.5)
self.dense = keras.layers.Dense(1,
use_bias=False,
kernel_initializer='ones')
def call(self, inputs):
x = self.dp(inputs)
return self.dense(x)
model = DPNet()
x = np.ones((num_samples, input_dim))
y = model.predict(x)
self.assertEqual(np.sum(y), np.sum(x))
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
def test_training_methods(self):
# test fit, train_on_batch
# on different input types: list, dict
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
model.fit({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2},
epochs=2, batch_size=32)
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0,
validation_data=([x1, x2], [y1, y2]))
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.train_on_batch([x1, x2], [y1, y2])
model.train_on_batch({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2})
def test_inference_methods(self):
# test predict, evaluate, test_on_batch, predict_on_batch
# on different input types: list, dict
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.evaluate([x1, x2], [y1, y2])
model.test_on_batch([x1, x2], [y1, y2])
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
model.predict([x1, x2])
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
model.predict_on_batch([x1, x2])
def test_saving(self):
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
y_ref_1, y_ref_2 = model.predict([x1, x2])
tf_format_name = os.path.join(self.get_temp_dir(), 'ckpt')
model.save_weights(tf_format_name)
if h5py is not None:
hdf5_format_name = os.path.join(self.get_temp_dir(), 'weights.h5')
model.save_weights(hdf5_format_name)
model = model_util.get_multi_io_subclass_model(
num_classes=num_classes, use_bn=True)
if h5py is not None:
with self.assertRaises(ValueError):
model.load_weights(hdf5_format_name)
model.load_weights(tf_format_name)
y1, y2 = model.predict([x1, x2])
self.assertAllClose(y_ref_1, y1, atol=1e-5)
self.assertAllClose(y_ref_2, y2, atol=1e-5)
if h5py is not None:
model.load_weights(hdf5_format_name)
y1, y2 = model.predict([x1, x2])
self.assertAllClose(y_ref_1, y1, atol=1e-5)
self.assertAllClose(y_ref_2, y2, atol=1e-5)
def test_subclass_nested_in_subclass(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = model_util.NestedTestModel1(num_classes=num_classes)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
self.assertEqual(len(model.non_trainable_weights),
2 + len(model.test_net.non_trainable_weights))
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
def test_graph_nested_in_subclass(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = model_util.NestedTestModel2(num_classes=num_classes)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
self.assertEqual(len(model.non_trainable_weights),
2 + len(model.test_net.non_trainable_weights))
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
def test_subclass_nested_in_graph(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = model_util.get_nested_model_3(
input_dim=input_dim, num_classes=num_classes)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 16)
self.assertEqual(len(model.non_trainable_weights), 4)
self.assertEqual(len(model.trainable_weights), 12)
def test_subclass_nested_in_sequential(self):
num_classes = 2
num_samples = 100
input_dim = 50
class Inner(keras.Model):
def __init__(self):
super(Inner, self).__init__()
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='relu')
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
return self.bn(x)
model = keras.Sequential([Inner()])
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8)
self.assertEqual(len(model.non_trainable_weights), 2)
self.assertEqual(len(model.trainable_weights), 6)
def test_support_for_manual_training_arg(self):
# In most cases, the `training` argument is left unspecified, in which
# case it defaults to value corresponding to the Model method being used
# (fit -> True, predict -> False, etc).
# If the user writes their model `call` method to take
# an explicit `training` argument, we must check that the correct value
# is being passed to the model for each method call.
class DPNet(keras.Model):
def __init__(self):
super(DPNet, self).__init__()
self.dp = keras.layers.Dropout(0.5)
self.dense = keras.layers.Dense(1,
use_bias=False,
kernel_initializer='ones')
def call(self, inputs, training=False):
x = self.dp(inputs, training=training)
return self.dense(x)
model = DPNet()
x = np.ones((10, 10))
y = model.predict(x)
self.assertEqual(np.sum(y), np.sum(x))
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
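# --- Illustrative note (not part of the original test file) ------------------
# The pattern exercised above is the usual one for subclassed models: give
# call() an explicit `training` argument and forward it to layers whose
# behaviour differs between training and inference, e.g.
#
#   def call(self, inputs, training=False):
#       x = self.dp(inputs, training=training)
#       return self.dense(x)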
if __name__ == '__main__':
test.main()
| apache-2.0 | 2,048,425,381,344,486,400 | -4,582,148,599,206,025,000 | 31.785388 | 83 | 0.621866 | false |
zhengwsh/InplusTrader_Linux | InplusTrader/backtestEngine/api/ext.py | 1 | 1329 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from ..execution_context import ExecutionContext
from .api_base import instruments
def get_current_bar_dict():
bar_dict = ExecutionContext.get_current_bar_dict()
return bar_dict
def price_change(stock):
bar_dict = get_current_bar_dict()
return bar_dict[stock].close / bar_dict[stock].prev_close - 1
def symbol(order_book_id, split=", "):
if isinstance(order_book_id, six.string_types):
return "{}[{}]".format(order_book_id, instruments(order_book_id).symbol)
else:
s = split.join(symbol(item) for item in order_book_id)
return s
def now_time_str(str_format="%H:%M:%S"):
dt = ExecutionContext.get_current_trading_dt()
return dt.strftime(str_format)
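# --- Illustrative sketch (not part of the original module) -------------------
# Typical use inside a strategy callback, assuming the backtest engine has set
# up the execution context; the order book ids below are hypothetical.
def _ext_example():
    move = price_change('000001.XSHE')            # change vs. previous close
    label = symbol(['000001.XSHE', '600000.XSHG'])
    return "%s %s %.2f%%" % (now_time_str(), label, move * 100)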
| mit | 6,267,092,555,764,035,000 | 5,387,499,965,624,914,000 | 29.906977 | 80 | 0.708804 | false |
matt-jordan/mjmud | tests/game/commands/standard/test_standard_set.py | 1 | 2831 | #
# mjmud - The neverending MUD project
#
# Copyright (c) 2014, Matt Jordan
#
# See https://github.com/matt-jordan/mjmud for more information about the
# project. Please do not contact the maintainers of the project for information
# or assistance. The project uses Github for these purposes.
#
# This program is free software, distributed under the conditions of the MIT
# License (MIT). See the LICENSE file at the top of the source tree for
# details.
import unittest
from lib.commands.command_set import CommandParserError
from game.commands.standard.quit_command import QuitCommand
from game.commands.standard.say_command import SayCommand
from game.commands.standard.standard_set import StandardCommandSet, \
UnknownResponse
class TestStandardCommandSet(unittest.TestCase):
"""Test the StandardCommandSet"""
def test_parse_invalid_json(self):
"""Test providing an invalid JSON object to the command set"""
exception_raised = False
result = None
command_set = StandardCommandSet()
try:
result = command_set.parse(None)
except CommandParserError:
exception_raised = True
self.assertTrue(exception_raised)
self.assertIsNone(result)
def test_parse_no_command(self):
"""Test providing a valid JSON object that contains no command"""
exception_raised = False
result = None
command_set = StandardCommandSet()
try:
result = command_set.parse({})
except CommandParserError:
exception_raised = True
self.assertTrue(exception_raised)
self.assertIsNone(result)
def test_parse_unknown(self):
"""Test the parse function with an unknown command"""
exception_raised = False
result = None
command_set = StandardCommandSet()
try:
result = command_set.parse({'mudcommand': 'test_I_dont_exist'})
except CommandParserError:
exception_raised = True
self.assertTrue(exception_raised)
self.assertIsNone(result)
def test_parse_quit(self):
"""Test parsing a quit command"""
command_set = StandardCommandSet()
command = command_set.parse({'mudcommand': 'quit'})
self.assertTrue(isinstance(command, QuitCommand))
def test_parse_say(self):
"""Test parsing of a say command"""
command_set = StandardCommandSet()
command = command_set.parse({'mudcommand': 'say'})
self.assertTrue(isinstance(command, SayCommand))
command = command_set.parse({'mudcommand': 'say',
'text': 'hello there'})
self.assertTrue(isinstance(command, SayCommand))
self.assertEqual('hello there', command.text)
if __name__ == "__main__":
unittest.main()
| mit | 5,964,478,040,395,196,000 | 1,618,117,453,015,903,000 | 34.835443 | 79 | 0.658778 | false |
pselle/calibre | src/calibre/ptempfile.py | 17 | 7583 | from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
"""
Provides platform independent temporary files that persist even after
being closed.
"""
import tempfile, os, atexit
from future_builtins import map
from calibre.constants import (__version__, __appname__, filesystem_encoding,
get_unicode_windows_env_var, iswindows, get_windows_temp_path)
def cleanup(path):
try:
import os as oss
if oss.path.exists(path):
oss.remove(path)
except:
pass
_base_dir = None
def remove_dir(x):
try:
import shutil
shutil.rmtree(x, ignore_errors=True)
except:
pass
def determined_remove_dir(x):
for i in range(10):
try:
import shutil
shutil.rmtree(x)
return
except:
import os # noqa
if os.path.exists(x):
# In case some other program has one of the temp files open.
import time
time.sleep(0.1)
else:
return
try:
import shutil
shutil.rmtree(x, ignore_errors=True)
except:
pass
def app_prefix(prefix):
if iswindows:
return '%s_'%__appname__
return '%s_%s_%s'%(__appname__, __version__, prefix)
def reset_temp_folder_permissions():
# There are some broken windows installs where the permissions for the temp
# folder are set to not be executable, which means chdir() into temp
# folders fails. Try to fix that by resetting the permissions on the temp
# folder.
global _base_dir
if iswindows and _base_dir:
import subprocess
from calibre import prints
parent = os.path.dirname(_base_dir)
retcode = subprocess.Popen(['icacls.exe', parent, '/reset', '/Q', '/T']).wait()
prints('Trying to reset permissions of temp folder', parent, 'return code:', retcode)
def base_dir():
global _base_dir
if _base_dir is not None and not os.path.exists(_base_dir):
# Some people seem to think that running temp file cleaners that
# delete the temp dirs of running programs is a good idea!
_base_dir = None
if _base_dir is None:
td = os.environ.get('CALIBRE_WORKER_TEMP_DIR', None)
if td is not None:
import cPickle, binascii
try:
td = cPickle.loads(binascii.unhexlify(td))
except:
td = None
if td and os.path.exists(td):
_base_dir = td
else:
base = os.environ.get('CALIBRE_TEMP_DIR', None)
if base is not None and iswindows:
base = get_unicode_windows_env_var('CALIBRE_TEMP_DIR')
prefix = app_prefix(u'tmp_')
if base is None and iswindows:
# On windows, if the TMP env var points to a path that
# cannot be encoded using the mbcs encoding, then the
# python 2 tempfile algorithm for getting the temporary
# directory breaks. So we use the win32 api to get a
# unicode temp path instead. See
# https://bugs.launchpad.net/bugs/937389
base = get_windows_temp_path()
_base_dir = tempfile.mkdtemp(prefix=prefix, dir=base)
atexit.register(determined_remove_dir if iswindows else remove_dir, _base_dir)
try:
tempfile.gettempdir()
except:
                # Windows temp vars set to a path not encodable in mbcs
# Use our temp dir
tempfile.tempdir = _base_dir
return _base_dir
def reset_base_dir():
global _base_dir
_base_dir = None
base_dir()
def force_unicode(x):
# Cannot use the implementation in calibre.__init__ as it causes a circular
# dependency
if isinstance(x, bytes):
x = x.decode(filesystem_encoding)
return x
def _make_file(suffix, prefix, base):
suffix, prefix = map(force_unicode, (suffix, prefix))
return tempfile.mkstemp(suffix, prefix, dir=base)
def _make_dir(suffix, prefix, base):
suffix, prefix = map(force_unicode, (suffix, prefix))
return tempfile.mkdtemp(suffix, prefix, base)
class PersistentTemporaryFile(object):
"""
A file-like object that is a temporary file that is available even after being closed on
all platforms. It is automatically deleted on normal program termination.
"""
_file = None
def __init__(self, suffix="", prefix="", dir=None, mode='w+b'):
if prefix is None:
prefix = ""
if dir is None:
dir = base_dir()
fd, name = _make_file(suffix, prefix, dir)
self._file = os.fdopen(fd, mode)
self._name = name
self._fd = fd
atexit.register(cleanup, name)
def __getattr__(self, name):
if name == 'name':
return self.__dict__['_name']
return getattr(self.__dict__['_file'], name)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __del__(self):
try:
self.close()
except:
pass
def PersistentTemporaryDirectory(suffix='', prefix='', dir=None):
'''
Return the path to a newly created temporary directory that will
be automatically deleted on application exit.
'''
if dir is None:
dir = base_dir()
tdir = _make_dir(suffix, prefix, dir)
atexit.register(remove_dir, tdir)
return tdir
class TemporaryDirectory(object):
'''
A temporary directory to be used in a with statement.
'''
def __init__(self, suffix='', prefix='', dir=None, keep=False):
self.suffix = suffix
self.prefix = prefix
if dir is None:
dir = base_dir()
self.dir = dir
self.keep = keep
def __enter__(self):
if not hasattr(self, 'tdir'):
self.tdir = _make_dir(self.suffix, self.prefix, self.dir)
return self.tdir
def __exit__(self, *args):
if not self.keep and os.path.exists(self.tdir):
remove_dir(self.tdir)
class TemporaryFile(object):
def __init__(self, suffix="", prefix="", dir=None, mode='w+b'):
if prefix is None:
prefix = ''
if suffix is None:
suffix = ''
if dir is None:
dir = base_dir()
self.prefix, self.suffix, self.dir, self.mode = prefix, suffix, dir, mode
self._file = None
def __enter__(self):
fd, name = _make_file(self.suffix, self.prefix, self.dir)
self._file = os.fdopen(fd, self.mode)
self._name = name
self._file.close()
return name
def __exit__(self, *args):
cleanup(self._name)
class SpooledTemporaryFile(tempfile.SpooledTemporaryFile):
def __init__(self, max_size=0, suffix="", prefix="", dir=None, mode='w+b',
bufsize=-1):
if prefix is None:
prefix = ''
if suffix is None:
suffix = ''
if dir is None:
dir = base_dir()
tempfile.SpooledTemporaryFile.__init__(self, max_size=max_size,
suffix=suffix, prefix=prefix, dir=dir, mode=mode,
bufsize=bufsize)
def truncate(self, *args):
# The stdlib SpooledTemporaryFile implementation of truncate() doesn't
# allow specifying a size.
self._file.truncate(*args)
def better_mktemp(*args, **kwargs):
fd, path = tempfile.mkstemp(*args, **kwargs)
os.close(fd)
return path
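# --- Illustrative sketch (not part of the original module) -------------------
# Minimal usage of the helpers above; everything created this way lives under
# base_dir() and is cleaned up automatically.
def _ptempfile_example():
    pt = PersistentTemporaryFile(suffix='.txt')
    pt.write(b'scratch data')
    pt.close()            # contents stay on disk; removed at interpreter exit
    with TemporaryDirectory('_demo') as tdir:
        # tdir exists inside the block and is deleted when the block exits
        pass
    return pt.name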
| gpl-3.0 | 2,246,489,695,610,548,000 | 3,727,125,757,190,165,000 | 29.332 | 93 | 0.579586 | false |
mogoweb/webkit_for_android5.1 | webkit/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py | 15 | 3297 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""""Tests code paths not covered by the regular unit tests."""
import unittest
from webkitpy.layout_tests.layout_package.test_failures import *
class Test(unittest.TestCase):
def assert_loads(self, cls):
failure_obj = cls()
s = failure_obj.dumps()
new_failure_obj = TestFailure.loads(s)
self.assertTrue(isinstance(new_failure_obj, cls))
self.assertEqual(failure_obj, new_failure_obj)
# Also test that != is implemented.
self.assertFalse(failure_obj != new_failure_obj)
def test_crash(self):
FailureCrash()
def test_hash_incorrect(self):
FailureImageHashIncorrect()
def test_missing(self):
FailureMissingResult()
def test_missing_image(self):
FailureMissingImage()
def test_missing_image_hash(self):
FailureMissingImageHash()
def test_timeout(self):
FailureTimeout()
def test_unknown_failure_type(self):
class UnknownFailure(TestFailure):
pass
failure_obj = UnknownFailure()
self.assertRaises(ValueError, determine_result_type, [failure_obj])
self.assertRaises(NotImplementedError, failure_obj.message)
def test_loads(self):
for c in ALL_FAILURE_CLASSES:
self.assert_loads(c)
def test_equals(self):
self.assertEqual(FailureCrash(), FailureCrash())
self.assertNotEqual(FailureCrash(), FailureTimeout())
crash_set = set([FailureCrash(), FailureCrash()])
self.assertEqual(len(crash_set), 1)
# The hash happens to be the name of the class, but sets still work:
crash_set = set([FailureCrash(), "FailureCrash"])
self.assertEqual(len(crash_set), 2)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 2,352,475,732,404,601,300 | 7,467,626,008,995,357,000 | 36.465909 | 76 | 0.710039 | false |
javier-ruiz-b/docker-rasppi-images | raspberry-google-home/env/lib/python3.7/site-packages/requests_oauthlib/compliance_fixes/facebook.py | 6 | 1119 | from json import dumps
try:
from urlparse import parse_qsl
except ImportError:
from urllib.parse import parse_qsl
from oauthlib.common import to_unicode
def facebook_compliance_fix(session):
def _compliance_fix(r):
# if Facebook claims to be sending us json, let's trust them.
if "application/json" in r.headers.get("content-type", {}):
return r
# Facebook returns a content-type of text/plain when sending their
# x-www-form-urlencoded responses, along with a 200. If not, let's
# assume we're getting JSON and bail on the fix.
if "text/plain" in r.headers.get("content-type", {}) and r.status_code == 200:
token = dict(parse_qsl(r.text, keep_blank_values=True))
else:
return r
expires = token.get("expires")
if expires is not None:
token["expires_in"] = expires
token["token_type"] = "Bearer"
r._content = to_unicode(dumps(token)).encode("UTF-8")
return r
session.register_compliance_hook("access_token_response", _compliance_fix)
return session
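# --- Illustrative sketch (not part of the original module) -------------------
# The fix is normally applied to an OAuth2Session before the token exchange;
# the client id and redirect URI below are placeholders.
#
#   from requests_oauthlib import OAuth2Session
#   facebook = OAuth2Session('client-id', redirect_uri='https://example.com/cb')
#   facebook = facebook_compliance_fix(facebook)
#   # fetch_token() can then parse Facebook's text/plain token response.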
| apache-2.0 | -7,274,634,059,334,385,000 | -3,783,300,789,121,122,300 | 32.909091 | 86 | 0.635389 | false |
mkrupcale/ansible | hacking/metadata-tool.py | 14 | 20756 | #!/usr/bin/env python
import ast
import csv
import os
import sys
from collections import defaultdict
from distutils.version import StrictVersion
from pprint import pformat, pprint
import yaml
from ansible.module_utils._text import to_text
from ansible.plugins import module_loader
# There are a few files that are not new-style modules, so we have to blacklist them
NONMODULE_PY_FILES = frozenset(('async_wrapper.py',))
NONMODULE_MODULE_NAMES = frozenset(os.path.splitext(p)[0] for p in NONMODULE_PY_FILES)
# Default metadata
DEFAULT_METADATA = {'version': '1.0', 'status': ['preview'], 'supported_by':'community'}
class ParseError(Exception):
"""Thrown when parsing a file fails"""
pass
class MissingModuleError(Exception):
"""Thrown when unable to find a plugin"""
pass
def usage():
print("""Usage:
 metadata-tool.py report [--version X]
 metadata-tool.py add [--version X] [--overwrite] CSVFILE
 metadata-tool.py add-default [--version X] [--overwrite]""")
sys.exit(1)
def parse_args(arg_string):
if len(arg_string) < 1:
usage()
action = arg_string[0]
version = None
if '--version' in arg_string:
version_location = arg_string.index('--version')
arg_string.pop(version_location)
version = arg_string.pop(version_location)
overwrite = False
if '--overwrite' in arg_string:
overwrite = True
arg_string.remove('--overwrite')
csvfile = None
if len(arg_string) == 2:
csvfile = arg_string[1]
elif len(arg_string) > 2:
usage()
return action, {'version': version, 'overwrite': overwrite, 'csvfile': csvfile}
def seek_end_of_dict(module_data, start_line, start_col, next_node_line, next_node_col):
"""Look for the end of a dict in a set of lines
We know the starting position of the dict and we know the start of the
next code node but in between there may be multiple newlines and comments.
There may also be multiple python statements on the same line (separated
by semicolons)
Examples::
ANSIBLE_METADATA = {[..]}
DOCUMENTATION = [..]
ANSIBLE_METADATA = {[..]} # Optional comments with confusing junk => {}
# Optional comments {}
DOCUMENTATION = [..]
ANSIBLE_METADATA = {
[..]
}
# Optional comments {}
DOCUMENTATION = [..]
ANSIBLE_METADATA = {[..]} ; DOCUMENTATION = [..]
ANSIBLE_METADATA = {}EOF
"""
    if next_node_line is None:
# The dict is the last statement in the file
snippet = module_data.splitlines()[start_line:]
next_node_col = 0
# Include the last line in the file
last_line_offset = 0
else:
# It's somewhere in the middle so we need to separate it from the rest
snippet = module_data.splitlines()[start_line:next_node_line]
# Do not include the last line because that's where the next node
# starts
last_line_offset = 1
if next_node_col == 0:
# This handles all variants where there are only comments and blank
# lines between the dict and the next code node
# Step backwards through all the lines in the snippet
for line_idx, line in tuple(reversed(tuple(enumerate(snippet))))[last_line_offset:]:
end_col = None
# Step backwards through all the characters in the line
for col_idx, char in reversed(tuple(enumerate(c for c in line))):
if char == '}' and end_col is None:
# Potentially found the end of the dict
end_col = col_idx
elif char == '#' and end_col is not None:
# The previous '}' was part of a comment. Keep trying
end_col = None
if end_col is not None:
# Found the end!
end_line = start_line + line_idx
break
else:
# Harder cases involving multiple statements on one line
# Good Ansible Module style doesn't do this so we're just going to
# treat this as an error for now:
raise ParseError('Multiple statements per line confuses the module metadata parser.')
return end_line, end_col
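# Worked example (illustrative only): for module text such as
#
#   ANSIBLE_METADATA = {'status': ['preview']}
#   DOCUMENTATION = '''...'''
#
# extract_metadata() below passes in the 0-based line of the dict and the
# 1-based line of the next node, and the backwards scan above returns the
# 0-based line and column of the closing '}', skipping any '}' that only
# appears inside a trailing comment.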
def seek_end_of_string(module_data, start_line, start_col, next_node_line, next_node_col):
"""
This is much trickier than finding the end of a dict. A dict has only one
ending character, "}". Strings have four potential ending characters. We
have to parse the beginning of the string to determine what the ending
character will be.
Examples:
ANSIBLE_METADATA = '''[..]''' # Optional comment with confusing chars '''
# Optional comment with confusing chars '''
DOCUMENTATION = [..]
ANSIBLE_METADATA = '''
[..]
'''
DOCUMENTATIONS = [..]
ANSIBLE_METADATA = '''[..]''' ; DOCUMENTATION = [..]
SHORT_NAME = ANSIBLE_METADATA = '''[..]''' ; DOCUMENTATION = [..]
String marker variants:
* '[..]'
* "[..]"
* '''[..]'''
* \"\"\"[..]\"\"\"
Each of these come in u, r, and b variants:
* '[..]'
* u'[..]'
* b'[..]'
* r'[..]'
* ur'[..]'
* ru'[..]'
* br'[..]'
* b'[..]'
* rb'[..]'
"""
raise NotImplementedError('Finding end of string not yet implemented')
def extract_metadata(module_data):
"""Extract the metadata from a module
:arg module_data: Byte string containing a module's code
:returns: a tuple of metadata (a dict), line the metadata starts on,
column the metadata starts on, line the metadata ends on, column the
metadata ends on, and the names the metadata is assigned to. One of
        the names the metadata is assigned to will be ANSIBLE_METADATA. If no
metadata is found, the tuple will be (None, -1, -1, -1, -1, None)
"""
metadata = None
start_line = -1
start_col = -1
end_line = -1
end_col = -1
targets = None
mod_ast_tree = ast.parse(module_data)
for root_idx, child in enumerate(mod_ast_tree.body):
if isinstance(child, ast.Assign):
for target in child.targets:
if target.id == 'ANSIBLE_METADATA':
if isinstance(child.value, ast.Dict):
metadata = ast.literal_eval(child.value)
try:
# Determine where the next node starts
next_node = mod_ast_tree.body[root_idx+1]
next_lineno = next_node.lineno
next_col_offset = next_node.col_offset
except IndexError:
# Metadata is defined in the last node of the file
next_lineno = None
next_col_offset = None
# Determine where the current metadata ends
end_line, end_col = seek_end_of_dict(module_data,
child.lineno - 1, child.col_offset, next_lineno,
next_col_offset)
elif isinstance(child.value, ast.Str):
metadata = yaml.safe_load(child.value.s)
end_line = seek_end_of_string(module_data)
elif isinstance(child.value, ast.Bytes):
metadata = yaml.safe_load(to_text(child.value.s, errors='surrogate_or_strict'))
end_line = seek_end_of_string(module_data)
else:
# Example:
# ANSIBLE_METADATA = 'junk'
# ANSIBLE_METADATA = { [..the real metadata..] }
continue
# Do these after the if-else so we don't pollute them in
# case this was a false positive
start_line = child.lineno - 1
start_col = child.col_offset
targets = [t.id for t in child.targets]
break
if metadata is not None:
# Once we've found the metadata we're done
break
return metadata, start_line, start_col, end_line, end_col, targets
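# Illustrative helper (not part of the original tool; the default filename is
# hypothetical) showing how extract_metadata() is typically driven:
def _example_extract_metadata(filename='library/ping.py'):
    with open(filename, 'rb') as f:
        module_data = f.read()
    metadata, start_line, start_col, end_line, end_col, targets = extract_metadata(module_data)
    # metadata is the ANSIBLE_METADATA dict (or None); the line/column pairs
    # bound the assignment in the file and targets lists the assigned names.
    return metadata, targets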
def find_documentation(module_data):
"""Find the DOCUMENTATION metadata for a module file"""
start_line = -1
mod_ast_tree = ast.parse(module_data)
for child in mod_ast_tree.body:
if isinstance(child, ast.Assign):
for target in child.targets:
if target.id == 'DOCUMENTATION':
start_line = child.lineno - 1
break
return start_line
def remove_metadata(module_data, start_line, start_col, end_line, end_col):
"""Remove a section of a module file"""
lines = module_data.split('\n')
new_lines = lines[:start_line]
if start_col != 0:
new_lines.append(lines[start_line][:start_col])
next_line = lines[end_line]
if len(next_line) - 1 != end_col:
new_lines.append(next_line[end_col:])
if len(lines) > end_line:
new_lines.extend(lines[end_line + 1:])
return '\n'.join(new_lines)
def insert_metadata(module_data, new_metadata, insertion_line, targets=('ANSIBLE_METADATA',)):
"""Insert a new set of metadata at a specified line"""
assignments = ' = '.join(targets)
pretty_metadata = pformat(new_metadata, width=1).split('\n')
new_lines = []
new_lines.append('{} = {}'.format(assignments, pretty_metadata[0]))
if len(pretty_metadata) > 1:
for line in pretty_metadata[1:]:
new_lines.append('{}{}'.format(' ' * (len(assignments) - 1 + len(' = {')), line))
old_lines = module_data.split('\n')
lines = old_lines[:insertion_line] + new_lines + [''] + old_lines[insertion_line:]
return '\n'.join(lines)
def parse_assigned_metadata_initial(csvfile):
"""
Fields:
:0: Module name
:1: Core (x if so)
:2: Extras (x if so)
:3: Category
:4: Supported/SLA
:5: Committer
:6: Stable
:7: Deprecated
:8: Notes
:9: Team Notes
:10: Notes 2
:11: final supported_by field
"""
with open(csvfile, 'rb') as f:
for record in csv.reader(f):
module = record[0]
if record[12] == 'core':
supported_by = 'core'
elif record[12] == 'curated':
supported_by = 'committer'
elif record[12] == 'community':
supported_by = 'community'
else:
print('Module %s has no supported_by field. Using community' % record[0])
supported_by = 'community'
supported_by = DEFAULT_METADATA['supported_by']
status = []
if record[6]:
status.append('stableinterface')
if record[7]:
status.append('deprecated')
if not status:
status.extend(DEFAULT_METADATA['status'])
yield (module, {'version': DEFAULT_METADATA['version'], 'supported_by': supported_by, 'status': status})
def parse_assigned_metadata(csvfile):
"""
Fields:
:0: Module name
:1: supported_by string. One of the valid support fields
core, community, unmaintained, committer
:2: stableinterface
:3: preview
:4: deprecated
:5: removed
:6: tested
https://github.com/ansible/proposals/issues/30
"""
with open(csvfile, 'rb') as f:
for record in csv.reader(f):
module = record[0]
supported_by = record[1]
status = []
if record[2]:
status.append('stableinterface')
if record[4]:
status.append('deprecated')
if record[5]:
status.append('removed')
if record[6]:
status.append('tested')
if not status or record[3]:
status.append('preview')
yield (module, {'version': '1.0', 'supported_by': supported_by, 'status': status})
def write_metadata(filename, new_metadata, version=None, overwrite=False):
with open(filename, 'rb') as f:
module_data = f.read()
try:
current_metadata, start_line, start_col, end_line, end_col, targets = extract_metadata(module_data)
except SyntaxError:
if filename.endswith('.py'):
raise
# Probably non-python modules. These should all have python
# documentation files where we can place the data
raise ParseError('Could not add metadata to {}'.format(filename))
if current_metadata is None:
        # No current metadata, so we can just add it
start_line = find_documentation(module_data)
if start_line < 0:
if os.path.basename(filename) in NONMODULE_PY_FILES:
# These aren't new-style modules
return
raise Exception('Module file {} had no ANSIBLE_METADATA or DOCUMENTATION'.format(filename))
module_data = insert_metadata(module_data, new_metadata, start_line, targets=('ANSIBLE_METADATA',))
elif overwrite or (version is not None and ('version' not in current_metadata or StrictVersion(current_metadata['version']) < StrictVersion(version))):
# Current metadata that we do not want. Remove the current
# metadata and put the new version in its place
module_data = remove_metadata(module_data, start_line, start_col, end_line, end_col)
module_data = insert_metadata(module_data, new_metadata, start_line, targets=targets)
else:
# Current metadata and we don't want to overwrite it
return
# Save the new version of the module
with open(filename, 'wb') as f:
f.write(module_data)
def return_metadata(plugins):
metadata = {}
for name, filename in plugins:
# There may be several files for a module (if it is written in another
# language, for instance) but only one of them (the .py file) should
# contain the metadata.
if name not in metadata or metadata[name] is not None:
with open(filename, 'rb') as f:
module_data = f.read()
metadata[name] = extract_metadata(module_data)[0]
return metadata
def metadata_summary(plugins, version=None):
"""Compile information about the metadata status for a list of modules
:arg plugins: List of plugins to look for. Each entry in the list is
a tuple of (module name, full path to module)
:kwarg version: If given, make sure the modules have this version of
metadata or higher.
:returns: A tuple consisting of a list of modules with no metadata at the
required version and a list of files that have metadata at the
required version.
"""
no_metadata = {}
has_metadata = {}
supported_by = defaultdict(set)
status = defaultdict(set)
plugins = list(plugins)
all_mods_metadata = return_metadata(plugins)
for name, filename in plugins:
# Does the module have metadata?
if name not in no_metadata and name not in has_metadata:
metadata = all_mods_metadata[name]
if metadata is None:
no_metadata[name] = filename
elif version is not None and ('version' not in metadata or StrictVersion(metadata['version']) < StrictVersion(version)):
no_metadata[name] = filename
else:
has_metadata[name] = filename
# What categories does the plugin belong in?
if all_mods_metadata[name] is None:
# No metadata for this module. Use the default metadata
supported_by[DEFAULT_METADATA['supported_by']].add(filename)
status[DEFAULT_METADATA['status'][0]].add(filename)
else:
supported_by[all_mods_metadata[name]['supported_by']].add(filename)
for one_status in all_mods_metadata[name]['status']:
status[one_status].add(filename)
return list(no_metadata.values()), list(has_metadata.values()), supported_by, status
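# Illustrative use (hypothetical snippet mirroring what report() does below):
#
#   plugins = [(os.path.splitext(os.path.basename(p))[0], p)
#              for p in module_loader.all(path_only=True)]
#   no_meta, has_meta, support, status = metadata_summary(plugins, version='1.0')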
#
# Subcommands
#
def add_from_csv(csv_file, version=None, overwrite=False):
"""Implement the subcommand to add metadata from a csv file
"""
# Add metadata for everything from the CSV file
diagnostic_messages = []
for module_name, new_metadata in parse_assigned_metadata_initial(csv_file):
filename = module_loader.find_plugin(module_name, mod_type='.py')
if filename is None:
diagnostic_messages.append('Unable to find the module file for {}'.format(module_name))
continue
try:
write_metadata(filename, new_metadata, version, overwrite)
except ParseError as e:
diagnostic_messages.append(e.args[0])
continue
if diagnostic_messages:
pprint(diagnostic_messages)
return 0
def add_default(version=None, overwrite=False):
"""Implement the subcommand to add default metadata to modules
Add the default metadata to any plugin which lacks it.
:kwarg version: If given, the metadata must be at least this version.
Otherwise, treat the module as not having existing metadata.
:kwarg overwrite: If True, overwrite any existing metadata. Otherwise,
do not modify files which have metadata at an appropriate version
"""
# List of all plugins
plugins = module_loader.all(path_only=True)
plugins = ((os.path.splitext((os.path.basename(p)))[0], p) for p in plugins)
plugins = (p for p in plugins if p[0] not in NONMODULE_MODULE_NAMES)
# Iterate through each plugin
processed = set()
diagnostic_messages = []
for name, filename in (info for info in plugins if info[0] not in processed):
try:
write_metadata(filename, DEFAULT_METADATA, version, overwrite)
except ParseError as e:
diagnostic_messages.append(e.args[0])
continue
processed.add(name)
if diagnostic_messages:
pprint(diagnostic_messages)
return 0
def report(version=None):
"""Implement the report subcommand
Print out all the modules that have metadata and all the ones that do not.
:kwarg version: If given, the metadata must be at least this version.
Otherwise return it as not having metadata
"""
# List of all plugins
plugins = module_loader.all(path_only=True)
plugins = list(plugins)
plugins = ((os.path.splitext((os.path.basename(p)))[0], p) for p in plugins)
    plugins = (p for p in plugins if p[0] not in NONMODULE_MODULE_NAMES)
plugins = list(plugins)
no_metadata, has_metadata, support, status = metadata_summary(plugins, version=version)
print('== Has metadata ==')
pprint(sorted(has_metadata))
print('')
print('== Has no metadata ==')
pprint(sorted(no_metadata))
print('')
print('== Supported by core ==')
pprint(sorted(support['core']))
print('== Supported by committers ==')
pprint(sorted(support['committer']))
print('== Supported by community ==')
pprint(sorted(support['community']))
print('')
print('== Status: stableinterface ==')
pprint(sorted(status['stableinterface']))
print('== Status: preview ==')
pprint(sorted(status['preview']))
print('== Status: deprecated ==')
pprint(sorted(status['deprecated']))
print('== Status: removed ==')
pprint(sorted(status['removed']))
print('')
print('== Summary ==')
print('No Metadata: {0} Has Metadata: {1}'.format(len(no_metadata), len(has_metadata)))
print('Supported by core: {0} Supported by community: {1} Supported by committer: {2}'.format(len(support['core']), len(support['community']), len(support['committer'])))
print('Status StableInterface: {0} Status Preview: {1} Status Deprecated: {2} Status Removed: {3}'.format(len(status['stableinterface']), len(status['preview']), len(status['deprecated']), len(status['removed'])))
return 0
if __name__ == '__main__':
action, args = parse_args(sys.argv[1:])
### TODO: Implement upgrade metadata and upgrade metadata from csvfile
if action == 'report':
rc = report(version=args['version'])
elif action == 'add':
rc = add_from_csv(args['csvfile'], version=args['version'], overwrite=args['overwrite'])
elif action == 'add-default':
rc = add_default(version=args['version'], overwrite=args['overwrite'])
sys.exit(rc)
| gpl-3.0 | -3,802,879,877,318,531,000 | -1,368,442,100,433,437,700 | 34.724613 | 233 | 0.591347 | false |
yavalvas/yav_com | build/matplotlib/doc/mpl_toolkits/axes_grid/examples/demo_parasite_axes2.py | 16 | 1208 | from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib.pyplot as plt
if 1:
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
par2 = host.twinx()
offset = 60
new_fixed_axis = par2.get_grid_helper().new_fixed_axis
par2.axis["right"] = new_fixed_axis(loc="right",
axes=par2,
offset=(offset, 0))
par2.axis["right"].toggle(all=True)
host.set_xlim(0, 2)
host.set_ylim(0, 2)
host.set_xlabel("Distance")
host.set_ylabel("Density")
par1.set_ylabel("Temperature")
par2.set_ylabel("Velocity")
p1, = host.plot([0, 1, 2], [0, 1, 2], label="Density")
p2, = par1.plot([0, 1, 2], [0, 3, 2], label="Temperature")
p3, = par2.plot([0, 1, 2], [50, 30, 15], label="Velocity")
par1.set_ylim(0, 4)
par2.set_ylim(1, 65)
host.legend()
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
par2.axis["right"].label.set_color(p3.get_color())
plt.draw()
plt.show()
#plt.savefig("Test")
| mit | -4,580,448,455,276,228,000 | -1,533,855,316,489,137,200 | 24.702128 | 62 | 0.57202 | false |
fheeger/gost | PyQtTachyEmulator.py | 1 | 14872 | import sys, random, time
from glob import glob
import numpy
from PyQt5.QtCore import QObject, QTimer, QIODevice
from PyQt5.QtWidgets import *
from PyQt5 import QtSerialPort
class TachyConnection(QObject):
def __init__(self, dev=None, baut=4800, lineend="\r\n", timeout=3, log=sys.stderr):
super(TachyConnection, self).__init__()
self.log = log
self.lineend = lineend
if dev is None:
self.port = None
else:
            self.connect(dev, baut)
self.buffer = ""
self.lineBuffer = []
self.timeout = timeout
def connect(self, dev, baut=4800):
self.port = QtSerialPort.QSerialPort(dev)
self.port.open(QIODevice.ReadWrite)
self.port.setBaudRate(baut)
def readline(self):
if self.port is None:
raise NotConnectedError
if self.port.waitForReadyRead(self.timeout*1000):
line = self.port.readLine().decode("ascii")
self.log.write("READ LINE: %s" % line)
return line
raise TimeoutError("time out while reading line")
def readLines(self, n=2):
self.buffer += bytes(self.port.readAll()).decode("ascii")
#print("addinf data to buffer: %s" % repr(self.buffer))
pos = self.buffer.find(self.lineend)
while pos > 0:
self.lineBuffer.append(self.buffer[:pos])
print("adding data to line buffer: %s" % repr(self.buffer[:pos]))
self.buffer = self.buffer[pos+len(self.lineend):]
pos = self.buffer.find(self.lineend)
if len(self.lineBuffer) == n:
tmp = self.lineBuffer
self.lineBuffer = []
print("returning: %s" % tmp)
return tmp
return None
def write(self, data):
if self.port is None:
raise NotConnectedError
self.log.write("WRITE: %s\n" % repr(data))
self.port.write(bytes(data, "ascii"))
self.port.flush()
if not self.port.waitForBytesWritten(self.timeout*1000):
raise TimeoutError("time out while writing")
def read(self, bytes=1, timeout=None):
if self.port is None:
raise NotConnectedError
        if timeout is not None:
self.port.timeout = timeout
if self.port.waitForReadyRead(self.timeout*1000):
data = self.port.read(bytes).decode("ascii")
self.log.write("READ: %s\n" % data)
return data
raise TimeoutError("time out while reading")
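# Minimal usage sketch (assumes a free serial device name such as "COM3"; the
# GET/WI21 payload mirrors the GSI-style strings handled by processData below
# and is only illustrative):
#
#   conn = TachyConnection()
#   conn.connect("COM3", 4800)
#   conn.write("GET/I/WI21\r\n")
#   reply = conn.readline()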
class MeassureWindow(QDialog):
def __init__(self, parent):
super(MeassureWindow, self).__init__(parent)
self.xField = QLineEdit()
self.xField.setText("0")
self.yField = QLineEdit()
self.yField.setText("0")
self.zField = QLineEdit()
self.zField.setText("0")
self.hzField = QLineEdit()
self.hzField.setText("0")
self.vertField = QLineEdit()
self.vertField.setText("0")
self.distField = QLineEdit()
self.distField.setText("0")
mainLayout = QGridLayout()
mainLayout.addWidget(QLabel("x:"), 0, 0)
mainLayout.addWidget(self.xField , 0, 1)
mainLayout.addWidget(QLabel("y:"), 1, 0)
mainLayout.addWidget(self.yField, 1, 1)
mainLayout.addWidget(QLabel("z:"), 2, 0)
mainLayout.addWidget(self.zField, 2, 1)
mainLayout.addWidget(QLabel("horizontal Angle:"), 3, 0)
mainLayout.addWidget(self.hzField, 3, 1)
mainLayout.addWidget(QLabel("vertical Angle:"), 4, 0)
mainLayout.addWidget(self.vertField, 4, 1)
mainLayout.addWidget(QLabel("Distance:"), 5, 0)
mainLayout.addWidget(self.distField, 5, 1)
self.okButton = QPushButton("Ok")
self.cancleButton = QPushButton("Cancel")
mainLayout.addWidget(self.okButton)
mainLayout.addWidget(self.cancleButton)
self.setLayout(mainLayout)
self.setWindowTitle("Meassure Point")
self.okButton.clicked.connect(self.accept)
self.cancleButton.clicked.connect(self.reject)
def accept(self):
self.parent().anyPoint(float(self.xField.text()),
float(self.yField.text()),
float(self.zField.text()),
float(self.hzField.text()),
float(self.vertField.text()),
float(self.distField.text())
)
super(MeassureWindow, self).accept()
class RandomCircleWindow(QDialog):
def __init__(self, parent):
super(RandomCircleWindow, self).__init__(parent)
self.xField = QLineEdit()
self.xField.setText("0")
self.yField = QLineEdit()
self.yField.setText("0")
self.zField = QLineEdit()
self.zField.setText("0")
self.rField = QLineEdit()
self.rField.setText("3")
self.nField = QLineEdit()
self.nField.setText("20")
self.hField = QLineEdit()
self.hField.setText("0")
self.meassureButton = QPushButton("Meassure")
mainLayout = QGridLayout()
mainLayout.addWidget(QLabel("Circle center x:"), 0, 0)
mainLayout.addWidget(self.xField , 0, 1)
mainLayout.addWidget(QLabel("Circle center y:"), 1, 0)
mainLayout.addWidget(self.yField , 1, 1)
mainLayout.addWidget(QLabel("Circle center z:"), 2, 0)
mainLayout.addWidget(self.zField , 2, 1)
mainLayout.addWidget(QLabel("Circle radius:"), 3, 0)
mainLayout.addWidget(self.rField , 3, 1)
mainLayout.addWidget(QLabel("Number of points:"), 4, 0)
mainLayout.addWidget(self.nField , 4, 1)
mainLayout.addWidget(QLabel("Circle height:"), 5, 0)
mainLayout.addWidget(self.hField , 5, 1)
self.okButton = QPushButton("Ok")
self.cancleButton = QPushButton("Cancel")
mainLayout.addWidget(self.okButton)
mainLayout.addWidget(self.cancleButton)
self.setLayout(mainLayout)
self.okButton.clicked.connect(self.accept)
self.cancleButton.clicked.connect(self.reject)
def accept(self):
x = float(self.xField.text())
y = float(self.yField.text())
z = float(self.zField.text())
r = float(self.rField.text())
n = int(self.nField.text())
h = float(self.hField.text())
self.measureRandomPolyCircle(x,y,z,r,n,h)
super(RandomCircleWindow, self).accept()
def measureRandomPolyCircle(self, x0=0, y0=0, z0=0, r=3, n=20, h=2):
angles = []
for i in range(n):
angles.append(random.uniform(0, 2*numpy.pi))
angles.sort()
for a in angles:
x = x0 + r*numpy.cos(a)
y = y0 + r*numpy.sin(a)
z = z0 + random.uniform(0, h)
self.parentWidget().anyPoint(x, y, z, a, 0, r)
time.sleep(0.5)
class TachyEmulator(QWidget):
def __init__(self, dev, parent=None):
super(TachyEmulator, self).__init__(parent)
self.x = 1.0
self.y = 2.0
self.z = 3.0
self.hzAngle = 4.0
self.vertAngle = 0.0
self.instrumentHeight = 1.7
self.reflectorHeight = 1.5
self.ptNr = 0
self.selectPort = QComboBox(self)
for port in self.avail_ports():
self.selectPort.addItem(port)
#display current state
self.xLabel = QLabel("")
self.yLabel = QLabel("")
self.zLabel = QLabel("")
self.hzAngleLabel = QLabel("")
self.vertAngleLabel = QLabel("")
self.reflectorHeightLabel = QLabel("")
self.instrumentHeightLabel = QLabel("")
stateLayout = QGridLayout()
stateLayout.addWidget(QLabel("x:"), 0, 0)
stateLayout.addWidget(self.xLabel, 0, 1)
stateLayout.addWidget(QLabel("y:"), 1, 0)
stateLayout.addWidget(self.yLabel, 1, 1)
stateLayout.addWidget(QLabel("z:"), 2, 0)
stateLayout.addWidget(self.zLabel, 2, 1)
stateLayout.addWidget(QLabel("horizontal Angle:"), 3, 0)
stateLayout.addWidget(self.hzAngleLabel, 3, 1)
stateLayout.addWidget(QLabel("vertical Angle:"), 4, 0)
stateLayout.addWidget(self.vertAngleLabel, 4, 1)
stateLayout.addWidget(QLabel("reflector Height:"), 5, 0)
stateLayout.addWidget(self.reflectorHeightLabel, 5, 1)
stateLayout.addWidget(QLabel("instrument Height:"), 6, 0)
stateLayout.addWidget(self.instrumentHeightLabel, 6, 1)
        self.meassureButton = QPushButton("Measure Point")
        self.circleButton = QPushButton("Measure random circle")
self.meassureButton.setEnabled(False)
self.circleButton.setEnabled(False)
mainLayout = QVBoxLayout()
mainLayout.addWidget(self.selectPort)
mainLayout.addLayout(stateLayout)
mainLayout.addWidget(self.meassureButton)
mainLayout.addWidget(self.circleButton)
self.setLayout(mainLayout)
self.setWindowTitle("Tachy Emulator")
self.updateStateDisplay()
self.connection = TachyConnection()
self.meassureButton.clicked.connect(self.meassurePoint)
self.circleButton.clicked.connect(self.measureRandomPolyCircle)
self.selectPort.activated[str].connect(self.connect)
def updateStateDisplay(self):
self.xLabel.setText(str(self.x))
self.yLabel.setText(str(self.y))
self.zLabel.setText(str(self.z))
self.hzAngleLabel.setText(str(self.hzAngle))
self.vertAngleLabel.setText(str(self.vertAngle))
self.reflectorHeightLabel.setText(str(self.reflectorHeight))
self.instrumentHeightLabel.setText(str(self.instrumentHeight))
def processData(self):
print("processing data")
data = self.connection.readLines(1)[0]
print(data)
if not data is None:
comArr = data.strip().split("/")
if comArr[0] == "GET":
if comArr[2] == "WI21":
self.connection.write("*21.322%0+17.d\r\n" % (self.hzAngle * 10**5))
elif comArr[2] == "WI84":
self.connection.write("*84.322%0+17.d\r\n" % (self.x * 10**3))
elif comArr[2] == "WI85":
self.connection.write("*85.322%0+17.d\r\n" % (self.y * 10**3))
elif comArr[2] == "WI86":
self.connection.write("*86.322%0+17.d\r\n" % (self.z * 10**3))
elif comArr[2] == "WI87":
self.connection.write("*87.322%0+17.d\r\n" % (self.reflectorHeight * 10**3))
elif comArr[2] == "WI88":
self.connection.write("*88.322%0+17.d\r\n" % (self.instrumentHeight * 10**3))
else:
self.connection.write("@W127\r\n")
elif comArr[0] == "PUT":
if comArr[1][:2] == "21":
self.hzAngle = float(comArr[1][-17:]) / 10**5
self.updateStateDisplay()
self.connection.write("?\r\n")
elif comArr[1][:2] == "84":
self.x = float(comArr[1][-17:]) / 10**3
self.updateStateDisplay()
self.connection.write("?\r\n")
elif comArr[1][:2] == "85":
self.y = float(comArr[1][-17:]) / 10**3
self.updateStateDisplay()
self.connection.write("?\r\n")
elif comArr[1][:2] == "86":
self.z = float(comArr[1][-17:]) / 10**3
self.updateStateDisplay()
self.connection.write("?\r\n")
elif comArr[1][:2] == "87":
self.reflectorHeight = float(comArr[1][-17:]) / 10**3
self.updateStateDisplay()
self.connection.write("?\r\n")
elif comArr[1][:2] == "88":
self.instrumentHeight = float(comArr[1][-17:]) / 10**3
self.updateStateDisplay()
self.connection.write("?\r\n")
else:
print("could not process data: " + data)
self.connection.write("@W127\r\n")
else:
print("could not process data: " + data)
self.connection.write("@W127\r\n")
print("done processing data")
def anyPoint(self, x, y, z, hz, vert, dist, reflectorH=0):
self.connection.port.readyRead.disconnect()
data = "110006%+017.f 21.322%+017.f 22.322%+017.f 31..00%+017.f 81..00%+017.f 82..00%+017.f 83..00%+017.f 87..10%+017.f" % (self.ptNr, hz*10**5, vert*10**5, dist*10**3, x*10**3, y*10**3, z*10**3, reflectorH*10**3)
self.connection.write("*%s\r\n" % data)
self.connection.write("w\r\n")
lines = None
while lines is None:
self.connection.port.waitForReadyRead(500)
lines = self.connection.readLines(1)
answer = lines[0]
self.connection.port.readyRead.connect(self.processData)
if answer.strip() != "OK":
QMessageBox.critical(self, "Unexpected Answer from Blender", "Blender answered: %s" % answer)
else:
self.ptNr += 1
print("Messung erfolgreich\n")
def meassurePoint(self):
meassureWindow = MeassureWindow(self)
meassureWindow.exec_()
def measureRandomPolyCircle(self):
circleWindow = RandomCircleWindow(self)
circleWindow.exec_()
def avail_ports(self):
return [p.portName() for p in QtSerialPort.QSerialPortInfo.availablePorts() if not p.isBusy()]
def connect(self, port):
print("connecting to port: %s" % port)
self.connection.connect(port)
self.meassureButton.setEnabled(True)
self.circleButton.setEnabled(True)
self.connection.port.readyRead.connect(self.processData)
class NotConnectedError(IOError):
pass
if __name__ == '__main__':
app = QApplication(sys.argv)
screen = TachyEmulator("COM3")
screen.show()
sys.exit(app.exec_())
| gpl-3.0 | 7,649,883,369,124,909,000 | -3,155,118,534,057,190,400 | 37.978495 | 221 | 0.545724 | false |
bspink/django | tests/template_tests/filter_tests/test_iriencode.py | 388 | 1603 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import iriencode, urlencode
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class IriencodeTests(SimpleTestCase):
"""
Ensure iriencode keeps safe strings.
"""
@setup({'iriencode01': '{{ url|iriencode }}'})
def test_iriencode01(self):
output = self.engine.render_to_string('iriencode01', {'url': '?test=1&me=2'})
self.assertEqual(output, '?test=1&me=2')
@setup({'iriencode02': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
def test_iriencode02(self):
output = self.engine.render_to_string('iriencode02', {'url': '?test=1&me=2'})
self.assertEqual(output, '?test=1&me=2')
@setup({'iriencode03': '{{ url|iriencode }}'})
def test_iriencode03(self):
output = self.engine.render_to_string('iriencode03', {'url': mark_safe('?test=1&me=2')})
self.assertEqual(output, '?test=1&me=2')
@setup({'iriencode04': '{% autoescape off %}{{ url|iriencode }}{% endautoescape %}'})
def test_iriencode04(self):
output = self.engine.render_to_string('iriencode04', {'url': mark_safe('?test=1&me=2')})
self.assertEqual(output, '?test=1&me=2')
class FunctionTests(SimpleTestCase):
def test_unicode(self):
self.assertEqual(iriencode('S\xf8r-Tr\xf8ndelag'), 'S%C3%B8r-Tr%C3%B8ndelag')
def test_urlencoded(self):
self.assertEqual(iriencode(urlencode('fran\xe7ois & jill')), 'fran%C3%A7ois%20%26%20jill')
| bsd-3-clause | 7,434,502,325,462,236,000 | -3,926,586,216,477,304,000 | 36.27907 | 98 | 0.652527 | false |
Ecogenomics/CheckM | scripts/simMarkerGenesVsMarkerSet.py | 3 | 6013 | #!/usr/bin/env python
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
"""
Perform simulation to show that marker sets give better completion estimates
than marker genes.
"""
__author__ = 'Donovan Parks'
__copyright__ = 'Copyright 2013'
__credits__ = ['Donovan Parks']
__license__ = 'GPL3'
__version__ = '1.0.0'
__maintainer__ = 'Donovan Parks'
__email__ = '[email protected]'
__status__ = 'Development'
import argparse
import random
from lib.img import IMG
from lib.taxonomyUtils import ranksByLabel
from lib.plots.boxplot import BoxPlot
class SimMarkerGenesVsMarkerSets(object):
def __init__(self):
pass
def run(self, taxonomyStr, ubiquityThreshold, singleCopyThreshold, percentCompletion, numReplicates, numGenomes, contigLen):
img = IMG()
genomeIds = img.genomeIdsByTaxonomy(taxonomyStr, 'Final')
print '\nLineage ' + taxonomyStr + ' contains ' + str(len(genomeIds)) + ' genomes.'
# build marker genes and colocated marker sets
countTable = img.countTable(genomeIds)
markerGenes = img.markerGenes(genomeIds, countTable, ubiquityThreshold*len(genomeIds), singleCopyThreshold*len(genomeIds))
print ' Marker genes: ' + str(len(markerGenes))
geneDistTable = img.geneDistTable(genomeIds, markerGenes, spacingBetweenContigs=1e6)
colocatedGenes = img.colocatedGenes(geneDistTable)
colocatedSets = img.colocatedSets(colocatedGenes, markerGenes)
print ' Co-located gene sets: ' + str(len(colocatedSets))
# random sample genomes
if numGenomes == -1:
rndGenomeIds = genomeIds
else:
rndGenomeIds = random.sample(genomeIds, numGenomes)
# estimate completion for each genome using both the marker genes and marker sets
metadata = img.genomeMetadata('Final')
plotLabels = []
plotData = []
for genomeId in rndGenomeIds:
mgCompletion = []
msCompletion = []
for _ in xrange(0, numReplicates):
startPartialGenomeContigs = img.sampleGenome(metadata[genomeId]['genome size'], percentCompletion, contigLen)
# calculate completion with marker genes
containedMarkerGenes = img.containedMarkerGenes(markerGenes, geneDistTable[genomeId], startPartialGenomeContigs, contigLen)
mgCompletion.append(float(len(containedMarkerGenes))/len(markerGenes) - percentCompletion)
# calculate completion with marker set
comp = 0.0
for cs in colocatedSets:
present = 0
for contigId in cs:
if contigId in containedMarkerGenes:
present += 1
comp += float(present) / len(cs)
msCompletion.append(comp / len(colocatedSets) - percentCompletion)
plotData.append(mgCompletion)
plotData.append(msCompletion)
species = ' '.join(metadata[genomeId]['taxonomy'][ranksByLabel['Genus']:])
plotLabels.append(species + ' (' + genomeId + ')')
plotLabels.append('')
# plot data
boxPlot = BoxPlot()
plotFilename = './images/sim.MGvsMS.' + taxonomyStr.replace(';','_') + '.' + str(percentCompletion) + '.errorbar.png'
title = taxonomyStr.replace(';', '; ') + '\n' + 'Percent completion = %.2f' % percentCompletion
boxPlot.plot(plotFilename, plotData, plotLabels, r'$\Delta$' + ' Percent Completion', '', False, title)
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-T', '--taxonomy', help='IMG taxonomy string indicating lineage of interest', default = 'prokaryotes')
parser.add_argument('-u', '--ubiquity', help='Ubiquity threshold for defining marker set', type=float, default = 0.97)
parser.add_argument('-s', '--single_copy', help='Single-copy threshold for defining marker set', type=float, default = 0.97)
parser.add_argument('-p', '--percent_complete', help='Percent completion to simulate', type=float, default = 0.75)
parser.add_argument('-r', '--replicates', help='Replicates per genome.', type=int, default = 100)
parser.add_argument('-g', '--num_genomes', help='Number of random genomes to consider (-1 for all)', type=int, default = 20)
parser.add_argument('-c', '--contig_len', help='Length of contigs to simulate', type=int, default = 5000)
args = parser.parse_args()
simMarkerGenesVsMarkerSets = SimMarkerGenesVsMarkerSets()
simMarkerGenesVsMarkerSets.run(args.taxonomy, args.ubiquity, args.single_copy, args.percent_complete, args.replicates, args.num_genomes, args.contig_len)
| gpl-3.0 | -6,281,815,936,467,439,000 | 247,366,290,468,187,650 | 48.694215 | 157 | 0.595377 | false |
woozzu/pylearn2 | pylearn2/scripts/tests/test_print_monitor_cv.py | 48 | 1927 | """
Test print_monitor_cv.py by training on a short TrainCV YAML file and
analyzing the output pickle.
"""
import os
import tempfile
from pylearn2.config import yaml_parse
from pylearn2.scripts import print_monitor_cv
from pylearn2.testing.skip import skip_if_no_sklearn
def test_print_monitor_cv():
"""Test print_monitor_cv.py."""
skip_if_no_sklearn()
handle, filename = tempfile.mkstemp()
trainer = yaml_parse.load(test_print_monitor_cv_yaml %
{'filename': filename})
trainer.main_loop()
# run print_monitor_cv.py main
print_monitor_cv.main(filename)
# run print_monitor_cv.py main with all=True
print_monitor_cv.main(filename, all=True)
# cleanup
os.remove(filename)
test_print_monitor_cv_yaml = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 8,
irange: 0.05,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 2,
irange: 0.05,
},
],
nvis: 10,
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 5,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
save_path: %(filename)s,
}
"""
| bsd-3-clause | 3,493,507,221,504,930,000 | 7,399,326,937,623,761,000 | 26.927536 | 77 | 0.56357 | false |
jianglu/mojo | build/android/pylib/perf/surface_stats_collector.py | 47 | 6781 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import Queue
import datetime
import logging
import re
import threading
from pylib import android_commands
from pylib.device import device_utils
# Log marker containing SurfaceTexture timestamps.
_SURFACE_TEXTURE_TIMESTAMPS_MESSAGE = 'SurfaceTexture update timestamps'
_SURFACE_TEXTURE_TIMESTAMP_RE = r'\d+'
class SurfaceStatsCollector(object):
"""Collects surface stats for a SurfaceView from the output of SurfaceFlinger.
Args:
device: A DeviceUtils instance.
"""
def __init__(self, device):
# TODO(jbudorick) Remove once telemetry gets switched over.
if isinstance(device, android_commands.AndroidCommands):
device = device_utils.DeviceUtils(device)
self._device = device
self._collector_thread = None
self._surface_before = None
self._get_data_event = None
self._data_queue = None
self._stop_event = None
self._warn_about_empty_data = True
def DisableWarningAboutEmptyData(self):
self._warn_about_empty_data = False
def Start(self):
assert not self._collector_thread
if self._ClearSurfaceFlingerLatencyData():
self._get_data_event = threading.Event()
self._stop_event = threading.Event()
self._data_queue = Queue.Queue()
self._collector_thread = threading.Thread(target=self._CollectorThread)
self._collector_thread.start()
else:
raise Exception('SurfaceFlinger not supported on this device.')
def Stop(self):
assert self._collector_thread
(refresh_period, timestamps) = self._GetDataFromThread()
if self._collector_thread:
self._stop_event.set()
self._collector_thread.join()
self._collector_thread = None
return (refresh_period, timestamps)
def _CollectorThread(self):
last_timestamp = 0
timestamps = []
retries = 0
while not self._stop_event.is_set():
self._get_data_event.wait(1)
try:
refresh_period, new_timestamps = self._GetSurfaceFlingerFrameData()
        if refresh_period is None or new_timestamps is None:
retries += 1
if retries < 3:
continue
if last_timestamp:
# Some data has already been collected, but either the app
# was closed or there's no new data. Signal the main thread and
# wait.
self._data_queue.put((None, None))
self._stop_event.wait()
break
raise Exception('Unable to get surface flinger latency data')
timestamps += [timestamp for timestamp in new_timestamps
if timestamp > last_timestamp]
if len(timestamps):
last_timestamp = timestamps[-1]
if self._get_data_event.is_set():
self._get_data_event.clear()
self._data_queue.put((refresh_period, timestamps))
timestamps = []
except Exception as e:
# On any error, before aborting, put the exception into _data_queue to
# prevent the main thread from waiting at _data_queue.get() infinitely.
self._data_queue.put(e)
raise
def _GetDataFromThread(self):
self._get_data_event.set()
ret = self._data_queue.get()
if isinstance(ret, Exception):
raise ret
return ret
def _ClearSurfaceFlingerLatencyData(self):
"""Clears the SurfaceFlinger latency data.
Returns:
True if SurfaceFlinger latency is supported by the device, otherwise
False.
"""
# The command returns nothing if it is supported, otherwise returns many
# lines of result just like 'dumpsys SurfaceFlinger'.
results = self._device.RunShellCommand(
'dumpsys SurfaceFlinger --latency-clear SurfaceView')
return not len(results)
def GetSurfaceFlingerPid(self):
results = self._device.RunShellCommand('ps | grep surfaceflinger')
if not results:
raise Exception('Unable to get surface flinger process id')
pid = results[0].split()[1]
return pid
def _GetSurfaceFlingerFrameData(self):
"""Returns collected SurfaceFlinger frame timing data.
Returns:
A tuple containing:
- The display's nominal refresh period in milliseconds.
- A list of timestamps signifying frame presentation times in
milliseconds.
The return value may be (None, None) if there was no data collected (for
example, if the app was closed before the collector thread has finished).
"""
# adb shell dumpsys SurfaceFlinger --latency <window name>
# prints some information about the last 128 frames displayed in
# that window.
# The data returned looks like this:
# 16954612
# 7657467895508 7657482691352 7657493499756
# 7657484466553 7657499645964 7657511077881
# 7657500793457 7657516600576 7657527404785
# (...)
#
# The first line is the refresh period (here 16.95 ms), it is followed
# by 128 lines w/ 3 timestamps in nanosecond each:
# A) when the app started to draw
# B) the vsync immediately preceding SF submitting the frame to the h/w
# C) timestamp immediately after SF submitted that frame to the h/w
#
# The difference between the 1st and 3rd timestamp is the frame-latency.
# An interesting data is when the frame latency crosses a refresh period
# boundary, this can be calculated this way:
#
# ceil((C - A) / refresh-period)
#
# (each time the number above changes, we have a "jank").
# If this happens a lot during an animation, the animation appears
# janky, even if it runs at 60 fps in average.
#
# We use the special "SurfaceView" window name because the statistics for
# the activity's main window are not updated when the main web content is
# composited into a SurfaceView.
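    # Worked example (illustrative numbers): with a 16.9 ms refresh period, a
    # frame whose C - A latency is 20 ms spans ceil(20 / 16.9) = 2 refresh
    # periods, while a 15 ms frame spans ceil(15 / 16.9) = 1; every change in
    # that count between consecutive frames is one jank.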
results = self._device.RunShellCommand(
'dumpsys SurfaceFlinger --latency SurfaceView')
if not len(results):
return (None, None)
timestamps = []
nanoseconds_per_millisecond = 1e6
refresh_period = long(results[0]) / nanoseconds_per_millisecond
# If a fence associated with a frame is still pending when we query the
# latency data, SurfaceFlinger gives the frame a timestamp of INT64_MAX.
# Since we only care about completed frames, we will ignore any timestamps
# with this value.
pending_fence_timestamp = (1 << 63) - 1
for line in results[1:]:
fields = line.split()
if len(fields) != 3:
continue
timestamp = long(fields[1])
if timestamp == pending_fence_timestamp:
continue
timestamp /= nanoseconds_per_millisecond
timestamps.append(timestamp)
return (refresh_period, timestamps)
| bsd-3-clause | 6,786,698,918,721,366,000 | -1,270,015,410,650,151,400 | 34.502618 | 80 | 0.675122 | false |
jaruba/chromium.src | tools/telemetry/telemetry/value/list_of_scalar_values_unittest.py | 12 | 6051 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import page as page_module
from telemetry import value
from telemetry.page import page_set
from telemetry.value import list_of_scalar_values
from telemetry.value import none_values
class TestBase(unittest.TestCase):
def setUp(self):
ps = page_set.PageSet(file_path=os.path.dirname(__file__))
ps.AddUserStory(page_module.Page('http://www.bar.com/', ps, ps.base_dir))
ps.AddUserStory(page_module.Page('http://www.baz.com/', ps, ps.base_dir))
ps.AddUserStory(page_module.Page('http://www.foo.com/', ps, ps.base_dir))
self.page_set = ps
@property
def pages(self):
return self.page_set.pages
class ValueTest(TestBase):
def testListSamePageMergingWithSamePageConcatenatePolicy(self):
page0 = self.pages[0]
v0 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[1, 2], same_page_merge_policy=value.CONCATENATE)
v1 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[3, 4], same_page_merge_policy=value.CONCATENATE)
self.assertTrue(v1.IsMergableWith(v0))
vM = (list_of_scalar_values.ListOfScalarValues.
MergeLikeValuesFromSamePage([v0, v1]))
self.assertEquals(page0, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals(value.CONCATENATE, vM.same_page_merge_policy)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2, 3, 4], vM.values)
def testListSamePageMergingWithPickFirstPolicy(self):
page0 = self.pages[0]
v0 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[1, 2], same_page_merge_policy=value.PICK_FIRST)
v1 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[3, 4], same_page_merge_policy=value.PICK_FIRST)
self.assertTrue(v1.IsMergableWith(v0))
vM = (list_of_scalar_values.ListOfScalarValues.
MergeLikeValuesFromSamePage([v0, v1]))
self.assertEquals(page0, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals(value.PICK_FIRST, vM.same_page_merge_policy)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2], vM.values)
def testListDifferentPageMerging(self):
page0 = self.pages[0]
page1 = self.pages[1]
v0 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[1, 2], same_page_merge_policy=value.CONCATENATE)
v1 = list_of_scalar_values.ListOfScalarValues(
page1, 'x', 'unit',
[3, 4], same_page_merge_policy=value.CONCATENATE)
self.assertTrue(v1.IsMergableWith(v0))
vM = (list_of_scalar_values.ListOfScalarValues.
MergeLikeValuesFromDifferentPages([v0, v1]))
self.assertEquals(None, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals(value.CONCATENATE, vM.same_page_merge_policy)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2, 3, 4], vM.values)
def testListWithNoneValueMerging(self):
page0 = self.pages[0]
v0 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[1, 2], same_page_merge_policy=value.CONCATENATE)
v1 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
None, same_page_merge_policy=value.CONCATENATE, none_value_reason='n')
self.assertTrue(v1.IsMergableWith(v0))
vM = (list_of_scalar_values.ListOfScalarValues.
MergeLikeValuesFromSamePage([v0, v1]))
self.assertEquals(None, vM.values)
self.assertEquals(none_values.MERGE_FAILURE_REASON,
vM.none_value_reason)
def testListWithNoneValueMustHaveNoneReason(self):
page0 = self.pages[0]
self.assertRaises(none_values.NoneValueMissingReason,
lambda: list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit', None))
def testListWithNoneReasonMustHaveNoneValue(self):
page0 = self.pages[0]
self.assertRaises(none_values.ValueMustHaveNoneValue,
lambda: list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit', [1, 2],
none_value_reason='n'))
def testAsDict(self):
v = list_of_scalar_values.ListOfScalarValues(
None, 'x', 'unit', [1, 2],
same_page_merge_policy=value.PICK_FIRST, important=False)
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d, {
'values': [1, 2]
})
def testNoneValueAsDict(self):
v = list_of_scalar_values.ListOfScalarValues(
None, 'x', 'unit', None, same_page_merge_policy=value.PICK_FIRST,
important=False, none_value_reason='n')
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d, {
'values': None,
'none_value_reason': 'n'
})
def testFromDictInts(self):
d = {
'type': 'list_of_scalar_values',
'name': 'x',
'units': 'unit',
'values': [1, 2]
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
self.assertEquals(v.values, [1, 2])
def testFromDictFloats(self):
d = {
'type': 'list_of_scalar_values',
'name': 'x',
'units': 'unit',
'values': [1.3, 2.7]
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
self.assertEquals(v.values, [1.3, 2.7])
def testFromDictNoneValue(self):
d = {
'type': 'list_of_scalar_values',
'name': 'x',
'units': 'unit',
'values': None,
'none_value_reason': 'n'
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
self.assertEquals(v.values, None)
self.assertEquals(v.none_value_reason, 'n')
| bsd-3-clause | -6,802,610,864,805,667,000 | 8,126,711,226,412,901,000 | 34.385965 | 78 | 0.652124 | false |
Yannig/ansible | lib/ansible/modules/network/netvisor/pn_vrouterlbif.py | 29 | 10152 | #!/usr/bin/python
""" PN CLI vrouter-loopback-interface-add/remove """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_vrouterlbif
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: CLI command to add/remove vrouter-loopback-interface.
description:
- Execute vrouter-loopback-interface-add, vrouter-loopback-interface-remove
commands.
- Each fabric, cluster, standalone switch, or virtual network (VNET) can
provide its tenants with a virtual router (vRouter) service that forwards
traffic between networks and implements Layer 3 protocols.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
state:
description:
- State the action to perform. Use 'present' to add vrouter loopback
interface and 'absent' to remove vrouter loopback interface.
required: True
choices: ['present', 'absent']
pn_vrouter_name:
description:
- Specify the name of the vRouter.
required: True
pn_index:
description:
- Specify the interface index from 1 to 255.
pn_interface_ip:
description:
- Specify the IP address.
required: True
"""
EXAMPLES = """
- name: add vrouter-loopback-interface
pn_vrouterlbif:
state: 'present'
pn_vrouter_name: 'ansible-vrouter'
pn_interface_ip: '104.104.104.1'
- name: remove vrouter-loopback-interface
pn_vrouterlbif:
state: 'absent'
pn_vrouter_name: 'ansible-vrouter'
pn_interface_ip: '104.104.104.1'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the vrouterlb command.
returned: always
type: list
stderr:
description: The set of error responses from the vrouterlb command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
VROUTER_EXISTS = None
LB_INTERFACE_EXISTS = None
# Index range
MIN_INDEX = 1
MAX_INDEX = 255
def pn_cli(module):
"""
This method is to generate the cli portion to launch the Netvisor cli.
It parses the username, password, switch parameters from module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
def check_cli(module, cli):
"""
This method checks if vRouter exists on the target node.
This method also checks for idempotency using the
vrouter-loopback-interface-show command.
If the given vRouter exists, return VROUTER_EXISTS as True else False.
If a loopback interface with the given ip exists on the given vRouter,
return LB_INTERFACE_EXISTS as True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Global Booleans: VROUTER_EXISTS, LB_INTERFACE_EXISTS
"""
vrouter_name = module.params['pn_vrouter_name']
interface_ip = module.params['pn_interface_ip']
# Global flags
global VROUTER_EXISTS, LB_INTERFACE_EXISTS
# Check for vRouter
check_vrouter = cli + ' vrouter-show format name no-show-headers '
check_vrouter = shlex.split(check_vrouter)
out = module.run_command(check_vrouter)[1]
out = out.split()
if vrouter_name in out:
VROUTER_EXISTS = True
else:
VROUTER_EXISTS = False
# Check for loopback interface
show = (cli + ' vrouter-loopback-interface-show vrouter-name %s format ip '
'no-show-headers' % vrouter_name)
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
if interface_ip in out:
LB_INTERFACE_EXISTS = True
else:
LB_INTERFACE_EXISTS = False
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
"""
This method gets appropriate command name for the state specified. It
returns the command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vrouter-loopback-interface-add'
if state == 'absent':
command = 'vrouter-loopback-interface-remove'
return command
def main():
""" This portion is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(required=True, type='str',
choices=['present', 'absent']),
pn_vrouter_name=dict(required=True, type='str'),
pn_interface_ip=dict(type='str'),
pn_index=dict(type='int')
),
required_if=(
["state", "present",
["pn_vrouter_name", "pn_interface_ip"]],
["state", "absent",
["pn_vrouter_name", "pn_interface_ip"]]
)
)
# Accessing the arguments
state = module.params['state']
vrouter_name = module.params['pn_vrouter_name']
interface_ip = module.params['pn_interface_ip']
index = module.params['pn_index']
command = get_command_from_state(state)
# Building the CLI command string
cli = pn_cli(module)
if index:
if not MIN_INDEX <= index <= MAX_INDEX:
module.exit_json(
msg="Index must be between 1 and 255",
changed=False
)
index = str(index)
if command == 'vrouter-loopback-interface-remove':
check_cli(module, cli)
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter %s does not exist' % vrouter_name
)
if LB_INTERFACE_EXISTS is False:
module.exit_json(
skipped=True,
msg=('Loopback interface with IP %s does not exist on %s'
% (interface_ip, vrouter_name))
)
if not index:
# To remove loopback interface, we need the index.
# If index is not specified, get the Loopback interface index
# using the given interface ip.
get_index = cli
get_index += (' vrouter-loopback-interface-show vrouter-name %s ip '
'%s ' % (vrouter_name, interface_ip))
get_index += 'format index no-show-headers'
get_index = shlex.split(get_index)
out = module.run_command(get_index)[1]
index = out.split()[1]
cli += ' %s vrouter-name %s index %s' % (command, vrouter_name, index)
if command == 'vrouter-loopback-interface-add':
check_cli(module, cli)
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg=('vRouter %s does not exist' % vrouter_name)
)
if LB_INTERFACE_EXISTS is True:
module.exit_json(
skipped=True,
msg=('Loopback interface with IP %s already exists on %s'
% (interface_ip, vrouter_name))
)
cli += (' %s vrouter-name %s ip %s'
% (command, vrouter_name, interface_ip))
if index:
cli += ' index %s ' % index
run_cli(module, cli)
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 | -848,433,205,989,160,800 | -3,373,216,016,089,450,500 | 29.95122 | 80 | 0.62234 | false |
edgedb/edgedb | edb/testbase/serutils.py | 1 | 2605 | # mypy: ignore-errors
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import datetime
import decimal
import functools
import uuid
import edgedb
@functools.singledispatch
def serialize(o):
    raise TypeError(f'cannot serialize type {type(o)}')
@serialize.register
def _tuple(o: edgedb.Tuple):
return [serialize(el) for el in o]
@serialize.register
def _namedtuple(o: edgedb.NamedTuple):
return {attr: serialize(getattr(o, attr)) for attr in dir(o)}
@serialize.register
def _linkset(o: edgedb.LinkSet):
return [serialize(el) for el in o]
@serialize.register
def _link(o: edgedb.Link):
ret = {}
for lprop in dir(o):
if lprop in {'source', 'target'}:
continue
ret[f'@{lprop}'] = serialize(getattr(o, lprop))
ret.update(_object(o.target))
return ret
@serialize.register
def _object(o: edgedb.Object):
ret = {}
for attr in dir(o):
try:
link = o[attr]
except (KeyError, TypeError):
link = None
if link:
ret[attr] = serialize(link)
else:
ret[attr] = serialize(getattr(o, attr))
return ret
@serialize.register(edgedb.Set)
@serialize.register(edgedb.Array)
def _set(o):
return [serialize(el) for el in o]
@serialize.register(uuid.UUID)
def _stringify(o):
return str(o)
@serialize.register(int)
@serialize.register(float)
@serialize.register(str)
@serialize.register(bool)
@serialize.register(type(None))
@serialize.register(decimal.Decimal)
@serialize.register(datetime.timedelta)
@serialize.register(edgedb.RelativeDuration)
def _scalar(o):
return o
@serialize.register
def _datetime(o: datetime.datetime):
return o.isoformat()
@serialize.register
def _date(o: datetime.date):
return o.isoformat()
@serialize.register
def _time(o: datetime.time):
return o.isoformat()
@serialize.register
def _enum(o: edgedb.EnumValue):
return str(o)
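# Illustrative usage sketch (not part of the original module; the client and
# query below are hypothetical):
#
#     data = client.query('SELECT Person { name, friends: { name } }')
#     plain = serialize(data)   # nested edgedb objects become dicts/lists/scalars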
| apache-2.0 | -5,083,854,345,926,390,000 | -1,822,991,600,381,531,400 | 20.178862 | 74 | 0.693282 | false |
openprivacy/.emacs.d | elpy/rpc-venv/lib/python3.8/site-packages/setuptools/_distutils/command/build.py | 35 | 5767 | """distutils.command.build
Implements the Distutils 'build' command."""
import sys, os
from distutils.core import Command
from distutils.errors import DistutilsOptionError
from distutils.util import get_platform
def show_compilers():
from distutils.ccompiler import show_compilers
show_compilers()
class build(Command):
description = "build everything needed to install"
user_options = [
('build-base=', 'b',
"base directory for build library"),
('build-purelib=', None,
"build directory for platform-neutral distributions"),
('build-platlib=', None,
"build directory for platform-specific distributions"),
('build-lib=', None,
"build directory for all distribution (defaults to either " +
"build-purelib or build-platlib"),
('build-scripts=', None,
"build directory for scripts"),
('build-temp=', 't',
"temporary build directory"),
('plat-name=', 'p',
"platform name to build for, if supported "
"(default: %s)" % get_platform()),
('compiler=', 'c',
"specify the compiler type"),
('parallel=', 'j',
"number of parallel build jobs"),
('debug', 'g',
"compile extensions and libraries with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('executable=', 'e',
"specify final destination interpreter path (build.py)"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options(self):
self.build_base = 'build'
# these are decided only after 'build_base' has its final value
# (unless overridden by the user or client)
self.build_purelib = None
self.build_platlib = None
self.build_lib = None
self.build_temp = None
self.build_scripts = None
self.compiler = None
self.plat_name = None
self.debug = None
self.force = 0
self.executable = None
self.parallel = None
def finalize_options(self):
if self.plat_name is None:
self.plat_name = get_platform()
else:
# plat-name only supported for windows (other platforms are
# supported via ./configure flags, if at all). Avoid misleading
# other platforms.
if os.name != 'nt':
raise DistutilsOptionError(
"--plat-name only supported on Windows (try "
"using './configure --help' on your platform)")
plat_specifier = ".%s-%d.%d" % (self.plat_name, *sys.version_info[:2])
# Make it so Python 2.x and Python 2.x with --with-pydebug don't
# share the same build directories. Doing so confuses the build
# process for C modules
if hasattr(sys, 'gettotalrefcount'):
plat_specifier += '-pydebug'
# 'build_purelib' and 'build_platlib' just default to 'lib' and
# 'lib.<plat>' under the base build directory. We only use one of
# them for a given distribution, though --
if self.build_purelib is None:
self.build_purelib = os.path.join(self.build_base, 'lib')
if self.build_platlib is None:
self.build_platlib = os.path.join(self.build_base,
'lib' + plat_specifier)
# 'build_lib' is the actual directory that we will use for this
# particular module distribution -- if user didn't supply it, pick
# one of 'build_purelib' or 'build_platlib'.
if self.build_lib is None:
if self.distribution.ext_modules:
self.build_lib = self.build_platlib
else:
self.build_lib = self.build_purelib
# 'build_temp' -- temporary directory for compiler turds,
# "build/temp.<plat>"
if self.build_temp is None:
self.build_temp = os.path.join(self.build_base,
'temp' + plat_specifier)
if self.build_scripts is None:
self.build_scripts = os.path.join(self.build_base,
'scripts-%d.%d' % sys.version_info[:2])
if self.executable is None and sys.executable:
self.executable = os.path.normpath(sys.executable)
if isinstance(self.parallel, str):
try:
self.parallel = int(self.parallel)
except ValueError:
raise DistutilsOptionError("parallel should be an integer")
def run(self):
# Run all relevant sub-commands. This will be some subset of:
# - build_py - pure Python modules
# - build_clib - standalone C libraries
# - build_ext - Python extensions
# - build_scripts - (Python) scripts
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
# -- Predicates for the sub-command list ---------------------------
def has_pure_modules(self):
return self.distribution.has_pure_modules()
def has_c_libraries(self):
return self.distribution.has_c_libraries()
def has_ext_modules(self):
return self.distribution.has_ext_modules()
def has_scripts(self):
return self.distribution.has_scripts()
sub_commands = [('build_py', has_pure_modules),
('build_clib', has_c_libraries),
('build_ext', has_ext_modules),
('build_scripts', has_scripts),
]
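# Illustrative example (not part of distutils): with the defaults computed in
# finalize_options(), a command such as
#
#     python setup.py build --build-base=build
#
# puts pure-Python modules under build/lib (or build/lib.<plat>-X.Y when the
# distribution contains C extensions) and compiler intermediates under
# build/temp.<plat>-X.Y.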
| gpl-2.0 | 1,854,503,729,066,862,000 | -2,009,764,986,828,109,800 | 35.732484 | 85 | 0.568753 | false |
dimartiro/gspread | gspread/urls.py | 41 | 2820 | # -*- coding: utf-8 -*-
"""
gspread.urls
~~~~~~~~~~~~
This module stores Google API URL patterns.
"""
import re
from .exceptions import UnsupportedFeedTypeError, UrlParameterMissing
SPREADSHEETS_SERVER = 'spreadsheets.google.com'
SPREADSHEETS_FEED_URL = 'https://%s/%s/' % (SPREADSHEETS_SERVER, 'feeds')
# General pattern
# /feeds/feedType/key/worksheetId/visibility/projection
#
# Spreadsheet metafeed
# /feeds/spreadsheets/private/full
# /feeds/spreadsheets/private/full/key
#
# Worksheet
# /feeds/worksheets/key/visibility/projection
# /feeds/worksheets/key/visibility/projection/worksheetId
#
# Cell-based feed
# /feeds/cells/key/worksheetId/visibility/projection
# /feeds/cells/key/worksheetId/visibility/projection/cellId
_feed_types = {'spreadsheets': 'spreadsheets/{visibility}/{projection}',
'worksheets': 'worksheets/{spreadsheet_id}/{visibility}/{projection}',
'worksheet': 'worksheets/{spreadsheet_id}/{visibility}/{projection}/{worksheet_id}/{version}',
'cells': 'cells/{spreadsheet_id}/{worksheet_id}/{visibility}/{projection}',
'cells_batch': 'cells/{spreadsheet_id}/{worksheet_id}/{visibility}/{projection}/batch',
'cells_cell_id': 'cells/{spreadsheet_id}/{worksheet_id}/{visibility}/{projection}/{cell_id}'}
_fields_cache = {}
_field_re = re.compile(r'{(\w+)}')
def _extract_fields(patternstr):
return _field_re.findall(patternstr)
def construct_url(feedtype=None,
obj=None,
visibility='private',
projection='full',
spreadsheet_id=None,
worksheet_id=None,
cell_id=None,
worksheet_version=None):
"""Constructs URL to be used for API request.
"""
try:
urlpattern = _feed_types[feedtype]
fields = _fields_cache.get(feedtype)
if fields is None:
fields = _extract_fields(urlpattern)
_fields_cache[feedtype] = fields
except KeyError as e:
raise UnsupportedFeedTypeError(e)
obj_fields = obj.get_id_fields() if obj is not None else {}
params = {'visibility': visibility,
'projection': projection,
'spreadsheet_id': (spreadsheet_id if spreadsheet_id
else obj_fields.get('spreadsheet_id')),
'worksheet_id': (worksheet_id if worksheet_id
else obj_fields.get('worksheet_id')),
'cell_id': cell_id,
'version': worksheet_version}
params = dict((k, v) for k, v in params.items() if v is not None)
try:
return '%s%s' % (SPREADSHEETS_FEED_URL,
urlpattern.format(**params))
except KeyError as e:
raise UrlParameterMissing(e)
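# Illustrative usage (not part of the original module; the ids are made up):
#
#     construct_url('cells', spreadsheet_id='abc123', worksheet_id='od6')
#     # -> 'https://spreadsheets.google.com/feeds/cells/abc123/od6/private/full'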
| mit | -3,239,253,522,385,192,400 | 2,001,971,180,902,860,500 | 31.045455 | 109 | 0.613475 | false |
paulcoiffier/Mobissime-Liberta | vendor/justinrainbow/json-schema/docs/conf.py | 74 | 7837 | # -*- coding: utf-8 -*-
#
# JsonSchema documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 10 15:34:44 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'JsonSchema'
copyright = u'2011, Justin Rainbow, Bruno Prieto Reis'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'JsonSchemadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'JsonSchema.tex', u'JsonSchema Documentation',
u'Justin Rainbow, Bruno Prieto Reis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jsonschema', u'JsonSchema Documentation',
[u'Justin Rainbow, Bruno Prieto Reis'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'JsonSchema', u'JsonSchema Documentation', u'Justin Rainbow, Bruno Prieto Reis',
'JsonSchema', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| gpl-2.0 | 6,101,601,758,685,787,000 | 3,492,401,306,586,096,600 | 31.518672 | 92 | 0.70601 | false |
minhphung171093/OpenERP_V8 | openerp/tools/which.py | 456 | 6884 | #!/usr/bin/env python
""" Which - locate a command
* adapted from Brian Curtin's http://bugs.python.org/file15381/shutil_which.patch
* see http://bugs.python.org/issue444582
* uses ``PATHEXT`` on Windows
* searches current directory before ``PATH`` on Windows,
but not before an explicitly passed path
* accepts both string or iterable for an explicitly passed path, or pathext
* accepts an explicitly passed empty path, or pathext (either '' or [])
* does not search ``PATH`` for files that have a path specified in their name already
* moved defpath and defpathext lists initialization to module level,
instead of initializing them on each function call
* changed interface: which_files() returns generator, which() returns first match,
or raises IOError(errno.ENOENT)
.. function:: which_files(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return a generator which yields full paths in which the *file* name exists
in a directory that is part of the file name, or on *path*,
and has the given *mode*.
By default, *mode* matches an inclusive OR of os.F_OK and os.X_OK - an
existing executable file.
The *path* is, by default, the ``PATH`` variable on the platform,
or the string/iterable passed in as *path*.
In the event that a ``PATH`` variable is not found, :const:`os.defpath` is used.
On Windows, a current directory is searched before using the ``PATH`` variable,
but not before an explicitly passed *path*.
The *pathext* is only used on Windows to match files with given extensions appended as well.
It defaults to the ``PATHEXT`` variable, or the string/iterable passed in as *pathext*.
In the event that a ``PATHEXT`` variable is not found,
default value for Windows XP/Vista is used.
The command is always searched without extension first,
even when *pathext* is explicitly passed.
.. function:: which(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return first match generated by which_files(file, mode, path, pathext),
or raise IOError(errno.ENOENT).
"""
__docformat__ = 'restructuredtext en'
__all__ = 'which which_files pathsep defpath defpathext F_OK R_OK W_OK X_OK'.split()
import sys
from os import access, defpath, pathsep, environ, F_OK, R_OK, W_OK, X_OK
from os.path import exists, dirname, split, join
windows = sys.platform.startswith('win')
defpath = environ.get('PATH', defpath).split(pathsep)
if windows:
defpath.insert(0, '.') # can insert without checking, when duplicates are removed
# given the quite usual mess in PATH on Windows, let's rather remove duplicates
seen = set()
defpath = [dir for dir in defpath if dir.lower() not in seen and not seen.add(dir.lower())]
del seen
defpathext = [''] + environ.get('PATHEXT',
'.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC').lower().split(pathsep)
else:
defpathext = ['']
def which_files(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function yields full paths (not necessarily absolute paths),
in which the given file name matches an existing file in a directory on the path.
>>> def test_which(expected, *args, **argd):
... result = list(which_files(*args, **argd))
... assert result == expected, 'which_files: %s != %s' % (result, expected)
...
... try:
... result = [ which(*args, **argd) ]
... except IOError:
... result = []
... assert result[:1] == expected[:1], 'which: %s != %s' % (result[:1], expected[:1])
>>> if windows: cmd = environ['COMSPEC']
>>> if windows: test_which([cmd], 'cmd')
>>> if windows: test_which([cmd], 'cmd.exe')
>>> if windows: test_which([cmd], 'cmd', path=dirname(cmd))
>>> if windows: test_which([cmd], 'cmd', pathext='.exe')
>>> if windows: test_which([cmd], cmd)
>>> if windows: test_which([cmd], cmd, path='<nonexistent>')
>>> if windows: test_which([cmd], cmd, pathext='<nonexistent>')
>>> if windows: test_which([cmd], cmd[:-4])
>>> if windows: test_which([cmd], cmd[:-4], path='<nonexistent>')
>>> if windows: test_which([], 'cmd', path='<nonexistent>')
>>> if windows: test_which([], 'cmd', pathext='<nonexistent>')
>>> if windows: test_which([], '<nonexistent>/cmd')
>>> if windows: test_which([], cmd[:-4], pathext='<nonexistent>')
>>> if not windows: sh = '/bin/sh'
>>> if not windows: test_which([sh], 'sh')
>>> if not windows: test_which([sh], 'sh', path=dirname(sh))
>>> if not windows: test_which([sh], 'sh', pathext='<nonexistent>')
>>> if not windows: test_which([sh], sh)
>>> if not windows: test_which([sh], sh, path='<nonexistent>')
>>> if not windows: test_which([sh], sh, pathext='<nonexistent>')
>>> if not windows: test_which([], 'sh', mode=W_OK) # not running as root, are you?
>>> if not windows: test_which([], 'sh', path='<nonexistent>')
>>> if not windows: test_which([], '<nonexistent>/sh')
"""
filepath, file = split(file)
if filepath:
path = (filepath,)
elif path is None:
path = defpath
elif isinstance(path, str):
path = path.split(pathsep)
if pathext is None:
pathext = defpathext
elif isinstance(pathext, str):
pathext = pathext.split(pathsep)
if not '' in pathext:
pathext.insert(0, '') # always check command without extension, even for custom pathext
for dir in path:
basepath = join(dir, file)
for ext in pathext:
fullpath = basepath + ext
if exists(fullpath) and access(fullpath, mode):
yield fullpath
def which(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function returns full path (not necessarily absolute path),
in which the given file name matches an existing file in a directory on the path,
or raises IOError(errno.ENOENT).
>>> # for doctest see which_files()
"""
try:
return iter(which_files(file, mode, path, pathext)).next()
except StopIteration:
try:
from errno import ENOENT
except ImportError:
ENOENT = 2
raise IOError(ENOENT, '%s not found' % (mode & X_OK and 'command' or 'file'), file)
if __name__ == '__main__':
import doctest
doctest.testmod()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,921,343,893,067,570,000 | 6,840,314,615,198,618,000 | 43.412903 | 99 | 0.61534 | false |
emilroz/openmicroscopy | components/tools/OmeroWeb/omeroweb/webclient/controller/index.py | 5 | 2494 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008-2011 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
from webclient.controller import BaseController
class BaseIndex(BaseController):
def __init__(self, conn):
BaseController.__init__(self, conn)
def loadMostRecent(self):
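        """Load the most recent share comments and shares from the server,
        de-duplicating shares by id and sorting both lists newest first."""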
self.mostRecentSharesComments = list(self.conn.listMostRecentShareComments())
self.mostRecentSharesComments.sort(key=lambda x: x.creationEventDate(), reverse=True)
self.mostRecentShares = list()
for sh in list(self.conn.listMostRecentShares()):
flag = True
for s in self.mostRecentShares:
if sh.id == s.id:
flag = False
if flag:
self.mostRecentShares.append(sh)
self.mostRecentShares.sort(key=lambda x: x.started, reverse=True)
def loadTagCloud(self):
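        """Count occurrences of up to 20 of the most recently used tags and
        record the min/max counts so the view can scale the tag-cloud font."""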
tags = dict()
for ann in list(self.conn.listMostRecentTags()):
            try:
                tags[ann.id]['count'] += 1
            except KeyError:
                # first occurrence of this tag
                tags[ann.id] = {'obj': ann, 'count': 1}
if len(tags) == 20:
break
font = {'max': 0, 'min': 1}
for key, value in tags.items():
if value['count'] < font['min']:
font['min'] = value['count']
if value['count'] > font['max']:
font['max'] = value['count']
self.font = font
self.mostRecentTags = tags
def loadLastAcquisitions(self):
self.lastAcquiredImages = list(self.conn.listLastImportedImages())
| gpl-2.0 | 4,796,224,248,988,099,000 | -5,099,894,455,206,044,000 | 35.144928 | 93 | 0.603849 | false |
h3biomed/ansible | lib/ansible/modules/network/bigswitch/bigmon_policy.py | 44 | 6499 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ted Elhourani <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Ansible module to manage Big Monitoring Fabric service chains
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigmon_policy
author: "Ted (@tedelhourani)"
short_description: Create and remove a bigmon out-of-band policy.
description:
- Create and remove a bigmon out-of-band policy.
version_added: "2.3"
options:
name:
description:
- The name of the policy.
required: true
policy_description:
description:
- Description of policy.
action:
description:
      - Forward matching packets to delivery interfaces, drop matching packets (measure their
        rate without forwarding to delivery interfaces), capture matching packets to a PCAP file,
        or enable NetFlow generation.
    default: forward
    choices: ['forward', 'drop', 'capture', 'flow-gen']
priority:
description:
- A priority associated with this policy. The higher priority policy takes precedence over a lower priority.
default: 100
duration:
description:
- Run policy for duration duration or until delivery_packet_count packets are delivered, whichever comes first.
default: 0
start_time:
description:
- Date the policy becomes active
default: ansible_date_time.iso8601
delivery_packet_count:
description:
- Run policy until delivery_packet_count packets are delivered.
default: 0
state:
description:
- Whether the policy should be present or absent.
default: present
choices: ['present', 'absent']
controller:
description:
- The controller address.
required: true
validate_certs:
description:
- If C(false), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: true
type: bool
access_token:
description:
- Bigmon access token. If this isn't set, the environment variable C(BIGSWITCH_ACCESS_TOKEN) is used.
'''
EXAMPLES = '''
- name: policy to aggregate filter and deliver data center (DC) 1 traffic
bigmon_policy:
name: policy1
policy_description: DC 1 traffic policy
action: drop
controller: '{{ inventory_hostname }}'
state: present
validate_certs: false
'''
RETURN = ''' # '''
import datetime
import os
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.bigswitch.bigswitch import Rest
from ansible.module_utils._text import to_native
def policy(module):
try:
access_token = module.params['access_token'] or os.environ['BIGSWITCH_ACCESS_TOKEN']
except KeyError as e:
        module.fail_json(msg='Unable to load %s' % to_native(e), exception=traceback.format_exc())
name = module.params['name']
policy_description = module.params['policy_description']
action = module.params['action']
priority = module.params['priority']
duration = module.params['duration']
start_time = module.params['start_time']
delivery_packet_count = module.params['delivery_packet_count']
state = module.params['state']
controller = module.params['controller']
rest = Rest(module,
{'content-type': 'application/json', 'Cookie': 'session_cookie=' + access_token},
'https://' + controller + ':8443/api/v1/data/controller/applications/bigtap')
if name is None:
module.fail_json(msg='parameter `name` is missing')
response = rest.get('policy?config=true', data={})
if response.status_code != 200:
module.fail_json(msg="failed to obtain existing policy config: {0}".format(response.json['description']))
config_present = False
matching = [policy for policy in response.json
if policy['name'] == name and
policy['duration'] == duration and
policy['delivery-packet-count'] == delivery_packet_count and
policy['policy-description'] == policy_description and
policy['action'] == action and
policy['priority'] == priority]
if matching:
config_present = True
if state in ('present') and config_present:
module.exit_json(changed=False)
if state in ('absent') and not config_present:
module.exit_json(changed=False)
if state in ('present'):
data = {'name': name, 'action': action, 'policy-description': policy_description,
'priority': priority, 'duration': duration, 'start-time': start_time,
'delivery-packet-count': delivery_packet_count}
response = rest.put('policy[name="%s"]' % name, data=data)
if response.status_code == 204:
module.exit_json(changed=True)
else:
module.fail_json(msg="error creating policy '{0}': {1}".format(name, response.json['description']))
if state in ('absent'):
response = rest.delete('policy[name="%s"]' % name, data={})
if response.status_code == 204:
module.exit_json(changed=True)
else:
module.fail_json(msg="error deleting policy '{0}': {1}".format(name, response.json['description']))
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
policy_description=dict(type='str', default=''),
action=dict(choices=['forward', 'drop', 'capture', 'flow-gen'], default='forward'),
priority=dict(type='int', default=100),
duration=dict(type='int', default=0),
start_time=dict(type='str', default=datetime.datetime.now().isoformat() + '+00:00'),
delivery_packet_count=dict(type='int', default=0),
controller=dict(type='str', required=True),
state=dict(choices=['present', 'absent'], default='present'),
validate_certs=dict(type='bool', default='True'),
access_token=dict(type='str', no_log=True)
)
)
try:
policy(module)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 | 4,019,347,421,067,068,000 | -7,866,185,468,173,828,000 | 33.386243 | 121 | 0.644868 | false |
vortex-ape/scikit-learn | examples/bicluster/plot_bicluster_newsgroups.py | 39 | 5911 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets words used more
often in those subsets documents.
For a few of the best biclusters, its most common document categories
and its ten most important words get printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
"""
from __future__ import print_function
from collections import defaultdict
import operator
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
print(__doc__)
def number_normalizer(tokens):
""" Map all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
return ("#NUMBER" if token[0].isdigit() else token for token in tokens)
class NumberNormalizingVectorizer(TfidfVectorizer):
def build_tokenizer(self):
tokenize = super(NumberNormalizingVectorizer, self).build_tokenizer()
return lambda doc: list(number_normalizer(tokenize(doc)))
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = NumberNormalizingVectorizer(stop_words='english', min_df=5)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis],
# cols].sum() but much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
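# bicluster_ncut() above returns a simplified normalized cut: the total weight of
# matrix entries crossing the bicluster boundary divided by the weight inside it,
# so smaller values mean tighter, better separated biclusters (used below to rank them).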
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause | 7,834,787,464,747,411,000 | 2,599,415,326,776,251,000 | 37.135484 | 79 | 0.670107 | false |
Altazon97/WolfsDen | modules/twython-3.1.0/twython/streaming/types.py | 9 | 2825 | # -*- coding: utf-8 -*-
"""
twython.streaming.types
~~~~~~~~~~~~~~~~~~~~~~~
This module contains classes and methods for :class:`TwythonStreamer` to use.
"""
class TwythonStreamerTypes(object):
"""Class for different stream endpoints
Not all streaming endpoints have nested endpoints.
User Streams and Site Streams are single streams with no nested endpoints
Status Streams include filter, sample and firehose endpoints
"""
def __init__(self, streamer):
self.streamer = streamer
self.statuses = TwythonStreamerTypesStatuses(streamer)
def user(self, **params):
"""Stream user
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/user
"""
url = 'https://userstream.twitter.com/%s/user.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
def site(self, **params):
"""Stream site
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/site
"""
url = 'https://sitestream.twitter.com/%s/site.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
class TwythonStreamerTypesStatuses(object):
"""Class for different statuses endpoints
Available so TwythonStreamer.statuses.filter() is available.
Just a bit cleaner than TwythonStreamer.statuses_filter(),
statuses_sample(), etc. all being single methods in TwythonStreamer
"""
def __init__(self, streamer):
self.streamer = streamer
def filter(self, **params):
"""Stream statuses/filter
:param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/post/statuses/filter
"""
url = 'https://stream.twitter.com/%s/statuses/filter.json' \
% self.streamer.api_version
self.streamer._request(url, 'POST', params=params)
def sample(self, **params):
"""Stream statuses/sample
:param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/statuses/sample
"""
url = 'https://stream.twitter.com/%s/statuses/sample.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
def firehose(self, **params):
"""Stream statuses/firehose
:param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/statuses/firehose
"""
url = 'https://stream.twitter.com/%s/statuses/firehose.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
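# Minimal usage sketch (not part of this module); assumes a TwythonStreamer
# subclass with valid credentials:
#
#     class MyStreamer(TwythonStreamer):
#         def on_success(self, data):
#             print(data)
#
#     stream = MyStreamer(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
#     stream.statuses.filter(track='python')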
| gpl-3.0 | -2,037,558,434,706,439,000 | -5,657,828,821,785,410 | 30.741573 | 77 | 0.63115 | false |
jessefeinman/FintechHackathon | venv/Lib/site-packages/setuptools/msvc.py | 89 | 37091 | """
Improved support for Microsoft Visual C++ compilers.
Known supported compilers:
--------------------------
Microsoft Visual C++ 9.0:
Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64);
Microsoft Windows SDK 7.0 (x86, x64, ia64);
Microsoft Windows SDK 6.1 (x86, x64, ia64)
Microsoft Visual C++ 10.0:
Microsoft Windows SDK 7.1 (x86, x64, ia64)
Microsoft Visual C++ 14.0:
Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
"""
import os
import sys
import platform
import itertools
import distutils.errors
from pkg_resources.extern.packaging.version import LegacyVersion
from setuptools.extern.six.moves import filterfalse
from .monkey import get_unpatched
if platform.system() == 'Windows':
from setuptools.extern.six.moves import winreg
safe_env = os.environ
else:
"""
Mock winreg and environ so the module can be imported
on this platform.
"""
class winreg:
HKEY_USERS = None
HKEY_CURRENT_USER = None
HKEY_LOCAL_MACHINE = None
HKEY_CLASSES_ROOT = None
safe_env = dict()
try:
from distutils.msvc9compiler import Reg
except ImportError:
pass
def msvc9_find_vcvarsall(version):
"""
Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone
compiler build for Python (VCForPython). Fall back to original behavior
when the standalone compiler is not available.
Redirect the path of "vcvarsall.bat".
Known supported compilers
-------------------------
Microsoft Visual C++ 9.0:
Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)
Parameters
----------
version: float
Required Microsoft Visual C++ version.
Return
------
vcvarsall.bat path: str
"""
VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
key = VC_BASE % ('', version)
try:
# Per-user installs register the compiler path here
productdir = Reg.get_value(key, "installdir")
except KeyError:
try:
# All-user installs on a 64-bit system register here
key = VC_BASE % ('Wow6432Node\\', version)
productdir = Reg.get_value(key, "installdir")
except KeyError:
productdir = None
if productdir:
        vcvarsall = os.path.join(productdir, "vcvarsall.bat")
if os.path.isfile(vcvarsall):
return vcvarsall
return get_unpatched(msvc9_find_vcvarsall)(version)
def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs):
"""
Patched "distutils.msvc9compiler.query_vcvarsall" for support standalones
compilers.
Set environment without use of "vcvarsall.bat".
Known supported compilers
-------------------------
Microsoft Visual C++ 9.0:
Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64);
Microsoft Windows SDK 7.0 (x86, x64, ia64);
Microsoft Windows SDK 6.1 (x86, x64, ia64)
Microsoft Visual C++ 10.0:
Microsoft Windows SDK 7.1 (x86, x64, ia64)
Parameters
----------
ver: float
Required Microsoft Visual C++ version.
arch: str
Target architecture.
Return
------
environment: dict
"""
    # Try to get the environment from vcvarsall.bat (classical way)
try:
orig = get_unpatched(msvc9_query_vcvarsall)
return orig(ver, arch, *args, **kwargs)
except distutils.errors.DistutilsPlatformError:
# Pass error if Vcvarsall.bat is missing
pass
except ValueError:
# Pass error if environment not set after executing vcvarsall.bat
pass
# If error, try to set environment directly
try:
return EnvironmentInfo(arch, ver).return_env()
except distutils.errors.DistutilsPlatformError as exc:
_augment_exception(exc, ver, arch)
raise
def msvc14_get_vc_env(plat_spec):
"""
Patched "distutils._msvccompiler._get_vc_env" for support standalones
compilers.
Set environment without use of "vcvarsall.bat".
Known supported compilers
-------------------------
Microsoft Visual C++ 14.0:
Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
Parameters
----------
plat_spec: str
Target architecture.
Return
------
environment: dict
"""
# Try to get environment from vcvarsall.bat (Classical way)
try:
return get_unpatched(msvc14_get_vc_env)(plat_spec)
except distutils.errors.DistutilsPlatformError:
# Pass error Vcvarsall.bat is missing
pass
# If error, try to set environment directly
try:
return EnvironmentInfo(plat_spec, vc_min_ver=14.0).return_env()
except distutils.errors.DistutilsPlatformError as exc:
_augment_exception(exc, 14.0)
raise
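# Illustrative fallback usage (not part of the original module, Windows only):
# both patched helpers above delegate to EnvironmentInfo when vcvarsall.bat is
# unavailable, e.g.
#
#     env = EnvironmentInfo('x86_amd64', vc_min_ver=14.0).return_env()
#     env['path']   # search path with the MSVC and Windows SDK tool directories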
def msvc14_gen_lib_options(*args, **kwargs):
"""
Patched "distutils._msvccompiler.gen_lib_options" for fix
compatibility between "numpy.distutils" and "distutils._msvccompiler"
(for Numpy < 1.11.2)
"""
if "numpy.distutils" in sys.modules:
import numpy as np
if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'):
return np.distutils.ccompiler.gen_lib_options(*args, **kwargs)
return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs)
def _augment_exception(exc, version, arch=''):
"""
Add details to the exception message to help guide the user
as to what action will resolve it.
"""
# Error if MSVC++ directory not found or environment not set
message = exc.args[0]
if "vcvarsall" in message.lower() or "visual c" in message.lower():
# Special error message if MSVC++ not installed
tmpl = 'Microsoft Visual C++ {version:0.1f} is required.'
message = tmpl.format(**locals())
msdownload = 'www.microsoft.com/download/details.aspx?id=%d'
if version == 9.0:
if arch.lower().find('ia64') > -1:
# For VC++ 9.0, if IA64 support is needed, redirect user
# to Windows SDK 7.0
message += ' Get it with "Microsoft Windows SDK 7.0": '
message += msdownload % 3138
else:
# For VC++ 9.0 redirect user to Vc++ for Python 2.7 :
# This redirection link is maintained by Microsoft.
# Contact [email protected] if it needs updating.
message += ' Get it from http://aka.ms/vcpython27'
elif version == 10.0:
# For VC++ 10.0 Redirect user to Windows SDK 7.1
message += ' Get it with "Microsoft Windows SDK 7.1": '
message += msdownload % 8279
elif version >= 14.0:
# For VC++ 14.0 Redirect user to Visual C++ Build Tools
message += (' Get it with "Microsoft Visual C++ Build Tools": '
r'http://landinghub.visualstudio.com/'
'visual-cpp-build-tools')
exc.args = (message, )
class PlatformInfo:
"""
    Current and target architecture information.
Parameters
----------
arch: str
Target architecture.
"""
current_cpu = safe_env.get('processor_architecture', '').lower()
def __init__(self, arch):
self.arch = arch.lower().replace('x64', 'amd64')
@property
def target_cpu(self):
return self.arch[self.arch.find('_') + 1:]
def target_is_x86(self):
return self.target_cpu == 'x86'
def current_is_x86(self):
return self.current_cpu == 'x86'
def current_dir(self, hidex86=False, x64=False):
"""
Current platform specific subfolder.
Parameters
----------
hidex86: bool
return '' and not '\x86' if architecture is x86.
x64: bool
return '\x64' and not '\amd64' if architecture is amd64.
Return
------
subfolder: str
'\target', or '' (see hidex86 parameter)
"""
return (
'' if (self.current_cpu == 'x86' and hidex86) else
r'\x64' if (self.current_cpu == 'amd64' and x64) else
r'\%s' % self.current_cpu
)
def target_dir(self, hidex86=False, x64=False):
"""
Target platform specific subfolder.
Parameters
----------
hidex86: bool
return '' and not '\x86' if architecture is x86.
x64: bool
return '\x64' and not '\amd64' if architecture is amd64.
Return
------
subfolder: str
'\current', or '' (see hidex86 parameter)
"""
return (
'' if (self.target_cpu == 'x86' and hidex86) else
r'\x64' if (self.target_cpu == 'amd64' and x64) else
r'\%s' % self.target_cpu
)
def cross_dir(self, forcex86=False):
"""
Cross platform specific subfolder.
Parameters
----------
forcex86: bool
            Use 'x86' as current architecture even if the current architecture is
not x86.
Return
------
subfolder: str
'' if target architecture is current architecture,
'\current_target' if not.
"""
current = 'x86' if forcex86 else self.current_cpu
return (
'' if self.target_cpu == current else
self.target_dir().replace('\\', '\\%s_' % current)
)
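# Example values (illustrative, assuming an amd64 host): PlatformInfo('x86')
# yields current_dir() == r'\amd64', target_dir() == r'\x86' and
# cross_dir() == r'\amd64_x86', matching the layout of the MSVC "Bin" folders.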
class RegistryInfo:
"""
    Microsoft Visual Studio related registry information.
Parameters
----------
platform_info: PlatformInfo
"PlatformInfo" instance.
"""
HKEYS = (winreg.HKEY_USERS,
winreg.HKEY_CURRENT_USER,
winreg.HKEY_LOCAL_MACHINE,
winreg.HKEY_CLASSES_ROOT)
def __init__(self, platform_info):
self.pi = platform_info
@property
def visualstudio(self):
"""
Microsoft Visual Studio root registry key.
"""
return 'VisualStudio'
@property
def sxs(self):
"""
Microsoft Visual Studio SxS registry key.
"""
return os.path.join(self.visualstudio, 'SxS')
@property
def vc(self):
"""
Microsoft Visual C++ VC7 registry key.
"""
return os.path.join(self.sxs, 'VC7')
@property
def vs(self):
"""
Microsoft Visual Studio VS7 registry key.
"""
return os.path.join(self.sxs, 'VS7')
@property
def vc_for_python(self):
"""
Microsoft Visual C++ for Python registry key.
"""
return r'DevDiv\VCForPython'
@property
def microsoft_sdk(self):
"""
Microsoft SDK registry key.
"""
return 'Microsoft SDKs'
@property
def windows_sdk(self):
"""
Microsoft Windows/Platform SDK registry key.
"""
return os.path.join(self.microsoft_sdk, 'Windows')
@property
def netfx_sdk(self):
"""
Microsoft .NET Framework SDK registry key.
"""
return os.path.join(self.microsoft_sdk, 'NETFXSDK')
@property
def windows_kits_roots(self):
"""
Microsoft Windows Kits Roots registry key.
"""
return r'Windows Kits\Installed Roots'
def microsoft(self, key, x86=False):
"""
Return key in Microsoft software registry.
Parameters
----------
key: str
Registry key path where look.
x86: str
Force x86 software registry.
Return
------
str: value
"""
node64 = '' if self.pi.current_is_x86() or x86 else r'\Wow6432Node'
return os.path.join('Software', node64, 'Microsoft', key)
def lookup(self, key, name):
"""
Look for values in registry in Microsoft software registry.
Parameters
----------
key: str
Registry key path where look.
name: str
Value name to find.
Return
------
str: value
"""
KEY_READ = winreg.KEY_READ
openkey = winreg.OpenKey
ms = self.microsoft
for hkey in self.HKEYS:
try:
bkey = openkey(hkey, ms(key), 0, KEY_READ)
except (OSError, IOError):
if not self.pi.current_is_x86():
try:
bkey = openkey(hkey, ms(key, True), 0, KEY_READ)
except (OSError, IOError):
continue
else:
continue
try:
return winreg.QueryValueEx(bkey, name)[0]
except (OSError, IOError):
pass
class SystemInfo:
"""
    Microsoft Windows and Visual Studio related system information.
Parameters
----------
registry_info: RegistryInfo
"RegistryInfo" instance.
vc_ver: float
Required Microsoft Visual C++ version.
"""
    # Variables and properties in this class use the original CamelCase variable
    # names from Microsoft source files for easier comparison.
WinDir = safe_env.get('WinDir', '')
ProgramFiles = safe_env.get('ProgramFiles', '')
ProgramFilesx86 = safe_env.get('ProgramFiles(x86)', ProgramFiles)
def __init__(self, registry_info, vc_ver=None):
self.ri = registry_info
self.pi = self.ri.pi
if vc_ver:
self.vc_ver = vc_ver
else:
try:
self.vc_ver = self.find_available_vc_vers()[-1]
except IndexError:
err = 'No Microsoft Visual C++ version found'
raise distutils.errors.DistutilsPlatformError(err)
def find_available_vc_vers(self):
"""
Find all available Microsoft Visual C++ versions.
"""
vckeys = (self.ri.vc, self.ri.vc_for_python)
vc_vers = []
for hkey in self.ri.HKEYS:
for key in vckeys:
try:
bkey = winreg.OpenKey(hkey, key, 0, winreg.KEY_READ)
except (OSError, IOError):
continue
subkeys, values, _ = winreg.QueryInfoKey(bkey)
for i in range(values):
try:
ver = float(winreg.EnumValue(bkey, i)[0])
if ver not in vc_vers:
vc_vers.append(ver)
except ValueError:
pass
for i in range(subkeys):
try:
ver = float(winreg.EnumKey(bkey, i))
if ver not in vc_vers:
vc_vers.append(ver)
except ValueError:
pass
return sorted(vc_vers)
@property
def VSInstallDir(self):
"""
Microsoft Visual Studio directory.
"""
# Default path
name = 'Microsoft Visual Studio %0.1f' % self.vc_ver
default = os.path.join(self.ProgramFilesx86, name)
# Try to get path from registry, if fail use default path
return self.ri.lookup(self.ri.vs, '%0.1f' % self.vc_ver) or default
@property
def VCInstallDir(self):
"""
Microsoft Visual C++ directory.
"""
# Default path
default = r'Microsoft Visual Studio %0.1f\VC' % self.vc_ver
guess_vc = os.path.join(self.ProgramFilesx86, default)
# Try to get "VC++ for Python" path from registry as default path
reg_path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
python_vc = self.ri.lookup(reg_path, 'installdir')
default_vc = os.path.join(python_vc, 'VC') if python_vc else guess_vc
# Try to get path from registry, if fail use default path
path = self.ri.lookup(self.ri.vc, '%0.1f' % self.vc_ver) or default_vc
if not os.path.isdir(path):
msg = 'Microsoft Visual C++ directory not found'
raise distutils.errors.DistutilsPlatformError(msg)
return path
@property
def WindowsSdkVersion(self):
"""
Microsoft Windows SDK versions.
"""
# Set Windows SDK versions for specified MSVC++ version
if self.vc_ver <= 9.0:
return ('7.0', '6.1', '6.0a')
elif self.vc_ver == 10.0:
return ('7.1', '7.0a')
elif self.vc_ver == 11.0:
return ('8.0', '8.0a')
elif self.vc_ver == 12.0:
return ('8.1', '8.1a')
elif self.vc_ver >= 14.0:
return ('10.0', '8.1')
@property
def WindowsSdkDir(self):
"""
Microsoft Windows SDK directory.
"""
sdkdir = ''
for ver in self.WindowsSdkVersion:
# Try to get it from registry
loc = os.path.join(self.ri.windows_sdk, 'v%s' % ver)
sdkdir = self.ri.lookup(loc, 'installationfolder')
if sdkdir:
break
if not sdkdir or not os.path.isdir(sdkdir):
# Try to get "VC++ for Python" version from registry
path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
install_base = self.ri.lookup(path, 'installdir')
if install_base:
sdkdir = os.path.join(install_base, 'WinSDK')
if not sdkdir or not os.path.isdir(sdkdir):
# If fail, use default new path
for ver in self.WindowsSdkVersion:
intver = ver[:ver.rfind('.')]
path = r'Microsoft SDKs\Windows Kits\%s' % (intver)
d = os.path.join(self.ProgramFiles, path)
if os.path.isdir(d):
sdkdir = d
if not sdkdir or not os.path.isdir(sdkdir):
# If fail, use default old path
for ver in self.WindowsSdkVersion:
path = r'Microsoft SDKs\Windows\v%s' % ver
d = os.path.join(self.ProgramFiles, path)
if os.path.isdir(d):
sdkdir = d
if not sdkdir:
# If fail, use Platform SDK
sdkdir = os.path.join(self.VCInstallDir, 'PlatformSDK')
return sdkdir
@property
def WindowsSDKExecutablePath(self):
"""
Microsoft Windows SDK executable directory.
"""
# Find WinSDK NetFx Tools registry dir name
if self.vc_ver <= 11.0:
netfxver = 35
arch = ''
else:
netfxver = 40
hidex86 = True if self.vc_ver <= 12.0 else False
arch = self.pi.current_dir(x64=True, hidex86=hidex86)
fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-'))
        # list all possible registry paths
regpaths = []
if self.vc_ver >= 14.0:
for ver in self.NetFxSdkVersion:
regpaths += [os.path.join(self.ri.netfx_sdk, ver, fx)]
for ver in self.WindowsSdkVersion:
regpaths += [os.path.join(self.ri.windows_sdk, 'v%sA' % ver, fx)]
# Return installation folder from the more recent path
for path in regpaths:
execpath = self.ri.lookup(path, 'installationfolder')
if execpath:
break
return execpath
@property
def FSharpInstallDir(self):
"""
Microsoft Visual F# directory.
"""
path = r'%0.1f\Setup\F#' % self.vc_ver
path = os.path.join(self.ri.visualstudio, path)
return self.ri.lookup(path, 'productdir') or ''
@property
def UniversalCRTSdkDir(self):
"""
Microsoft Universal CRT SDK directory.
"""
# Set Kit Roots versions for specified MSVC++ version
if self.vc_ver >= 14.0:
vers = ('10', '81')
else:
vers = ()
# Find path of the more recent Kit
for ver in vers:
sdkdir = self.ri.lookup(self.ri.windows_kits_roots,
'kitsroot%s' % ver)
if sdkdir:
break
return sdkdir or ''
@property
def NetFxSdkVersion(self):
"""
Microsoft .NET Framework SDK versions.
"""
# Set FxSdk versions for specified MSVC++ version
if self.vc_ver >= 14.0:
return ('4.6.1', '4.6')
else:
return ()
@property
def NetFxSdkDir(self):
"""
Microsoft .NET Framework SDK directory.
"""
for ver in self.NetFxSdkVersion:
loc = os.path.join(self.ri.netfx_sdk, ver)
sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder')
if sdkdir:
break
return sdkdir or ''
@property
def FrameworkDir32(self):
"""
Microsoft .NET Framework 32bit directory.
"""
# Default path
guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework')
# Try to get path from registry, if fail use default path
return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw
@property
def FrameworkDir64(self):
"""
Microsoft .NET Framework 64bit directory.
"""
# Default path
guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework64')
# Try to get path from registry, if fail use default path
return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw
@property
def FrameworkVersion32(self):
"""
Microsoft .NET Framework 32bit versions.
"""
return self._find_dot_net_versions(32)
@property
def FrameworkVersion64(self):
"""
Microsoft .NET Framework 64bit versions.
"""
return self._find_dot_net_versions(64)
def _find_dot_net_versions(self, bits=32):
"""
Find Microsoft .NET Framework versions.
Parameters
----------
bits: int
Platform number of bits: 32 or 64.
"""
# Find actual .NET version
ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits) or ''
# Set .NET versions for specified MSVC++ version
if self.vc_ver >= 12.0:
frameworkver = (ver, 'v4.0')
elif self.vc_ver >= 10.0:
frameworkver = ('v4.0.30319' if ver.lower()[:2] != 'v4' else ver,
'v3.5')
elif self.vc_ver == 9.0:
frameworkver = ('v3.5', 'v2.0.50727')
if self.vc_ver == 8.0:
frameworkver = ('v3.0', 'v2.0.50727')
return frameworkver
class EnvironmentInfo:
"""
Return environment variables for specified Microsoft Visual C++ version
and platform : Lib, Include, Path and libpath.
This function is compatible with Microsoft Visual C++ 9.0 to 14.0.
Script created by analysing Microsoft environment configuration files like
"vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ...
Parameters
----------
arch: str
Target architecture.
vc_ver: float
Required Microsoft Visual C++ version. If not set, autodetect the last
version.
vc_min_ver: float
Minimum Microsoft Visual C++ version.
"""
    # Variables and properties in this class use the original CamelCase variable
    # names from Microsoft source files for easier comparison.
def __init__(self, arch, vc_ver=None, vc_min_ver=None):
self.pi = PlatformInfo(arch)
self.ri = RegistryInfo(self.pi)
self.si = SystemInfo(self.ri, vc_ver)
if vc_min_ver:
if self.vc_ver < vc_min_ver:
err = 'No suitable Microsoft Visual C++ version found'
raise distutils.errors.DistutilsPlatformError(err)
@property
def vc_ver(self):
"""
Microsoft Visual C++ version.
"""
return self.si.vc_ver
@property
def VSTools(self):
"""
Microsoft Visual Studio Tools
"""
paths = [r'Common7\IDE', r'Common7\Tools']
if self.vc_ver >= 14.0:
arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow']
paths += [r'Team Tools\Performance Tools']
paths += [r'Team Tools\Performance Tools%s' % arch_subdir]
return [os.path.join(self.si.VSInstallDir, path) for path in paths]
@property
def VCIncludes(self):
"""
Microsoft Visual C++ & Microsoft Foundation Class Includes
"""
return [os.path.join(self.si.VCInstallDir, 'Include'),
os.path.join(self.si.VCInstallDir, r'ATLMFC\Include')]
@property
def VCLibraries(self):
"""
Microsoft Visual C++ & Microsoft Foundation Class Libraries
"""
arch_subdir = self.pi.target_dir(hidex86=True)
paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir]
if self.vc_ver >= 14.0:
paths += [r'Lib\store%s' % arch_subdir]
return [os.path.join(self.si.VCInstallDir, path) for path in paths]
@property
def VCStoreRefs(self):
"""
Microsoft Visual C++ store references Libraries
"""
if self.vc_ver < 14.0:
return []
return [os.path.join(self.si.VCInstallDir, r'Lib\store\references')]
@property
def VCTools(self):
"""
Microsoft Visual C++ Tools
"""
si = self.si
tools = [os.path.join(si.VCInstallDir, 'VCPackages')]
forcex86 = self.vc_ver <= 10.0
arch_subdir = self.pi.cross_dir(forcex86)
if arch_subdir:
tools += [os.path.join(si.VCInstallDir, 'Bin%s' % arch_subdir)]
if self.vc_ver >= 14.0:
path = 'Bin%s' % self.pi.current_dir(hidex86=True)
tools += [os.path.join(si.VCInstallDir, path)]
else:
tools += [os.path.join(si.VCInstallDir, 'Bin')]
return tools
@property
def OSLibraries(self):
"""
Microsoft Windows SDK Libraries
"""
if self.vc_ver <= 10.0:
arch_subdir = self.pi.target_dir(hidex86=True, x64=True)
return [os.path.join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)]
else:
arch_subdir = self.pi.target_dir(x64=True)
lib = os.path.join(self.si.WindowsSdkDir, 'lib')
libver = self._get_content_dirname(lib)
return [os.path.join(lib, '%sum%s' % (libver, arch_subdir))]
@property
def OSIncludes(self):
"""
Microsoft Windows SDK Include
"""
include = os.path.join(self.si.WindowsSdkDir, 'include')
if self.vc_ver <= 10.0:
return [include, os.path.join(include, 'gl')]
else:
if self.vc_ver >= 14.0:
sdkver = self._get_content_dirname(include)
else:
sdkver = ''
return [os.path.join(include, '%sshared' % sdkver),
os.path.join(include, '%sum' % sdkver),
os.path.join(include, '%swinrt' % sdkver)]
@property
def OSLibpath(self):
"""
Microsoft Windows SDK Libraries Paths
"""
ref = os.path.join(self.si.WindowsSdkDir, 'References')
libpath = []
if self.vc_ver <= 9.0:
libpath += self.OSLibraries
if self.vc_ver >= 11.0:
libpath += [os.path.join(ref, r'CommonConfiguration\Neutral')]
if self.vc_ver >= 14.0:
libpath += [
ref,
os.path.join(self.si.WindowsSdkDir, 'UnionMetadata'),
os.path.join(
ref,
'Windows.Foundation.UniversalApiContract',
'1.0.0.0',
),
os.path.join(
ref,
'Windows.Foundation.FoundationContract',
'1.0.0.0',
),
os.path.join(
ref,
'Windows.Networking.Connectivity.WwanContract',
'1.0.0.0',
),
os.path.join(
self.si.WindowsSdkDir,
'ExtensionSDKs',
'Microsoft.VCLibs',
'%0.1f' % self.vc_ver,
'References',
'CommonConfiguration',
'neutral',
),
]
return libpath
@property
def SdkTools(self):
"""
Microsoft Windows SDK Tools
"""
bin_dir = 'Bin' if self.vc_ver <= 11.0 else r'Bin\x86'
tools = [os.path.join(self.si.WindowsSdkDir, bin_dir)]
if not self.pi.current_is_x86():
arch_subdir = self.pi.current_dir(x64=True)
path = 'Bin%s' % arch_subdir
tools += [os.path.join(self.si.WindowsSdkDir, path)]
if self.vc_ver in (10.0, 11.0):
if self.pi.target_is_x86():
arch_subdir = ''
else:
arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir
tools += [os.path.join(self.si.WindowsSdkDir, path)]
if self.si.WindowsSDKExecutablePath:
tools += [self.si.WindowsSDKExecutablePath]
return tools
@property
def SdkSetup(self):
"""
Microsoft Windows SDK Setup
"""
if self.vc_ver > 9.0:
return []
return [os.path.join(self.si.WindowsSdkDir, 'Setup')]
@property
def FxTools(self):
"""
Microsoft .NET Framework Tools
"""
pi = self.pi
si = self.si
if self.vc_ver <= 10.0:
include32 = True
include64 = not pi.target_is_x86() and not pi.current_is_x86()
else:
include32 = pi.target_is_x86() or pi.current_is_x86()
include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64'
tools = []
if include32:
tools += [os.path.join(si.FrameworkDir32, ver)
for ver in si.FrameworkVersion32]
if include64:
tools += [os.path.join(si.FrameworkDir64, ver)
for ver in si.FrameworkVersion64]
return tools
@property
def NetFxSDKLibraries(self):
"""
Microsoft .Net Framework SDK Libraries
"""
if self.vc_ver < 14.0 or not self.si.NetFxSdkDir:
return []
arch_subdir = self.pi.target_dir(x64=True)
return [os.path.join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)]
@property
def NetFxSDKIncludes(self):
"""
Microsoft .Net Framework SDK Includes
"""
if self.vc_ver < 14.0 or not self.si.NetFxSdkDir:
return []
return [os.path.join(self.si.NetFxSdkDir, r'include\um')]
@property
def VsTDb(self):
"""
Microsoft Visual Studio Team System Database
"""
return [os.path.join(self.si.VSInstallDir, r'VSTSDB\Deploy')]
@property
def MSBuild(self):
"""
Microsoft Build Engine
"""
if self.vc_ver < 12.0:
return []
arch_subdir = self.pi.current_dir(hidex86=True)
path = r'MSBuild\%0.1f\bin%s' % (self.vc_ver, arch_subdir)
return [os.path.join(self.si.ProgramFilesx86, path)]
@property
def HTMLHelpWorkshop(self):
"""
Microsoft HTML Help Workshop
"""
if self.vc_ver < 11.0:
return []
return [os.path.join(self.si.ProgramFilesx86, 'HTML Help Workshop')]
@property
def UCRTLibraries(self):
"""
Microsoft Universal CRT Libraries
"""
if self.vc_ver < 14.0:
return []
arch_subdir = self.pi.target_dir(x64=True)
lib = os.path.join(self.si.UniversalCRTSdkDir, 'lib')
ucrtver = self._get_content_dirname(lib)
return [os.path.join(lib, '%sucrt%s' % (ucrtver, arch_subdir))]
@property
def UCRTIncludes(self):
"""
Microsoft Universal CRT Include
"""
if self.vc_ver < 14.0:
return []
include = os.path.join(self.si.UniversalCRTSdkDir, 'include')
ucrtver = self._get_content_dirname(include)
return [os.path.join(include, '%sucrt' % ucrtver)]
@property
def FSharp(self):
"""
Microsoft Visual F#
"""
if self.vc_ver < 11.0 or self.vc_ver > 12.0:
return []
return [self.si.FSharpInstallDir]
@property
def VCRuntimeRedist(self):
"""
Microsoft Visual C++ runtime redistributable DLL
"""
arch_subdir = self.pi.target_dir(x64=True)
vcruntime = 'redist%s\\Microsoft.VC%d0.CRT\\vcruntime%d0.dll'
vcruntime = vcruntime % (arch_subdir, self.vc_ver, self.vc_ver)
return os.path.join(self.si.VCInstallDir, vcruntime)
def return_env(self, exists=True):
"""
Return environment dict.
Parameters
----------
exists: bool
If True, only return existing paths.
"""
env = dict(
include=self._build_paths('include',
[self.VCIncludes,
self.OSIncludes,
self.UCRTIncludes,
self.NetFxSDKIncludes],
exists),
lib=self._build_paths('lib',
[self.VCLibraries,
self.OSLibraries,
self.FxTools,
self.UCRTLibraries,
self.NetFxSDKLibraries],
exists),
libpath=self._build_paths('libpath',
[self.VCLibraries,
self.FxTools,
self.VCStoreRefs,
self.OSLibpath],
exists),
path=self._build_paths('path',
[self.VCTools,
self.VSTools,
self.VsTDb,
self.SdkTools,
self.SdkSetup,
self.FxTools,
self.MSBuild,
self.HTMLHelpWorkshop,
self.FSharp],
exists),
)
if self.vc_ver >= 14 and os.path.isfile(self.VCRuntimeRedist):
env['py_vcruntime_redist'] = self.VCRuntimeRedist
return env
def _build_paths(self, name, spec_path_lists, exists):
"""
Given an environment variable name and specified paths,
return a pathsep-separated string of paths containing
unique, extant, directories from those paths and from
the environment variable. Raise an error if no paths
are resolved.
"""
# flatten spec_path_lists
spec_paths = itertools.chain.from_iterable(spec_path_lists)
env_paths = safe_env.get(name, '').split(os.pathsep)
paths = itertools.chain(spec_paths, env_paths)
extant_paths = list(filter(os.path.isdir, paths)) if exists else paths
if not extant_paths:
msg = "%s environment variable is empty" % name.upper()
raise distutils.errors.DistutilsPlatformError(msg)
unique_paths = self._unique_everseen(extant_paths)
return os.pathsep.join(unique_paths)
# from Python docs
def _unique_everseen(self, iterable, key=None):
"""
List unique elements, preserving order.
Remember all elements ever seen.
_unique_everseen('AAAABBBCCDAABBB') --> A B C D
_unique_everseen('ABBCcAD', str.lower) --> A B C D
"""
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def _get_content_dirname(self, path):
"""
Return name of the first dir in path or '' if no dir found.
Parameters
----------
path: str
Path in which to search for a directory.
Return
------
foldername: str
"name\" or ""
"""
try:
name = os.listdir(path)
if name:
return '%s\\' % name[0]
return ''
except (OSError, IOError):
return ''
| bsd-2-clause | 1,319,056,336,718,418,700 | 2,532,518,129,174,908,400 | 30.090528 | 79 | 0.535629 | false |
redPanther/hyperion.ng | effects/candle.py | 4 | 2098 |
# Candleflicker effect by penfold42
# Algorithm courtesy of
# https://cpldcpu.com/2013/12/08/hacking-a-candleflicker-led/
# candles can be:
# a single led number, a list of candle numbers
# "all" to flicker all the leds randomly
# "all-together" to flicker all the leds in unison
import hyperion
import time
import colorsys
import random
# Get parameters
color = hyperion.args.get('color', (255,138,0))
colorShift = float(hyperion.args.get('colorShift', 1))/100.0
brightness = float(hyperion.args.get('brightness', 100))/100.0
sleepTime = float(hyperion.args.get('sleepTime', 0.14))
candles = hyperion.args.get('candles', "all")
ledlist = hyperion.args.get('ledlist', "1")
candlelist = ()
if (candles == "list") and (type(ledlist) is str):
for s in ledlist.split(','):
i = int(s)
if (i<hyperion.ledCount):
candlelist += (i,)
elif (candles == "list") and (type(ledlist) is list):
for s in (ledlist):
i = int(s)
if (i<hyperion.ledCount):
candlelist += (i,)
else:
candlelist = range(hyperion.ledCount)
# Convert rgb color to hsv
hsv = colorsys.rgb_to_hsv(color[0]/255.0, color[1]/255.0, color[2]/255.0)
def CandleRgb():
hue = random.uniform(hsv[0]-colorShift, hsv[0]+colorShift) % 1.0
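# Draw a 4-bit brightness value, rejecting any draw whose two upper bits are
# both zero (RAND & 0x0c == 0); this keeps the flicker above roughly 4/15 of
# full brightness, approximating the candle-LED behaviour referenced above.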
RAND = random.randint(0, 15)
while (RAND & 0x0c) == 0:
RAND = random.randint(0, 15)
val = (min(RAND, 15) / 15.0001) * brightness
frgb = colorsys.hsv_to_rgb(hue, hsv[1], val)
return (int(255*frgb[0]), int(255*frgb[1]), int(255*frgb[2]))
ledData = bytearray(hyperion.ledCount * (0,0,0) )
while not hyperion.abort():
if (candles == "all-together"):
rgb = CandleRgb()
for lednum in candlelist:
ledData[3*lednum+0] = rgb[0]
ledData[3*lednum+1] = rgb[1]
ledData[3*lednum+2] = rgb[2]
elif (candles == "all"):
for lednum in candlelist:
rgb = CandleRgb()
ledData[3*lednum+0] = rgb[0]
ledData[3*lednum+1] = rgb[1]
ledData[3*lednum+2] = rgb[2]
else:
for lednum in candlelist:
rgb = CandleRgb()
ledData[3*lednum+0] = rgb[0]
ledData[3*lednum+1] = rgb[1]
ledData[3*lednum+2] = rgb[2]
hyperion.setColor(ledData)
time.sleep(sleepTime)
| mit | 5,235,317,486,763,778,000 | -5,709,146,918,686,691,000 | 24.901235 | 74 | 0.666349 | false |
agentfog/qiime | tests/test_parallel/test_map_reads_to_reference.py | 15 | 18488 | #!/usr/bin/env python
# File created on 07 Jul 2012
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
from glob import glob
from shutil import rmtree
from os import close
from os.path import exists, join
from tempfile import mkstemp, mkdtemp
from skbio.util import remove_files
from unittest import TestCase, main
from numpy.testing import assert_almost_equal
from biom import load_table
from qiime.test import initiate_timeout, disable_timeout
from qiime.util import get_qiime_temp_dir
from qiime.parse import parse_otu_map
from qiime.parallel.map_reads_to_reference import (ParallelDatabaseMapperBlat,
ParallelDatabaseMapperUsearch, ParallelDatabaseMapperBwaShort)
class ParallelDatabaseMapperTests(TestCase):
def setUp(self):
""" """
self.files_to_remove = []
self.dirs_to_remove = []
tmp_dir = get_qiime_temp_dir()
self.test_out = mkdtemp(dir=tmp_dir,
prefix='qiime_parallel_tests_',
suffix='')
self.dirs_to_remove.append(self.test_out)
fd, self.refseqs1_fp = mkstemp(dir=self.test_out,
prefix='qiime_refseqs',
suffix='.fasta')
close(fd)
refseqs1_f = open(self.refseqs1_fp, 'w')
refseqs1_f.write(refseqs1)
refseqs1_f.close()
self.files_to_remove.append(self.refseqs1_fp)
fd, self.refseqs2_fp = mkstemp(dir=self.test_out,
prefix='qiime_refseqs',
suffix='.fasta')
close(fd)
refseqs2_f = open(self.refseqs2_fp, 'w')
refseqs2_f.write(refseqs2)
refseqs2_f.close()
self.files_to_remove.append(self.refseqs2_fp)
fd, self.inseqs1_fp = mkstemp(dir=self.test_out,
prefix='qiime_inseqs',
suffix='.fasta')
close(fd)
inseqs1_f = open(self.inseqs1_fp, 'w')
inseqs1_f.write(inseqs1)
inseqs1_f.close()
self.files_to_remove.append(self.inseqs1_fp)
fd, self.inseqs2_fp = mkstemp(dir=self.test_out,
prefix='qiime_inseqs',
suffix='.fasta')
close(fd)
inseqs2_f = open(self.inseqs2_fp, 'w')
inseqs2_f.write(inseqs2)
inseqs2_f.close()
self.files_to_remove.append(self.inseqs2_fp)
initiate_timeout(60)
def tearDown(self):
""" """
disable_timeout()
remove_files(self.files_to_remove)
# remove directories last, so we don't get errors
# trying to remove files which may be in the directories
for d in self.dirs_to_remove:
if exists(d):
rmtree(d)
class ParallelDatabaseMapperUsearchTests(ParallelDatabaseMapperTests):
def test_parallel_database_mapper_usearch(self):
""" parallel_database_mapper_usearch functions as expected """
params = {'refseqs_fp': self.refseqs1_fp,
'min_percent_id': 0.97,
'evalue': 1e-10,
'max_accepts': 1,
'max_rejects': 32,
'queryalnfract': 0.35,
'targetalnfract': 0.0,
'observation_metadata_fp': None
}
app = ParallelDatabaseMapperUsearch()
r = app(self.inseqs1_fp,
self.test_out,
params,
job_prefix='PTEST',
poll_directly=True,
suppress_submit_jobs=False)
observation_map_fp = glob(
join(self.test_out, 'observation_map.txt'))[0]
omap = parse_otu_map(open(observation_map_fp, 'U'))
self.assertEqual(len(omap[0]), 3)
self.assertItemsEqual(
omap[1],
['eco:b0015',
'eco:b0122',
'eco:b0015:duplicate'])
self.assertItemsEqual(omap[2], ['eco:b0015-pr', 'eco:b0122-pr'])
class ParallelDatabaseMapperBlatTests(ParallelDatabaseMapperTests):
def test_parallel_database_mapper_blat(self):
""" parallel_database_mapper_blat functions as expected """
params = {'refseqs_fp': self.refseqs1_fp,
'min_percent_id': 0.97,
'evalue': 1e-10,
'max_accepts': 1,
'max_rejects': 32,
'queryalnfract': 0.35,
'targetalnfract': 0.0,
'observation_metadata_fp': None
}
app = ParallelDatabaseMapperBlat()
r = app(self.inseqs1_fp,
self.test_out,
params,
job_prefix='PTEST',
poll_directly=True,
suppress_submit_jobs=False)
observation_map_fp = glob(
join(self.test_out, 'observation_map.txt'))[0]
omap = parse_otu_map(open(observation_map_fp, 'U'))
self.assertEqual(len(omap[0]), 3)
self.assertItemsEqual(
omap[1],
['eco:b0015',
'eco:b0122',
'eco:b0015:duplicate'])
self.assertItemsEqual(omap[2], ['eco:b0015-pr', 'eco:b0122-pr'])
class ParallelDatabaseMapperBwaShortTests(ParallelDatabaseMapperTests):
def test_bwa_short_database_mapper(self):
"""bwa_short_database_mapper functions as expected """
params = {'refseqs_fp': self.refseqs2_fp,
'max_diff': None,
'observation_metadata_fp': None}
app = ParallelDatabaseMapperBwaShort()
r = app(self.inseqs2_fp,
self.test_out,
params,
poll_directly=True,
suppress_submit_jobs=False)
observation_map_fp = join(self.test_out, 'observation_map.txt')
self.assertTrue(exists(observation_map_fp))
observation_table_fp = join(self.test_out, 'observation_table.biom')
table = load_table(observation_table_fp)
self.assertItemsEqual(table.ids(), ['s2', 's1'])
self.assertItemsEqual(
table.ids(axis='observation'),
['r1',
'r2',
'r3',
'r4',
'r5'])
self.assertEqual(table.sum(), 6)
def test_bwa_short_database_mapper_alt_params(self):
"""bwa_short_database_mapper functions as expected """
params = {'refseqs_fp': self.refseqs2_fp,
'max_diff': 1,
'observation_metadata_fp': None}
app = ParallelDatabaseMapperBwaShort()
r = app(self.inseqs2_fp,
self.test_out,
params,
poll_directly=True,
suppress_submit_jobs=False)
observation_map_fp = join(self.test_out, 'observation_map.txt')
self.assertTrue(exists(observation_map_fp))
observation_table_fp = join(self.test_out, 'observation_table.biom')
table = load_table(observation_table_fp)
self.assertItemsEqual(table.ids(), ['s2', 's1'])
self.assertItemsEqual(table.ids(axis='observation'),
['r2', 'r3', 'r4', 'r5'])
self.assertEqual(table.sum(), 5)
refseqs1 = """>eco:b0001-pr
MKRISTTITTTITITTGNGAG
>eco:b0015-pr dnaJ
MAKQDYYEILGVSKTAEEREIRKAYKRLAMKYHPDRNQGDKEAEAKFKEIKEAYEVLTDS
QKRAAYDQYGHAAFEQGGMGGGGFGGGADFSDIFGDVFGDIFGGGRGRQRAARGADLRYN
MELTLEEAVRGVTKEIRIPTLEECDVCHGSGAKPGTQPQTCPTCHGSGQVQMRQGFFAVQ
QTCPHCQGRGTLIKDPCNKCHGHGRVERSKTLSVKIPAGVDTGDRIRLAGEGEAGEHGAP
AGDLYVQVQVKQHPIFEREGNNLYCEVPINFAMAALGGEIEVPTLDGRVKLKVPGETQTG
KLFRMRGKGVKSVRGGAQGDLLCRVVVETPVGLNERQKQLLQELQESFGGPTGEHNSPRS
KSFFDGVKKFFDDLTR
>eco:b0122-pr
MKTFFRTVLFGSLMAVCANSYALSESEAEDMADLTAVFVFLKNDCGYQNLPNGQIRRALV
FFAQQNQWDLSNYDTFDMKALGEDSYRDLSGIGIPVAKKCKALARDSLSLLAYVK
"""
refseqs2 = """>r1
atgaaacgcattagcaccaccattaccaccaccatcaccattaccacaggtaacggtgcg
ggctga
>r2 some comments...
atggctaagcaagattattacgagattttaggcgtttccaaaacagcggaagagcgtgaa
atcagaaaggcctacaaacgcctggccatgaaataccacccggaccgtaaccagggtgac
aaagaggccgaggcgaaatttaaagagatcaaggaagcttatgaagttctgaccgactcg
caaaaacgtgcggcatacgatcagtatggtcatgctgcgtttgagcaaggtggcatgggc
ggcggcggttttggcggcggcgcagacttcagcgatatttttggtgacgttttcggcgat
atttttggcggcggacgtggtcgtcaacgtgcggcgcgcggtgctgatttacgctataac
atggagctcaccctcgaagaagctgtacgtggcgtgaccaaagagatccgcattccgact
ctggaagagtgtgacgtttgccacggtagcggtgcaaaaccaggtacacagccgcagact
tgtccgacctgtcatggttctggtcaggtgcagatgcgccagggattcttcgctgtacag
cagacctgtccacactgtcagggccgcggtacgctgatcaaagatccgtgcaacaaatgt
catggtcatggtcgtgttgagcgcagcaaaacgctgtccgttaaaatcccggcaggggtg
gacactggagaccgcatccgtcttgcgggcgaaggtgaagcgggcgagcatggcgcaccg
gcaggcgatctgtacgttcaggttcaggttaaacagcacccgattttcgagcgtgaaggc
aacaacctgtattgcgaagtcccgatcaacttcgctatggcggcgctgggtggcgaaatc
gaagtaccgacccttgatggtcgcgtcaaactgaaagtgcctggcgaaacccagaccggt
aagctattccgtatgcgcggtaaaggcgtcaagtctgtccgcggtggcgcacagggtgat
ttgctgtgccgcgttgtcgtcgaaacaccggtaggcctgaacgaaaggcagaaacagctg
ctgcaagagctgcaagaaagcttcggtggcccaaccggcgagcacaacagcccgcgctca
aagagcttctttgatggtgtgaagaagttttttgacgacctgacccgagaa
>r3
atgaagacgtttttcagaacagtgttattcggcagcctgatggccgtctgcgcaaacagt
tacgcgctcagcgagtctgaagccgaagatatggccgatttaacggcagtttttgtcttt
ctgaagaacgattgtggttaccagaacttacctaacgggcaaattcgtcgcgcactggtc
tttttcgctcagcaaaaccagtgggacctcagtaattacgacaccttcgacatgaaagcc
ctcggtgaagacagctaccgcgatctcagcggcattggcattcccgtcgctaaaaaatgc
aaagccctggcccgcgattccttaagcctgcttgcctacgtcaaataa
>r4
atgaagaaaattttcagaacagtgttattcggcagcctgatggccgtctgcgcaaacagt
tacgcgctcagcgagtctgaagccgaagatatggccgatttaacggcagtttttgtcttt
ctgaagaacgattgtggttaccagaacttacctaacgggcaaattcgtcgcgcactggtc
tttttcgctcagcaaaaccagtgggacctcagtaattacgacaccttcgacatgaaagcc
ctcggtgaagacagctaccgcgatctcagcggcattggcattcccgtcgctaaaaaatgc
aaagccctggcccgcgattccttaagcctgcttgcctacgtcaaatcc
>r5 some comments...
aatgactaagcaagattattacgagattttaggcgtttccaaaacagcggaagagcgtgaa
atcagaaaggcctacaaacgcctggccatgaaataccacccggaccgtaaccagggtgac
aaagaggccgaggcgaaatttaaagagatcaaggaagcttatgaagttctgaccgactcg
caaaaacgtgcggcatacgatcagtatggtcatgctgcgtttgagcaaggtggcatgggc
ggcggcggttttggcggcggcgcagacttcagcgatatttttggtgacgttttcggcgat
atttttggcggcggacgtggtcgtcaacgtgcggcgcgcggtgctgatttacgctataac
atggagctcaccctcgaagaagctgtacgtggcgtgaccaaagagatccgcattccgact
ctggaagagtgtgacgtttgccacggtagcggtgcaaaaccaggtacacagccgcagact
tgtccgacctgtcatggttctggtcaggtgcagatgcgccagggattcttcgctgtacag
cagacctgtccacactgtcagggccgcggtacgctgatcaaagatccgtgcaacaaatgt
catggtcatggtcgtgttgagcgcagcaaaacgctgtccgttaaaatcccggcaggggtg
gacactggagaccgcatccgtcttgcgggcgaaggtgaagcgggcgagcatggcgcaccg
gcaggcgatctgtacgttcaggttcaggttaaacagcacccgattttcgagcgtgaaggc
aacaacctgtattgcgaagtcccgatcaacttcgctatggcggcgctgggtggcgaaatc
gaagtaccgacccttgatggtcgcgtcaaactgaaagtgcctggcgaaacccagaccggt
aagctattccgtatgcgcggtaaaggcgtcaagtctgtccgcggtggcgcacagggtgat
ttgctgtgccgcgttgtcgtcgaaacaccggtaggcctgaacgaaaggcagaaacagctg
ctgcaagagctgcaagaaagcttcggtggcccaaccggcgagcacaacagcccgcgctca
aagagcttctttgatggtgtgaagaagttttttgacgacctgacccgctaa
"""
inseqs1 = """>eco:b0001 thrL; thr operon leader peptide; K08278 thr operon leader peptide (N)
atgaaacgcattagcaccaccattaccaccaccatcaccattaccacaggtaacggtgcg
ggctga
>eco:b0015 dnaJ; chaperone Hsp40, co-chaperone with DnaK; K03686 molecular chaperone DnaJ (N)
atggctaagcaagattattacgagattttaggcgtttccaaaacagcggaagagcgtgaa
atcagaaaggcctacaaacgcctggccatgaaataccacccggaccgtaaccagggtgac
aaagaggccgaggcgaaatttaaagagatcaaggaagcttatgaagttctgaccgactcg
caaaaacgtgcggcatacgatcagtatggtcatgctgcgtttgagcaaggtggcatgggc
ggcggcggttttggcggcggcgcagacttcagcgatatttttggtgacgttttcggcgat
atttttggcggcggacgtggtcgtcaacgtgcggcgcgcggtgctgatttacgctataac
atggagctcaccctcgaagaagctgtacgtggcgtgaccaaagagatccgcattccgact
ctggaagagtgtgacgtttgccacggtagcggtgcaaaaccaggtacacagccgcagact
tgtccgacctgtcatggttctggtcaggtgcagatgcgccagggattcttcgctgtacag
cagacctgtccacactgtcagggccgcggtacgctgatcaaagatccgtgcaacaaatgt
catggtcatggtcgtgttgagcgcagcaaaacgctgtccgttaaaatcccggcaggggtg
gacactggagaccgcatccgtcttgcgggcgaaggtgaagcgggcgagcatggcgcaccg
gcaggcgatctgtacgttcaggttcaggttaaacagcacccgattttcgagcgtgaaggc
aacaacctgtattgcgaagtcccgatcaacttcgctatggcggcgctgggtggcgaaatc
gaagtaccgacccttgatggtcgcgtcaaactgaaagtgcctggcgaaacccagaccggt
aagctattccgtatgcgcggtaaaggcgtcaagtctgtccgcggtggcgcacagggtgat
ttgctgtgccgcgttgtcgtcgaaacaccggtaggcctgaacgaaaggcagaaacagctg
ctgcaagagctgcaagaaagcttcggtggcccaaccggcgagcacaacagcccgcgctca
aagagcttctttgatggtgtgaagaagttttttgacgacctgacccgctaa
>eco:b0122
atgaagacgtttttcagaacagtgttattcggcagcctgatggccgtctgcgcaaacagt
tacgcgctcagcgagtctgaagccgaagatatggccgatttaacggcagtttttgtcttt
ctgaagaacgattgtggttaccagaacttacctaacgggcaaattcgtcgcgcactggtc
tttttcgctcagcaaaaccagtgggacctcagtaattacgacaccttcgacatgaaagcc
ctcggtgaagacagctaccgcgatctcagcggcattggcattcccgtcgctaaaaaatgc
aaagccctggcccgcgattccttaagcctgcttgcctacgtcaaataa
>eco:b0015:duplicate
atggctaagcaagattattacgagattttaggcgtttccaaaacagcggaagagcgtgaa
atcagaaaggcctacaaacgcctggccatgaaataccacccggaccgtaaccagggtgac
aaagaggccgaggcgaaatttaaagagatcaaggaagcttatgaagttctgaccgactcg
caaaaacgtgcggcatacgatcagtatggtcatgctgcgtttgagcaaggtggcatgggc
ggcggcggttttggcggcggcgcagacttcagcgatatttttggtgacgttttcggcgat
atttttggcggcggacgtggtcgtcaacgtgcggcgcgcggtgctgatttacgctataac
atggagctcaccctcgaagaagctgtacgtggcgtgaccaaagagatccgcattccgact
ctggaagagtgtgacgtttgccacggtagcggtgcaaaaccaggtacacagccgcagact
tgtccgacctgtcatggttctggtcaggtgcagatgcgccagggattcttcgctgtacag
cagacctgtccacactgtcagggccgcggtacgctgatcaaagatccgtgcaacaaatgt
catggtcatggtcgtgttgagcgcagcaaaacgctgtccgttaaaatcccggcaggggtg
gacactggagaccgcatccgtcttgcgggcgaaggtgaagcgggcgagcatggcgcaccg
gcaggcgatctgtacgttcaggttcaggttaaacagcacccgattttcgagcgtgaaggc
aacaacctgtattgcgaagtcccgatcaacttcgctatggcggcgctgggtggcgaaatc
gaagtaccgacccttgatggtcgcgtcaaactgaaagtgcctggcgaaacccagaccggt
aagctattccgtatgcgcggtaaaggcgtcaagtctgtccgcggtggcgcacagggtgat
ttgctgtgccgcgttgtcgtcgaaacaccggtaggcctgaacgaaaggcagaaacagctg
ctgcaagagctgcaagaaagcttcggtggcccaaccggcgagcacaacagcccgcgctca
aagagcttctttgatggtgtgaagaagttttttgacgacctgacccgctaa
"""
inseqs2 = """>s1_1
atgttacgcattagcaccaccattaccaccaccatcaccattaccacaggtaacggtgcg
ggctga
>s2_2 some comments...
atggctaagcaagattattacgagattttaggcgtttccaaaacagcggaagagcgtgaa
atcagaaaggcctacaaacgcctggccatgaaataccacccggaccgtaaccagggtgac
aaagaggccgaggcgaaatttaaagagatcaaggaagcttatgaagttctgaccgactcg
caaaaacgtgcggcatacgatcagtatggtcatgctgcgtttgagcaaggtggcatgggc
ggcggcggttttggcggcggcgcagacttcagcgatatttttggtgacgttttcggcgat
atttttggcggcggacgtggtcgtcaacgtgcggcgcgcggtgctgatttacgctataac
atggagctcaccctcgaagaagctgtacgtggcgtgaccaaagagatccgcattccgact
ctggaagagtgtgacgtttgccacggtagcggtgcaaaaccaggtacacagccgcagact
tgtccgacctgtcatggttctggtcaggtgcagatgcgccagggattcttcgctgtacag
cagacctgtccacactgtcagggccgcggtacgctgatcaaagatccgtgcaacaaatgt
catggtcatggtcgtgttgagcgcagcaaaacgctgtccgttaaaatcccggcaggggtg
gacactggagaccgcatccgtcttgcgggcgaaggtgaagcgggcgagcatggcgcaccg
gcaggcgatctgtacgttcaggttcaggttaaacagcacccgattttcgagcgtgaaggc
aacaacctgtattgcgaagtcccgatcaacttcgctatggcggcgctgggtggcgaaatc
gaagtaccgacccttgatggtcgcgtcaaactgaaagtgcctggcgaaacccagaccggt
aagctattccgtatgcgcggtaaaggcgtcaagtctgtccgcggtggcgcacagggtgat
ttgctgtgccgcgttgtcgtcgaaacaccggtaggcctgaacgaaaggcagaaacagctg
ctgcaagagctgcaagaaagcttcggtggcccaaccggcgagcacaacagcccgcgctca
aagagcttctttgatggtgtgaagaagttttttgacgacctgacccgagaa
>s1_3
atgaagacgtttttcagaacagtgttattcggcagcctgatggccgtctgcgcaaacagt
tacgcgctcagcgagtctgaagccgaagatatggccgatttaacggcagtttttgtcttt
ctgaagaacgattgtggttaccagaacttacctaacgggcaaattcgtcgcgcactggtc
tttttcgctcagcaaaaccagtgggacctcagtaattacgacaccttcgacatgaaagcc
ctcggtgaagacagctaccgcgatctcagcggcattggcattcccgtcgctaaaaaatgc
aaagccctggcccgcgattccttaagcctgcttgcctacgtcaaataa
>s1_4
atgaagaaaattttcagaacagtgttattcggcagcctgatggccgtctgcgcaaacagt
tacgcgctcagcgagtctgaagccgaagatatggccgatttaacggcagtttttgtcttt
ctgaagaacgattgtggttaccagaacttacctaacgggcaaattcgtcgcgcactggtc
tttttcgctcagcaaaaccagtgggacctcagtaattacgacaccttcgacatgaaagcc
ctcggtgaagacagctaccgcgatctcagcggcattggcattcccgtcgctaaaaaatgc
aaagccctggcccgcgattccttaagcctgcttgcctacgtcaaatcc
>s1_5
atggctaagcaagattattacgagattttaggcgtttccaaaacagcggaagagcgtgaa
atcagaaaggcctacaaacgcctggccatgaaataccacccggaccgtaaccagggtgac
aaagaggccgaggcgaaatttaaagagatcaaggaagcttatgaagttctgaccgactcg
caaaaacgtgcggcatacgatcagtatggtcatgctgcgtttgagcaaggtggcatgggc
ggcggcggttttggcggcggcgcagacttcagcgatatttttggtgacgttttcggcgat
atttttggcggcggacgtggtcgtcaacgtgcggcgcgcggtgctgatttacgctataac
atggagctcaccctcgaagaagctgtacgtggcgtgaccaaagagatccgcattccgact
ctggaagagtgtgacgtttgccacggtagcggtgcaaaaccaggtacacagccgcagact
tgtccgacctgtcatggttctggtcaggtgcagatgcgccagggattcttcgctgtacag
cagacctgtccacactgtcagggccgcggtacgctgatcaaagatccgtgcaacaaatgt
catggtcatggtcgtgttgagcgcagcaaaacgctgtccgttaaaatcccggcaggggtg
gacactggagaccgcatccgtcttgcgggcgaaggtgaagcgggcgagcatggcgcaccg
gcaggcgatctgtacgttcaggttcaggttaaacagcacccgattttcgagcgtgaaggc
aacaacctgtattgcgaagtcccgatcaacttcgctatggcggcgctgggtggcgaaatc
gaagtaccgacccttgatggtcgcgtcaaactgaaagtgcctggcgaaacccagaccggt
aagctattccgtatgcgcggtaaaggcgtcaagtctgtccgcggtggcgcacagggtgat
ttgctgtgccgcgttgtcgtcgaaacaccggtaggcctgaacgaaaggcagaaacagctg
ctgcaagagctgcaagaaagcttcggtggcccaaccggcgagcacaacagcccgcgctca
aagagcttctttgatggtgtgaagaagttttttgacgacctgacccgctaa
>s1_6 some comments...
aatgactaagcaagattattacgagattttaggcgtttccaaaacagcggaagagcgtgaa
atcagaaaggcctacaaacgcctggccatgaaataccacccggaccgtaaccagggtgac
aaagaggccgaggcgaaatttaaagagatcaaggaagcttatgaagttctgaccgactcg
caaaaacgtgcggcatacgatcagtatggtcatgctgcgtttgagcaaggtggcatgggc
ggcggcggttttggcggcggcgcagacttcagcgatatttttggtgacgttttcggcgat
atttttggcggcggacgtggtcgtcaacgtgcggcgcgcggtgctgatttacgctataac
atggagctcaccctcgaagaagctgtacgtggcgtgaccaaagagatccgcattccgact
ctggaagagtgtgacgtttgccacggtagcggtgcaaaaccaggtacacagccgcagact
tgtccgacctgtcatggttctggtcaggtgcagatgcgccagggattcttcgctgtacag
cagacctgtccacactgtcagggccgcggtacgctgatcaaagatccgtgcaacaaatgt
catggtcatggtcgtgttgagcgcagcaaaacgctgtccgttaaaatcccggcaggggtg
gacactggagaccgcatccgtcttgcgggcgaaggtgaagcgggcgagcatggcgcaccg
gcaggcgatctgtacgttcaggttcaggttaaacagcacccgattttcgagcgtgaaggc
aacaacctgtattgcgaagtcccgatcaacttcgctatggcggcgctgggtggcgaaatc
gaagtaccgacccttgatggtcgcgtcaaactgaaagtgcctggcgaaacccagaccggt
aagctattccgtatgcgcggtaaaggcgtcaagtctgtccgcggtggcgcacagggtgat
ttgctgtgccgcgttgtcgtcgaaacaccggtaggcctgaacgaaaggcagaaacagctg
ctgcaagagctgcaagaaagcttcggtggcccaaccggcgagcacaacagcccgcgctca
aagagcttctttgatggtgtgaagaagttttttgacgacctgacccgctaa
"""
if __name__ == "__main__":
main()
| gpl-2.0 | 9,192,649,743,010,957,000 | 2,210,804,364,992,203,500 | 43.765133 | 93 | 0.799924 | false |