repo_name (string, length 6-100) | path (string, length 4-294) | copies (string, length 1-5) | size (string, length 4-6) | content (string, length 606-896k) | license (string, 15 classes) | var_hash (int64, -9,223,186,179,200,150,000 to 9,223,291,175B) | doc_hash (int64, -9,223,304,365,658,930,000 to 9,223,309,051B) | line_mean (float64, 3.5-99.8) | line_max (int64, 13-999) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|
Nikea/VisTrails | vistrails/packages/controlflow/__init__.py | 2 | 2025 | ###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
identifier="org.vistrails.vistrails.control_flow"
name="Control Flow"
version="0.2.4"
old_identifiers = ["edu.utah.sci.vistrails.control_flow"]
| bsd-3-clause | -3,632,654,221,447,493,600 | 9,173,614,323,437,692,000 | 50.923077 | 79 | 0.694321 | false |
devttys0/binwalk | src/binwalk/modules/compression.py | 2 | 10003 | # Performs raw decompression of various compression algorithms (currently,
# deflate and LZMA).
import os
import zlib
import struct
import binwalk.core.compat
import binwalk.core.common
from binwalk.core.module import Option, Kwarg, Module
try:
import lzma
except ImportError:
from backports import lzma
class LZMAHeader(object):
def __init__(self, **kwargs):
for (k, v) in binwalk.core.compat.iterator(kwargs):
setattr(self, k, v)
class LZMA(object):
DESCRIPTION = "Raw LZMA compression stream"
COMMON_PROPERTIES = [0x5D, 0x6E]
MAX_PROP = ((4 * 5 + 4) * 9 + 8)
BLOCK_SIZE = 32 * 1024
def __init__(self, module):
self.module = module
self.properties = None
self.build_properties()
self.build_dictionaries()
self.build_headers()
# Add an extraction rule
if self.module.extractor.enabled:
self.module.extractor.add_rule(regex='^%s' % self.DESCRIPTION.lower(), extension="7z", cmd=self.extractor)
def extractor(self, file_name):
# Open and read the file containing the raw compressed data.
# This is not terribly efficient, especially for large files...
compressed_data = binwalk.core.common.BlockFile(file_name).read()
# Re-run self.decompress to detect the properties for this compressed
# data (stored in self.properties)
if self.decompress(compressed_data[:self.BLOCK_SIZE]):
# Build an LZMA header on top of the raw compressed data and write it back to disk.
# Header consists of the detected properties values, the largest possible dictionary size,
# and a fake output file size field.
header = chr(self.properties) + \
self.dictionaries[-1] + ("\xFF" * 8)
binwalk.core.common.BlockFile(file_name, "wb").write(header + compressed_data)
# Try to extract it with all the normal lzma extractors until one
# works
for exrule in self.module.extractor.match("lzma compressed data"):
if self.module.extractor.execute(exrule['cmd'], file_name) == True:
break
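    # For reference (added commentary, not from the original module): the
    # 13-byte LZMA "alone" header built in extractor() above is laid out as
    #   1 byte   properties, encoded as ((pb * 5 + lp) * 9) + lc
    #   4 bytes  dictionary size, little-endian uint32
    #   8 bytes  uncompressed size; "\xFF" * 8 means "unknown"
    # so an illustrative header for the default properties byte 0x5D and a
    # 32MiB dictionary would be:
    #   chr(0x5D) + struct.pack("<I", 2 ** 25) + ("\xFF" * 8)    # 13 bytes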
def build_property(self, pb, lp, lc):
prop = (((pb * 5) + lp) * 9) + lc
if prop > self.MAX_PROP:
return None
return int(prop)
def parse_property(self, prop):
prop = int(ord(prop))
if prop > self.MAX_PROP:
return None
pb = prop / (9 * 5)
prop -= pb * 9 * 5
lp = prop / 9
lc = prop - lp * 9
return (pb, lp, lc)
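    # Worked example (added commentary): the common properties byte 0x5D (93)
    # decodes as pb = 93 / 45 = 2, lp = (93 - 90) / 9 = 0, lc = 3, i.e. the
    # standard LZMA defaults of lc=3, lp=0, pb=2.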
def parse_header(self, header):
(pb, lp, lc) = self.parse_property(header[0])
dictionary = struct.unpack("<I", binwalk.core.compat.str2bytes(header[1:5]))[0]
return LZMAHeader(pb=pb, lp=lp, lc=lc, dictionary=dictionary)
def build_properties(self):
self.properties = set()
if self.module.partial_scan == True:
# For partial scans, only check the most common properties values
for prop in self.COMMON_PROPERTIES:
self.properties.add(chr(prop))
else:
for pb in range(0, 9):
for lp in range(0, 5):
for lc in range(0, 5):
prop = self.build_property(pb, lp, lc)
if prop is not None:
self.properties.add(chr(prop))
def build_dictionaries(self):
self.dictionaries = []
if self.module.partial_scan == True:
# For partial scans, only use the largest dictionary value
self.dictionaries.append(binwalk.core.compat.bytes2str(struct.pack("<I", 2 ** 25)))
else:
for n in range(16, 26):
self.dictionaries.append(binwalk.core.compat.bytes2str(struct.pack("<I", 2 ** n)))
def build_headers(self):
self.headers = set()
for prop in self.properties:
for dictionary in self.dictionaries:
self.headers.add(prop + dictionary + ("\xFF" * 8))
def decompress(self, data):
result = None
description = None
for header in self.headers:
# The only acceptable exceptions are those indicating that the
# input data was truncated.
try:
final_data = binwalk.core.compat.str2bytes(header + data)
lzma.decompress(final_data)
result = self.parse_header(header)
break
except IOError as e:
# The Python2 module gives this error on truncated input data.
if str(e) == "unknown BUF error":
result = self.parse_header(header)
break
except Exception as e:
# The Python3 module gives this error on truncated input data.
# The inconsistency between modules is a bit worrisome.
if str(e) == "Compressed data ended before the end-of-stream marker was reached":
result = self.parse_header(header)
break
if result is not None:
self.properties = self.build_property(result.pb, result.lp, result.lc)
description = "%s, properties: 0x%.2X [pb: %d, lp: %d, lc: %d], dictionary size: %d" % (self.DESCRIPTION,
self.properties,
result.pb,
result.lp,
result.lc,
result.dictionary)
return description
class Deflate(object):
'''
Finds and extracts raw deflate compression streams.
'''
ENABLED = False
BLOCK_SIZE = 33 * 1024
DESCRIPTION = "Raw deflate compression stream"
def __init__(self, module):
self.module = module
# Add an extraction rule
if self.module.extractor.enabled:
self.module.extractor.add_rule(regex='^%s' % self.DESCRIPTION.lower(), extension="deflate", cmd=self.extractor)
def extractor(self, file_name):
in_data = ""
out_data = ""
retval = False
out_file = os.path.splitext(file_name)[0]
with binwalk.core.common.BlockFile(file_name, 'r') as fp_in:
while True:
(data, dlen) = fp_in.read_block()
if not data or dlen == 0:
break
else:
in_data += data[:dlen]
try:
out_data = zlib.decompress(binwalk.core.compat.str2bytes(in_data), -15)
with binwalk.core.common.BlockFile(out_file, 'w') as fp_out:
fp_out.write(out_data)
retval = True
break
except zlib.error as e:
pass
return retval
def decompress(self, data):
# Looking for either a valid decompression, or an error indicating
# truncated input data
try:
# Negative window size (e.g., -15) indicates that raw decompression
# should be performed
zlib.decompress(binwalk.core.compat.str2bytes(data), -15)
except zlib.error as e:
if not str(e).startswith("Error -5"):
# Bad data.
return None
return self.DESCRIPTION
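# Illustrative round trip (added commentary, not part of the original module):
# a raw deflate stream is zlib output without the 2-byte header and the
# Adler-32 trailer, which is why the code above passes a window size of -15:
#
#   co = zlib.compressobj(9, zlib.DEFLATED, -15)
#   raw = co.compress("hello hello hello") + co.flush()
#   assert zlib.decompress(raw, -15) == "hello hello hello"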
class RawCompression(Module):
TITLE = 'Raw Compression'
CLI = [
Option(short='X',
long='deflate',
kwargs={'enabled': True, 'scan_for_deflate': True},
description='Scan for raw deflate compression streams'),
Option(short='Z',
long='lzma',
kwargs={'enabled': True, 'scan_for_lzma': True},
description='Scan for raw LZMA compression streams'),
Option(short='P',
long='partial',
kwargs={'partial_scan': True},
description='Perform a superficial, but faster, scan'),
Option(short='S',
long='stop',
kwargs={'stop_on_first_hit': True},
description='Stop after the first result'),
]
KWARGS = [
Kwarg(name='enabled', default=False),
Kwarg(name='partial_scan', default=False),
Kwarg(name='stop_on_first_hit', default=False),
Kwarg(name='scan_for_deflate', default=False),
Kwarg(name='scan_for_lzma', default=False),
]
def init(self):
self.decompressors = []
if self.scan_for_deflate:
self.decompressors.append(Deflate(self))
if self.scan_for_lzma:
self.decompressors.append(LZMA(self))
def run(self):
for fp in iter(self.next_file, None):
file_done = False
self.header()
while not file_done:
(data, dlen) = fp.read_block()
if dlen < 1:
break
for i in range(0, dlen):
for decompressor in self.decompressors:
description = decompressor.decompress(data[i:i + decompressor.BLOCK_SIZE])
if description:
self.result(description=description, file=fp, offset=fp.tell() - dlen + i)
if self.stop_on_first_hit:
file_done = True
break
if file_done:
break
self.status.completed += 1
self.status.completed = fp.tell() - fp.offset
self.footer()
| mit | 2,545,355,115,473,655,300 | 7,638,287,276,281,828,000 | 34.853047 | 123 | 0.523743 | false |
berkerpeksag/pythondotorg | pydotorg/settings/base.py | 1 | 5943 | import os
import dj_database_url
### Basic config
BASE = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
DEBUG = TEMPLATE_DEBUG = True
SITE_ID = 1
SECRET_KEY = 'its-a-secret-to-everybody'
# Until Sentry works on Py3, do errors the old-fashioned way.
ADMINS = []
# General project information
# These are available in the template as SITE_INFO.<title>
SITE_VARIABLES = {
'site_name': 'Python.org',
'site_descript': 'The official home of the Python Programming Language',
}
### Databases
DATABASES = {
'default': dj_database_url.config(default='postgres:///python.org')
}
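# dj_database_url.config() reads the DATABASE_URL environment variable when it
# is set and falls back to the default above, e.g. (illustrative value only):
#   DATABASE_URL=postgres://user:password@localhost:5432/python.org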
### Locale settings
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DATE_FORMAT = 'Y-m-d'
### Files (media and static)
MEDIA_ROOT = os.path.join(BASE, 'media')
MEDIA_URL = '/m/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(BASE, 'static-root')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE, 'static'),
]
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
### Authentication
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
LOGIN_REDIRECT_URL = 'home'
ACCOUNT_LOGOUT_REDIRECT_URL = 'home'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
SOCIALACCOUNT_EMAIL_REQUIRED = True
SOCIALACCOUNT_EMAIL_VERIFICATION = True
SOCIALACCOUNT_QUERY_EMAIL = True
### Templates
TEMPLATE_DIRS = [
os.path.join(BASE, 'templates')
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
"django.contrib.messages.context_processors.messages",
"pydotorg.context_processors.site_info",
"pydotorg.context_processors.url_name",
]
### URLs, WSGI, middleware, etc.
ROOT_URLCONF = 'pydotorg.urls'
MIDDLEWARE_CLASSES = (
'pydotorg.middleware.AdminNoCaching',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'pages.middleware.PageFallbackMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
)
AUTH_USER_MODEL = 'users.User'
WSGI_APPLICATION = 'pydotorg.wsgi.application'
### Apps
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.redirects',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.comments',
'django.contrib.admin',
'django.contrib.admindocs',
'django_comments_xtd',
'jsonfield',
'pipeline',
'sitetree',
'timedelta',
'imagekit',
'haystack',
'honeypot',
'users',
'boxes',
'cms',
'companies',
'feedbacks',
'community',
'jobs',
'pages',
'sponsors',
'successstories',
'events',
'minutes',
'peps',
'blogs',
'downloads',
'codesamples',
'allauth',
'allauth.account',
'allauth.socialaccount',
#'allauth.socialaccount.providers.facebook',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.twitter',
# Tastypie needs the `users` app to be already loaded.
'tastypie',
]
# Fixtures
FIXTURE_DIRS = (
os.path.join(BASE, 'fixtures'),
)
### Testing
SKIP_NETWORK_TESTS = True
### Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
### Development
DEV_FIXTURE_URL = 'https://www.python.org/m/fixtures/dev-fixtures.json.gz'
### Comments
COMMENTS_APP = 'django_comments_xtd'
COMMENTS_XTD_MAX_THREAD_LEVEL = 0
COMMENTS_XTD_FORM_CLASS = "jobs.forms.JobCommentForm"
### Honeypot
HONEYPOT_FIELD_NAME = 'email_body_text'
HONEYPOT_VALUE = 'write your message'
### Blog Feed URL
PYTHON_BLOG_FEED_URL = "http://feeds.feedburner.com/PythonInsider"
PYTHON_BLOG_URL = "http://blog.python.org"
### Registration mailing lists
MAILING_LIST_PSF_MEMBERS = "[email protected]"
### PEP Repo Location
PEP_REPO_PATH = ''
### Fastly ###
FASTLY_API_KEY = False # Set to Fastly API key in production to allow pages to
# be purged on save
# Jobs
JOB_THRESHOLD_DAYS = 90
JOB_FROM_EMAIL = '[email protected]'
### Pipeline
from .pipeline import (
PIPELINE_CSS, PIPELINE_JS,
PIPELINE_COMPILERS,
PIPELINE_SASS_BINARY, PIPELINE_SASS_ARGUMENTS,
PIPELINE_CSS_COMPRESSOR, PIPELINE_JS_COMPRESSOR,
)
| apache-2.0 | -3,535,368,240,454,716,400 | -7,402,436,853,491,285,000 | 23.557851 | 79 | 0.676931 | false |
soravux/deap | examples/es/cma_bipop.py | 11 | 8270 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""Implementation of the BI-Population CMA-ES algorithm. As presented in
*Hansen, 2009, Benchmarking a BI-Population CMA-ES on the BBOB-2009 Function
Testbed* with the exception of the modifications to the original CMA-ES
parameters mentioned at the end of section 2's first paragraph.
"""
from collections import deque
import numpy
from deap import algorithms
from deap import base
from deap import benchmarks
from deap import cma
from deap import creator
from deap import tools
# Problem size
N = 30
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
def main(verbose=True):
NRESTARTS = 10 # Initialization + 9 I-POP restarts
SIGMA0 = 2.0 # 1/5th of the domain [-5 5]
toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.rastrigin)
halloffame = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
logbooks = list()
nsmallpopruns = 0
smallbudget = list()
largebudget = list()
lambda0 = 4 + int(3 * numpy.log(N))
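    # Added note: with N = 30 this is the usual CMA-ES default population
    # size, 4 + int(3 * ln(30)) = 4 + 10 = 14.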
regime = 1
i = 0
while i < (NRESTARTS + nsmallpopruns):
# The first regime is enforced on the first and last restart
# The second regime is run if its allocated budget is smaller than the allocated
# large population regime budget
if i > 0 and i < (NRESTARTS + nsmallpopruns) - 1 and sum(smallbudget) < sum(largebudget):
lambda_ = int(lambda0 * (0.5 * (2**(i - nsmallpopruns) * lambda0) / lambda0)**(numpy.random.rand()**2))
sigma = 2 * 10**(-2 * numpy.random.rand())
nsmallpopruns += 1
regime = 2
smallbudget += [0]
else:
lambda_ = 2**(i - nsmallpopruns) * lambda0
sigma = SIGMA0
regime = 1
largebudget += [0]
t = 0
# Set the termination criterion constants
if regime == 1:
MAXITER = 100 + 50 * (N + 3)**2 / numpy.sqrt(lambda_)
elif regime == 2:
MAXITER = 0.5 * largebudget[-1] / lambda_
TOLHISTFUN = 10**-12
TOLHISTFUN_ITER = 10 + int(numpy.ceil(30. * N / lambda_))
EQUALFUNVALS = 1. / 3.
EQUALFUNVALS_K = int(numpy.ceil(0.1 + lambda_ / 4.))
TOLX = 10**-12
TOLUPSIGMA = 10**20
CONDITIONCOV = 10**14
STAGNATION_ITER = int(numpy.ceil(0.2 * t + 120 + 30. * N / lambda_))
NOEFFECTAXIS_INDEX = t % N
equalfunvalues = list()
bestvalues = list()
medianvalues = list()
mins = deque(maxlen=TOLHISTFUN_ITER)
# We start with a centroid in [-4, 4]**D
strategy = cma.Strategy(centroid=numpy.random.uniform(-4, 4, N), sigma=sigma, lambda_=lambda_)
toolbox.register("generate", strategy.generate, creator.Individual)
toolbox.register("update", strategy.update)
logbooks.append(tools.Logbook())
logbooks[-1].header = "gen", "evals", "restart", "regime", "std", "min", "avg", "max"
conditions = {"MaxIter" : False, "TolHistFun" : False, "EqualFunVals" : False,
"TolX" : False, "TolUpSigma" : False, "Stagnation" : False,
"ConditionCov" : False, "NoEffectAxis" : False, "NoEffectCoor" : False}
# Run the current regime until one of the following is true:
## Note that the algorithm won't stop by itself on the optimum (0.0 on rastrigin).
while not any(conditions.values()):
# Generate a new population
population = toolbox.generate()
# Evaluate the individuals
fitnesses = toolbox.map(toolbox.evaluate, population)
for ind, fit in zip(population, fitnesses):
ind.fitness.values = fit
halloffame.update(population)
record = stats.compile(population)
logbooks[-1].record(gen=t, evals=lambda_, restart=i, regime=regime, **record)
if verbose:
print(logbooks[-1].stream)
# Update the strategy with the evaluated individuals
toolbox.update(population)
# Count the number of times the k'th best solution is equal to the best solution
# At this point the population is sorted (method update)
if population[-1].fitness == population[-EQUALFUNVALS_K].fitness:
equalfunvalues.append(1)
# Log the best and median value of this population
bestvalues.append(population[-1].fitness.values)
medianvalues.append(population[int(round(len(population)/2.))].fitness.values)
# First run does not count into the budget
if regime == 1 and i > 0:
largebudget[-1] += lambda_
elif regime == 2:
smallbudget[-1] += lambda_
t += 1
STAGNATION_ITER = int(numpy.ceil(0.2 * t + 120 + 30. * N / lambda_))
NOEFFECTAXIS_INDEX = t % N
if t >= MAXITER:
# The maximum number of iteration per CMA-ES ran
conditions["MaxIter"] = True
mins.append(record["min"])
if (len(mins) == mins.maxlen) and max(mins) - min(mins) < TOLHISTFUN:
# The range of the best values is smaller than the threshold
conditions["TolHistFun"] = True
if t > N and sum(equalfunvalues[-N:]) / float(N) > EQUALFUNVALS:
# In 1/3rd of the last N iterations the best and k'th best solutions are equal
conditions["EqualFunVals"] = True
if all(strategy.pc < TOLX) and all(numpy.sqrt(numpy.diag(strategy.C)) < TOLX):
# All components of pc and sqrt(diag(C)) are smaller than the threshold
conditions["TolX"] = True
if strategy.sigma / sigma > strategy.diagD[-1]**2 * TOLUPSIGMA:
# The sigma ratio is bigger than a threshold
conditions["TolUpSigma"] = True
if len(bestvalues) > STAGNATION_ITER and len(medianvalues) > STAGNATION_ITER and \
numpy.median(bestvalues[-20:]) >= numpy.median(bestvalues[-STAGNATION_ITER:-STAGNATION_ITER + 20]) and \
numpy.median(medianvalues[-20:]) >= numpy.median(medianvalues[-STAGNATION_ITER:-STAGNATION_ITER + 20]):
                # Stagnation occurred
conditions["Stagnation"] = True
if strategy.cond > 10**14:
# The condition number is bigger than a threshold
conditions["ConditionCov"] = True
if all(strategy.centroid == strategy.centroid + 0.1 * strategy.sigma * strategy.diagD[-NOEFFECTAXIS_INDEX] * strategy.B[-NOEFFECTAXIS_INDEX]):
# The coordinate axis std is too low
conditions["NoEffectAxis"] = True
if any(strategy.centroid == strategy.centroid + 0.2 * strategy.sigma * numpy.diag(strategy.C)):
# The main axis std has no effect
conditions["NoEffectCoor"] = True
stop_causes = [k for k, v in conditions.items() if v]
print("Stopped because of condition%s %s" % ((":" if len(stop_causes) == 1 else "s:"), ",".join(stop_causes)))
i += 1
return halloffame
if __name__ == "__main__":
main()
| lgpl-3.0 | -1,061,889,497,614,883,000 | -5,049,815,129,921,607,000 | 40.979695 | 154 | 0.594196 | false |
neuroidss/nupic | src/nupic/datafiles/extra/gym/raw/makeDataset.py | 27 | 8637 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2010-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unify the various Gym CSV files to a single coherent CSV file
The Gym dataset has two file types:
1. Hourly attendance data per gym
2. KW consumption in 15 minutes intervals
The createDataset() function merges the two file types and creates
a single CSV file with hourly data. Each record contains the following fields:
Gym name, Date, Hour, # Atendees, KW consumption
"""
import os
import sys
import fileinput
import glob
import operator
import datetime
from nupic.data.file import File
months = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
class Record(object):
def __init__(self):
self.club = ''
self.date = None
self.time = 0
self.KW = 0
self.attendeeCount = 0
self.consumption = 0
class Club(object):
def __init__(self, name):
self.name = name
self.records = {}
def processAttendance(self, f):
# Skip first two
line = f.next()
assert line == ',,,,,,,,,,,,,,,,,,,\n'
line = f.next()
assert line == 'Date Of Swipe, < 6 am,6-7 am,7-8 am,8-9 am,9-10 am,10-11 am,11-12 am,12-1 pm,1-2 pm,2-3 pm,3-4 pm,4-5 pm,5-6 pm,6-7 pm,7-8 pm,8-9 pm,9-10 pm,> 10 pm,Totals\n'
for i, line in enumerate(f):
# Check weather we're done with this club
if line == ',,,,,,,,,,,,,,,,,,,\n':
# skip next two lines
line = f.next()
assert line.startswith('Club Totals:')
line = f.next()
assert line == ',,,,,,,,,,,,,,,,,,,\n'
return
else:
self.addRecord(line)
def addRecord(self, line):
fields = line.split(',')
assert len(fields) == 20
date = fields[0].split('-')
# Convert day to 'dd'
dd = int(date[0])
mm = months.index(date[1]) + 1
assert mm in (9, 10)
# Convert year from 'yy' to 'yyyy'
yyyy = 2000 + int(date[2])
date = (yyyy, mm, dd)
# Add 0 for hours without attendants (<12AM-4AM and 11PM)
attendance = [0] * 5 + fields[1:19] + [0]
assert len(attendance) == 24
# Create a record for each hour of the day.
for i, a in enumerate(attendance):
r = Record()
r.club = self.name
r.timestamp = datetime.datetime(yyyy, mm, dd, i)
#r.time = i
r.attendeeCount = a
self.records[(date, i)] = r
def updateRecord(self, date, t, consumption):
# Get rid of time and AM/PM if needed
date = date.split()[0]
# Convert to (yyyy, mmm, dd)
date = date.split('/')
# Convert day to 'dd'
dd = int(date[0])
# Convert month index to month name
mm = int(date[1])
yyyy = int(date[2])
# Locate record
key = ((yyyy, mm, dd), t)
if not key in self.records:
print self.name, 'is missing attendance data for', key
else:
r = self.records[key]
r.consumption = consumption
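# Added note: records are keyed by ((yyyy, mm, dd), hour); the attendance pass
# creates 24 hourly Record objects per club per day and the consumption pass
# then fills each record's kWh total from four 15-minute readings.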
def processClubAttendance(f, clubs):
"""Process the attendance data of one club
If the club already exists in the list update its data.
If the club is new create a new Club object and add it to the dict
The next step is to iterate over all the lines and add a record for each line.
When reaching an empty line it means there are no more records for this club.
Along the way some redundant lines are skipped. When the file ends the f.next()
call raises a StopIteration exception and that's the sign to return False,
which indicates to the caller that there are no more clubs to process.
"""
try:
# Skip as many empty lines as necessary (file format inconsistent)
line = f.next()
while line == ',,,,,,,,,,,,,,,,,,,\n':
line = f.next()
# The first non-empty line should have the name as the first field
name = line.split(',')[0]
# Create a new club object if needed
if name not in clubs:
clubs[name] = Club(name)
# Get the named club
c = clubs[name]
c.processAttendance(f)
return True
except StopIteration:
return False
def processClubConsumption(f, clubs):
"""Process the consumption a club
- Skip the header line
- Iterate over lines
- Read 4 records at a time
- Parse each line: club, date, time, consumption
- Get club object from dictionary if needed
- Aggregate consumption
- Call club.processConsumption() with data
"""
try:
# Skip header line
line = f.next()
assert line.endswith('" ","SITE_LOCATION_NAME","TIMESTAMP","TOTAL_KWH"\n')
valid_times = range(24)
t = 0 # used to track time
club = None
clubName = None
lastDate = None
while True:
assert t in valid_times
consumption = 0
for x in range(4):
# Read the line and get rid of the newline character
line = f.next()[:-1]
fields = line.split(',')
assert len(fields) == 4
for i, field in enumerate(fields):
# Strip the redundant double quotes
assert field[0] == '"' and field[-1] == '"'
fields[i] = field[1:-1]
# Ignoring field 0, which is just a running count
# Get the club name
name = fields[1]
# Hack to fix inconsistent club names like: "Melbourne CBD - Melbourne Central" vs. "Melbourne Central"
partialNames = ('Melbourne Central', 'North Sydney', 'Park St', 'Pitt St')
for pn in partialNames:
if pn in name:
name = pn
# Locate the club if needed (maybe )
if name != clubName:
clubName = name
club = clubs[name]
# Split the date (time is counted using the t variable)
tokens = fields[2].split()
# Verify that t == 0 and consumption == 0 when there is no time in the file
if len(tokens) == 1:
assert consumption == 0 and t == 0
# The first (and sometimes only) token is the date
date = tokens[0]
# Aggregate the consumption
consumption += float(fields[3])
# Update the Club object after aggregating the consumption of 4 lines
club.updateRecord(date, t, consumption)
# Increment time
t += 1
t %= 24
except StopIteration:
return
def processAttendanceFiles():
files = glob.glob('Attendance*.csv')
f = fileinput.input(files=files)
# Process the input files and create a dictionary of Club objects
clubs = {}
while processClubAttendance(f, clubs):
pass
return clubs
def processConsumptionFiles(clubs):
"""
"""
files = glob.glob('all_group*detail.csv')
f = fileinput.input(files=files)
# Process the input files and create a dictionary of Club objects
while processClubConsumption(f, clubs):
pass
return clubs
def makeDataset():
"""
"""
clubs = processAttendanceFiles()
clubs = processConsumptionFiles(clubs)
fields = [('gym', 'string', 'S'),
('timestamp', 'datetime', 'T'),
('attendeeCount', 'int', ''),
('consumption', 'float', ''),
]
with File('gym.csv', fields) as f:
## write header
#f.write('Gym Name,Date,Time,Attendee Count,Consumption (KWH)\n')
for c in clubs.values():
for k, r in sorted(c.records.iteritems(), key=operator.itemgetter(0)):
#dd = r.date[2]
#mm = r.date[1]
#yyyy = r.date[0]
#line = ','.join(str(x) for x in
# (c.name, '%d-%s-%d' % (dd, mmm, yyyy), r.time, r.attendeeCount, r.consumption))
#f.write(line + '\n')
f.write([r.club, r.timestamp, r.attendeeCount, r.consumption])
if __name__=='__main__':
makeDataset()
print 'Done.'
| agpl-3.0 | -7,814,946,136,597,747,000 | 5,351,792,113,991,699,000 | 29.519435 | 178 | 0.596388 | false |
abhik/pebl | src/pebl/learner/simanneal.py | 4 | 4032 | """Classes and functions for Simulated Annealing learner"""
from math import exp
import random
from pebl import network, result, evaluator, config
from pebl.learner.base import *
class SALearnerStatistics:
def __init__(self, starting_temp, delta_temp, max_iterations_at_temp):
self.temp = starting_temp
self.iterations_at_temp = 0
self.max_iterations_at_temp = max_iterations_at_temp
self.delta_temp = delta_temp
self.iterations = 0
self.best_score = 0
self.current_score = 0
def update(self):
self.iterations += 1
self.iterations_at_temp += 1
if self.iterations_at_temp >= self.max_iterations_at_temp:
self.temp *= self.delta_temp
self.iterations_at_temp = 0
class SimulatedAnnealingLearner(Learner):
#
# Parameters
#
_params = (
config.FloatParameter(
'simanneal.start_temp',
"Starting temperature for a run.",
config.atleast(0.0),
default=100.0
),
config.FloatParameter(
'simanneal.delta_temp',
'Change in temp between steps.',
config.atleast(0.0),
default=0.5
),
config.IntParameter(
'simanneal.max_iters_at_temp',
'Max iterations at any temperature.',
config.atleast(0),
default=100
),
config.StringParameter(
'simanneal.seed',
'Starting network for a greedy search.',
default=''
)
)
    def __init__(self, data_=None, prior_=None, **options):
        """Create a Simulated Annealing learner.
For more information about Simulated Annealing algorithms, consult:
1. http://en.wikipedia.org/wiki/Simulated_annealing
2. D. Heckerman. A Tutorial on Learning with Bayesian Networks.
Microsoft Technical Report MSR-TR-95-06, 1995. p.35-36.
Any config param for 'simanneal' can be passed in via options.
Use just the option part of the parameter name.
"""
super(SimulatedAnnealingLearner,self).__init__(data_, prior_)
config.setparams(self, options)
if not isinstance(self.seed, network.Network):
self.seed = network.Network(self.data.variables, self.seed)
def run(self):
"""Run the learner."""
self.stats = SALearnerStatistics(self.start_temp, self.delta_temp,
self.max_iters_at_temp)
self.result = result.LearnerResult(self)
self.evaluator = evaluator.fromconfig(self.data, self.seed, self.prior)
self.evaluator.score_network(self.seed.copy())
self.result.start_run()
curscore = self.evaluator.score_network()
# temperature decays exponentially, so we'll never get to 0.
# So, we continue until temp < 1
while self.stats.temp >= 1:
try:
newscore = self._alter_network_randomly_and_score()
except CannotAlterNetworkException:
return
self.result.add_network(self.evaluator.network, newscore)
if self._accept(newscore):
# set current score
self.stats.current_score = newscore
if self.stats.current_score > self.stats.best_score:
self.stats.best_score = self.stats.current_score
else:
# undo network alteration
self.evaluator.restore_network()
# temp not updated EVERY iteration. just whenever criteria met.
self.stats.update()
self.result.stop_run()
return self.result
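    # Added note: _accept() below is the standard Metropolis criterion; a worse
    # score is kept with probability exp((newscore - oldscore) / temp), e.g. a
    # drop of 10 at temp=100 is accepted with probability exp(-0.1) ~= 0.905.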
def _accept(self, newscore):
oldscore = self.stats.current_score
if newscore >= oldscore:
return True
elif random.random() < exp((newscore - oldscore)/self.stats.temp):
return True
else:
return False
| mit | 1,424,287,944,278,118,400 | -6,370,497,727,220,143,000 | 31.256 | 79 | 0.580109 | false |
drawks/ansible | lib/ansible/modules/cloud/google/gcp_redis_instance_facts.py | 12 | 7557 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_redis_instance_facts
description:
- Gather facts for GCP Instance
short_description: Gather facts for GCP Instance
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
region:
description:
- The name of the Redis region of the instance.
required: true
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: " a instance facts"
gcp_redis_instance_facts:
region: us-central1
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: facts
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
alternativeLocationId:
description:
- Only applicable to STANDARD_HA tier which protects the instance against zonal
failures by provisioning it across two zones.
- If provided, it must be a different zone from the one provided in [locationId].
returned: success
type: str
authorizedNetwork:
description:
- The full name of the Google Compute Engine network to which the instance is
connected. If left unspecified, the default network will be used.
returned: success
type: str
createTime:
description:
- The time the instance was created in RFC3339 UTC "Zulu" format, accurate to
nanoseconds.
returned: success
type: str
currentLocationId:
description:
- The current zone where the Redis endpoint is placed.
- For Basic Tier instances, this will always be the same as the [locationId]
provided by the user at creation time. For Standard Tier instances, this can
be either [locationId] or [alternativeLocationId] and can change after a failover
event.
returned: success
type: str
displayName:
description:
- An arbitrary and optional user-provided name for the instance.
returned: success
type: str
host:
description:
- Hostname or IP address of the exposed Redis endpoint used by clients to connect
to the service.
returned: success
type: str
labels:
description:
- Resource labels to represent user provided metadata.
returned: success
type: dict
redisConfigs:
description:
- Redis configuration parameters, according to U(http://redis.io/topics/config.)
- 'Please check Memorystore documentation for the list of supported parameters:
U(https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs)
.'
returned: success
type: dict
locationId:
description:
- The zone where the instance will be provisioned. If not provided, the service
will choose a zone for the instance. For STANDARD_HA tier, instances will
be created across two zones for protection against zonal failures. If [alternativeLocationId]
is also provided, it must be different from [locationId].
returned: success
type: str
name:
description:
- The ID of the instance or a fully qualified identifier for the instance. .
returned: success
type: str
memorySizeGb:
description:
- Redis memory size in GiB.
returned: success
type: int
port:
description:
- The port number of the exposed Redis endpoint.
returned: success
type: int
redisVersion:
description:
- The version of Redis software. If not provided, latest supported version will
be used. Updating the version will perform an upgrade/downgrade to the new
version. Currently, the supported values are REDIS_3_2 for Redis 3.2.
returned: success
type: str
reservedIpRange:
description:
- The CIDR range of internal addresses that are reserved for this instance.
If not provided, the service will choose an unused /29 block, for example,
10.0.0.0/29 or 192.168.0.0/29. Ranges must be unique and non-overlapping with
existing subnets in an authorized network.
returned: success
type: str
tier:
description:
- 'The service tier of the instance. Must be one of these values: - BASIC: standalone
instance - STANDARD_HA: highly available primary/replica instances .'
returned: success
type: str
region:
description:
- The name of the Redis region of the instance.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(region=dict(required=True, type='str')))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
items = fetch_list(module, collection(module))
if items.get('instances'):
items = items.get('instances')
else:
items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
return "https://redis.googleapis.com/v1/projects/{project}/locations/{region}/instances".format(**module.params)
def fetch_list(module, link):
auth = GcpSession(module, 'redis')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| gpl-3.0 | 7,400,078,695,531,821,000 | 5,048,593,995,158,715,000 | 32.290749 | 135 | 0.60659 | false |
TangXT/edx-platform | common/djangoapps/user_api/models.py | 24 | 1900 | from django.contrib.auth.models import User
from django.core.validators import RegexValidator
from django.db import models
from xmodule_django.models import CourseKeyField
class UserPreference(models.Model):
"""A user's preference, stored as generic text to be processed by client"""
KEY_REGEX = r"[-_a-zA-Z0-9]+"
user = models.ForeignKey(User, db_index=True, related_name="preferences")
key = models.CharField(max_length=255, db_index=True, validators=[RegexValidator(KEY_REGEX)])
value = models.TextField()
class Meta: # pylint: disable=missing-docstring
unique_together = ("user", "key")
@classmethod
def set_preference(cls, user, preference_key, preference_value):
"""
Sets the user preference for a given key
"""
user_pref, _ = cls.objects.get_or_create(user=user, key=preference_key)
user_pref.value = preference_value
user_pref.save()
@classmethod
def get_preference(cls, user, preference_key, default=None):
"""
Gets the user preference value for a given key
Returns the given default if there isn't a preference for the given key
"""
try:
user_pref = cls.objects.get(user=user, key=preference_key)
return user_pref.value
except cls.DoesNotExist:
return default
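# Illustrative usage of the two helpers above (key and value are made up):
#   UserPreference.set_preference(user, "pref-lang", "en")
#   UserPreference.get_preference(user, "pref-lang", default="en")  # -> "en"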
class UserCourseTag(models.Model):
"""
Per-course user tags, to be used by various things that want to store tags about
the user. Added initially to store assignment to experimental groups.
"""
user = models.ForeignKey(User, db_index=True, related_name="+")
key = models.CharField(max_length=255, db_index=True)
course_id = CourseKeyField(max_length=255, db_index=True)
value = models.TextField()
class Meta: # pylint: disable=missing-docstring
unique_together = ("user", "course_id", "key")
| agpl-3.0 | 6,174,552,057,135,879,000 | -1,767,329,753,187,479,600 | 34.849057 | 97 | 0.668947 | false |
tastynoodle/django | tests/i18n/contenttypes/tests.py | 9 | 1181 | # coding: utf-8
from __future__ import unicode_literals
import os
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.utils import override_settings
from django.utils._os import upath
from django.utils import six
from django.utils import translation
from i18n import TransRealMixin
@override_settings(
USE_I18N=True,
LOCALE_PATHS=(
os.path.join(os.path.dirname(upath(__file__)), 'locale'),
),
LANGUAGE_CODE='en',
LANGUAGES=(
('en', 'English'),
('fr', 'French'),
),
)
class ContentTypeTests(TransRealMixin, TestCase):
def test_verbose_name(self):
company_type = ContentType.objects.get(app_label='i18n', model='company')
with translation.override('en'):
self.assertEqual(six.text_type(company_type), 'Company')
with translation.override('fr'):
self.assertEqual(six.text_type(company_type), 'Société')
def test_field_override(self):
company_type = ContentType.objects.get(app_label='i18n', model='company')
company_type.name = 'Other'
self.assertEqual(six.text_type(company_type), 'Other')
| bsd-3-clause | 642,158,705,766,330,600 | -5,695,726,863,433,587,000 | 30.026316 | 81 | 0.677693 | false |
snowflakedb/snowflake-connector-python | src/snowflake/connector/vendored/urllib3/util/__init__.py | 27 | 1155 | from __future__ import absolute_import
# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
from .request import SKIP_HEADER, SKIPPABLE_HEADERS, make_headers
from .response import is_fp_closed
from .retry import Retry
from .ssl_ import (
ALPN_PROTOCOLS,
HAS_SNI,
IS_PYOPENSSL,
IS_SECURETRANSPORT,
PROTOCOL_TLS,
SSLContext,
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
from .timeout import Timeout, current_time
from .url import Url, get_host, parse_url, split_first
from .wait import wait_for_read, wait_for_write
__all__ = (
"HAS_SNI",
"IS_PYOPENSSL",
"IS_SECURETRANSPORT",
"SSLContext",
"PROTOCOL_TLS",
"ALPN_PROTOCOLS",
"Retry",
"Timeout",
"Url",
"assert_fingerprint",
"current_time",
"is_connection_dropped",
"is_fp_closed",
"get_host",
"parse_url",
"make_headers",
"resolve_cert_reqs",
"resolve_ssl_version",
"split_first",
"ssl_wrap_socket",
"wait_for_read",
"wait_for_write",
"SKIP_HEADER",
"SKIPPABLE_HEADERS",
)
| apache-2.0 | -7,145,180,343,750,009,000 | 7,710,897,414,642,870,000 | 22.571429 | 68 | 0.65368 | false |
stevegt/UltimakerUtils | leveling-rings-UM1.py | 1 | 2681 | #!/usr/bin/python
# Derived from the UM2 version by an anonymous contributor...
#
# http://umforum.ultimaker.com/index.php?/topic/5951-um2-calibration-utility-leveling-ringsgcode/?p=54694
#
# ...who wisely says: "I accept NO liability for any damage done by
# using either version or any derivatives. USE AT YOUR OWN RISK."
filament_diameter = 2.89
build_area_width = 205.0
build_area_depth = 205.0
rings = 10
wide = 0.4
thick = 0.2925 / 2
temperature = 230
bed_temperature = 60
base_dia = 180
pi=3.1415927
center_x = build_area_width/2.0
center_y = build_area_depth/2.0
filament_area = (filament_diameter / 2) ** 2 * pi
head = '''
M107 ;start with the fan off
G21 ;metric values
G90 ;absolute positioning
M82 ;set extruder to absolute mode
M107 ;start with the fan off
G28 X0 Y0 ;move X/Y to min endstops
G28 Z0 ;move Z to min endstops
G1 Z15.0 F9000 ;move the platform down 15mm
M140 S{bed_temperature:.2f} ;set bed temp (no wait)
M109 T0 S{temperature:.2f} ;set extruder temp (wait)
M190 S{bed_temperature:.2f} ;set bed temp (wait)
G92 E0 ;zero the extruded length
G1 F200 E3 ;extrude 3mm of feed stock
G92 E0 ;zero the extruded length again
G1 F9000 ;set speed to 9000
;Put printing message on LCD screen
M117 Printing...
;Layer count: 1
;LAYER:0
'''
loop = '''
G0 F9000 X{x:.2f} Y{y:.2f} Z{z:.2f}
G2 F1000 X{x:.2f} Y{y:.2f} I{r:.2f} E{total_mm3:.2f}'''
tail = '''
;End GCode
M104 S0 ;extruder heater off
M140 S0 ;heated bed heater off (if you have it)
G91 ;relative positioning
G1 E-1 F300 ;retract the filament a bit before lifting the nozzle, to release some of the pressure
G1 Z+0.5 E-5 X-20 Y-20 F9000 ;move Z up a bit and retract filament even more
G28 X0 Y0 ;move X/Y to min endstops, so the head is out of the way
M84 ;steppers off
G90 ;absolute positioning'''
total_mm3 = 0
body = ''
cross_section = thick * wide
z = thick
for i in range(rings):
dia = base_dia - ((wide * 2) * i)
circumference = pi * dia
r = dia/2.0;
x = center_x - r
y = center_y
mm3 = (circumference * cross_section) / filament_area
total_mm3 += mm3
body += loop.format(**vars())
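# Added note: despite the "mm3" naming, the E value passed to each G2 move is
# filament length in mm, i.e. extruded volume (circumference * wide * thick)
# divided by the filament cross-section area. For the outermost ring
# (dia = 180) that is roughly
# (pi * 180 * 0.4 * 0.14625) / (pi * (2.89 / 2) ** 2) ~= 5.0 mm of filament.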
print head.format(**vars())
print body
print tail.format(**vars())
| gpl-2.0 | 5,813,475,626,212,953,000 | 5,960,020,954,447,720,000 | 30.174419 | 118 | 0.564715 | false |
biswajitsahu/kuma | vendor/packages/git/diff.py | 32 | 2669 | # diff.py
# Copyright (C) 2008-2010 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import re
import commit
class Diff(object):
"""
A Diff contains diff information between two commits.
"""
def __init__(self, repo, a_path, b_path, a_commit, b_commit, a_mode,
b_mode, new_file, deleted_file, rename_from,
rename_to, diff):
self.repo = repo
self.a_path = a_path
self.b_path = b_path
if not a_commit or re.search(r'^0{40}$', a_commit):
self.a_commit = None
else:
self.a_commit = commit.Commit(repo, id=a_commit)
if not b_commit or re.search(r'^0{40}$', b_commit):
self.b_commit = None
else:
self.b_commit = commit.Commit(repo, id=b_commit)
self.a_mode = a_mode
self.b_mode = b_mode
self.new_file = new_file
self.deleted_file = deleted_file
self.rename_from = rename_from
self.rename_to = rename_to
self.renamed = rename_from != rename_to
self.diff = diff
@classmethod
def list_from_string(cls, repo, text):
diffs = []
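        # Added note: the verbose regex below matches one "diff --git" section
        # header, e.g. a chunk beginning with (illustrative)
        #    a/README b/README
        #   index 3c9638b..5c14d90 100644
        # with optional similarity/rename/mode lines in between.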
diff_header = re.compile(r"""
#^diff[ ]--git
[ ]a/(?P<a_path>\S+)[ ]b/(?P<b_path>\S+)\n
(?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n
^rename[ ]from[ ](?P<rename_from>\S+)\n
^rename[ ]to[ ](?P<rename_to>\S+)(?:\n|$))?
(?:^old[ ]mode[ ](?P<old_mode>\d+)\n
^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
(?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
(?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
(?:^index[ ](?P<a_commit>[0-9A-Fa-f]+)
\.\.(?P<b_commit>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
""", re.VERBOSE | re.MULTILINE).match
for diff in ('\n' + text).split('\ndiff --git')[1:]:
header = diff_header(diff)
a_path, b_path, similarity_index, rename_from, rename_to, \
old_mode, new_mode, new_file_mode, deleted_file_mode, \
a_commit, b_commit, b_mode = header.groups()
new_file, deleted_file = bool(new_file_mode), bool(deleted_file_mode)
diffs.append(Diff(repo, a_path, b_path, a_commit, b_commit,
old_mode or deleted_file_mode, new_mode or new_file_mode or b_mode,
new_file, deleted_file, rename_from, rename_to, diff[header.end():]))
return diffs
| mpl-2.0 | -483,790,432,639,565,440 | -4,642,493,233,851,116,000 | 36.591549 | 85 | 0.521544 | false |
galtys/odoo | setup.py | 37 | 5624 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from glob import glob
from setuptools import find_packages, setup
from os.path import join, dirname
execfile(join(dirname(__file__), 'openerp', 'release.py')) # Load release variables
lib_name = 'openerp'
def py2exe_datafiles():
data_files = {}
data_files['Microsoft.VC90.CRT'] = glob('C:\Microsoft.VC90.CRT\*.*')
for root, dirnames, filenames in os.walk('openerp'):
for filename in filenames:
if not re.match(r'.*(\.pyc|\.pyo|\~)$', filename):
data_files.setdefault(root, []).append(join(root, filename))
import babel
data_files['babel/localedata'] = glob(join(dirname(babel.__file__), 'localedata', '*'))
others = ['global.dat', 'numbers.py', 'support.py', 'plural.py']
data_files['babel'] = map(lambda f: join(dirname(babel.__file__), f), others)
others = ['frontend.py', 'mofile.py']
data_files['babel/messages'] = map(lambda f: join(dirname(babel.__file__), 'messages', f), others)
import pytz
tzdir = dirname(pytz.__file__)
for root, _, filenames in os.walk(join(tzdir, 'zoneinfo')):
base = join('pytz', root[len(tzdir) + 1:])
data_files[base] = [join(root, f) for f in filenames]
import docutils
dudir = dirname(docutils.__file__)
for root, _, filenames in os.walk(dudir):
base = join('docutils', root[len(dudir) + 1:])
data_files[base] = [join(root, f) for f in filenames if not f.endswith(('.py', '.pyc', '.pyo'))]
import passlib
pl = dirname(passlib.__file__)
for root, _, filenames in os.walk(pl):
base = join('passlib', root[len(pl) + 1:])
data_files[base] = [join(root, f) for f in filenames if not f.endswith(('.py', '.pyc', '.pyo'))]
return data_files.items()
def py2exe_options():
if os.name == 'nt':
import py2exe
return {
'console': [
{'script': 'odoo.py'},
{'script': 'openerp-gevent'},
{'script': 'openerp-server', 'icon_resources': [
(1, join('setup', 'win32', 'static', 'pixmaps', 'openerp-icon.ico'))
]},
],
'options': {
'py2exe': {
'skip_archive': 1,
'optimize': 0, # Keep the assert running as the integrated tests rely on them.
'dist_dir': 'dist',
'packages': [
'asynchat', 'asyncore',
'commands',
'dateutil',
'decimal',
'decorator',
'docutils',
'email',
'encodings',
'HTMLParser',
'imaplib',
'jinja2',
'lxml', 'lxml._elementpath', 'lxml.builder', 'lxml.etree', 'lxml.objectify',
'mako',
'markupsafe',
'mock',
'openerp',
'openid',
'passlib',
'PIL',
'poplib',
'psutil',
'pychart',
'pydot',
'pyparsing',
'pyPdf',
'pytz',
'reportlab',
'requests',
'select',
'simplejson',
'smtplib',
'uuid',
'vatnumber',
'vobject',
'win32service', 'win32serviceutil',
'xlwt',
'xml', 'xml.dom',
'yaml',
],
'excludes': ['Tkconstants', 'Tkinter', 'tcl'],
}
},
'data_files': py2exe_datafiles()
}
else:
return {}
setup(
name='odoo',
version=version,
description=description,
long_description=long_desc,
url=url,
author=author,
author_email=author_email,
classifiers=filter(None, classifiers.split('\n')),
license=license,
scripts=['openerp-server', 'openerp-gevent', 'odoo.py'],
packages=find_packages(),
package_dir={'%s' % lib_name: 'openerp'},
include_package_data=True,
install_requires=[
'babel >= 1.0',
'decorator',
'docutils',
'feedparser',
'gevent',
'Jinja2',
'lxml', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/
'mako',
'mock',
'passlib',
'pillow', # windows binary http://www.lfd.uci.edu/~gohlke/pythonlibs/
'psutil', # windows binary code.google.com/p/psutil/downloads/list
'psycogreen',
'psycopg2 >= 2.2',
'python-chart',
'pydot',
'pyparsing',
'pypdf',
'pyserial',
'python-dateutil',
'python-ldap', # optional
'python-openid',
'pytz',
'pyusb >= 1.0.0b1',
'pyyaml',
'qrcode',
'reportlab', # windows binary pypi.python.org/pypi/reportlab
'requests',
'simplejson',
'unittest2',
'vatnumber',
'vobject',
'werkzeug',
'xlwt',
],
extras_require={
'SSL': ['pyopenssl'],
},
tests_require=[
'unittest2',
'mock',
],
**py2exe_options()
)
| agpl-3.0 | -969,763,603,394,796,800 | -885,798,854,168,200,800 | 31.137143 | 104 | 0.45377 | false |
leviroth/praw | praw/models/base.py | 6 | 1256 | """Provide the PRAWBase superclass."""
from copy import deepcopy
class PRAWBase(object):
"""Superclass for all models in PRAW."""
@staticmethod
def _safely_add_arguments(argument_dict, key, **new_arguments):
"""Replace argument_dict[key] with a deepcopy and update.
This method is often called when new parameters need to be added to a
request. By calling this method and adding the new or updated
parameters we can insure we don't modify the dictionary passed in by
the caller.
"""
value = deepcopy(argument_dict[key]) if key in argument_dict else {}
value.update(new_arguments)
argument_dict[key] = value
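        # Illustrative effect (hypothetical values): starting from
        #   params = {"data": {"limit": 25}}
        # calling _safely_add_arguments(params, "data", sort="new") leaves the
        # caller's inner dict untouched and rebinds params["data"] to
        #   {"limit": 25, "sort": "new"}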
@classmethod
def parse(cls, data, reddit):
"""Return an instance of ``cls`` from ``data``.
:param data: The structured data.
:param reddit: An instance of :class:`.Reddit`.
"""
return cls(reddit, _data=data)
def __init__(self, reddit, _data):
"""Initialize a PRAWModel instance.
:param reddit: An instance of :class:`.Reddit`.
"""
self._reddit = reddit
if _data:
for attribute, value in _data.items():
setattr(self, attribute, value)
| bsd-2-clause | 6,100,719,251,948,082,000 | -8,156,846,096,876,407,000 | 29.634146 | 77 | 0.611465 | false |
KosiehBarter/anaconda | pyanaconda/pwpolicy.py | 10 | 4777 | #
# Brian C. Lane <[email protected]>
#
# Copyright 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import BaseData, KickstartCommand
from pykickstart.errors import KickstartValueError, formatErrorMsg
from pykickstart.options import KSOptionParser
import warnings
from pyanaconda.i18n import _
class F22_PwPolicyData(BaseData):
""" Kickstart Data object to hold information about pwpolicy. """
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.name = kwargs.get("name", "")
self.minlen = kwargs.get("minlen", 8)
self.minquality = kwargs.get("minquality", 50)
self.strict = kwargs.get("strict", True)
self.changesok = kwargs.get("changesok", False)
self.emptyok = kwargs.get("emptyok", True)
def __eq__(self, y):
if not y:
return False
return self.name == y.name
def __ne__(self, y):
return not self == y
def __str__(self):
retval = BaseData.__str__(self)
if self.name != "":
retval += "pwpolicy"
retval += self._getArgsAsStr() + "\n"
return retval
def _getArgsAsStr(self):
retval = ""
retval += " %s" % self.name
retval += " --minlen=%d" % self.minlen
retval += " --minquality=%d" % self.minquality
if self.strict:
retval += " --strict"
else:
retval += " --notstrict"
if self.changesok:
retval += " --changesok"
else:
retval += " --nochanges"
if self.emptyok:
retval += " --emptyok"
else:
retval += " --notempty"
return retval
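# With the defaults above, a serialized policy line looks like (the name
# "root" is just an example):
#   pwpolicy root --minlen=8 --minquality=50 --strict --nochanges --emptyok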
class F22_PwPolicy(KickstartCommand):
""" Kickstart command implementing password policy. """
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.policyList = kwargs.get("policyList", [])
def __str__(self):
retval = ""
for policy in self.policyList:
retval += policy.__str__()
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--minlen", type="int")
op.add_option("--minquality", type="int")
op.add_option("--strict", action="store_true")
op.add_option("--notstrict", dest="strict", action="store_false")
op.add_option("--changesok", action="store_true")
op.add_option("--nochanges", dest="changesok", action="store_false")
op.add_option("--emptyok", action="store_true")
op.add_option("--notempty", dest="emptyok", action="store_false")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
if len(extra) != 1:
raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("policy name required for %s") % "pwpolicy"))
pd = self.handler.PwPolicyData()
self._setToObj(self.op, opts, pd)
pd.lineno = self.lineno
pd.name = extra[0]
# Check for duplicates in the data list.
if pd in self.dataList():
warnings.warn(_("A %(command)s with the name %(policyName)s has already been defined.") % {"command": "pwpolicy", "policyName": pd.name})
return pd
def dataList(self):
return self.policyList
def get_policy(self, name):
""" Get the policy by name
:param str name: Name of the policy to return.
"""
policy = [p for p in self.policyList if p.name == name]
if policy:
return policy[0]
else:
return None
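# --- Hedged usage sketch (not part of the original module). It shows the kickstart
# syntax this command round-trips; pykickstart's BaseData is assumed to tolerate the
# extra keyword arguments that F22_PwPolicyData.__init__ forwards to it.
def _example_pwpolicy_line():
    pd = F22_PwPolicyData(name="root", minlen=8, minquality=50,
                          strict=True, changesok=False, emptyok=False)
    # str(pd) renders roughly:
    #   pwpolicy root --minlen=8 --minquality=50 --strict --nochanges --notempty
    return str(pd)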
| gpl-2.0 | -452,376,937,736,252,800 | -3,791,006,642,825,640,400 | 33.121429 | 149 | 0.618589 | false |
udapi/udapi-python | udapi/block/ud/complywithtext.py | 1 | 11648 | r"""Block ComplyWithText for adapting the nodes to comply with the text.
Implementation design details:
Usually, most of the inconsistencies between tree tokens and the raw text are simple to solve.
However, there may be also rare cases when it is not clear how to align the tokens
(nodes in the tree) with the raw text (stored in ``root.text``).
This block tries to solve the general case using several heuristics.
It starts with running an LCS-like algorithm (LCS = longest common subsequence)
``difflib.SequenceMatcher`` on the raw text and concatenation of tokens' forms,
i.e. on sequences of characters (as opposed to running LCS on sequences of tokens).
To prevent mis-alignment problems, we keep the spaces present in the raw text
and we insert spaces into the concatenated forms (``tree_chars``) according to ``SpaceAfter=No``.
An example of a mis-alignment problem:
text "énfase na necesidade" with 4 nodes "énfase en a necesidade"
should be solved by adding multiword token "na" over the nodes "en" and "a".
However, running LCS (or difflib) over the character sequences
"énfaseenanecesidade"
"énfasenanecesidade"
may result in énfase -> énfas.
Author: Martin Popel
"""
import difflib
import logging
import re
from udapi.core.block import Block
from udapi.core.mwt import MWT
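# --- Hedged illustration (not part of the original block): the character-level
# alignment sketched in the module docstring, run on the docstring's own example. In
# real pipelines the block is applied via a udapi scenario (e.g. `udapy
# ud.ComplyWithText`), which is an assumption about the CLI, not defined in this file.
def _alignment_demo():
    tree_chars = 'énfase en a necesidade'  # concatenated token forms, spaces kept
    text = 'énfase na necesidade'          # raw sentence text stored in root.text
    matcher = difflib.SequenceMatcher(None, tree_chars, text, autojunk=False)
    # process_tree() below turns opcodes like these into SpaceAfter=No marks,
    # multi-word tokens or goeswith nodes.
    return matcher.get_opcodes()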
class ComplyWithText(Block):
"""Adapt the nodes to comply with the text."""
def __init__(self, fix_text=True, prefer_mwt=True, allow_goeswith=True, max_mwt_length=4,
**kwargs):
"""Args:
fix_text: After all heuristics are applied, the token forms may still not match the text.
Should we edit the text to match the token forms (as a last resort)? Default=True.
prefer_mwt - What to do if multiple subsequent nodes correspond to a text written
without spaces and non-word characters (punctuation)?
E.g. if "3pm doesn't" is annotated with four nodes "3 pm does n't".
We can use either SpaceAfter=No, or create a multi-word token (MWT).
Note that if there is space or punctuation, SpaceAfter=No will be used always
(e.g. "3 p.m." annotated with three nodes "3 p. m.").
If the character sequence does not match exactly, MWT will be used always
(e.g. "3pm doesn't" annotated with four nodes "3 p.m. does not").
Thus this parameter influences only the "unclear" cases.
Default=True (i.e. prefer multi-word tokens over SpaceAfter=No).
allow_goeswith - If a node corresponds to multiple space-separated strings in text,
which are not allowed as tokens with space, we can either leave this diff
unresolved or create new nodes and join them with the `goeswith` deprel.
Default=True (i.e. add the goeswith nodes if applicable).
max_mwt_length - Maximum length of newly created multi-word tokens (in syntactic words).
Default=4.
"""
super().__init__(**kwargs)
self.fix_text = fix_text
self.prefer_mwt = prefer_mwt
self.allow_goeswith = allow_goeswith
self.max_mwt_length = max_mwt_length
@staticmethod
def allow_space(form):
"""Is space allowed within this token form?"""
return re.fullmatch('[0-9 ]+([,.][0-9]+)?', form)
@staticmethod
def store_orig_form(node, new_form):
"""Store the original form of this node into MISC, unless the change is common&expected."""
_ = new_form
if node.form not in ("''", "``"):
node.misc['OrigForm'] = node.form
def process_tree(self, root):
text = root.text
if text is None:
raise ValueError('Tree %s has no text, cannot use ud.ComplyWithText' % root)
# Normalize the stored text (double space -> single space)
# and skip sentences which are already ok.
text = ' '.join(text.split())
if text == root.compute_text():
return
tree_chars, char_nodes = _nodes_to_chars(root.token_descendants)
# Align. difflib may not give LCS, but usually it is good enough.
matcher = difflib.SequenceMatcher(None, tree_chars, text, autojunk=False)
diffs = list(matcher.get_opcodes())
_log_diffs(diffs, tree_chars, text, 'matcher')
diffs = self.unspace_diffs(diffs, tree_chars, text)
_log_diffs(diffs, tree_chars, text, 'unspace')
diffs = self.merge_diffs(diffs, char_nodes)
_log_diffs(diffs, tree_chars, text, 'merge')
# Solve diffs.
self.solve_diffs(diffs, tree_chars, char_nodes, text)
# Fill SpaceAfter=No.
tmp_text = text
for node in root.token_descendants:
if tmp_text.startswith(node.form):
tmp_text = tmp_text[len(node.form):]
if not tmp_text or tmp_text[0].isspace():
del node.misc['SpaceAfter']
tmp_text = tmp_text.lstrip()
else:
node.misc['SpaceAfter'] = 'No'
else:
logging.warning('Node %s does not match text "%s"', node, tmp_text[:20])
return
# Edit root.text if needed.
if self.fix_text:
computed_text = root.compute_text()
if text != computed_text:
root.add_comment('ToDoOrigText = ' + root.text)
root.text = computed_text
def unspace_diffs(self, orig_diffs, tree_chars, text):
diffs = []
for diff in orig_diffs:
edit, tree_lo, tree_hi, text_lo, text_hi = diff
if edit != 'insert':
if tree_chars[tree_lo] == ' ':
tree_lo += 1
if tree_chars[tree_hi - 1] == ' ':
tree_hi -= 1
old = tree_chars[tree_lo:tree_hi]
new = text[text_lo:text_hi]
if old == '' and new == '':
continue
elif old == new:
edit = 'equal'
elif old == '':
edit = 'insert'
diffs.append((edit, tree_lo, tree_hi, text_lo, text_hi))
return diffs
def merge_diffs(self, orig_diffs, char_nodes):
"""Make sure each diff starts on original token boundary.
If not, merge the diff with the previous diff.
E.g. (equal, "5", "5"), (replace, "-6", "–7")
is changed into (replace, "5-6", "5–7")
"""
diffs = []
for diff in orig_diffs:
edit, tree_lo, tree_hi, text_lo, text_hi = diff
if edit != 'insert' and char_nodes[tree_lo] is not None:
diffs.append(diff)
elif edit == 'equal':
while tree_lo < tree_hi and char_nodes[tree_lo] is None:
tree_lo += 1
text_lo += 1
diffs[-1] = ('replace', diffs[-1][1], tree_lo, diffs[-1][3], text_lo)
if tree_lo < tree_hi:
diffs.append(('equal', tree_lo, tree_hi, text_lo, text_hi))
else:
if not diffs:
diffs = [diff]
elif diffs[-1][0] != 'equal':
diffs[-1] = ('replace', diffs[-1][1], tree_hi, diffs[-1][3], text_hi)
else:
p_tree_hi = diffs[-1][2] - 1
p_text_hi = diffs[-1][4] - 1
while char_nodes[p_tree_hi] is None:
p_tree_hi -= 1
p_text_hi -= 1
assert p_tree_hi >= diffs[-1][1]
assert p_text_hi >= diffs[-1][3]
diffs[-1] = ('equal', diffs[-1][1], p_tree_hi, diffs[-1][3], p_text_hi)
diffs.append(('replace', p_tree_hi, tree_hi, p_text_hi, text_hi))
return diffs
def solve_diffs(self, diffs, tree_chars, char_nodes, text):
for diff in diffs:
edit, tree_lo, tree_hi, text_lo, text_hi = diff
# Focus only on edits of type 'replace', log insertions and deletions as failures.
if edit == 'equal':
continue
if edit in ('insert', 'delete'):
logging.warning('Unable to solve token-vs-text mismatch\n%s',
_diff2str(diff, tree_chars, text))
continue
            # Revert the splitting and solve the diff.
nodes = [n for n in char_nodes[tree_lo:tree_hi] if n is not None]
form = text[text_lo:text_hi]
self.solve_diff(nodes, form.strip())
def solve_diff(self, nodes, form):
"""Fix a given (minimal) tokens-vs-text inconsistency."""
nodes_str = ' '.join([n.form for n in nodes]) # just for debugging
node = nodes[0]
# First, solve the cases when the text contains a space.
if ' ' in form:
if len(nodes) == 1 and node.form == form.replace(' ', ''):
if self.allow_space(form):
self.store_orig_form(node, form)
node.form = form
elif self.allow_goeswith:
forms = form.split()
node.form = forms[0]
for split_form in reversed(forms[1:]):
new = node.create_child(form=split_form, deprel='goeswith', upos=node.upos)
new.shift_after_node(node)
else:
logging.warning('Unable to solve 1:m diff:\n%s -> %s', nodes_str, form)
else:
logging.warning('Unable to solve n:m diff:\n%s -> %s', nodes_str, form)
# Second, solve the cases when multiple nodes match one form (without any spaces).
elif len(nodes) > 1:
            # If the match is exact, we can choose between MWT and SpaceAfter solutions.
if not self.prefer_mwt and ''.join([n.form for n in nodes]) == form:
pass # SpaceAfter=No will be added later on.
# If one of the nodes is already a MWT, we cannot have nested MWTs.
# TODO: enlarge the MWT instead of failing.
elif any(isinstance(n, MWT) for n in nodes):
logging.warning('Unable to solve partial-MWT diff:\n%s -> %s', nodes_str, form)
# MWT with too many words are suspicious.
elif len(nodes) > self.max_mwt_length:
logging.warning('Not creating too long (%d>%d) MWT:\n%s -> %s',
len(nodes), self.max_mwt_length, nodes_str, form)
# Otherwise, create a new MWT.
else:
node.root.create_multiword_token(nodes, form)
# Third, solve the 1-1 cases.
else:
self.store_orig_form(node, form)
node.form = form
def _nodes_to_chars(nodes):
chars, char_nodes = [], []
for node in nodes:
form = node.form
if node.misc['SpaceAfter'] != 'No' and node != nodes[-1]:
form += ' '
chars.extend(form)
char_nodes.append(node)
char_nodes.extend([None] * (len(form) - 1))
return ''.join(chars), char_nodes
def _log_diffs(diffs, tree_chars, text, msg):
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.warning('=== After %s:', msg)
for diff in diffs:
logging.warning(_diff2str(diff, tree_chars, text))
def _diff2str(diff, tree, text):
old = '|' + ''.join(tree[diff[1]:diff[2]]) + '|'
new = '|' + ''.join(text[diff[3]:diff[4]]) + '|'
if diff[0] == 'equal':
return '{:7} {!s:>50}'.format(diff[0], old)
return '{:7} {!s:>50} --> {!s}'.format(diff[0], old, new)
| gpl-3.0 | 8,325,257,342,450,780,000 | -1,259,237,825,971,706,600 | 42.75188 | 99 | 0.559117 | false |
tragiclifestories/django | django/contrib/admin/templatetags/admin_modify.py | 342 | 2505 | from django import template
register = template.Library()
@register.inclusion_tag('admin/prepopulated_fields_js.html', takes_context=True)
def prepopulated_fields_js(context):
"""
Creates a list of prepopulated_fields that should render Javascript for
the prepopulated fields for both the admin form and inlines.
"""
prepopulated_fields = []
if 'adminform' in context:
prepopulated_fields.extend(context['adminform'].prepopulated_fields)
if 'inline_admin_formsets' in context:
for inline_admin_formset in context['inline_admin_formsets']:
for inline_admin_form in inline_admin_formset:
if inline_admin_form.original is None:
prepopulated_fields.extend(inline_admin_form.prepopulated_fields)
context.update({'prepopulated_fields': prepopulated_fields})
return context
@register.inclusion_tag('admin/submit_line.html', takes_context=True)
def submit_row(context):
"""
Displays the row of buttons for delete and save.
"""
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
show_save = context.get('show_save', True)
show_save_and_continue = context.get('show_save_and_continue', True)
ctx = {
'opts': opts,
'show_delete_link': (
not is_popup and context['has_delete_permission'] and
change and context.get('show_delete', True)
),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': (
context['has_add_permission'] and not is_popup and
(not save_as or context['add'])
),
'show_save_and_continue': not is_popup and context['has_change_permission'] and show_save_and_continue,
'is_popup': is_popup,
'show_save': show_save,
'preserved_filters': context.get('preserved_filters'),
}
if context.get('original') is not None:
ctx['original'] = context['original']
return ctx
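# Hedged usage note (not part of the original file): these inclusion tags are loaded
# from admin templates roughly as follows; the decorators above name the templates
# actually rendered (admin/submit_line.html and admin/prepopulated_fields_js.html).
#
#     {% load admin_modify %}
#     {% prepopulated_fields_js %}
#     {% submit_row %}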
@register.filter
def cell_count(inline_admin_form):
"""Returns the number of cells used in a tabular inline"""
count = 1 # Hidden cell with hidden 'id' field
for fieldset in inline_admin_form:
# Loop through all the fields (one per cell)
for line in fieldset:
for field in line:
count += 1
if inline_admin_form.formset.can_delete:
# Delete checkbox
count += 1
return count
| bsd-3-clause | -4,512,941,558,143,638,500 | -7,040,299,243,808,474,000 | 35.838235 | 111 | 0.639521 | false |
wkpark/zinnia | python/test.py | 12 | 1187 | #!/usr/bin/python
import zinnia
input = "(character (width 1000)(height 1000)(strokes ((243 273)(393 450))((700 253)(343 486)(280 716)(393 866)(710 880))))";
try:
s = zinnia.Character()
r = zinnia.Recognizer()
r.open("/usr/local/lib/zinnia/model/tomoe/handwriting-ja.model")
if (not s.parse(input)):
print s.what()
result = r.classify(s, 10)
size = result.size()
for i in range(0, (size - 1)):
print "%s\t%f" % (result.value(i), result.score(i))
s.clear();
s.set_width(300)
s.set_height(300)
s.add(0, 51, 29)
s.add(0, 117, 41)
s.add(1, 99, 65)
s.add(1, 219, 77)
s.add(2, 27, 131)
s.add(2, 261, 131)
s.add(3, 129, 17)
s.add(3, 57, 203)
s.add(4, 111, 71)
s.add(4, 219, 173)
s.add(5, 81, 161)
s.add(5, 93, 281)
s.add(6, 99, 167)
s.add(6, 207, 167)
s.add(6, 189, 245)
s.add(7, 99, 227)
s.add(7, 189, 227)
s.add(8, 111, 257)
s.add(8, 189, 245)
result = r.classify(s, 10)
size = result.size()
for i in range(0, (size - 1)):
print "%s\t%f" % (result.value(i), result.score(i))
except RuntimeError, e:
print "RuntimeError: ", e,
| bsd-3-clause | -664,155,225,742,035,100 | 2,502,088,907,347,160,600 | 24.255319 | 125 | 0.541702 | false |
storiesofsolidarity/story-database | stories/admin.py | 1 | 1393 | from django.contrib import admin
from models import Location, Story
from people.models import Author
class LocationAdmin(admin.ModelAdmin):
list_display = ('zipcode', 'city_fmt', 'county_fmt', 'state_fmt', 'story_count')
list_filter = ('state',)
search_fields = ('zipcode', 'city', 'county')
admin.site.register(Location, LocationAdmin)
class EmployerFilter(admin.SimpleListFilter):
title = 'author employer'
parameter_name = 'employer'
def lookups(self, request, model_admin):
employer_set = set()
for a in Author.objects.all():
if a.employer:
employer_set.add(a.employer.split(' ', 1)[0])
return [(str(c), str(c)) for c in employer_set if c]
def queryset(self, request, queryset):
if self.value() or self.value() == 'None':
return queryset.filter(author__employer__startswith=self.value())
else:
return queryset
class StoryAdmin(admin.ModelAdmin):
list_display = ('excerpt', 'author_display', 'employer', 'anonymous', 'created_at')
list_filter = (EmployerFilter, 'location__state', 'truncated')
date_hierarchy = 'created_at'
readonly_fields = ('truncated',)
raw_id_fields = ('author', 'location')
search_fields = ('location__city', 'author__user__first_name', 'author__user__last_name', 'content')
admin.site.register(Story, StoryAdmin)
| agpl-3.0 | -2,723,449,073,810,279,400 | 7,911,457,744,231,925,000 | 33.825 | 104 | 0.648959 | false |
peppelinux/inventario_verdebinario | museo/models.py | 1 | 4183 | from django.db import models
from photologue.models import ImageModel
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
class Produttore(ImageModel):
id_tabella = models.AutoField(primary_key=True)
nome = models.CharField(max_length=135, blank=True)
nome_abbreviato = models.CharField(max_length=135, blank=True)
#slug = models.SlugField(unique=True, help_text=('"slug": un identificatore automatico e univoco'))
descrizione = models.TextField(max_length=1024, blank=True)
data_nascita = models.DateField(null=True, blank=True)
data_chiusura = models.DateField(null=True, blank=True)
#immagine_logo = models.ImageField(upload_to="LoghiProduttori", blank=True)
url = models.CharField(max_length=256, blank=True)
def save(self, *args, **kwargs):
if self.nome_abbreviato == None or self.nome_abbreviato.split() == []:
self.nome_abbreviato = self.nome.upper()
super(self.__class__, self).save(*args, **kwargs) # Call the "real" save() method.
class Meta:
ordering = ['nome']
db_table = 'produttore'
verbose_name_plural = "Produttore"
# def get_absolute_url(self):
# return '%s' % (self.url)
def __str__(self):
return '%s' % (self.nome_abbreviato)
class SchedaTecnica(models.Model):
id_tabella = models.AutoField(primary_key=True)
modello = models.CharField(max_length=135, blank=True)
produttore = models.ForeignKey(Produttore, null=True, blank=True, on_delete=models.SET_NULL)
paese_di_origine = models.CharField(max_length=135, blank=True)
anno = models.CharField(max_length=135, blank=True)
tastiera = models.CharField(max_length=135, blank=True)
cpu = models.CharField(max_length=135, blank=True)
velocita = models.CharField(max_length=135, blank=True)
memoria_volatile = models.CharField(max_length=135, blank=True)
memoria_di_massa = models.CharField(max_length=135, blank=True)
modalita_grafica = models.CharField(max_length=135, blank=True)
audio = models.CharField(max_length=135, blank=True)
dispositivi_media = models.CharField(max_length=135, blank=True)
alimentazione = models.CharField(max_length=135, blank=True)
prezzo = models.CharField(max_length=135, blank=True)
descrizione = models.TextField(max_length=1024, blank=True)
data_inserimento = models.DateField(null=True, blank=False, auto_now_add=True)
class Meta:
db_table = 'scheda_tecnica'
verbose_name_plural = "Scheda Tecnica"
class FotoHardwareMuseo(ImageModel):
id_tabella = models.AutoField(primary_key=True)
#immagine = models.ImageField(upload_to="FotoHardwareMuseo/%d.%m.%Y", blank=True)
etichetta_verde = models.CharField(max_length=135, blank=True)
data_inserimento = models.DateField(null=True, blank=False, auto_now_add=True)
seriale = models.CharField(max_length=384, blank=True)
didascalia = models.TextField(max_length=328, blank=True)
scheda_tecnica = models.ForeignKey(SchedaTecnica, null=True, blank=True, on_delete=models.SET_NULL)
class Meta:
db_table = 'foto_hardware_museo'
verbose_name_plural = "Foto Hardware Museo"
def __str__(self):
return '%s %s' % (self.seriale, self.scheda_tecnica)
def get_absolute_url(self):
#return '/media/foto/FotoHardwareMuseo/' + self.data_inserimento.strftime('%d.%m.%Y') + '/' + self.image.name
return '/media/%s' % self.image.name
def admin_thumbnail(self):
func = getattr(self, 'get_admin_thumbnail_url', None)
if func is None:
return _('An "admin_thumbnail" photo size has not been defined.')
else:
if hasattr(self, 'get_absolute_url'):
return '<a class="foto_admin_thumbs" target="_blank" href="%s"><img src="%s"></a>' % \
(self.get_absolute_url(), func())
else:
return '<a class="foto_admin_thumbs" target="_blank" href="%s"><img src="%s"></a>' % \
(self.image.url, func())
admin_thumbnail.short_description = _('Thumbnail')
admin_thumbnail.allow_tags = True
| gpl-3.0 | -9,071,183,160,125,801,000 | -4,484,088,110,037,239,000 | 44.967033 | 117 | 0.671528 | false |
scholarly/pynacl | tests/test_encoding.py | 7 | 2670 | # Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import pytest
import nacl.encoding
import nacl.secret
KEY = b"1" * nacl.secret.SecretBox.KEY_SIZE
NONCE = b"1" * nacl.secret.SecretBox.NONCE_SIZE
TEXT = b"The quick brown fox jumps over the lazy dog"
VECTORS = [
# Encoder, Ciphertext
(
nacl.encoding.RawEncoder,
(b"111111111111111111111111\xfcU\xe2\x9f\xe6E\x92\xd7\x0eFM=x\x83\x8fj"
b"} v\xd4\xf0\x1a1\xc0\x88Uk\x12\x02\x1cd\xfaOH\x13\xdc\x0e\x0e\xd7A"
b"\x07\x0b.\x9f\x01\xbf\xe4\xd0s\xf1P\xd3\x0e\xaa\x9d\xb3\xf7\\\x0f"),
),
(
nacl.encoding.HexEncoder,
(b"313131313131313131313131313131313131313131313131fc55e29fe64592d70e4"
b"64d3d78838f6a7d2076d4f01a31c088556b12021c64fa4f4813dc0e0ed741070b2e"
b"9f01bfe4d073f150d30eaa9db3f75c0f"),
),
(
nacl.encoding.Base16Encoder,
(b"313131313131313131313131313131313131313131313131FC55E29FE64592D70E4"
b"64D3D78838F6A7D2076D4F01A31C088556B12021C64FA4F4813DC0E0ED741070B2E"
b"9F01BFE4D073F150D30EAA9DB3F75C0F"),
),
(
nacl.encoding.Base32Encoder,
(b"GEYTCMJRGEYTCMJRGEYTCMJRGEYTCMJRGEYTCMP4KXRJ7ZSFSLLQ4RSNHV4IHD3KPUQ"
b"HNVHQDIY4BCCVNMJAEHDE7JHUQE64BYHNOQIHBMXJ6AN74TIHH4KQ2MHKVHNT65OA6"
b"==="),
),
(
nacl.encoding.Base64Encoder,
(b"MTExMTExMTExMTExMTExMTExMTExMTEx/FXin+ZFktcORk09eIOPan0gdtTwGjHAiFV"
b"rEgIcZPpPSBPcDg7XQQcLLp8Bv+TQc/FQ0w6qnbP3XA8="),
),
(
nacl.encoding.URLSafeBase64Encoder,
(b"MTExMTExMTExMTExMTExMTExMTExMTEx_FXin-ZFktcORk09eIOPan0gdtTwGjHAiFV"
b"rEgIcZPpPSBPcDg7XQQcLLp8Bv-TQc_FQ0w6qnbP3XA8="),
),
]
@pytest.mark.parametrize(("encoder", "ciphertext"), VECTORS)
def test_encoders(encoder, ciphertext):
box = nacl.secret.SecretBox(KEY)
test_ciphertext = box.encrypt(TEXT, NONCE, encoder=encoder)
assert test_ciphertext == ciphertext
test_plaintext = box.decrypt(test_ciphertext, encoder=encoder)
assert test_plaintext == TEXT
| apache-2.0 | 968,198,843,612,684,400 | 4,273,405,282,828,522,000 | 35.575342 | 79 | 0.728839 | false |
hsum/sqlalchemy | examples/vertical/__init__.py | 30 | 1043 | """
Illustrates "vertical table" mappings.
A "vertical table" refers to a technique where individual attributes
of an object are stored as distinct rows in a table. The "vertical
table" technique is used to persist objects which can have a varied
set of attributes, at the expense of simple query control and brevity.
It is commonly found in content/document management systems in order
to represent user-created structures flexibly.
Two variants on the approach are given. In the second, each row
references a "datatype" which contains information about the type of
information stored in the attribute, such as integer, string, or date.
Example::
shrew = Animal(u'shrew')
shrew[u'cuteness'] = 5
shrew[u'weasel-like'] = False
shrew[u'poisonous'] = True
session.add(shrew)
session.flush()
q = (session.query(Animal).
filter(Animal.facts.any(
and_(AnimalFact.key == u'weasel-like',
AnimalFact.value == True))))
print 'weasel-like animals', q.all()
.. autosource::
""" | mit | 7,839,794,115,706,111,000 | -6,369,611,111,279,482,000 | 29.705882 | 70 | 0.710451 | false |
jbreitbart/autopin-plus | vendor/fast-lib/vendor/mosquitto-1.3.5/test/broker/05-clean-session-qos1.py | 18 | 1845 | #!/usr/bin/env python
# Test whether a clean session client has a QoS 1 message queued for it.
import subprocess
import socket
import time
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
mid = 109
keepalive = 60
connect_packet = mosq_test.gen_connect("clean-qos2-test", keepalive=keepalive, clean_session=False)
connack_packet = mosq_test.gen_connack(rc=0)
disconnect_packet = mosq_test.gen_disconnect()
subscribe_packet = mosq_test.gen_subscribe(mid, "qos1/clean_session/test", 1)
suback_packet = mosq_test.gen_suback(mid, 1)
mid = 1
publish_packet = mosq_test.gen_publish("qos1/clean_session/test", qos=1, mid=mid, payload="clean-session-message")
puback_packet = mosq_test.gen_puback(mid)
broker = subprocess.Popen(['../../src/mosquitto', '-p', '1888'], stderr=subprocess.PIPE)
try:
time.sleep(0.5)
sock = mosq_test.do_client_connect(connect_packet, connack_packet)
sock.send(subscribe_packet)
if mosq_test.expect_packet(sock, "suback", suback_packet):
sock.send(disconnect_packet)
sock.close()
pub = subprocess.Popen(['./05-clean-session-qos1-helper.py'])
pub.wait()
# Now reconnect and expect a publish message.
sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=30)
if mosq_test.expect_packet(sock, "publish", publish_packet):
sock.send(puback_packet)
rc = 0
sock.close()
finally:
broker.terminate()
broker.wait()
if rc:
(stdo, stde) = broker.communicate()
print(stde)
exit(rc)
| gpl-3.0 | 18,091,107,149,343,830 | 8,439,505,246,591,177,000 | 28.758065 | 129 | 0.687805 | false |
XtheOne/Inverter-Data-Logger | InverterLib.py | 1 | 3301 | import socket
import struct
import os
import binascii
import sys
if sys.version[0] == '2':
reload(sys)
sys.setdefaultencoding('cp437')
def getNetworkIp():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.connect(('<broadcast>', 0))
return s.getsockname()[0]
def createV4RequestFrame(logger_sn):
"""Create request frame for inverter logger.
    The request string is built from several parts. The first part is a
fixed 4 char string; the second part is the reversed hex notation of
the s/n twice; then again a fixed string of two chars; a checksum of
the double s/n with an offset; and finally a fixed ending char.
Args:
logger_sn (int): Serial number of the inverter
Returns:
        bytearray: Information request frame for the inverter
"""
#frame = (headCode) + (dataFieldLength) + (contrlCode) + (sn) + (sn) + (command) + (checksum) + (endCode)
frame_hdr = binascii.unhexlify('680241b1') #from SolarMan / new Omnik app
command = binascii.unhexlify('0100')
defchk = binascii.unhexlify('87')
endCode = binascii.unhexlify('16')
tar = bytearray.fromhex(hex(logger_sn)[8:10] + hex(logger_sn)[6:8] + hex(logger_sn)[4:6] + hex(logger_sn)[2:4])
frame = bytearray(frame_hdr + tar + tar + command + defchk + endCode)
checksum = 0
frame_bytes = bytearray(frame)
for i in range(1, len(frame_bytes) - 2, 1):
checksum += frame_bytes[i] & 255
frame_bytes[len(frame_bytes) - 2] = int((checksum & 255))
return bytearray(frame_bytes)
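# --- Hedged usage sketch (not part of the original module). The serial number, the
# logger IP and the TCP port are assumptions (8899 is a common port for these wifi
# logger sticks, but yours may differ); only createV4RequestFrame() is used as-is.
def _example_poll(logger_ip, logger_sn, port=8899):
    frame = createV4RequestFrame(logger_sn)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(10)
    try:
        s.connect((logger_ip, port))
        s.send(frame)
        return s.recv(1024)  # raw reply; parsing it is outside this sketch
    finally:
        s.close()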
def expand_path(path):
"""
Expand relative path to absolute path.
Args:
path: file path
Returns: absolute path to file
"""
if os.path.isabs(path):
return path
else:
return os.path.dirname(os.path.abspath(__file__)) + "/" + path
def getLoggers():
# Create the datagram socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((getNetworkIp(), 48899))
# Set a timeout so the socket does not block indefinitely when trying to receive data.
sock.settimeout(3)
# Set the time-to-live for messages to 1 so they do not go past the local network segment.
ttl = struct.pack('b', 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
SendData = "WIFIKIT-214028-READ" # Lotto/TM = "AT+YZAPP=214028,READ"
gateways = ''
try:
# Send data to the broadcast address
sent = sock.sendto(SendData, ('<broadcast>', 48899))
# Look for responses from all recipients
while True:
try:
data, server = sock.recvfrom(1024)
except socket.timeout:
break
else:
if (data == SendData): continue #skip sent data
a = data.split(',')
wifi_ip, wifi_mac, wifi_sn = a[0],a[1],a[2]
if (len(gateways)>1):
gateways = gateways+','
gateways = gateways+wifi_ip+','+wifi_sn
finally:
sock.close()
return gateways
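# getLoggers() returns the discovered loggers as a single comma-joined string of
# alternating IP addresses and serial numbers, e.g. "192.168.1.50,1712345678" for one
# logger (the address/serial values here are illustrative only).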
| gpl-3.0 | 4,129,143,990,063,576,000 | -1,093,888,834,026,744,600 | 33.385417 | 115 | 0.62678 | false |
sigrokproject/libsigrokdecode | decoders/swd/__init__.py | 6 | 1212 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Angus Gratton <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
'''
This PD decodes the ARM SWD (version 1) protocol, as described in the
"ARM Debug Interface v5.2" Architecture Specification.
Not supported:
* Turnaround periods other than the default 1, as set in DLCR.TURNROUND
(should be trivial to add)
* SWD protocol version 2 (multi-drop support, etc.)
Details:
http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ihi0031c/index.html
(Registration required)
'''
from .pd import Decoder
| gpl-3.0 | -2,123,242,491,780,457,700 | -2,104,304,260,932,624,000 | 34.647059 | 79 | 0.745875 | false |
dstanek/keystone | keystone/common/dependency.py | 10 | 7661 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module provides support for dependency injection.
Providers are registered via the ``@provider()`` decorator, and dependencies on
them are registered with ``@requires()``. Providers are available to their
consumers via an attribute. See the documentation for the individual functions
for more detail.
See also:
https://en.wikipedia.org/wiki/Dependency_injection
"""
import traceback
from keystone.i18n import _
_REGISTRY = {}
_future_dependencies = {}
_factories = {}
def _set_provider(name, provider):
_original_provider, where_registered = _REGISTRY.get(name, (None, None))
if where_registered:
raise Exception('%s already has a registered provider, at\n%s' %
(name, ''.join(where_registered)))
_REGISTRY[name] = (provider, traceback.format_stack())
GET_REQUIRED = object()
GET_OPTIONAL = object()
def get_provider(name, optional=GET_REQUIRED):
if optional is GET_REQUIRED:
return _REGISTRY[name][0]
return _REGISTRY.get(name, (None, None))[0]
class UnresolvableDependencyException(Exception):
"""Raised when a required dependency is not resolvable.
See ``resolve_future_dependencies()`` for more details.
"""
def __init__(self, name, targets):
msg = _('Unregistered dependency: %(name)s for %(targets)s') % {
'name': name, 'targets': targets}
super(UnresolvableDependencyException, self).__init__(msg)
def provider(name):
"""A class decorator used to register providers.
    When ``@provider()`` is used to decorate a class, instances of that class
    will register themselves as providers for the named dependency. As an
    example, in the code fragment::
@dependency.provider('foo_api')
class Foo:
def __init__(self):
...
...
foo = Foo()
    The object ``foo`` will be registered as a provider for ``foo_api``. No
    more than one such instance may be created; attempting to register a second
    provider under the same name raises an exception (see ``_set_provider``).
"""
def wrapper(cls):
def wrapped(init):
def __wrapped_init__(self, *args, **kwargs):
"""Initialize the wrapped object and add it to the registry."""
init(self, *args, **kwargs)
_set_provider(name, self)
resolve_future_dependencies(__provider_name=name)
return __wrapped_init__
cls.__init__ = wrapped(cls.__init__)
_factories[name] = cls
return cls
return wrapper
def _process_dependencies(obj):
# Any dependencies that can be resolved immediately are resolved.
# Dependencies that cannot be resolved immediately are stored for
# resolution in resolve_future_dependencies.
def process(obj, attr_name, unresolved_in_out):
for dependency in getattr(obj, attr_name, []):
if dependency not in _REGISTRY:
# We don't know about this dependency, so save it for later.
unresolved_in_out.setdefault(dependency, []).append(obj)
continue
setattr(obj, dependency, get_provider(dependency))
process(obj, '_dependencies', _future_dependencies)
def requires(*dependencies):
"""A class decorator used to inject providers into consumers.
The required providers will be made available to instances of the decorated
class via an attribute with the same name as the provider. For example, in
the code fragment::
@dependency.requires('foo_api', 'bar_api')
class FooBarClient:
def __init__(self):
...
...
client = FooBarClient()
The object ``client`` will have attributes named ``foo_api`` and
``bar_api``, which are instances of the named providers.
Objects must not rely on the existence of these attributes until after
``resolve_future_dependencies()`` has been called; they may not exist
beforehand.
    Dependencies registered via ``@requires()`` must have providers; if not,
an ``UnresolvableDependencyException`` will be raised when
``resolve_future_dependencies()`` is called.
"""
def wrapper(self, *args, **kwargs):
"""Inject each dependency from the registry."""
self.__wrapped_init__(*args, **kwargs)
_process_dependencies(self)
def wrapped(cls):
"""Note the required dependencies on the object for later injection.
The dependencies of the parent class are combined with that of the
child class to create a new set of dependencies.
"""
existing_dependencies = getattr(cls, '_dependencies', set())
cls._dependencies = existing_dependencies.union(dependencies)
if not hasattr(cls, '__wrapped_init__'):
cls.__wrapped_init__ = cls.__init__
cls.__init__ = wrapper
return cls
return wrapped
def resolve_future_dependencies(__provider_name=None):
"""Forces injection of all dependencies.
Before this function is called, circular dependencies may not have been
injected. This function should be called only once, after all global
providers are registered. If an object needs to be created after this
call, it must not have circular dependencies.
If any required dependencies are unresolvable, this function will raise an
``UnresolvableDependencyException``.
Outside of this module, this function should be called with no arguments;
the optional argument, ``__provider_name`` is used internally, and should
be treated as an implementation detail.
"""
new_providers = dict()
if __provider_name:
# A provider was registered, so take care of any objects depending on
# it.
targets = _future_dependencies.pop(__provider_name, [])
for target in targets:
setattr(target, __provider_name, get_provider(__provider_name))
return
# Resolve future dependencies, raises UnresolvableDependencyException if
# there's no provider registered.
try:
for dependency, targets in _future_dependencies.copy().items():
if dependency not in _REGISTRY:
# a Class was registered that could fulfill the dependency, but
# it has not yet been initialized.
factory = _factories.get(dependency)
if factory:
provider = factory()
new_providers[dependency] = provider
else:
raise UnresolvableDependencyException(dependency, targets)
for target in targets:
setattr(target, dependency, get_provider(dependency))
finally:
_future_dependencies.clear()
return new_providers
def reset():
"""Reset the registry of providers.
This is useful for unit testing to ensure that tests don't use providers
from previous tests.
"""
_REGISTRY.clear()
_future_dependencies.clear()
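# --- Hedged usage sketch (not part of the original module). It restates the pattern
# already shown in the docstrings above; 'foo_api' and the classes are illustrative.
def _example_usage():
    @provider('foo_api')
    class Foo(object):
        def do_work(self):
            return 'working'

    @requires('foo_api')
    class Consumer(object):
        pass

    Foo()                          # the instance registers itself as 'foo_api'
    resolve_future_dependencies()  # injects providers into any earlier consumers
    return Consumer().foo_api.do_work()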
| apache-2.0 | -7,599,772,074,465,021,000 | 6,310,028,959,675,477,000 | 32.308696 | 79 | 0.654614 | false |
mvaled/sentry | tests/sentry/deletions/test_tagkey.py | 1 | 3690 | from __future__ import absolute_import
from sentry import tagstore
from sentry.tagstore.models import EventTag
from sentry.models import ScheduledDeletion
from sentry.tasks.deletion import run_deletion
from sentry.testutils import TestCase
class DeleteTagKeyTest(TestCase):
def test_simple(self):
team = self.create_team(name="test", slug="test")
project = self.create_project(teams=[team], name="test1", slug="test1")
group = self.create_group(project=project)
key = "foo"
value = "bar"
tk = tagstore.create_tag_key(
key=key, project_id=project.id, environment_id=self.environment.id
)
tv = tagstore.create_tag_value(
key=key, value=value, project_id=project.id, environment_id=self.environment.id
)
tagstore.create_group_tag_key(
key=key, group_id=group.id, project_id=project.id, environment_id=self.environment.id
)
tagstore.create_group_tag_value(
key=key,
value=value,
group_id=group.id,
project_id=project.id,
environment_id=self.environment.id,
)
tagstore.create_event_tags(
group_id=group.id,
project_id=project.id,
event_id=1,
environment_id=self.environment.id,
tags=[(tk.key, tv.value)],
)
project2 = self.create_project(teams=[team], name="test2")
env2 = self.create_environment(project=project2)
group2 = self.create_group(project=project2)
tk2 = tagstore.create_tag_key(project2.id, env2.id, key)
tv2 = tagstore.create_tag_value(
key=key, value=value, project_id=project2.id, environment_id=env2.id
)
tagstore.create_group_tag_key(
key=key, group_id=group2.id, project_id=project2.id, environment_id=env2.id
)
tagstore.create_group_tag_value(
key=key, value=value, group_id=group2.id, project_id=project2.id, environment_id=env2.id
)
tagstore.create_event_tags(
group_id=group2.id,
project_id=project2.id,
environment_id=env2.id,
event_id=1,
tags=[(tk2.key, tv2.value)],
)
deletion = ScheduledDeletion.schedule(tk, days=0)
deletion.update(in_progress=True)
with self.tasks():
run_deletion(deletion.id)
try:
tagstore.get_group_tag_value(
group.project_id, group.id, self.environment.id, key, value
)
assert False # verify exception thrown
except tagstore.GroupTagValueNotFound:
pass
try:
tagstore.get_group_tag_key(group.project_id, group.id, self.environment.id, key)
assert False # verify exception thrown
except tagstore.GroupTagKeyNotFound:
pass
try:
tagstore.get_tag_value(project.id, self.environment.id, key, value)
assert False # verify exception thrown
except tagstore.TagValueNotFound:
pass
try:
tagstore.get_tag_key(project.id, self.environment.id, key)
assert False # verify exception thrown
except tagstore.TagKeyNotFound:
pass
assert tagstore.get_tag_key(project2.id, env2.id, key) is not None
assert tagstore.get_group_tag_key(group2.project_id, group2.id, env2.id, key) is not None
assert (
tagstore.get_group_tag_value(group2.project_id, group2.id, env2.id, key, value)
is not None
)
assert EventTag.objects.filter(key_id=tk2.id).exists()
| bsd-3-clause | 8,201,737,937,189,691,000 | -594,176,852,126,077,000 | 37.041237 | 100 | 0.604607 | false |
RedhawkSDR/integration-gnuhawk | gnuradio/docs/doxygen/doxyxml/doxyindex.py | 16 | 8404 | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Classes providing more user-friendly interfaces to the doxygen xml
docs than the generated classes provide.
"""
import os
from generated import index
from base import Base
from text import description
class DoxyIndex(Base):
"""
Parses a doxygen xml directory.
"""
__module__ = "gnuradio.utils.doxyxml"
def _parse(self):
if self._parsed:
return
super(DoxyIndex, self)._parse()
self._root = index.parse(os.path.join(self._xml_path, 'index.xml'))
for mem in self._root.compound:
converted = self.convert_mem(mem)
# For files we want the contents to be accessible directly
# from the parent rather than having to go through the file
# object.
if self.get_cls(mem) == DoxyFile:
if mem.name.endswith('.h'):
self._members += converted.members()
self._members.append(converted)
else:
self._members.append(converted)
def generate_swig_doc_i(self):
"""
%feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
Wraps the C++: gr_align_on_samplenumbers_ss::align_state";
"""
pass
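# --- Hedged usage sketch (not part of the original file). It assumes the Base class
# from base.py (not shown here) exposes the collected members via a members()
# accessor, which is how the GNU Radio doc generators consume this index.
def _example_briefs(xml_directory):
    di = DoxyIndex(xml_directory)  # path to doxygen's XML output directory
    briefs = []
    for member in di.members():
        # brief_description is defined below for classes, files and functions.
        brief = getattr(member, 'brief_description', None)
        if brief:
            briefs.append((member, brief))
    return briefs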
class DoxyCompMem(Base):
kind = None
def __init__(self, *args, **kwargs):
super(DoxyCompMem, self).__init__(*args, **kwargs)
@classmethod
def can_parse(cls, obj):
return obj.kind == cls.kind
def set_descriptions(self, parse_data):
bd = description(getattr(parse_data, 'briefdescription', None))
dd = description(getattr(parse_data, 'detaileddescription', None))
self._data['brief_description'] = bd
self._data['detailed_description'] = dd
def set_parameters(self, data):
vs = [ddc.value for ddc in data.detaileddescription.content_]
pls = []
for v in vs:
if hasattr(v, 'parameterlist'):
pls += v.parameterlist
pis = []
for pl in pls:
pis += pl.parameteritem
dpis = []
for pi in pis:
dpi = DoxyParameterItem(pi)
dpi._parse()
dpis.append(dpi)
self._data['params'] = dpis
class DoxyCompound(DoxyCompMem):
pass
class DoxyMember(DoxyCompMem):
pass
class DoxyFunction(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
kind = 'function'
def _parse(self):
if self._parsed:
return
super(DoxyFunction, self)._parse()
self.set_descriptions(self._parse_data)
self.set_parameters(self._parse_data)
if not self._data['params']:
# If the params weren't set by a comment then just grab the names.
self._data['params'] = []
prms = self._parse_data.param
for prm in prms:
self._data['params'].append(DoxyParam(prm))
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
params = property(lambda self: self.data()['params'])
Base.mem_classes.append(DoxyFunction)
class DoxyParam(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
def _parse(self):
if self._parsed:
return
super(DoxyParam, self)._parse()
self.set_descriptions(self._parse_data)
self._data['declname'] = self._parse_data.declname
@property
def description(self):
descriptions = []
if self.brief_description:
descriptions.append(self.brief_description)
if self.detailed_description:
descriptions.append(self.detailed_description)
return '\n\n'.join(descriptions)
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
name = property(lambda self: self.data()['declname'])
class DoxyParameterItem(DoxyMember):
"""A different representation of a parameter in Doxygen."""
def _parse(self):
if self._parsed:
return
super(DoxyParameterItem, self)._parse()
names = []
for nl in self._parse_data.parameternamelist:
for pn in nl.parametername:
names.append(description(pn))
# Just take first name
self._data['name'] = names[0]
# Get description
pd = description(self._parse_data.get_parameterdescription())
self._data['description'] = pd
description = property(lambda self: self.data()['description'])
name = property(lambda self: self.data()['name'])
class DoxyClass(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'class'
def _parse(self):
if self._parsed:
return
super(DoxyClass, self)._parse()
self.retrieve_data()
if self._error:
return
self.set_descriptions(self._retrieved_data.compounddef)
self.set_parameters(self._retrieved_data.compounddef)
# Sectiondef.kind tells about whether private or public.
# We just ignore this for now.
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
params = property(lambda self: self.data()['params'])
Base.mem_classes.append(DoxyClass)
class DoxyFile(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'file'
def _parse(self):
if self._parsed:
return
super(DoxyFile, self)._parse()
self.retrieve_data()
self.set_descriptions(self._retrieved_data.compounddef)
if self._error:
return
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
Base.mem_classes.append(DoxyFile)
class DoxyNamespace(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'namespace'
Base.mem_classes.append(DoxyNamespace)
class DoxyGroup(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'group'
def _parse(self):
if self._parsed:
return
super(DoxyGroup, self)._parse()
self.retrieve_data()
if self._error:
return
cdef = self._retrieved_data.compounddef
self._data['title'] = description(cdef.title)
# Process inner groups
grps = cdef.innergroup
for grp in grps:
converted = DoxyGroup.from_refid(grp.refid, top=self.top)
self._members.append(converted)
# Process inner classes
klasses = cdef.innerclass
for kls in klasses:
converted = DoxyClass.from_refid(kls.refid, top=self.top)
self._members.append(converted)
# Process normal members
self.process_memberdefs()
title = property(lambda self: self.data()['title'])
Base.mem_classes.append(DoxyGroup)
class DoxyFriend(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
kind = 'friend'
Base.mem_classes.append(DoxyFriend)
class DoxyOther(Base):
__module__ = "gnuradio.utils.doxyxml"
kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum', 'dir', 'page'])
@classmethod
def can_parse(cls, obj):
return obj.kind in cls.kinds
Base.mem_classes.append(DoxyOther)
| gpl-3.0 | -1,528,458,329,518,198,500 | -3,998,889,201,959,037,400 | 28.180556 | 92 | 0.620538 | false |
cpcloud/PyTables | contrib/make_hdf.py | 14 | 10754 | #!/usr/bin/env python
from __future__ import generators
import tables, cPickle, time
#################################################################################
def is_scalar(item):
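    """Classify item: 'str' for strings, 'notstr' for non-iterable scalars,
    0 (falsy) for any other iterable - so truthiness means "treat as scalar"."""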
try:
iter(item)
#could be a string
try:
item[:0]+'' #check for string
return 'str'
except:
return 0
except:
return 'notstr'
def is_dict(item):
try:
item.iteritems()
return 1
except:
return 0
def make_col(row_type, row_name, row_item, str_len):
    '''for strings it will always make at least 80 char or twice max char size'''
set_len=80
if str_len:
if 2*str_len>set_len:
set_len=2*str_len
row_type[row_name]=tables.Col("CharType", set_len)
else:
type_matrix={
int: tables.Col("Int32", 1),
float: tables.Col("Float32", 4), #Col("Int16", 1)
}
row_type[row_name]=type_matrix[type(row_item)]
def make_row(data):
row_type={}
scalar_type=is_scalar(data)
if scalar_type:
if scalar_type=='str':
make_col(row_type, 'scalar', data, len(data))
else:
make_col(row_type, 'scalar', data, 0)
else: #it is a list-like
the_type=is_scalar(data[0])
if the_type=='str':
#get max length
the_max=0
for i in data:
if len(i)>the_max:
the_max=len(i)
make_col(row_type, 'col', data[0], the_max)
elif the_type:
make_col(row_type, 'col', data[0], 0)
else: #list within the list, make many columns
make_col(row_type, 'col_depth', 0, 0)
count=0
for col in data:
the_type=is_scalar(col[0])
if the_type=='str':
#get max length
the_max=0
for i in data:
if len(i)>the_max:
the_max=len(i)
make_col(row_type, 'col_'+str(count), col[0], the_max)
elif the_type:
make_col(row_type, 'col_'+str(count), col[0], 0)
else:
raise ValueError('too many nested levels of lists')
count+=1
return row_type
def add_table(fileh, group_obj, data, table_name):
#figure out if it is a list of lists or a single list
#get types of columns
row_type=make_row(data)
table1=fileh.createTable(group_obj, table_name, row_type, 'H', compress=1)
row=table1.row
if is_scalar(data):
row['scalar']=data
row.append()
else:
if is_scalar(data[0]):
for i in data:
row['col']=i
row.append()
else:
count=0
for col in data:
row['col_depth']=len(col)
for the_row in col:
if is_scalar(the_row):
row['col_'+str(count)]=the_row
row.append()
else:
raise ValueError('too many levels of lists')
count+=1
table1.flush()
def add_cache(fileh, cache):
group_name='pytables_cache_v0';table_name='cache0'
root=fileh.root
group_obj=fileh.createGroup(root, group_name)
cache_str=cPickle.dumps(cache, 0)
cache_str=cache_str.replace('\n', chr(1))
cache_pieces=[]
while cache_str:
cache_part=cache_str[:8000];cache_str=cache_str[8000:]
if cache_part:
cache_pieces.append(cache_part)
row_type={}
row_type['col_0']=tables.Col("CharType", 8000)
#
table_cache=fileh.createTable(group_obj, table_name, row_type, 'H', compress =1)
for piece in cache_pieces:
print len(piece)
table_cache.row['col_0']=piece
table_cache.row.append()
table_cache.flush()
def save2(hdf_file, data):
fileh=tables.openFile(hdf_file, mode='w', title='logon history')
root=fileh.root;cache_root=cache={}
root_path=root._v_pathname;root=0
stack = [ (root_path, data, cache) ]
table_num=0
count=0
while stack:
(group_obj_path, data, cache)=stack.pop()
        #data={'wilma':{'mother':[22,23,24]}}
#grp_name wilma
for grp_name in data:
#print 'fileh=',fileh
count+=1
cache[grp_name]={}
new_group_obj=fileh.createGroup(group_obj_path, grp_name)
#print 'path=',new_group_obj._v_pathname
new_path=new_group_obj._v_pathname
#if dict, you have a bunch of groups
if is_dict(data[grp_name]):#{'mother':[22,23,24]}
stack.append((new_path, data[grp_name], cache[grp_name]))
#you have a table
else:
#data[grp_name]=[110,130,140],[1,2,3]
add_table(fileh, new_path, data[grp_name], 'tbl_'+str(table_num))
table_num+=1
#fileh=tables.openFile(hdf_file,mode='a',title='logon history')
add_cache(fileh, cache_root)
fileh.close()
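# save2() maps a nested dict onto the HDF5 hierarchy: each dict key becomes a group,
# each non-dict value becomes a table under its group (a 'scalar' column for single
# values, 'col' for flat lists, 'col_N' plus 'col_depth' for lists of lists), and the
# key structure itself is pickled into /pytables_cache_v0/cache0 so that Hdf_dict
# below can navigate the file without re-walking it.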
########################
class Hdf_dict(dict):
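    """Read-only, dict-like view over an HDF5 file written by save2(): nested groups
    come back as nested Hdf_dict instances and leaf tables are read from disk on
    item access; __setitem__/__delitem__ are intentionally not implemented."""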
def __init__(self,hdf_file,hdf_dict={},stack=[]):
self.hdf_file=hdf_file
self.stack=stack
if stack:
self.hdf_dict=hdf_dict
else:
self.hdf_dict=self.get_cache()
self.cur_dict=self.hdf_dict
def get_cache(self):
fileh=tables.openFile(self.hdf_file, rootUEP='pytables_cache_v0')
table=fileh.root.cache0
total=[]
print 'reading'
begin=time.time()
for i in table.iterrows():
total.append(i['col_0'])
total=''.join(total)
total=total.replace(chr(1), '\n')
print 'loaded cache len=', len(total), time.time()-begin
begin=time.time()
a=cPickle.loads(total)
print 'cache', time.time()-begin
return a
def has_key(self, k):
return k in self.cur_dict
def keys(self):
return self.cur_dict.keys()
def get(self,key,default=None):
try:
return self.__getitem__(key)
except:
return default
def items(self):
return list(self.iteritems())
def values(self):
return list(self.itervalues())
###########################################
def __len__(self):
return len(self.cur_dict)
def __getitem__(self, k):
if k in self.cur_dict:
#now check if k has any data
if self.cur_dict[k]:
new_stack=self.stack[:]
new_stack.append(k)
return Hdf_dict(self.hdf_file, hdf_dict=self.cur_dict[k], stack=new_stack)
else:
new_stack=self.stack[:]
new_stack.append(k)
fileh=tables.openFile(self.hdf_file, rootUEP='/'.join(new_stack))
#cur_data=getattr(self.cur_group,k) #/wilma (Group) '' =getattr(/ (Group) 'logon history',wilma)
for table in fileh.root:
#return [ i['col_1'] for i in table.iterrows() ] #[9110,91]
#perhaps they stored a single item
try:
for item in table['scalar']:
return item
except:
#otherwise they stored a list of data
try:
return [ item for item in table['col']]
except:
cur_column=[]
total_columns=[]
col_num=0
cur_row=0
num_rows=0
for row in table:
if not num_rows:
num_rows=row['col_depth']
if cur_row==num_rows:
cur_row=num_rows=0
col_num+=1
total_columns.append(cur_column)
cur_column=[]
cur_column.append( row['col_'+str(col_num)])
cur_row+=1
total_columns.append(cur_column)
return total_columns
else:
raise KeyError(k)
    def iterkeys(self):
        for key in self.cur_dict:
            yield key
def __iter__(self):
return self.iterkeys()
def itervalues(self):
for k in self.iterkeys():
v=self.__getitem__(k)
yield v
def iteritems(self):
# yield children
for k in self.iterkeys():
v=self.__getitem__(k)
yield (k, v)
def __repr__(self):
return '{Hdf dict}'
def __str__(self):
return self.__repr__()
#####
def setdefault(self,key,default=None):
try:
return self.__getitem__(key)
except:
            self.__setitem__(key, default)
return default
def update(self, d):
for k, v in d.iteritems():
self.__setitem__(k, v)
def popitem(self):
try:
k, v = self.iteritems().next()
del self[k]
return k, v
except StopIteration:
raise KeyError("Hdf Dict is empty")
def __setitem__(self, key, value):
raise NotImplementedError
def __delitem__(self, key):
raise NotImplementedError
def __hash__(self):
        raise TypeError("Hdf dict objects are unhashable")
if __name__=='__main__':
def write_small(file=''):
data1={
'fred':['a', 'b', 'c'],
'barney':[[9110, 9130, 9140], [91, 92, 93]],
'wilma':{'mother':{'pebbles':[22, 23, 24],'bambam':[67, 68, 69]}}
}
print 'saving'
save2(file, data1)
print 'saved'
def read_small(file=''):
#a=make_hdf.Hdf_dict(file)
a=Hdf_dict(file)
print a['wilma']
b=a['wilma']
for i in b:
print i
print a.keys()
print 'has fred', bool('fred' in a)
print 'length a', len(a)
print 'get', a.get('fred'), a.get('not here')
print 'wilma keys', a['wilma'].keys()
print 'barney', a['barney']
print 'get items'
print a.items()
for i in a.iteritems():
print 'item', i
for i in a.itervalues():
print i
a=raw_input('enter y to write out test file to test.hdf')
if a.strip()=='y':
print 'writing'
write_small('test.hdf')
print 'reading'
read_small('test.hdf')
| bsd-3-clause | -8,379,329,006,876,277,000 | 6,020,117,530,471,565,000 | 29.378531 | 112 | 0.478706 | false |
kanagasabapathi/python-for-android | python3-alpha/python3-src/Tools/scripts/win_add2path.py | 49 | 1618 | """Add Python to the search path on Windows
This is a simple script to add Python to the Windows search path. It
modifies the current user (HKCU) tree of the registry.
Copyright (c) 2008 by Christian Heimes <[email protected]>
Licensed to PSF under a Contributor Agreement.
"""
import sys
import site
import os
import winreg
HKCU = winreg.HKEY_CURRENT_USER
ENV = "Environment"
PATH = "PATH"
DEFAULT = "%PATH%"
def modify():
pythonpath = os.path.dirname(os.path.normpath(sys.executable))
scripts = os.path.join(pythonpath, "Scripts")
appdata = os.environ["APPDATA"]
if hasattr(site, "USER_SITE"):
userpath = site.USER_SITE.replace(appdata, "%APPDATA%")
userscripts = os.path.join(userpath, "Scripts")
else:
userscripts = None
with winreg.CreateKey(HKCU, ENV) as key:
try:
envpath = winreg.QueryValueEx(key, PATH)[0]
except WindowsError:
envpath = DEFAULT
paths = [envpath]
for path in (pythonpath, scripts, userscripts):
if path and path not in envpath and os.path.isdir(path):
paths.append(path)
envpath = os.pathsep.join(paths)
winreg.SetValueEx(key, PATH, 0, winreg.REG_EXPAND_SZ, envpath)
return paths, envpath
def main():
paths, envpath = modify()
if len(paths) > 1:
print("Path(s) added:")
print('\n'.join(paths[1:]))
else:
print("No path was added")
print("\nPATH is now:\n%s\n" % envpath)
print("Expanded:")
print(winreg.ExpandEnvironmentStrings(envpath))
if __name__ == '__main__':
main()
| apache-2.0 | -8,959,307,499,427,768,000 | -7,598,370,078,580,116,000 | 27.385965 | 70 | 0.63597 | false |
jiaojianbupt/tools | project_manager/alias.py | 1 | 1746 | # -*- coding: utf-8 -*-
"""
Created by jiaojian at 2018/6/29 16:30
"""
import os
import sys
import termios
from tools.utils.basic_printer import print_with_style, ConsoleColor
HOME = os.environ['HOME']
def get_input():
fd = sys.stdin.fileno()
old_tty_info = termios.tcgetattr(fd)
new_tty_info = old_tty_info[:]
new_tty_info[3] &= ~termios.ICANON
new_tty_info[3] &= ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, new_tty_info)
answer = os.read(fd, 1)
termios.tcsetattr(fd, termios.TCSANOW, old_tty_info)
return answer
def add_alias():
if sys.platform == 'darwin':
bash_profile_name = '.bash_profile'
else:
bash_profile_name = '.bashrc'
linux_bash_profile_path = os.path.join(HOME, bash_profile_name)
exec_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'main.py')
alias = 'alias updateall="python %s"' % exec_file_path
if os.path.exists(linux_bash_profile_path):
        with open(linux_bash_profile_path, 'r') as bashrc_file:
bash_profile = bashrc_file.read()
if bash_profile.find(alias) >= 0:
return
answer = ''
while not answer or answer not in {'y', 'n'}:
print_with_style('Add \'%s\' to your %s?(y/n)' % (alias, bash_profile_name), color=ConsoleColor.YELLOW)
answer = get_input()
if answer == 'n':
return
elif answer == 'y':
break
bash_profile = bash_profile + '\n' + alias
with open(linux_bash_profile_path, 'w') as bashrc_file:
bashrc_file.write(bash_profile)
print_with_style('Alias added.', color=ConsoleColor.YELLOW)
| gpl-3.0 | -20,217,315,299,847,710 | -1,093,895,960,823,312,100 | 35.375 | 119 | 0.587056 | false |
75651/kbengine_cloud | kbe/res/scripts/common/Lib/test/test_json/test_indent.py | 103 | 1824 | import textwrap
from io import StringIO
from test.test_json import PyTest, CTest
class TestIndent:
def test_indent(self):
h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
{'nifty': 87}, {'field': 'yes', 'morefield': False} ]
expect = textwrap.dedent("""\
[
\t[
\t\t"blorpie"
\t],
\t[
\t\t"whoops"
\t],
\t[],
\t"d-shtaeou",
\t"d-nthiouh",
\t"i-vhbjkhnth",
\t{
\t\t"nifty": 87
\t},
\t{
\t\t"field": "yes",
\t\t"morefield": false
\t}
]""")
d1 = self.dumps(h)
d2 = self.dumps(h, indent=2, sort_keys=True, separators=(',', ': '))
d3 = self.dumps(h, indent='\t', sort_keys=True, separators=(',', ': '))
d4 = self.dumps(h, indent=2, sort_keys=True)
d5 = self.dumps(h, indent='\t', sort_keys=True)
h1 = self.loads(d1)
h2 = self.loads(d2)
h3 = self.loads(d3)
self.assertEqual(h1, h)
self.assertEqual(h2, h)
self.assertEqual(h3, h)
self.assertEqual(d2, expect.expandtabs(2))
self.assertEqual(d3, expect)
self.assertEqual(d4, d2)
self.assertEqual(d5, d3)
def test_indent0(self):
h = {3: 1}
def check(indent, expected):
d1 = self.dumps(h, indent=indent)
self.assertEqual(d1, expected)
sio = StringIO()
self.json.dump(h, sio, indent=indent)
self.assertEqual(sio.getvalue(), expected)
# indent=0 should emit newlines
check(0, '{\n"3": 1\n}')
# indent=None is more compact
check(None, '{"3": 1}')
class TestPyIndent(TestIndent, PyTest): pass
class TestCIndent(TestIndent, CTest): pass
| lgpl-3.0 | -1,929,048,864,508,327,400 | 235,027,976,996,644,260 | 26.223881 | 82 | 0.508772 | false |
gs0510/coala-bears | bears/python/PyFlakesBear.py | 13 | 1050 | from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.PipRequirement import PipRequirement
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
@linter(executable='pyflakes',
use_stderr=True,
output_format='regex',
output_regex=r'.*:(?P<line>\d+):'
r'[(?P<column>\d+):|?]*(?P<severity>)\s(?P<message>.*)\n',
severity_map={
'': RESULT_SEVERITY.INFO
})
class PyFlakesBear:
"""
Checks Python files for errors using ``pyflakes``.
See https://github.com/PyCQA/pyflakes for more info.
"""
LANGUAGES = {'Python', 'Python 3'}
REQUIREMENTS = {PipRequirement('pyflakes', '1.4.0')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'[email protected]'}
LICENSE = 'AGPL-3.0'
ASCIINEMA_URL = 'https://asciinema.org/a/92503'
CAN_DETECT = {'Syntax', 'Unused Code', 'Undefined Element'}
@staticmethod
def create_arguments(filename, file, config_file):
return filename,
| agpl-3.0 | 3,151,962,605,468,251,000 | -1,331,020,837,902,016,000 | 34 | 79 | 0.639048 | false |
amw2104/fireplace | setup.py | 1 | 1046 | #!/usr/bin/env python
import os.path
import fireplace
from setuptools import setup, find_packages
README = open(os.path.join(os.path.dirname(__file__), "README.md")).read()
CLASSIFIERS = [
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)"
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Topic :: Games/Entertainment :: Simulation",
]
setup(
name="fireplace",
version=fireplace.__version__,
packages=find_packages(exclude="tests"),
package_data={"": ["CardDefs.xml"]},
include_package_data=True,
tests_require=["pytest"],
author=fireplace.__author__,
author_email=fireplace.__email__,
description="Pure-python Hearthstone re-implementation and simulator",
classifiers=CLASSIFIERS,
download_url="https://github.com/jleclanche/python-bna/tarball/master",
long_description=README,
license="AGPLv3",
url="https://github.com/jleclanche/fireplace",
)
| agpl-3.0 | -5,186,562,347,708,572,000 | -5,971,693,403,019,094,000 | 28.885714 | 85 | 0.720841 | false |
sjperkins/tensorflow | tensorflow/contrib/keras/python/keras/metrics.py | 8 | 3418 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in Keras metrics functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.keras.python.keras import backend as K
# pylint: disable=unused-import
from tensorflow.contrib.keras.python.keras.losses import binary_crossentropy
from tensorflow.contrib.keras.python.keras.losses import categorical_crossentropy
from tensorflow.contrib.keras.python.keras.losses import cosine_proximity
from tensorflow.contrib.keras.python.keras.losses import hinge
from tensorflow.contrib.keras.python.keras.losses import kullback_leibler_divergence
from tensorflow.contrib.keras.python.keras.losses import logcosh
from tensorflow.contrib.keras.python.keras.losses import mean_absolute_error
from tensorflow.contrib.keras.python.keras.losses import mean_absolute_percentage_error
from tensorflow.contrib.keras.python.keras.losses import mean_squared_error
from tensorflow.contrib.keras.python.keras.losses import mean_squared_logarithmic_error
from tensorflow.contrib.keras.python.keras.losses import poisson
from tensorflow.contrib.keras.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.contrib.keras.python.keras.losses import squared_hinge
# pylint: disable=unused-import
from tensorflow.contrib.keras.python.keras.utils.generic_utils import deserialize_keras_object
def binary_accuracy(y_true, y_pred):
return K.mean(K.equal(y_true, K.round(y_pred)), axis=-1)
def categorical_accuracy(y_true, y_pred):
return K.cast(
K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)), K.floatx())
def sparse_categorical_accuracy(y_true, y_pred):
return K.cast(
K.equal(
K.max(y_true, axis=-1), K.cast(K.argmax(y_pred, axis=-1),
K.floatx())), K.floatx())
def top_k_categorical_accuracy(y_true, y_pred, k=5):
return K.mean(K.in_top_k(y_pred, K.argmax(y_true, axis=-1), k), axis=-1)
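# Illustrative reading (shapes assumed): with y_true one-hot encoded and y_pred
# holding per-class scores of the same shape, top_k_categorical_accuracy counts a
# sample as correct when the true class is among the k highest-scoring predictions.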
# Aliases
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
cosine = cosine_proximity
def serialize(metric):
return metric.__name__
def deserialize(name, custom_objects=None):
return deserialize_keras_object(
name,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='metric function')
def get(identifier):
if isinstance(identifier, six.string_types):
identifier = str(identifier)
return deserialize(identifier)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret '
'metric function identifier:', identifier)
| apache-2.0 | -6,979,803,541,612,930,000 | 1,246,293,236,988,803,000 | 36.56044 | 94 | 0.735225 | false |
dapengchen123/code_v1 | reid/datasets/market1501.py | 1 | 3563 | from __future__ import print_function, absolute_import
import os.path as osp
from ..utils.data import Dataset
from ..utils.osutils import mkdir_if_missing
from ..utils.serialization import write_json
class Market1501(Dataset):
url = 'https://drive.google.com/file/d/0B8-rUzbwVRk0c054eEozWG9COHM/view'
md5 = '65005ab7d12ec1c44de4eeafe813e68a'
def __init__(self, root, split_id=0, num_val=0.3, download=False):
super(Market1501, self).__init__(root, split_id=split_id)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. " +
"You can use download=True to download it.")
self.load(num_val)
def download(self):
if self._check_integrity():
print("Files already downloaded and verified")
return
import re
import hashlib
import shutil
from glob import glob
from zipfile import ZipFile
raw_dir = osp.join(self.root, 'raw')
mkdir_if_missing(raw_dir)
# Download the raw zip file
fpath = osp.join(raw_dir, 'Market-1501-v15.09.15.zip')
if osp.isfile(fpath) and \
hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5:
print("Using downloaded file: " + fpath)
else:
raise RuntimeError("Please download the dataset manually from {} "
"to {}".format(self.url, fpath))
# Extract the file
exdir = osp.join(raw_dir, 'Market-1501-v15.09.15')
if not osp.isdir(exdir):
print("Extracting zip file")
with ZipFile(fpath) as z:
z.extractall(path=raw_dir)
# Format
images_dir = osp.join(self.root, 'images')
mkdir_if_missing(images_dir)
# 1501 identities (+1 for background) with 6 camera views each
identities = [[[] for _ in range(6)] for _ in range(1502)]
def register(subdir, pattern=re.compile(r'([-\d]+)_c(\d)')):
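            # Illustrative filename (not taken from the original sources): a name
            # such as "0002_c1s1_000451_03.jpg" is parsed by the pattern above
            # into pid=2 and cam=1; pid == -1 marks junk images, skipped below.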
fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpg')))
pids = set()
for fpath in fpaths:
fname = osp.basename(fpath)
pid, cam = map(int, pattern.search(fname).groups())
if pid == -1: continue # junk images are just ignored
assert 0 <= pid <= 1501 # pid == 0 means background
assert 1 <= cam <= 6
cam -= 1
pids.add(pid)
fname = ('{:08d}_{:02d}_{:04d}.jpg'
.format(pid, cam, len(identities[pid][cam])))
identities[pid][cam].append(fname)
shutil.copy(fpath, osp.join(images_dir, fname))
return pids
trainval_pids = register('bounding_box_train')
gallery_pids = register('bounding_box_test')
query_pids = register('query')
assert query_pids <= gallery_pids
assert trainval_pids.isdisjoint(gallery_pids)
# Save meta information into a json file
meta = {'name': 'Market1501', 'shot': 'multiple', 'num_cameras': 6,
'identities': identities}
write_json(meta, osp.join(self.root, 'meta.json'))
# Save the only training / test split
splits = [{
'trainval': sorted(list(trainval_pids)),
'query': sorted(list(query_pids)),
'gallery': sorted(list(gallery_pids))}]
write_json(splits, osp.join(self.root, 'splits.json'))
| mit | -2,128,807,249,401,896,700 | -1,601,173,726,657,748,500 | 36.505263 | 78 | 0.561605 | false |
glenflet/ZtoRGBpy | ZtoRGBpy/_info.py | 1 | 2082 | # -*- coding: utf-8 -*-
# =================================================================================
# Copyright 2019 Glen Fletcher <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# All documentation this file as docstrings or comments are licensed under the
# Creative Commons Attribution-ShareAlike 4.0 International License; you may
# not use this documentation except in compliance with this License.
# You may obtain a copy of this License at
#
# https://creativecommons.org/licenses/by-sa/4.0
#
# =================================================================================
"""
ZtoRGB information definition module
Special private module used for automatic processing, and inclusion
.. moduleauthor:: Glen Fletcher <[email protected]>
"""
__authors__ = [
("Glen Fletcher", "[email protected]")]
__copyright__ = "2019 Glen Fletcher"
__license__ = """\
The source code for this package is licensed under the [Apache 2.0 License](http://www.apache.org/licenses/LICENSE-2.0),
while the documentation including docstrings and comments embedded in the source code are licensed under the
[Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0)
"""
__contact__ = "Glen Fletcher <[email protected]>"
__version__ = "2.0"
__title__ = "ZtoRGBpy"
__desc__ = """\
Complex number to perceptually uniform RGB subset mapping library"""
__all__ = [
'__authors__', '__copyright__', '__license__',
'__contact__', '__version__', '__title__',
'__desc__']
| mit | -1,393,687,778,087,292,200 | 8,241,756,345,185,078,000 | 40.64 | 120 | 0.662344 | false |
ElecProg/decmath | decmath/trig.py | 1 | 4598 | from decimal import getcontext, Decimal
from decmath import _pi, _to_Decimal, sign
# Trigonometric functions
def acos(x):
"""Return the arc cosine (measured in radians) of x."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal("NaN")
elif abs(x) > 1:
raise ValueError("Domain error: acos accepts -1 <= x <= 1.")
elif x == -1:
return _pi()
elif x == 0:
return _pi() / 2
elif x == 1:
return Decimal(0)
getcontext().prec += 2
one_half = Decimal('0.5')
i, lasts, s, gamma, fact, num = Decimal(0), 0, _pi() / 2 - x, 1, 1, x
while s != lasts:
lasts = s
i += 1
fact *= i
num *= x * x
gamma *= i - one_half
coeff = gamma / ((2 * i + 1) * fact)
s -= coeff * num
getcontext().prec -= 2
return +s
def asin(x):
"""Return the arc sine (measured in radians) of x."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal("NaN")
elif abs(x) > 1:
raise ValueError("Domain error: asin accepts -1 <= x <= 1.")
elif x == -1:
return -_pi() / 2
elif x == 0:
return Decimal(0)
elif x == 1:
return _pi() / 2
getcontext().prec += 2
one_half = Decimal('0.5')
i, lasts, s, gamma, fact, num = Decimal(0), 0, x, 1, 1, x
while s != lasts:
lasts = s
i += 1
fact *= i
num *= x * x
gamma *= i - one_half
coeff = gamma / ((2 * i + 1) * fact)
s += coeff * num
getcontext().prec -= 2
return +s
def atan(x):
"""Return the arc tangent (measured in radians) of x."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal("NaN")
elif x == Decimal('-Inf'):
return -_pi() / 2
elif x == 0:
return Decimal(0)
elif x == Decimal('Inf'):
return _pi() / 2
if x < -1:
c = _pi() / -2
x = 1 / x
elif x > 1:
c = _pi() / 2
x = 1 / x
else:
c = 0
getcontext().prec += 2
x_squared = x**2
y = x_squared / (1 + x_squared)
y_over_x = y / x
i, lasts, s, coeff, num = Decimal(0), 0, y_over_x, 1, y_over_x
while s != lasts:
lasts = s
i += 2
coeff *= i / (i + 1)
num *= y
s += coeff * num
if c:
s = c - s
getcontext().prec -= 2
return +s
def atan2(y, x):
"""Return the arc tangent (measured in radians) of y/x.
Unlike atan(y/x), the signs of both x and y are considered."""
y = _to_Decimal(y)
x = _to_Decimal(x)
abs_y = abs(y)
abs_x = abs(x)
y_is_real = abs_y != Decimal('Inf')
if y.is_nan() or x.is_nan():
return Decimal("NaN")
if x:
if y_is_real:
a = y and atan(y / x) or Decimal(0)
if x < 0:
a += sign(y) * _pi()
return a
elif abs_y == abs_x:
x = sign(x)
y = sign(y)
return _pi() * (Decimal(2) * abs(x) - x) / (Decimal(4) * y)
if y:
return atan(sign(y) * Decimal('Inf'))
elif sign(x) < 0:
return sign(y) * _pi()
else:
return sign(y) * Decimal(0)
def cos(x):
"""Return the cosine of x as measured in radians."""
x = _to_Decimal(x) % (2 * _pi())
if x.is_nan():
return Decimal('NaN')
elif x == _pi() / 2 or x == 3 * _pi() / 2:
return Decimal(0)
getcontext().prec += 2
i, lasts, s, fact, num, sign = 0, 0, 1, 1, 1, 1
while s != lasts:
lasts = s
i += 2
fact *= i * (i - 1)
num *= x * x
sign *= -1
s += num / fact * sign
getcontext().prec -= 2
return +s
def hypot(x, y):
"""Return the Euclidean distance, sqrt(x*x + y*y)."""
return (_to_Decimal(x).__pow__(2) + _to_Decimal(y).__pow__(2)).sqrt()
def sin(x):
"""Return the sine of x as measured in radians."""
x = _to_Decimal(x) % (2 * _pi())
if x.is_nan():
return Decimal('NaN')
elif x == 0 or x == _pi():
return Decimal(0)
getcontext().prec += 2
i, lasts, s, fact, num, sign = 1, 0, x, 1, x, 1
while s != lasts:
lasts = s
i += 2
fact *= i * (i - 1)
num *= x * x
sign *= -1
s += num / fact * sign
getcontext().prec -= 2
return +s
def tan(x):
"""Return the tangent of x (measured in radians)."""
x = _to_Decimal(x)
if x.is_nan():
return Decimal('NaN')
elif x == _pi() / 2:
return Decimal('Inf')
elif x == 3 * _pi() / 2:
return Decimal('-Inf')
return sin(x) / cos(x)
| mit | 4,598,384,634,904,319,000 | -5,145,348,610,334,488,000 | 22.579487 | 73 | 0.45933 | false |
martindurant/astrobits | time_series.py | 1 | 12543 | """Take a list of files and known star coordinates, and
perform photometry on them all, either with apertures (phot)
or by PSF fitting (daophot, which required additional
parameters and is apropriate to poor S/N or crowded fields).
Makes extensive use of iraf tasks; set all photometry parameters
before running:
datapars - for data characteristics
centerpars - finding the reference star on each image.
centerpars, photpars, fitskypars - for controling aperture photometry
daopars - for controling daophot
filelist: set of image files, in IRAF syntax (image.fits[1][*,*,2] etc);
can be more than one per cube.
coords: name a file containing all star coords for photometry, based on
an image unshifted relative to (0,0) in the shifts list. Use pure numbers
for the phot method, .mag or .als for the daophot method.
shifts: name of a file containing shifts, a tuple of shift arrays, or image
header keywords (a tuple of two); or None for no shifts.
refstar: coords of star for deriving (x,y) offset, as in coords
timestamp: source of the timing information: a header keyword, delta-t
for uniform sampling, or a file with times (in whatever format you'll be
using later).
psf: whether to use daophot or aperture phot for analysis. If this is a
filename, that is the PSF profile to use for every image; if it is "True",
make a new PSF for every image. The parameters below apply only to full PSF fitting.
pststars: a .pst file from daophot, listing the IDs of stars for making
the PSF for each image. NB: DAOphot refuses to measure any star with SNR<2.
ids: which stars are interesting, by ID (in input coord list order)
coords: starting well-measured coords (pdump-ed from a .als, perhaps).
"""
import os
import numpy
from glob import glob
import pyfits
from pylab import find
from numpy import load,vstack,save,median
thisdir = os.getcwd()
os.chdir("/home/durant")
from pyraf import iraf
iraf.cd(thisdir)
iraf.digiphot()
iraf.daophot()
import pyraf
import pyfits
import numpy as n
def shift_file_coords(filename,xshift,yshift,output,sort=None):
"""Understands filetypes: 2-column ascii numbers, .mag, .als, .pst.
NB: shift means where each image is, relative to the original (not where
it should be moved to).
"""
if not(sort):
sort = 'num'
if filename.find('.mag')>0: sort = 'mag'
if filename.find('.als')>0: sort = 'als'
if filename.find('.pst')>0: sort = 'pst'
if not(sort=='num' or sort=='mag' or sort=='als' or sort=='pst'):
raise ValueError('Unknown input filetype: %s'%filename)
if sort=='num': # shift 2-column numeric ASCII table
x,y = load(filename,usecols=[0,1],unpack=True)
x += xshift
y += yshift
X = vstack((x,y))
save(output,X.transpose())
return
if sort=='mag': #shift a .mag photometry file
fred = open(filename)
freda= open(output,'w')
for line in fred:
if line.split()[-1]=='\\' and len(line.split())==9 and line[0]!='#':
x = float(line.split()[0]) + xshift
y = float(line.split()[1]) + yshift
line = "%-14.3f %-11.3f"%(x,y)+line[21:]
freda.write(line)
if sort=='als': #shift a .als DAOphot photometry file
fred = open(filename)
freda= open(output,'w')
for line in fred:
if line.split()[-1]=='\\' and len(line.split())==8 and line[0]!='#':
x = float(line.split()[1]) + xshift
y = float(line.split()[2]) + yshift
line = line[:9] + "%-10.3f %-10.3f"%(x,y) + line[29:]
freda.write(line)
if sort=='pst': #shift a PSF star list for DAOphot
fred = open(filename)
freda= open(output,'w')
for line in fred:
if line[0]!="#":
x = float(line.split()[1]) + xshift
y = float(line.split()[2]) + yshift
line = line[:9] + "%-10.3f %-10.3f"%(x,y) + line[29:]
freda.write(line)
fred.close()
freda.close()
def recentre(image,refcoordfile):
"""Returns improved shift by centroiding
on the reference star using phot. This can be VERY
sensitive to the parameters in centerpars."""
xin,yin = load(refcoordfile,unpack=True)
try:
iraf.phot(image,refcoordfile,'temp.mag',inter="no",calgorithm='centroid',
mode='h',verify='no',update='no',verbose='no')
xout,yout=iraf.pdump('temp.mag','xcen,ycen','yes',Stdout=1)[0].split()
except:
print "Recentring failed on", image
return 0.,0.
xout,yout = float(xout),float(yout)
return xout-xin,yout-yin
vary_par = 1.
vary_max = 10
vary_min = 6
vary_fwhm= 0
def setaperture(image,refstar):
"""Measure the FWHM of the reference star unsing simple DAOphot editor
and then set the photometry aperture to this number"""
x,y = load(refstar,unpack=True)
fred = open('tempaperfile','w')
fred.write("%f %f 100 a\nq"%(x,y))
fred.close()
try:
output=iraf.daoedit(image,icomm='tempaperfile',Stdout=1,Stderr=1)
except:
print "Aperture setting failed on",image
return
FWHM = float(output[3].split()[4])
iraf.photpars.apertures = min(max(FWHM*vary_par,vary_min),vary_max)
iraf.daopars.fitrad = min(max(FWHM*vary_par,vary_min),vary_max)
global vary_fwhm
vary_fwhm = FWHM
print "FWHM: ", FWHM, " aperture: ",iraf.photpars.apertures
def apphot(image,coords,refstar=None,centre=False,vary=False):
"""Apperture photometry with centering based on a reference star.
NB: centre refers to shifting the coordinates by centroiding on the
reference star; recentering on the final phot depends on
centerpars.calgorithm ."""
iraf.dele('temp.mag*')
if centre:
xsh,ysh = recentre(image,refstar)
print "Fine centring: ", xsh,ysh
    else: # no recentring by reference star (but could still have calgorithm!=none)
xsh,ysh = 0,0
if vary:
setaperture(image,refstar)
shift_file_coords(coords,xsh,ysh,'tempcoords')
iraf.phot(image,'tempcoords','temp.mag2',inter="no",
mode='h',verify='no',update='no',verbose='no')
out = iraf.pdump('temp.mag2','id,flux,msky,stdev','yes',Stdout=1)
return out
def psfphot(image,coords,pststars,refstar,centre=True,vary=False):
"""PSF photometry. Centering is through phot on refstar.
Assume coords is a .als file for now. Recentering is always done
for the reference star, never for the targets."""
iraf.dele('temp.mag*')
iraf.dele('temp.psf.fits')
iraf.dele('temp.als')
if centre:
xsh,ysh = recentre(image,refstar)
print "Fine Centring: ", xsh,ysh
else: xsh,ysh = 0,0
if vary:
setaperture(image,refstar)
shift_file_coords(coords,xsh,ysh,'tempcoords2',sort='als')
shift_file_coords(pststars,xsh,ysh,'temppst2',sort='pst')
iraf.phot(image,'tempcoords2','temp.mag2',inter="no",calgorithm='none',
mode='h',verify='no',update='no',verbose='no')
iraf.psf(image,'temp.mag2','temppst2','temp.psf','temp.mag.pst','temp.mag.psg',
inter='no',mode='h',verify='no',update='no',verbose='no')
iraf.allstar(image,'temp.mag2','temp.psf','temp.als','temp.mag.arj',"default",
mode='h',verify='no',update='no',verbose='no')
out = iraf.pdump('temp.als','id,mag,merr,msky','yes',Stdout=1)
return out
def simplepsfphot(image,coords,psf,refstar,centre=True,vary=False):
"""PSF photometry, with a given PSF file in psf used for every image"""
iraf.dele('temp.mag*')
iraf.dele('temp.als')
iraf.dele('temp.sub.fits')
if centre:
xsh,ysh = recentre(image,refstar)
print "Fine Centring: ", xsh,ysh
else: xsh,ysh = 0,0
if vary:
setaperture(image,refstar)
shift_file_coords(coords,xsh,ysh,'tempcoords2',sort='als')
iraf.phot(image,'tempcoords2','temp.mag2',inter="no",calgorithm='none',
mode='h',verify='no',update='no',verbose='no')
iraf.allstar(image,'temp.mag2',psf,'temp.als','temp.mag.arj','temp.sub.fits',
mode='h',verify='no',update='no',verbose='no')
out = iraf.pdump('temp.als','id,mag,merr,msky','yes',Stdout=1)
return out
def custom1(filename): # for NACO timing mode cubes - removes horizontal banding
#iraf.imarith(filename,'-','dark','temp')
iraf.imarith(filename,'/','flatK','temp')
im = pyfits.getdata('temp.fits')
med = median(im.transpose())
out = ((im).transpose()-med).transpose()
(pyfits.ImageHDU(out)).writeto("temp2.fits",clobber=True)
iraf.imdel('temp')
iraf.imcopy('temp2[1]','temp')
def get_id(starid,output='output'):
"""from the output of the photometry, grab the magnitudes and magerrs of starid"""
mag = load(output,usecols=[4+starid*4])
merr= load(output,usecols=[5+starid*4])
return mag,merr
def run(filelist,coords,refstar,shifts=None,centre=False,psf=False,pststars=None,
ids=None,dark=0,flat=1,timestamp="TIME",output='output',custom_process=None,
vary=False):
"""If psf==True, must include all extra par files.
    If PSF is a filename (.psf.fits), this profile is used to fit every image.
Timestamp can be either a file of times (same length as filelist), a header
keyword, or an array of times.
The input list can include [] notation for multiple extensions or sections
of each file (incompatible with header-based time-stamps).
    custom_process(file) is a function taking a filename (possibly including [x]
syntax) and places a processed image in temp.fits."""
output = open(output,'w')
x = load(coords,usecols=[1])
numstars = len(x)
myfiles = open(filelist).readlines()
myfiles = [myfiles[i][:-1] for i in range(len(myfiles))]
if timestamp.__class__ == numpy.ndarray: #--sort out times--
times = 1 #times=1 means we know the times beforehand
elif len(glob(timestamp))>0:
timestamp = load(timestamp,usecols=[0])
times=1
else:
        times=0 #times=0 means find the time from each image
if type(shifts)==type(" "): #--sort out shifts--
xshifts,yshifts = load(shifts,unpack=True)#filename give, assuming 2 columns
xshifts,yshifts = -xshifts,-yshifts #these are in the opposite sense to coords from stack
elif n.iterable(shifts):
xshifts=n.array(shifts[0]) #for shifts given as arrays/lists
yshifts=n.array(shifts[1])
else:
print "No shifts" #assume all shifts are zero
xshifts = n.zeros(len(myfiles))
yshifts = n.zeros(len(myfiles))
for i,thisfile in enumerate(myfiles): #run!
print i,thisfile
if times:
time = timestamp[i] #known time
else:
time = pyfits.getval(thisfile,timestamp) #FITS keyword
try:
iraf.dele('temp.fits')
if custom_process: #arbitrary subroutine to process a file -> temp.fits
custom_process(thisfile)
else: #typical dark/bias subtract and flatfield
iraf.imarith(thisfile,'-',dark,'temp')
iraf.imarith('temp','/',flat,'temp')
shift_file_coords(coords,xshifts[i],yshifts[i],'tempcoords') #apply coarse shifts
shift_file_coords(refstar,xshifts[i],yshifts[i],'tempref',sort='num')
if psf:
if psf is True: #full PSF fit
shift_file_coords(pststars,xshifts[i],yshifts[i],'temppst')
out=psfphot('temp.fits','tempcoords','temppst','tempref',centre,vary)
else: #DAOphot with known PSF
out=simplepsfphot('temp.fits','tempcoords',psf,'tempref',centre,vary)
else: #aperture photometry
out=apphot('temp.fits','tempcoords','tempref',centre,vary=vary)
output.write("%s %s %s "%(thisfile,time,vary_fwhm))
myids = n.array([int(out[i].split()[0]) for i in range(len(out))])
for i in ids or range(numstars):
try: #search for each requested ID
foundid = find(myids==i)[0]
output.write(out[foundid]+" ")
except: #ID not found
output.write(" 0 0 0 0 ")
output.write("\n")
except KeyboardInterrupt: #exit on Ctrl-C
break
except pyraf.irafglobals.IrafError, err:
print "IRAF error ",err,thisfile
break
except ValueError, err:
print "Value error ",err,thisfile
raise
output.close()
#iraf.dele('temp*')
| mit | 3,797,566,785,642,412,000 | 1,540,606,497,877,693,000 | 42.251724 | 97 | 0.63358 | false |
dunkhong/grr | grr/server/grr_response_server/databases/db_yara_test_lib.py | 1 | 1573 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""A module with test cases for the YARA database method."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from grr_response_server.databases import db
from grr_response_server.rdfvalues import objects as rdf_objects
class DatabaseTestYaraMixin(object):
"""A mixin class for testing YARA methods of database implementations."""
def testWriteYaraSignatureReferenceIncorrectUsername(self):
blob_id = rdf_objects.BlobID(os.urandom(32))
with self.assertRaises(db.UnknownGRRUserError) as context:
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="quux")
self.assertEqual(context.exception.username, "quux")
def testWriteYaraSignatureReferenceDuplicated(self):
self.db.WriteGRRUser("foo")
blob_id = rdf_objects.BlobID(os.urandom(32))
# Writing duplicated signatures is possible, it should not raise.
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
def testVerifyYaraSignatureReferenceSimple(self):
self.db.WriteGRRUser("foo")
blob_id = rdf_objects.BlobID(os.urandom(32))
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
self.assertTrue(self.db.VerifyYaraSignatureReference(blob_id))
def testVerifyYaraSignatureReferenceIncorrect(self):
blob_id = rdf_objects.BlobID(os.urandom(32))
self.assertFalse(self.db.VerifyYaraSignatureReference(blob_id))
| apache-2.0 | 7,745,204,012,515,599,000 | -7,463,004,188,567,299,000 | 33.195652 | 75 | 0.760331 | false |
baslr/ArangoDB | 3rdParty/V8/V8-5.0.71.39/tools/find-commit-for-patch.py | 53 | 3327 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import subprocess
import sys
def GetArgs():
parser = argparse.ArgumentParser(
description="Finds a commit that a given patch can be applied to. "
"Does not actually apply the patch or modify your checkout "
"in any way.")
parser.add_argument("patch_file", help="Patch file to match")
parser.add_argument(
"--branch", "-b", default="origin/master", type=str,
help="Git tree-ish where to start searching for commits, "
"default: %(default)s")
parser.add_argument(
"--limit", "-l", default=500, type=int,
help="Maximum number of commits to search, default: %(default)s")
parser.add_argument(
"--verbose", "-v", default=False, action="store_true",
help="Print verbose output for your entertainment")
return parser.parse_args()
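# Example invocation (patch file name is illustrative):
#   ./find-commit-for-patch.py my-fix.patch --branch origin/master --limit 1000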
def FindFilesInPatch(patch_file):
files = {}
next_file = ""
with open(patch_file) as patch:
for line in patch:
if line.startswith("diff --git "):
# diff --git a/src/objects.cc b/src/objects.cc
words = line.split()
assert words[2].startswith("a/") and len(words[2]) > 2
next_file = words[2][2:]
elif line.startswith("index "):
# index add3e61..d1bbf6a 100644
hashes = line.split()[1]
old_hash = hashes.split("..")[0]
if old_hash.startswith("0000000"): continue # Ignore new files.
files[next_file] = old_hash
return files
def GetGitCommitHash(treeish):
cmd = ["git", "log", "-1", "--format=%H", treeish]
return subprocess.check_output(cmd).strip()
def CountMatchingFiles(commit, files):
matched_files = 0
# Calling out to git once and parsing the result Python-side is faster
# than calling 'git ls-tree' for every file.
cmd = ["git", "ls-tree", "-r", commit] + [f for f in files]
output = subprocess.check_output(cmd)
for line in output.splitlines():
# 100644 blob c6d5daaa7d42e49a653f9861224aad0a0244b944 src/objects.cc
_, _, actual_hash, filename = line.split()
expected_hash = files[filename]
if actual_hash.startswith(expected_hash): matched_files += 1
return matched_files
def FindFirstMatchingCommit(start, files, limit, verbose):
commit = GetGitCommitHash(start)
num_files = len(files)
if verbose: print(">>> Found %d files modified by patch." % num_files)
for _ in range(limit):
matched_files = CountMatchingFiles(commit, files)
if verbose: print("Commit %s matched %d files" % (commit, matched_files))
if matched_files == num_files:
return commit
commit = GetGitCommitHash("%s^" % commit)
print("Sorry, no matching commit found. "
"Try running 'git fetch', specifying the correct --branch, "
"and/or setting a higher --limit.")
sys.exit(1)
if __name__ == "__main__":
args = GetArgs()
files = FindFilesInPatch(args.patch_file)
commit = FindFirstMatchingCommit(args.branch, files, args.limit, args.verbose)
if args.verbose:
print(">>> Matching commit: %s" % commit)
print(subprocess.check_output(["git", "log", "-1", commit]))
print(">>> Kthxbai.")
else:
print(commit)
| apache-2.0 | 1,567,997,230,157,726,500 | -8,159,050,196,743,025,000 | 34.774194 | 80 | 0.65284 | false |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Twisted-15.2.1/twisted/internet/test/test_threads.py | 39 | 7983 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorThreads}.
"""
from __future__ import division, absolute_import
__metaclass__ = type
from weakref import ref
import gc, threading
from twisted.python.threadable import isInIOThread
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.python.threadpool import ThreadPool
from twisted.internet.interfaces import IReactorThreads
class ThreadTestsBuilder(ReactorBuilder):
"""
Builder for defining tests relating to L{IReactorThreads}.
"""
requiredInterfaces = (IReactorThreads,)
def test_getThreadPool(self):
"""
C{reactor.getThreadPool()} returns an instance of L{ThreadPool} which
starts when C{reactor.run()} is called and stops before it returns.
"""
state = []
reactor = self.buildReactor()
pool = reactor.getThreadPool()
self.assertIsInstance(pool, ThreadPool)
self.assertFalse(
pool.started, "Pool should not start before reactor.run")
def f():
# Record the state for later assertions
state.append(pool.started)
state.append(pool.joined)
reactor.stop()
reactor.callWhenRunning(f)
self.runReactor(reactor, 2)
self.assertTrue(
state[0], "Pool should start after reactor.run")
self.assertFalse(
state[1], "Pool should not be joined before reactor.stop")
self.assertTrue(
pool.joined,
"Pool should be stopped after reactor.run returns")
def test_suggestThreadPoolSize(self):
"""
C{reactor.suggestThreadPoolSize()} sets the maximum size of the reactor
threadpool.
"""
reactor = self.buildReactor()
reactor.suggestThreadPoolSize(17)
pool = reactor.getThreadPool()
self.assertEqual(pool.max, 17)
def test_delayedCallFromThread(self):
"""
A function scheduled with L{IReactorThreads.callFromThread} invoked
from a delayed call is run immediately in the next reactor iteration.
When invoked from the reactor thread, previous implementations of
L{IReactorThreads.callFromThread} would skip the pipe/socket based wake
up step, assuming the reactor would wake up on its own. However, this
resulted in the reactor not noticing a insert into the thread queue at
the right time (in this case, after the thread queue has been processed
for that reactor iteration).
"""
reactor = self.buildReactor()
def threadCall():
reactor.stop()
# Set up the use of callFromThread being tested.
reactor.callLater(0, reactor.callFromThread, threadCall)
before = reactor.seconds()
self.runReactor(reactor, 60)
after = reactor.seconds()
# We specified a timeout of 60 seconds. The timeout code in runReactor
# probably won't actually work, though. If the reactor comes out of
# the event notification API just a little bit early, say after 59.9999
# seconds instead of after 60 seconds, then the queued thread call will
# get processed but the timeout delayed call runReactor sets up won't!
# Then the reactor will stop and runReactor will return without the
# timeout firing. As it turns out, select() and poll() are quite
# likely to return *slightly* earlier than we ask them to, so the
# timeout will rarely happen, even if callFromThread is broken. So,
# instead we'll measure the elapsed time and make sure it's something
# less than about half of the timeout we specified. This is heuristic.
# It assumes that select() won't ever return after 30 seconds when we
# asked it to timeout after 60 seconds. And of course like all
# time-based tests, it's slightly non-deterministic. If the OS doesn't
# schedule this process for 30 seconds, then the test might fail even
# if callFromThread is working.
self.assertTrue(after - before < 30)
def test_callFromThread(self):
"""
A function scheduled with L{IReactorThreads.callFromThread} invoked
from another thread is run in the reactor thread.
"""
reactor = self.buildReactor()
result = []
def threadCall():
result.append(threading.currentThread())
reactor.stop()
reactor.callLater(0, reactor.callInThread,
reactor.callFromThread, threadCall)
self.runReactor(reactor, 5)
self.assertEqual(result, [threading.currentThread()])
def test_stopThreadPool(self):
"""
When the reactor stops, L{ReactorBase._stopThreadPool} drops the
reactor's direct reference to its internal threadpool and removes
the associated startup and shutdown triggers.
This is the case of the thread pool being created before the reactor
is run.
"""
reactor = self.buildReactor()
threadpool = ref(reactor.getThreadPool())
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
gc.collect()
self.assertIs(threadpool(), None)
def test_stopThreadPoolWhenStartedAfterReactorRan(self):
"""
We must handle the case of shutting down the thread pool when it was
started after the reactor was run in a special way.
Some implementation background: The thread pool is started with
callWhenRunning, which only returns a system trigger ID when it is
invoked before the reactor is started.
This is the case of the thread pool being created after the reactor
is started.
"""
reactor = self.buildReactor()
threadPoolRefs = []
def acquireThreadPool():
threadPoolRefs.append(ref(reactor.getThreadPool()))
reactor.stop()
reactor.callWhenRunning(acquireThreadPool)
self.runReactor(reactor)
gc.collect()
self.assertIs(threadPoolRefs[0](), None)
def test_cleanUpThreadPoolEvenBeforeReactorIsRun(self):
"""
When the reactor has its shutdown event fired before it is run, the
thread pool is completely destroyed.
For what it's worth, the reason we support this behavior at all is
because Trial does this.
This is the case of the thread pool being created without the reactor
being started at al.
"""
reactor = self.buildReactor()
threadPoolRef = ref(reactor.getThreadPool())
reactor.fireSystemEvent("shutdown")
gc.collect()
self.assertIs(threadPoolRef(), None)
def test_isInIOThread(self):
"""
The reactor registers itself as the I/O thread when it runs so that
L{twisted.python.threadable.isInIOThread} returns C{True} if it is
called in the thread the reactor is running in.
"""
results = []
reactor = self.buildReactor()
def check():
results.append(isInIOThread())
reactor.stop()
reactor.callWhenRunning(check)
self.runReactor(reactor)
self.assertEqual([True], results)
def test_isNotInIOThread(self):
"""
The reactor registers itself as the I/O thread when it runs so that
L{twisted.python.threadable.isInIOThread} returns C{False} if it is
called in a different thread than the reactor is running in.
"""
results = []
reactor = self.buildReactor()
def check():
results.append(isInIOThread())
reactor.callFromThread(reactor.stop)
reactor.callInThread(check)
self.runReactor(reactor)
self.assertEqual([False], results)
globals().update(ThreadTestsBuilder.makeTestCaseClasses())
| mit | -1,550,284,574,760,127,000 | 2,859,887,216,784,146,000 | 35.286364 | 79 | 0.651384 | false |
CuonDeveloper/cuon | cuon_client/Client/CUON/cuon/Web2/SingleWeb2.py | 3 | 1723 | # -*- coding: utf-8 -*-
##Copyright (C) [2005] [Jürgen Hamel, D-32584 Löhne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from cuon.Databases.SingleData import SingleData
import logging
import pygtk
pygtk.require('2.0')
import gtk
#import gtk.glade
import gobject
class SingleWeb2(SingleData):
def __init__(self, allTables):
SingleData.__init__(self)
# tables.dbd and address
self.sNameOfTable = "web2"
self.xmlTableDef = 0
# self.loadTable()
# self.saveTable()
self.loadTable(allTables)
self.setStore( gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_UINT) )
self.listHeader['names'] = ['title', 'designation', 'ID']
self.listHeader['size'] = [25,10,25,25,10]
print "number of Columns "
print len(self.table.Columns)
#
self.cType = 'html'
def readNonWidgetEntries(self, dicValues):
print 'readNonWidgetEntries(self) by SingleWeb2'
dicValues['ctype'] = [self.cType, 'string']
return dicValues | gpl-3.0 | -1,720,694,092,110,636,000 | -5,553,102,172,206,081,000 | 34.875 | 126 | 0.687391 | false |
AdamWill/blivet | blivet/populator/helpers/devicepopulator.py | 6 | 2082 | # populator/helpers/devicepopulator.py
# Base class for device-type-specific helpers for populating a DeviceTree.
#
# Copyright (C) 2009-2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU Lesser General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY expressed or implied, including the implied
# warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU Lesser General Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with this
# program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks
# that are incorporated in the source code or documentation are not subject
# to the GNU Lesser General Public License and may only be used or
# replicated with the express permission of Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <[email protected]>
#
from .populatorhelper import PopulatorHelper
from ... import udev
# pylint: disable=abstract-method
class DevicePopulator(PopulatorHelper):
""" Populator helper base class for devices.
Subclasses must define a match method and, if they want to instantiate
a device, a run method.
"""
@classmethod
def match(cls, data):
return False
def _handle_rename(self):
name = udev.device_get_name(self.data)
if self.device.name != name:
self.device.name = name
# TODO: update name registry -- better yet, generate the name list on demand
def _handle_resize(self):
old_size = self.device.current_size
self.device.update_size()
if old_size != self.device.current_size:
self._devicetree.cancel_disk_actions(self.device.disks)
def update(self):
self._handle_rename()
self._handle_resize()
| lgpl-2.1 | -1,546,580,523,549,254,700 | 5,579,563,148,287,075,000 | 39.038462 | 84 | 0.717099 | false |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/CherryPy-3.8.0/cherrypy/tutorial/tut03_get_and_post.py | 22 | 1719 | """
Tutorial - Passing variables
This tutorial shows you how to pass GET/POST variables to methods.
"""
import cherrypy
class WelcomePage:
def index(self):
# Ask for the user's name.
return '''
<form action="greetUser" method="GET">
What is your name?
<input type="text" name="name" />
<input type="submit" />
</form>'''
index.exposed = True
def greetUser(self, name=None):
# CherryPy passes all GET and POST variables as method parameters.
# It doesn't make a difference where the variables come from, how
# large their contents are, and so on.
#
# You can define default parameter values as usual. In this
# example, the "name" parameter defaults to None so we can check
# if a name was actually specified.
if name:
# Greet the user!
return "Hey %s, what's up?" % name
else:
if name is None:
# No name was specified
return 'Please enter your name <a href="./">here</a>.'
else:
return 'No, really, enter your name <a href="./">here</a>.'
greetUser.exposed = True
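    # For example, a GET request to /greetUser?name=Ada (the name is arbitrary)
    # returns "Hey Ada, what's up?".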
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(WelcomePage(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(WelcomePage(), config=tutconf)
| mit | 4,429,220,806,590,909,000 | -4,113,467,734,930,341,400 | 31.433962 | 75 | 0.596859 | false |
oscaro/django | django/utils/feedgenerator.py | 78 | 16377 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import force_text, iri_to_uri
from django.utils import datetime_safe
from django.utils import six
from django.utils.six import StringIO
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.timezone import is_aware
def rfc2822_date(date):
# We can't use strftime() because it produces locale-dependent results, so
# we have to map english month and day names manually
months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
# We do this ourselves to be timezone aware, email.Utils is not tz aware.
dow = days[date.weekday()]
month = months[date.month - 1]
time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
if is_aware(date):
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d%02d' % (hour, minute)
else:
return time_str + '-0000'
def rfc3339_date(date):
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
if is_aware(date):
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d:%02d' % (hour, minute)
else:
return time_str + 'Z'
def get_tag_uri(url, date):
"""
Creates a TagURI.
See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
"""
bits = urlparse(url)
d = ''
if date is not None:
d = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
return 'tag:%s%s:%s/%s' % (bits.hostname, d, bits.path, bits.fragment)
class SyndicationFeed(object):
"Base class for all syndication feeds. Subclasses should provide write()"
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [force_text(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
self.feed = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'language': to_unicode(language),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'subtitle': to_unicode(subtitle),
'categories': categories or (),
'feed_url': iri_to_uri(feed_url),
'feed_copyright': to_unicode(feed_copyright),
'id': feed_guid or link,
'ttl': ttl,
}
self.feed.update(kwargs)
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, unique_id_is_permalink=None, enclosure=None,
categories=(), item_copyright=None, ttl=None, updateddate=None, **kwargs):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate and updateddate, which are datetime.datetime
objects, and enclosure, which is an instance of the Enclosure class.
"""
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [to_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
item = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'updateddate': updateddate,
'comments': to_unicode(comments),
'unique_id': to_unicode(unique_id),
'unique_id_is_permalink': unique_id_is_permalink,
'enclosure': enclosure,
'categories': categories or (),
'item_copyright': to_unicode(item_copyright),
'ttl': ttl,
}
item.update(kwargs)
self.items.append(item)
def num_items(self):
return len(self.items)
def root_attributes(self):
"""
Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write().
"""
return {}
def add_root_elements(self, handler):
"""
Add elements in the root (i.e. feed/channel) element. Called
from write().
"""
pass
def item_attributes(self, item):
"""
Return extra attributes to place on each item (i.e. item/entry) element.
"""
return {}
def add_item_elements(self, handler, item):
"""
Add elements on each item (i.e. item/entry) element.
"""
pass
def write(self, outfile, encoding):
"""
Outputs the feed in the given encoding to outfile, which is a file-like
object. Subclasses should override this.
"""
raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method')
def writeString(self, encoding):
"""
Returns the feed in the given encoding as a string.
"""
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
"""
Returns the latest item's pubdate or updateddate. If no items
have either of these attributes this returns the current date/time.
"""
latest_date = None
date_keys = ('updateddate', 'pubdate')
for item in self.items:
for date_key in date_keys:
item_date = item.get(date_key)
if item_date:
if latest_date is None or item_date > latest_date:
latest_date = item_date
return latest_date or datetime.datetime.now()
class Enclosure(object):
"Represents an RSS enclosure"
def __init__(self, url, length, mime_type):
"All args are expected to be Python Unicode objects"
self.length, self.mime_type = length, mime_type
self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
mime_type = 'application/rss+xml; charset=utf-8'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement("rss", self.rss_attributes())
handler.startElement("channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement("rss")
def rss_attributes(self):
return {"version": self._version,
"xmlns:atom": "http://www.w3.org/2005/Atom"}
def write_items(self, handler):
for item in self.items:
handler.startElement('item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("item")
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", self.feed['link'])
handler.addQuickElement("description", self.feed['description'])
if self.feed['feed_url'] is not None:
handler.addQuickElement("atom:link", None,
{"rel": "self", "href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement("language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement("category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("copyright", self.feed['feed_copyright'])
handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
if self.feed['ttl'] is not None:
handler.addQuickElement("ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement("channel")
class RssUserland091Feed(RssFeed):
_version = "0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = "2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement("author", "%s (%s)" %
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement("author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement("dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"})
if item['pubdate'] is not None:
handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
if item['comments'] is not None:
handler.addQuickElement("comments", item['comments'])
if item['unique_id'] is not None:
guid_attrs = {}
if isinstance(item.get('unique_id_is_permalink'), bool):
guid_attrs['isPermaLink'] = str(
item['unique_id_is_permalink']).lower()
handler.addQuickElement("guid", item['unique_id'], guid_attrs)
if item['ttl'] is not None:
handler.addQuickElement("ttl", item['ttl'])
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("enclosure", '',
{"url": item['enclosure'].url, "length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
mime_type = 'application/atom+xml; charset=utf-8'
ns = "http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement("feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {"xmlns": self.ns, "xml:lang": self.feed['language']}
else:
return {"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
handler.addQuickElement("id", self.feed['id'])
handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
if self.feed['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement("email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement("uri", self.feed['author_link'])
handler.endElement("author")
if self.feed['subtitle'] is not None:
handler.addQuickElement("subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement("category", "", {"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement("entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("entry")
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
if item['pubdate'] is not None:
handler.addQuickElement('published', rfc3339_date(item['pubdate']))
if item['updateddate'] is not None:
handler.addQuickElement('updated', rfc3339_date(item['updateddate']))
# Author information.
if item['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement("email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement("uri", item['author_link'])
handler.endElement("author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement("id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement("summary", item['description'], {"type": "html"})
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("link", '',
{"rel": "enclosure",
"href": item['enclosure'].url,
"length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", "", {"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement("rights", item['item_copyright'])
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
| bsd-3-clause | -8,376,584,808,451,384,000 | -5,411,937,145,992,665,000 | 38.653753 | 120 | 0.598767 | false |
Crypt0s/Ramen | fs_libs/ftputil/build/lib/ftputil/path.py | 2 | 7681 | # Copyright (C) 2003-2013, Stefan Schwarzer <[email protected]>
# See the file LICENSE for licensing terms.
"""
ftputil.path - simulate `os.path` for FTP servers
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import posixpath
import stat
import ftputil.compat
import ftputil.error
import ftputil.tool
# The `_Path` class shouldn't be used directly by clients of the
# ftputil library.
__all__ = []
class _Path(object):
"""
Support class resembling `os.path`, accessible from the `FTPHost`
object, e. g. as `FTPHost().path.abspath(path)`.
Hint: substitute `os` with the `FTPHost` object.
"""
# `_Path` needs to provide all methods of `os.path`.
# pylint: disable=too-many-instance-attributes
def __init__(self, host):
self._host = host
# Delegate these to the `posixpath` module.
# pylint: disable=invalid-name
pp = posixpath
self.dirname = pp.dirname
self.basename = pp.basename
self.isabs = pp.isabs
self.commonprefix = pp.commonprefix
self.split = pp.split
self.splitdrive = pp.splitdrive
self.splitext = pp.splitext
self.normcase = pp.normcase
self.normpath = pp.normpath
def abspath(self, path):
"""Return an absolute path."""
original_path = path
path = ftputil.tool.as_unicode(path)
if not self.isabs(path):
path = self.join(self._host.getcwd(), path)
return ftputil.tool.same_string_type_as(original_path,
self.normpath(path))
def exists(self, path):
"""Return true if the path exists."""
try:
lstat_result = self._host.lstat(
path, _exception_for_missing_path=False)
return lstat_result is not None
except ftputil.error.RootDirError:
return True
def getmtime(self, path):
"""
Return the timestamp for the last modification for `path`
as a float.
This will raise `PermanentError` if the path doesn't exist,
        but may raise other exceptions depending on the state of the
server (e. g. timeout).
"""
return self._host.stat(path).st_mtime
def getsize(self, path):
"""
Return the size of the `path` item as an integer.
This will raise `PermanentError` if the path doesn't exist,
        but may raise other exceptions depending on the state of the
server (e. g. timeout).
"""
return self._host.stat(path).st_size
@staticmethod
def join(*paths):
"""
Join the path component from `paths` and return the joined
path.
All of these paths must be either unicode strings or byte
strings. If not, `join` raises a `TypeError`.
"""
# These checks are implicitly done by Python 3, but not by
# Python 2.
all_paths_are_unicode = all(
(isinstance(path, ftputil.compat.unicode_type)
for path in paths))
all_paths_are_bytes = all(
(isinstance(path, ftputil.compat.bytes_type)
for path in paths))
if all_paths_are_unicode or all_paths_are_bytes:
return posixpath.join(*paths)
else:
# Python 3 raises this exception for mixed strings
# in `os.path.join`, so also use this exception.
raise TypeError(
"can't mix unicode strings and bytes in path components")
# Check whether a path is a regular file/dir/link. For the first
# two cases follow links (like in `os.path`).
#
# Implementation note: The previous implementations simply called
# `stat` or `lstat` and returned `False` if they ended with
# raising a `PermanentError`. That exception usually used to
# signal a missing path. This approach has the problem, however,
# that exceptions caused by code earlier in `lstat` are obscured
# by the exception handling in `isfile`, `isdir` and `islink`.
def isfile(self, path):
"""
Return true if the `path` exists and corresponds to a regular
file (no link).
A non-existing path does _not_ cause a `PermanentError`.
"""
path = ftputil.tool.as_unicode(path)
# Workaround if we can't go up from the current directory
if path == self._host.getcwd():
return False
try:
stat_result = self._host.stat(
path, _exception_for_missing_path=False)
if stat_result is None:
return False
else:
return stat.S_ISREG(stat_result.st_mode)
except ftputil.error.RootDirError:
return False
def isdir(self, path):
"""
Return true if the `path` exists and corresponds to a
directory (no link).
A non-existing path does _not_ cause a `PermanentError`.
"""
path = ftputil.tool.as_unicode(path)
# Workaround if we can't go up from the current directory
if path == self._host.getcwd():
return True
try:
stat_result = self._host.stat(
path, _exception_for_missing_path=False)
if stat_result is None:
return False
else:
return stat.S_ISDIR(stat_result.st_mode)
except ftputil.error.RootDirError:
return True
def islink(self, path):
"""
Return true if the `path` exists and is a link.
A non-existing path does _not_ cause a `PermanentError`.
"""
path = ftputil.tool.as_unicode(path)
try:
lstat_result = self._host.lstat(
path, _exception_for_missing_path=False)
if lstat_result is None:
return False
else:
return stat.S_ISLNK(lstat_result.st_mode)
except ftputil.error.RootDirError:
return False
def walk(self, top, func, arg):
"""
Directory tree walk with callback function.
For each directory in the directory tree rooted at top
(including top itself, but excluding "." and ".."), call
func(arg, dirname, fnames). dirname is the name of the
directory, and fnames a list of the names of the files and
subdirectories in dirname (excluding "." and ".."). func may
modify the fnames list in-place (e.g. via del or slice
assignment), and walk will only recurse into the
subdirectories whose names remain in fnames; this can be used
to implement a filter, or to impose a specific order of
visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used,
e.g., to pass a filename pattern, or a mutable object designed
to accumulate statistics. Passing None for arg is common.
"""
top = ftputil.tool.as_unicode(top)
# This code (and the above documentation) is taken from
# `posixpath.py`, with slight modifications.
try:
names = self._host.listdir(top)
except OSError:
return
func(arg, top, names)
for name in names:
name = self.join(top, name)
try:
stat_result = self._host.lstat(name)
except OSError:
continue
if stat.S_ISDIR(stat_result[stat.ST_MODE]):
self.walk(name, func, arg)
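# Illustrative sketch of a `walk` callback; `host` stands for an already
# connected `FTPHost` instance (hypothetical) and the remote path is made up.
#
#   def visit(arg, dirname, names):
#       print dirname, len(names)
#
#   host.path.walk('/pub', visit, None)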
| gpl-3.0 | -6,743,622,633,167,957,000 | 7,263,660,858,929,871,000 | 34.560185 | 77 | 0.586382 | false |
netscaler/neutron | neutron/tests/unit/linuxbridge/test_defaults.py | 2 | 1671 | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from neutron.plugins.linuxbridge.common import config # noqa
from neutron.tests import base
class ConfigurationTest(base.BaseTestCase):
def test_defaults(self):
self.assertEqual(2,
cfg.CONF.AGENT.polling_interval)
self.assertEqual(True,
cfg.CONF.AGENT.rpc_support_old_agents)
self.assertEqual('sudo',
cfg.CONF.AGENT.root_helper)
self.assertEqual('local',
cfg.CONF.VLANS.tenant_network_type)
self.assertEqual(0,
len(cfg.CONF.VLANS.network_vlan_ranges))
self.assertEqual(0,
len(cfg.CONF.LINUX_BRIDGE.
physical_interface_mappings))
self.assertEqual(False, cfg.CONF.VXLAN.enable_vxlan)
self.assertEqual(config.DEFAULT_VXLAN_GROUP,
cfg.CONF.VXLAN.vxlan_group)
self.assertEqual(0, len(cfg.CONF.VXLAN.local_ip))
self.assertEqual(False, cfg.CONF.VXLAN.l2_population)
| apache-2.0 | 7,326,144,855,874,526,000 | -7,022,287,046,107,612,000 | 38.785714 | 69 | 0.648713 | false |
weidnerm/pi-ws2812 | version.py | 10 | 2918 | #
# SConstruct
#
# Copyright (c) 2016 Jeremy Garff <jer @ jers.net>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
# 3. Neither the name of the owner nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import SCons, os
def version_flags(env):
if not env['V']:
env['VERSIONCOMSTR'] = 'Version ${TARGET}'
def version_builders(env):
def generate_version_header(target, source, env):
headername = os.path.basename(target[0].abspath)
headerdef = headername.replace('.', '_').replace('-', '_').upper()
try:
version = open(source[0].abspath, 'r').readline().strip().split('.')
except:
version = [ '0', '0', '0' ]
f = open(headername, 'w')
f.write('/* Auto Generated Header built by version.py - DO NOT MODIFY */\n')
f.write('\n')
f.write('#ifndef __%s__\n' % (headerdef))
f.write('#define __%s__\n' % (headerdef))
f.write('\n')
f.write('#define VERSION_MAJOR %s\n' % version[0])
f.write('#define VERSION_MINOR %s\n' % version[1])
f.write('#define VERSION_MICRO %s\n' % version[2])
f.write('\n')
f.write('#endif /* __%s__ */\n' % (headerdef))
f.close()
env.Append(BUILDERS = {
'Version' : SCons.Builder.Builder(
action = SCons.Action.Action(generate_version_header, '${VERSIONCOMSTR}'),
suffix = '.h',
),
})
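# Hypothetical usage from a SConscript once this tool is loaded; the target
# and source file names below are only examples.
#
#   env.Version('version.h', 'VERSION')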
def exists(env):
return 1
def generate(env, **kwargs):
[f(env) for f in (version_flags, version_builders)]
| bsd-2-clause | -3,696,392,388,306,374,700 | 8,037,434,838,352,587,000 | 40.098592 | 99 | 0.659698 | false |
xupit3r/askpgh | askbot/middleware/locale.py | 13 | 1027 | "Taken from django.middleware.locale: this is the locale selecting middleware that will look at accept headers"
from django.utils.cache import patch_vary_headers
from django.utils import translation
from askbot.conf import settings
class LocaleMiddleware(object):
"""
This is a very simple middleware that parses a request
and decides what translation object to install in the current
thread context. This allows pages to be dynamically
translated to the language the user desires (if the language
is available, of course).
"""
def process_request(self, request):
language = settings.ASKBOT_LANGUAGE
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
def process_response(self, request, response):
patch_vary_headers(response, ('Accept-Language',))
if 'Content-Language' not in response:
response['Content-Language'] = translation.get_language()
#translation.deactivate()
return response
| gpl-3.0 | -8,204,267,663,317,143,000 | 6,145,357,824,383,059,000 | 38.5 | 111 | 0.721519 | false |
beomyeol/models | street/python/errorcounter_test.py | 22 | 4913 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for errorcounter."""
import tensorflow as tf
import errorcounter as ec
class ErrorcounterTest(tf.test.TestCase):
def testComputeErrorRate(self):
"""Tests that the percent calculation works as expected.
"""
rate = ec.ComputeErrorRate(error_count=0, truth_count=0)
self.assertEqual(rate, 100.0)
rate = ec.ComputeErrorRate(error_count=1, truth_count=0)
self.assertEqual(rate, 100.0)
rate = ec.ComputeErrorRate(error_count=10, truth_count=1)
self.assertEqual(rate, 100.0)
rate = ec.ComputeErrorRate(error_count=0, truth_count=1)
self.assertEqual(rate, 0.0)
rate = ec.ComputeErrorRate(error_count=3, truth_count=12)
self.assertEqual(rate, 25.0)
def testCountErrors(self):
"""Tests that the error counter works as expected.
"""
truth_str = 'farm barn'
counts = ec.CountErrors(ocr_text=truth_str, truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=0, fp=0, truth_count=9, test_count=9))
# With a period on the end, we get a char error.
dot_str = 'farm barn.'
counts = ec.CountErrors(ocr_text=dot_str, truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=0, fp=1, truth_count=9, test_count=10))
counts = ec.CountErrors(ocr_text=truth_str, truth_text=dot_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=1, fp=0, truth_count=10, test_count=9))
# Space is just another char.
no_space = 'farmbarn'
counts = ec.CountErrors(ocr_text=no_space, truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=1, fp=0, truth_count=9, test_count=8))
counts = ec.CountErrors(ocr_text=truth_str, truth_text=no_space)
self.assertEqual(
counts, ec.ErrorCounts(
fn=0, fp=1, truth_count=8, test_count=9))
# Lose them all.
counts = ec.CountErrors(ocr_text='', truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=9, fp=0, truth_count=9, test_count=0))
counts = ec.CountErrors(ocr_text=truth_str, truth_text='')
self.assertEqual(
counts, ec.ErrorCounts(
fn=0, fp=9, truth_count=0, test_count=9))
def testCountWordErrors(self):
"""Tests that the error counter works as expected.
"""
truth_str = 'farm barn'
counts = ec.CountWordErrors(ocr_text=truth_str, truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=0, fp=0, truth_count=2, test_count=2))
# With a period on the end, we get a word error.
dot_str = 'farm barn.'
counts = ec.CountWordErrors(ocr_text=dot_str, truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=1, fp=1, truth_count=2, test_count=2))
counts = ec.CountWordErrors(ocr_text=truth_str, truth_text=dot_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=1, fp=1, truth_count=2, test_count=2))
# Space is special.
no_space = 'farmbarn'
counts = ec.CountWordErrors(ocr_text=no_space, truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=2, fp=1, truth_count=2, test_count=1))
counts = ec.CountWordErrors(ocr_text=truth_str, truth_text=no_space)
self.assertEqual(
counts, ec.ErrorCounts(
fn=1, fp=2, truth_count=1, test_count=2))
# Lose them all.
counts = ec.CountWordErrors(ocr_text='', truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=2, fp=0, truth_count=2, test_count=0))
counts = ec.CountWordErrors(ocr_text=truth_str, truth_text='')
self.assertEqual(
counts, ec.ErrorCounts(
fn=0, fp=2, truth_count=0, test_count=2))
# With a space in ba rn, there is an extra add.
sp_str = 'farm ba rn'
counts = ec.CountWordErrors(ocr_text=sp_str, truth_text=truth_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=1, fp=2, truth_count=2, test_count=3))
counts = ec.CountWordErrors(ocr_text=truth_str, truth_text=sp_str)
self.assertEqual(
counts, ec.ErrorCounts(
fn=2, fp=1, truth_count=3, test_count=2))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 5,601,480,852,090,105,000 | 6,612,479,931,778,479,000 | 38.620968 | 80 | 0.642581 | false |
tensorflow/datasets | tensorflow_datasets/text/tiny_shakespeare_test.py | 1 | 1291 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tiny Shakespeare dataset."""
from tensorflow_datasets import testing
from tensorflow_datasets.text import tiny_shakespeare
class TinyShakespeareTest(testing.DatasetBuilderTestCase):
DATASET_CLASS = tiny_shakespeare.TinyShakespeare
SPLITS = {
"train": 1,
"validation": 1,
"test": 1,
}
# If you are calling `download/download_and_extract` with a dict, like:
# dl_manager.download({'some_key': 'http://a.org/out.txt', ...})
# then the tests needs to provide the fake output paths relative to the
# fake data directory
# DL_EXTRACT_RESULT = {'some_key': 'output_file1.txt', ...}
if __name__ == "__main__":
testing.test_main()
| apache-2.0 | 3,335,802,302,169,134,600 | 8,710,184,503,952,274,000 | 32.973684 | 74 | 0.719597 | false |
aboyett/blockdiag | src/blockdiag/plugins/autoclass.py | 1 | 1130 | # -*- coding: utf-8 -*-
# Copyright 2011 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from blockdiag import plugins
class AutoClass(plugins.NodeHandler):
def on_created(self, node):
if node.id is None:
return
for name, klass in self.diagram.classes.items():
pattern = "_%s$" % re.escape(name)
if re.search(pattern, node.id):
node.label = re.sub(pattern, '', node.id)
node.set_attributes(klass.attrs)
def setup(self, diagram, **kwargs):
plugins.install_node_handler(AutoClass(diagram, **kwargs))
| apache-2.0 | 1,666,069,896,858,931,000 | -6,582,155,910,155,898,000 | 32.235294 | 75 | 0.676106 | false |
thelac/crazyflie | win32install/generate_nsis.py | 18 | 1224 | import jinja2
import os
from subprocess import Popen, PIPE
DIST_PATH = "..\dist"
# Get list of files and directory to install/uninstall
INSTALL_FILES = []
INSTALL_DIRS = []
os.chdir(os.path.join(os.path.dirname(__file__), DIST_PATH))
for root, dirs, files in os.walk("."):
for f in files:
INSTALL_FILES += [os.path.join(root[2:], f)]
INSTALL_DIRS += [root[2:]]
print "Found {} files in {} folders to install.".format(len(INSTALL_FILES),
len(INSTALL_DIRS))
# Get git tag or VERSION
try:
process = Popen(["git", "describe", "--tags"], stdout=PIPE)
(output, err) = process.communicate()
exit_code = process.wait()
except OSError:
raise Exception("Cannot run git: Git is required to generate installer!")
VERSION = output.strip()
print "Cfclient version {}".format(VERSION)
os.chdir(os.path.dirname(__file__))
with open("cfclient.nsi.tmpl", "r") as template_file:
TEMPLATE = template_file.read()
TMPL = jinja2.Template(TEMPLATE)
with open("cfclient.nsi", "w") as out_file:
out_file.write(TMPL.render(files=INSTALL_FILES,
dirs=INSTALL_DIRS,
version=VERSION))
| gpl-2.0 | -8,954,391,235,543,288,000 | -3,788,999,584,051,742,700 | 27.465116 | 77 | 0.614379 | false |
waynenilsen/statsmodels | statsmodels/examples/ex_kde_confint.py | 34 | 1973 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 11:02:59 2013
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.nonparametric.api as npar
from statsmodels.sandbox.nonparametric import kernels
from statsmodels.distributions.mixture_rvs import mixture_rvs
# example from test_kde.py mixture of two normal distributions
np.random.seed(12345)
x = mixture_rvs([.25,.75], size=200, dist=[stats.norm, stats.norm],
kwargs = (dict(loc=-1, scale=.5),dict(loc=1, scale=.5)))
x.sort() # not needed
kde = npar.KDEUnivariate(x)
kde.fit('gau')
ci = kde.kernel.density_confint(kde.density, len(x))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.hist(x, bins=15, normed=True, alpha=0.25)
ax.plot(kde.support, kde.density, lw=2, color='red')
ax.fill_between(kde.support, ci[:,0], ci[:,1],
                color='grey', alpha=0.7)
ax.set_title('Kernel Density Gaussian (bw = %4.2f)' % kde.bw)
# use all kernels directly
x_grid = np.linspace(np.min(x), np.max(x), 51)
x_grid = np.linspace(-3, 3, 51)
kernel_names = ['Biweight', 'Cosine', 'Epanechnikov', 'Gaussian',
'Triangular', 'Triweight', #'Uniform',
]
fig = plt.figure()
for ii, kn in enumerate(kernel_names):
ax = fig.add_subplot(2, 3, ii+1) # without uniform
ax.hist(x, bins=10, normed=True, alpha=0.25)
#reduce bandwidth for Gaussian and Uniform which are to large in example
if kn in ['Gaussian', 'Uniform']:
args = (0.5,)
else:
args = ()
kernel = getattr(kernels, kn)(*args)
kde_grid = [kernel.density(x, xi) for xi in x_grid]
confint_grid = kernel.density_confint(kde_grid, len(x))
ax.plot(x_grid, kde_grid, lw=2, color='red', label=kn)
ax.fill_between(x_grid, confint_grid[:,0], confint_grid[:,1],
color='grey', alpha='0.7')
ax.legend(loc='upper left')
plt.show()
| bsd-3-clause | -7,125,313,473,795,277,000 | 7,017,287,100,322,657,000 | 29.353846 | 76 | 0.641662 | false |
uni2u/neutron | neutron/tests/unit/test_db_migration.py | 14 | 8272 | # Copyright 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from neutron.db import migration
from neutron.db.migration import cli
from neutron.tests import base
class TestDbMigration(base.BaseTestCase):
def setUp(self):
super(TestDbMigration, self).setUp()
mock.patch('alembic.op.get_bind').start()
self.mock_alembic_is_offline = mock.patch(
'alembic.context.is_offline_mode', return_value=False).start()
self.mock_alembic_is_offline.return_value = False
self.mock_sa_inspector = mock.patch(
'sqlalchemy.engine.reflection.Inspector').start()
def _prepare_mocked_sqlalchemy_inspector(self):
mock_inspector = mock.MagicMock()
mock_inspector.get_table_names.return_value = ['foo', 'bar']
mock_inspector.get_columns.return_value = [{'name': 'foo_column'},
{'name': 'bar_column'}]
self.mock_sa_inspector.from_engine.return_value = mock_inspector
def test_schema_has_table(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertTrue(migration.schema_has_table('foo'))
def test_schema_has_table_raises_if_offline(self):
self.mock_alembic_is_offline.return_value = True
self.assertRaises(RuntimeError, migration.schema_has_table, 'foo')
def test_schema_has_column_missing_table(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertFalse(migration.schema_has_column('meh', 'meh'))
def test_schema_has_column(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertTrue(migration.schema_has_column('foo', 'foo_column'))
def test_schema_has_column_raises_if_offline(self):
self.mock_alembic_is_offline.return_value = True
self.assertRaises(RuntimeError, migration.schema_has_column,
'foo', 'foo_col')
def test_schema_has_column_missing_column(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertFalse(migration.schema_has_column(
'foo', column_name='meh'))
class TestCli(base.BaseTestCase):
def setUp(self):
super(TestCli, self).setUp()
self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command')
self.do_alembic_cmd = self.do_alembic_cmd_p.start()
self.mock_alembic_err = mock.patch('alembic.util.err').start()
self.mock_alembic_err.side_effect = SystemExit
def _main_test_helper(self, argv, func_name, exp_args=(), exp_kwargs={}):
with mock.patch.object(sys, 'argv', argv):
cli.main()
self.do_alembic_cmd.assert_has_calls(
[mock.call(mock.ANY, func_name, *exp_args, **exp_kwargs)]
)
def test_stamp(self):
self._main_test_helper(
['prog', 'stamp', 'foo'],
'stamp',
('foo',),
{'sql': False}
)
self._main_test_helper(
['prog', 'stamp', 'foo', '--sql'],
'stamp',
('foo',),
{'sql': True}
)
def test_current(self):
self._main_test_helper(['prog', 'current'], 'current')
def test_history(self):
self._main_test_helper(['prog', 'history'], 'history')
def test_check_migration(self):
with mock.patch.object(cli, 'validate_head_file') as validate:
self._main_test_helper(['prog', 'check_migration'], 'branches')
validate.assert_called_once_with(mock.ANY)
def test_database_sync_revision(self):
with mock.patch.object(cli, 'update_head_file') as update:
self._main_test_helper(
['prog', 'revision', '--autogenerate', '-m', 'message'],
'revision',
(),
{'message': 'message', 'sql': False, 'autogenerate': True}
)
update.assert_called_once_with(mock.ANY)
update.reset_mock()
self._main_test_helper(
['prog', 'revision', '--sql', '-m', 'message'],
'revision',
(),
{'message': 'message', 'sql': True, 'autogenerate': False}
)
update.assert_called_once_with(mock.ANY)
def test_upgrade(self):
self._main_test_helper(
['prog', 'upgrade', '--sql', 'head'],
'upgrade',
('head',),
{'sql': True}
)
self._main_test_helper(
['prog', 'upgrade', '--delta', '3'],
'upgrade',
('+3',),
{'sql': False}
)
def test_downgrade(self):
self._main_test_helper(
['prog', 'downgrade', '--sql', 'folsom'],
'downgrade',
('folsom',),
{'sql': True}
)
self._main_test_helper(
['prog', 'downgrade', '--delta', '2'],
'downgrade',
('-2',),
{'sql': False}
)
def _test_validate_head_file_helper(self, heads, file_content=None):
with mock.patch('alembic.script.ScriptDirectory.from_config') as fc:
fc.return_value.get_heads.return_value = heads
fc.return_value.get_current_head.return_value = heads[0]
with mock.patch('__builtin__.open') as mock_open:
mock_open.return_value.__enter__ = lambda s: s
mock_open.return_value.__exit__ = mock.Mock()
mock_open.return_value.read.return_value = file_content
with mock.patch('os.path.isfile') as is_file:
is_file.return_value = file_content is not None
if file_content in heads:
cli.validate_head_file(mock.sentinel.config)
else:
self.assertRaises(
SystemExit,
cli.validate_head_file,
mock.sentinel.config
)
self.mock_alembic_err.assert_called_once_with(mock.ANY)
fc.assert_called_once_with(mock.sentinel.config)
def test_validate_head_file_multiple_heads(self):
self._test_validate_head_file_helper(['a', 'b'])
def test_validate_head_file_missing_file(self):
self._test_validate_head_file_helper(['a'])
def test_validate_head_file_wrong_contents(self):
self._test_validate_head_file_helper(['a'], 'b')
def test_validate_head_success(self):
self._test_validate_head_file_helper(['a'], 'a')
def test_update_head_file_multiple_heads(self):
with mock.patch('alembic.script.ScriptDirectory.from_config') as fc:
fc.return_value.get_heads.return_value = ['a', 'b']
self.assertRaises(
SystemExit,
cli.update_head_file,
mock.sentinel.config
)
self.mock_alembic_err.assert_called_once_with(mock.ANY)
fc.assert_called_once_with(mock.sentinel.config)
def test_update_head_file_success(self):
with mock.patch('alembic.script.ScriptDirectory.from_config') as fc:
fc.return_value.get_heads.return_value = ['a']
fc.return_value.get_current_head.return_value = 'a'
with mock.patch('__builtin__.open') as mock_open:
mock_open.return_value.__enter__ = lambda s: s
mock_open.return_value.__exit__ = mock.Mock()
cli.update_head_file(mock.sentinel.config)
mock_open.return_value.write.assert_called_once_with('a')
fc.assert_called_once_with(mock.sentinel.config)
| apache-2.0 | -3,373,337,318,010,535,400 | 4,418,048,228,824,704,500 | 37.296296 | 79 | 0.570841 | false |
da-anda/xbmc | lib/libUPnP/Platinum/Build/Tools/SCons/gcc-generic.py | 283 | 1317 | import os
def generate(env, gcc_cross_prefix=None, gcc_strict=True, gcc_stop_on_warning=None, gcc_extra_options=''):
if gcc_stop_on_warning == None: gcc_stop_on_warning = env['stop_on_warning']
### compiler flags
if gcc_strict:
env.AppendUnique(CCFLAGS = ['-pedantic', '-Wall', '-W', '-Wundef', '-Wno-long-long'])
env.AppendUnique(CFLAGS = ['-Wmissing-prototypes', '-Wmissing-declarations'])
else:
env.AppendUnique(CCFLAGS = ['-Wall'])
compiler_defines = ['-D_REENTRANT']
env.AppendUnique(CCFLAGS = compiler_defines)
env.AppendUnique(CPPFLAGS = compiler_defines)
if env['build_config'] == 'Debug':
env.AppendUnique(CCFLAGS = '-g')
else:
env.AppendUnique(CCFLAGS = '-O3')
if gcc_stop_on_warning:
env.AppendUnique(CCFLAGS = ['-Werror'])
env['STRIP'] = 'strip'
if gcc_cross_prefix:
env['ENV']['PATH'] += os.environ['PATH']
env['AR'] = gcc_cross_prefix+'-ar'
env['RANLIB'] = gcc_cross_prefix+'-ranlib'
env['CC'] = gcc_cross_prefix+'-gcc ' + gcc_extra_options
env['CXX'] = gcc_cross_prefix+'-g++ ' + gcc_extra_options
env['LINK'] = gcc_cross_prefix+'-g++ ' + gcc_extra_options
env['STRIP'] = gcc_cross_prefix+'-strip'
| gpl-2.0 | -2,064,757,747,861,078,500 | 2,570,463,037,580,220,000 | 35.611111 | 106 | 0.580106 | false |
Pistachitos/Sick-Beard | lib/imdb/utils.py | 50 | 60601 | """
utils module (imdb package).
This module provides basic utilities for the imdb package.
Copyright 2004-2012 Davide Alberani <[email protected]>
2009 H. Turgut Uyar <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from __future__ import generators
import re
import string
import logging
from copy import copy, deepcopy
from time import strptime, strftime
from imdb import VERSION
from imdb import linguistics
from imdb._exceptions import IMDbParserError
# Logger for imdb.utils module.
_utils_logger = logging.getLogger('imdbpy.utils')
# The regular expression for the "long" year format of IMDb, like
# "(1998)" and "(1986/II)", where the optional roman number (that I call
# "imdbIndex" after the slash is used for movies with the same title
# and year of release.
# XXX: probably L, C, D and M are far too much! ;-)
re_year_index = re.compile(r'\(([0-9\?]{4}(/[IVXLCDM]+)?)\)')
re_extended_year_index = re.compile(r'\((TV episode|TV Series|TV mini-series|TV|Video|Video Game)? ?((?:[0-9\?]{4})(?:-[0-9\?]{4})?)(?:/([IVXLCDM]+)?)?\)')
re_remove_kind = re.compile(r'\((TV episode|TV Series|TV mini-series|TV|Video|Video Game)? ?')
# Match only the imdbIndex (for name strings).
re_index = re.compile(r'^\(([IVXLCDM]+)\)$')
# Match things inside parentheses.
re_parentheses = re.compile(r'(\(.*\))')
# Match the number of episodes.
re_episodes = re.compile('\s?\((\d+) episodes\)', re.I)
re_episode_info = re.compile(r'{\s*(.+?)?\s?(\([0-9\?]{4}-[0-9\?]{1,2}-[0-9\?]{1,2}\))?\s?(\(#[0-9]+\.[0-9]+\))?}')
# Common suffixes in surnames.
_sname_suffixes = ('de', 'la', 'der', 'den', 'del', 'y', 'da', 'van',
'e', 'von', 'the', 'di', 'du', 'el', 'al')
def canonicalName(name):
"""Return the given name in canonical "Surname, Name" format.
It assumes that name is in the 'Name Surname' format."""
# XXX: some statistics (as of 17 Apr 2008, over 2288622 names):
# - just a surname: 69476
# - single surname, single name: 2209656
# - composed surname, composed name: 9490
# - composed surname, single name: 67606
# (2: 59764, 3: 6862, 4: 728)
# - single surname, composed name: 242310
# (2: 229467, 3: 9901, 4: 2041, 5: 630)
# - Jr.: 8025
# Don't convert names already in the canonical format.
if name.find(', ') != -1: return name
if isinstance(name, unicode):
joiner = u'%s, %s'
sur_joiner = u'%s %s'
sur_space = u' %s'
space = u' '
else:
joiner = '%s, %s'
sur_joiner = '%s %s'
sur_space = ' %s'
space = ' '
sname = name.split(' ')
snl = len(sname)
if snl == 2:
# Just a name and a surname: how boring...
name = joiner % (sname[1], sname[0])
elif snl > 2:
lsname = [x.lower() for x in sname]
if snl == 3: _indexes = (0, snl-2)
else: _indexes = (0, snl-2, snl-3)
# Check for common surname prefixes at the beginning and near the end.
for index in _indexes:
if lsname[index] not in _sname_suffixes: continue
try:
# Build the surname.
surn = sur_joiner % (sname[index], sname[index+1])
del sname[index]
del sname[index]
try:
# Handle the "Jr." after the name.
if lsname[index+2].startswith('jr'):
surn += sur_space % sname[index]
del sname[index]
except (IndexError, ValueError):
pass
name = joiner % (surn, space.join(sname))
break
except ValueError:
continue
else:
name = joiner % (sname[-1], space.join(sname[:-1]))
return name
def normalizeName(name):
"""Return a name in the normal "Name Surname" format."""
if isinstance(name, unicode):
joiner = u'%s %s'
else:
joiner = '%s %s'
sname = name.split(', ')
if len(sname) == 2:
name = joiner % (sname[1], sname[0])
return name
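# Illustrative behaviour of the two helpers above; the name used here is
# only an example.
#
#   canonicalName(u'Jean-Claude Van Damme')  ->  u'Van Damme, Jean-Claude'
#   normalizeName(u'Van Damme, Jean-Claude') ->  u'Jean-Claude Van Damme'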
def analyze_name(name, canonical=None):
"""Return a dictionary with the name and the optional imdbIndex
keys, from the given string.
If canonical is None (default), the name is stored in its own style.
If canonical is True, the name is converted to canonical style.
If canonical is False, the name is converted to normal format.
raise an IMDbParserError exception if the name is not valid.
"""
original_n = name
name = name.strip()
res = {}
imdbIndex = ''
opi = name.rfind('(')
cpi = name.rfind(')')
# Strip notes (but not if the name starts with a parenthesis).
if opi not in (-1, 0) and cpi > opi:
if re_index.match(name[opi:cpi+1]):
imdbIndex = name[opi+1:cpi]
name = name[:opi].rstrip()
else:
# XXX: for the birth and death dates case like " (1926-2004)"
name = re_parentheses.sub('', name).strip()
if not name:
raise IMDbParserError('invalid name: "%s"' % original_n)
if canonical is not None:
if canonical:
name = canonicalName(name)
else:
name = normalizeName(name)
res['name'] = name
if imdbIndex:
res['imdbIndex'] = imdbIndex
return res
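# Illustrative example (made-up string, default arguments):
#
#   analyze_name(u'Sean Connery (I)')
#       ->  {'name': u'Sean Connery', 'imdbIndex': u'I'}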
def build_name(name_dict, canonical=None):
"""Given a dictionary that represents a "long" IMDb name,
return a string.
If canonical is None (default), the name is returned in the stored style.
If canonical is True, the name is converted to canonical style.
If canonical is False, the name is converted to normal format.
"""
name = name_dict.get('canonical name') or name_dict.get('name', '')
if not name: return ''
if canonical is not None:
if canonical:
name = canonicalName(name)
else:
name = normalizeName(name)
imdbIndex = name_dict.get('imdbIndex')
if imdbIndex:
name += ' (%s)' % imdbIndex
return name
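# Illustrative example, the inverse of the analyze_name case above:
#
#   build_name({'name': u'Connery, Sean', 'imdbIndex': u'I'}, canonical=False)
#       ->  u'Sean Connery (I)'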
# XXX: here only for backward compatibility. Find and remove any dependency.
_articles = linguistics.GENERIC_ARTICLES
_unicodeArticles = linguistics.toUnicode(_articles)
articlesDicts = linguistics.articlesDictsForLang(None)
spArticles = linguistics.spArticlesForLang(None)
def canonicalTitle(title, lang=None):
"""Return the title in the canonic format 'Movie Title, The';
beware that it doesn't handle long imdb titles, but only the
title portion, without year[/imdbIndex] or special markup.
The 'lang' argument can be used to specify the language of the title.
"""
isUnicode = isinstance(title, unicode)
articlesDicts = linguistics.articlesDictsForLang(lang)
try:
if title.split(', ')[-1].lower() in articlesDicts[isUnicode]:
return title
except IndexError:
pass
if isUnicode:
_format = u'%s, %s'
else:
_format = '%s, %s'
ltitle = title.lower()
spArticles = linguistics.spArticlesForLang(lang)
for article in spArticles[isUnicode]:
if ltitle.startswith(article):
lart = len(article)
title = _format % (title[lart:], title[:lart])
if article[-1] == ' ':
title = title[:-1]
break
## XXX: an attempt using a dictionary lookup.
##for artSeparator in (' ', "'", '-'):
## article = _articlesDict.get(ltitle.split(artSeparator)[0])
## if article is not None:
## lart = len(article)
## # check titles like "una", "I'm Mad" and "L'abbacchio".
## if title[lart:] == '' or (artSeparator != ' ' and
## title[lart:][1] != artSeparator): continue
## title = '%s, %s' % (title[lart:], title[:lart])
## if artSeparator == ' ': title = title[1:]
## break
return title
def normalizeTitle(title, lang=None):
"""Return the title in the normal "The Title" format;
beware that it doesn't handle long imdb titles, but only the
title portion, without year[/imdbIndex] or special markup.
The 'lang' argument can be used to specify the language of the title.
"""
isUnicode = isinstance(title, unicode)
stitle = title.split(', ')
articlesDicts = linguistics.articlesDictsForLang(lang)
if len(stitle) > 1 and stitle[-1].lower() in articlesDicts[isUnicode]:
sep = ' '
if stitle[-1][-1] in ("'", '-'):
sep = ''
if isUnicode:
_format = u'%s%s%s'
_joiner = u', '
else:
_format = '%s%s%s'
_joiner = ', '
title = _format % (stitle[-1], sep, _joiner.join(stitle[:-1]))
return title
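# Illustrative behaviour with the default (generic) articles list; the title
# is only an example.
#
#   canonicalTitle(u'The Untouchables')   ->  u'Untouchables, The'
#   normalizeTitle(u'Untouchables, The')  ->  u'The Untouchables'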
def _split_series_episode(title):
"""Return the series and the episode titles; if this is not a
series' episode, the returned series title is empty.
This function recognize two different styles:
"The Series" An Episode (2005)
"The Series" (2004) {An Episode (2005) (#season.episode)}"""
series_title = ''
episode_or_year = ''
if title[-1:] == '}':
# Title of the episode, as in the plain text data files.
begin_eps = title.rfind('{')
if begin_eps == -1: return '', ''
series_title = title[:begin_eps].rstrip()
# episode_or_year is returned with the {...}
episode_or_year = title[begin_eps:].strip()
if episode_or_year[:12] == '{SUSPENDED}}': return '', ''
# XXX: works only with tv series; it's still unclear whether
# IMDb will support episodes for tv mini series and tv movies...
elif title[0:1] == '"':
second_quot = title[1:].find('"') + 2
if second_quot != 1: # a second " was found.
episode_or_year = title[second_quot:].lstrip()
first_char = episode_or_year[0:1]
if not first_char: return '', ''
if first_char != '(':
# There is not a (year) but the title of the episode;
# that means this is an episode title, as returned by
# the web server.
series_title = title[:second_quot]
##elif episode_or_year[-1:] == '}':
## # Title of the episode, as in the plain text data files.
## begin_eps = episode_or_year.find('{')
## if begin_eps == -1: return series_title, episode_or_year
## series_title = title[:second_quot+begin_eps].rstrip()
## # episode_or_year is returned with the {...}
## episode_or_year = episode_or_year[begin_eps:]
return series_title, episode_or_year
def is_series_episode(title):
"""Return True if 'title' is an series episode."""
title = title.strip()
if _split_series_episode(title)[0]: return 1
return 0
def analyze_title(title, canonical=None, canonicalSeries=None,
canonicalEpisode=None, _emptyString=u''):
"""Analyze the given title and return a dictionary with the
"stripped" title, the kind of the show ("movie", "tv series", etc.),
the year of production and the optional imdbIndex (a roman number
used to distinguish between movies with the same title and year).
If canonical is None (default), the title is stored in its own style.
If canonical is True, the title is converted to canonical style.
If canonical is False, the title is converted to normal format.
raise an IMDbParserError exception if the title is not valid.
"""
# XXX: introduce the 'lang' argument?
if canonical is not None:
canonicalSeries = canonicalEpisode = canonical
original_t = title
result = {}
title = title.strip()
year = _emptyString
kind = _emptyString
imdbIndex = _emptyString
series_title, episode_or_year = _split_series_episode(title)
if series_title:
# It's an episode of a series.
series_d = analyze_title(series_title, canonical=canonicalSeries)
oad = sen = ep_year = _emptyString
# Plain text data files format.
if episode_or_year[0:1] == '{' and episode_or_year[-1:] == '}':
match = re_episode_info.findall(episode_or_year)
if match:
# Episode title, original air date and #season.episode
episode_or_year, oad, sen = match[0]
episode_or_year = episode_or_year.strip()
if not oad:
# No year, but the title is something like (2005-04-12)
if episode_or_year and episode_or_year[0] == '(' and \
episode_or_year[-1:] == ')' and \
episode_or_year[1:2] != '#':
oad = episode_or_year
if oad[1:5] and oad[5:6] == '-':
try:
ep_year = int(oad[1:5])
except (TypeError, ValueError):
pass
if not oad and not sen and episode_or_year.startswith('(#'):
sen = episode_or_year
elif episode_or_year.startswith('Episode dated'):
oad = episode_or_year[14:]
if oad[-4:].isdigit():
try:
ep_year = int(oad[-4:])
except (TypeError, ValueError):
pass
episode_d = analyze_title(episode_or_year, canonical=canonicalEpisode)
episode_d['kind'] = u'episode'
episode_d['episode of'] = series_d
if oad:
episode_d['original air date'] = oad[1:-1]
if ep_year and episode_d.get('year') is None:
episode_d['year'] = ep_year
if sen and sen[2:-1].find('.') != -1:
seas, epn = sen[2:-1].split('.')
if seas:
# Set season and episode.
try: seas = int(seas)
except: pass
try: epn = int(epn)
except: pass
episode_d['season'] = seas
if epn:
episode_d['episode'] = epn
return episode_d
# First of all, search for the kind of show.
# XXX: Number of entries at 17 Apr 2008:
# movie: 379,871
# episode: 483,832
# tv movie: 61,119
# tv series: 44,795
# video movie: 57,915
# tv mini series: 5,497
# video game: 5,490
# More up-to-date statistics: http://us.imdb.com/database_statistics
if title.endswith('(TV)'):
kind = u'tv movie'
title = title[:-4].rstrip()
elif title.endswith('(V)'):
kind = u'video movie'
title = title[:-3].rstrip()
elif title.endswith('(video)'):
kind = u'video movie'
title = title[:-7].rstrip()
elif title.endswith('(mini)'):
kind = u'tv mini series'
title = title[:-6].rstrip()
elif title.endswith('(VG)'):
kind = u'video game'
title = title[:-4].rstrip()
# Search for the year and the optional imdbIndex (a roman number).
yi = re_year_index.findall(title)
if not yi:
yi = re_extended_year_index.findall(title)
if yi:
yk, yiy, yii = yi[-1]
yi = [(yiy, yii)]
if yk == 'TV episode':
kind = u'episode'
elif yk == 'TV':
kind = u'tv movie'
elif yk == 'TV Series':
kind = u'tv series'
elif yk == 'Video':
kind = u'video movie'
elif yk == 'TV mini-series':
kind = u'tv mini series'
elif yk == 'Video Game':
kind = u'video game'
title = re_remove_kind.sub('(', title)
if yi:
last_yi = yi[-1]
year = last_yi[0]
if last_yi[1]:
imdbIndex = last_yi[1][1:]
year = year[:-len(imdbIndex)-1]
i = title.rfind('(%s)' % last_yi[0])
if i != -1:
title = title[:i-1].rstrip()
# This is a tv (mini) series: strip the '"' at the begin and at the end.
# XXX: strip('"') is not used for compatibility with Python 2.0.
if title and title[0] == title[-1] == '"':
if not kind:
kind = u'tv series'
title = title[1:-1].strip()
elif title.endswith('(TV series)'):
kind = u'tv series'
title = title[:-11].rstrip()
if not title:
raise IMDbParserError('invalid title: "%s"' % original_t)
if canonical is not None:
if canonical:
title = canonicalTitle(title)
else:
title = normalizeTitle(title)
# 'kind' is one in ('movie', 'episode', 'tv series', 'tv mini series',
# 'tv movie', 'video movie', 'video game')
result['title'] = title
result['kind'] = kind or u'movie'
if year and year != '????':
if '-' in year:
result['series years'] = year
year = year[:4]
try:
result['year'] = int(year)
except (TypeError, ValueError):
pass
if imdbIndex:
result['imdbIndex'] = imdbIndex
if isinstance(_emptyString, str):
result['kind'] = str(kind or 'movie')
return result
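# Illustrative examples (made-up strings, default arguments):
#
#   analyze_title(u'"The X-Files" (1993)')
#       ->  {'title': u'The X-Files', 'kind': u'tv series', 'year': 1993}
#   analyze_title(u'Manos: The Hands of Fate (1966)')
#       ->  {'title': u'Manos: The Hands of Fate', 'kind': u'movie', 'year': 1966}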
_web_format = '%d %B %Y'
_ptdf_format = '(%Y-%m-%d)'
def _convertTime(title, fromPTDFtoWEB=1, _emptyString=u''):
"""Convert a time expressed in the pain text data files, to
the 'Episode dated ...' format used on the web site; if
fromPTDFtoWEB is false, the inverted conversion is applied."""
try:
if fromPTDFtoWEB:
from_format = _ptdf_format
to_format = _web_format
else:
from_format = u'Episode dated %s' % _web_format
to_format = _ptdf_format
t = strptime(title, from_format)
title = strftime(to_format, t)
if fromPTDFtoWEB:
if title[0] == '0': title = title[1:]
title = u'Episode dated %s' % title
except ValueError:
pass
if isinstance(_emptyString, str):
try:
title = str(title)
except UnicodeDecodeError:
pass
return title
def build_title(title_dict, canonical=None, canonicalSeries=None,
canonicalEpisode=None, ptdf=0, lang=None, _doYear=1,
_emptyString=u''):
"""Given a dictionary that represents a "long" IMDb title,
return a string.
If canonical is None (default), the title is returned in the stored style.
If canonical is True, the title is converted to canonical style.
If canonical is False, the title is converted to normal format.
lang can be used to specify the language of the title.
If ptdf is true, the plain text data files format is used.
"""
if canonical is not None:
canonicalSeries = canonical
pre_title = _emptyString
kind = title_dict.get('kind')
episode_of = title_dict.get('episode of')
if kind == 'episode' and episode_of is not None:
# Works with both Movie instances and plain dictionaries.
doYear = 0
if ptdf:
doYear = 1
pre_title = build_title(episode_of, canonical=canonicalSeries,
ptdf=0, _doYear=doYear,
_emptyString=_emptyString)
ep_dict = {'title': title_dict.get('title', ''),
'imdbIndex': title_dict.get('imdbIndex')}
ep_title = ep_dict['title']
if not ptdf:
doYear = 1
ep_dict['year'] = title_dict.get('year', '????')
if ep_title[0:1] == '(' and ep_title[-1:] == ')' and \
ep_title[1:5].isdigit():
ep_dict['title'] = _convertTime(ep_title, fromPTDFtoWEB=1,
_emptyString=_emptyString)
else:
doYear = 0
if ep_title.startswith('Episode dated'):
ep_dict['title'] = _convertTime(ep_title, fromPTDFtoWEB=0,
_emptyString=_emptyString)
episode_title = build_title(ep_dict,
canonical=canonicalEpisode, ptdf=ptdf,
_doYear=doYear, _emptyString=_emptyString)
if ptdf:
oad = title_dict.get('original air date', _emptyString)
if len(oad) == 10 and oad[4] == '-' and oad[7] == '-' and \
episode_title.find(oad) == -1:
episode_title += ' (%s)' % oad
seas = title_dict.get('season')
if seas is not None:
episode_title += ' (#%s' % seas
episode = title_dict.get('episode')
if episode is not None:
episode_title += '.%s' % episode
episode_title += ')'
episode_title = '{%s}' % episode_title
return '%s %s' % (pre_title, episode_title)
title = title_dict.get('title', '')
if not title: return _emptyString
if canonical is not None:
if canonical:
title = canonicalTitle(title, lang=lang)
else:
title = normalizeTitle(title, lang=lang)
if pre_title:
title = '%s %s' % (pre_title, title)
if kind in (u'tv series', u'tv mini series'):
title = '"%s"' % title
if _doYear:
imdbIndex = title_dict.get('imdbIndex')
year = title_dict.get('year') or u'????'
if isinstance(_emptyString, str):
year = str(year)
title += ' (%s' % year
if imdbIndex:
title += '/%s' % imdbIndex
title += ')'
if kind:
if kind == 'tv movie':
title += ' (TV)'
elif kind == 'video movie':
title += ' (V)'
elif kind == 'tv mini series':
title += ' (mini)'
elif kind == 'video game':
title += ' (VG)'
return title
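# Illustrative example (made-up dictionary):
#
#   build_title({'title': u'The Untouchables', 'year': 1987, 'kind': u'movie'})
#       ->  u'The Untouchables (1987)'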
def split_company_name_notes(name):
"""Return two strings, the first representing the company name,
and the other representing the (optional) notes."""
name = name.strip()
notes = u''
if name.endswith(')'):
fpidx = name.find('(')
if fpidx != -1:
notes = name[fpidx:]
name = name[:fpidx].rstrip()
return name, notes
def analyze_company_name(name, stripNotes=False):
"""Return a dictionary with the name and the optional 'country'
keys, from the given string.
If stripNotes is true, tries to not consider optional notes.
raise an IMDbParserError exception if the name is not valid.
"""
if stripNotes:
name = split_company_name_notes(name)[0]
o_name = name
name = name.strip()
country = None
if name.endswith(']'):
idx = name.rfind('[')
if idx != -1:
country = name[idx:]
name = name[:idx].rstrip()
if not name:
raise IMDbParserError('invalid name: "%s"' % o_name)
result = {'name': name}
if country:
result['country'] = country
return result
def build_company_name(name_dict, _emptyString=u''):
"""Given a dictionary that represents a "long" IMDb company name,
return a string.
"""
name = name_dict.get('name')
if not name:
return _emptyString
country = name_dict.get('country')
if country is not None:
name += ' %s' % country
return name
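# Illustrative round trip (made-up company string):
#
#   analyze_company_name(u'Warner Bros. Pictures [us]')
#       ->  {'name': u'Warner Bros. Pictures', 'country': u'[us]'}
#   build_company_name({'name': u'Warner Bros. Pictures', 'country': u'[us]'})
#       ->  u'Warner Bros. Pictures [us]'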
class _LastC:
"""Size matters."""
def __cmp__(self, other):
if isinstance(other, self.__class__): return 0
return 1
_last = _LastC()
def cmpMovies(m1, m2):
"""Compare two movies by year, in reverse order; the imdbIndex is checked
for movies with the same year of production and title."""
# Sort tv series' episodes.
m1e = m1.get('episode of')
m2e = m2.get('episode of')
if m1e is not None and m2e is not None:
cmp_series = cmpMovies(m1e, m2e)
if cmp_series != 0:
return cmp_series
m1s = m1.get('season')
m2s = m2.get('season')
if m1s is not None and m2s is not None:
if m1s < m2s:
return 1
elif m1s > m2s:
return -1
m1p = m1.get('episode')
m2p = m2.get('episode')
if m1p < m2p:
return 1
elif m1p > m2p:
return -1
try:
if m1e is None: m1y = int(m1.get('year', 0))
else: m1y = int(m1e.get('year', 0))
except ValueError:
m1y = 0
try:
if m2e is None: m2y = int(m2.get('year', 0))
else: m2y = int(m2e.get('year', 0))
except ValueError:
m2y = 0
if m1y > m2y: return -1
if m1y < m2y: return 1
# Ok, these movies have the same production year...
#m1t = m1.get('canonical title', _last)
#m2t = m2.get('canonical title', _last)
# It should works also with normal dictionaries (returned from searches).
#if m1t is _last and m2t is _last:
m1t = m1.get('title', _last)
m2t = m2.get('title', _last)
if m1t < m2t: return -1
if m1t > m2t: return 1
# Ok, these movies have the same title...
m1i = m1.get('imdbIndex', _last)
m2i = m2.get('imdbIndex', _last)
if m1i > m2i: return -1
if m1i < m2i: return 1
m1id = getattr(m1, 'movieID', None)
    # Introduce this check even for other comparison functions?
    # XXX: is it safe to check without knowing the data access system?
# probably not a great idea. Check for 'kind', instead?
if m1id is not None:
m2id = getattr(m2, 'movieID', None)
if m1id > m2id: return -1
elif m1id < m2id: return 1
return 0
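# Typical use, assuming `movies` is a list of Movie instances (or of the
# dictionaries returned by searches): sort with the most recent year first.
#
#   movies.sort(cmpMovies)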
def cmpPeople(p1, p2):
"""Compare two people by billingPos, name and imdbIndex."""
p1b = getattr(p1, 'billingPos', None) or _last
p2b = getattr(p2, 'billingPos', None) or _last
if p1b > p2b: return 1
if p1b < p2b: return -1
p1n = p1.get('canonical name', _last)
p2n = p2.get('canonical name', _last)
if p1n is _last and p2n is _last:
p1n = p1.get('name', _last)
p2n = p2.get('name', _last)
if p1n > p2n: return 1
if p1n < p2n: return -1
p1i = p1.get('imdbIndex', _last)
p2i = p2.get('imdbIndex', _last)
if p1i > p2i: return 1
if p1i < p2i: return -1
return 0
def cmpCompanies(p1, p2):
"""Compare two companies."""
p1n = p1.get('long imdb name', _last)
p2n = p2.get('long imdb name', _last)
if p1n is _last and p2n is _last:
p1n = p1.get('name', _last)
p2n = p2.get('name', _last)
if p1n > p2n: return 1
if p1n < p2n: return -1
p1i = p1.get('country', _last)
p2i = p2.get('country', _last)
if p1i > p2i: return 1
if p1i < p2i: return -1
return 0
# References to titles, names and characters.
# XXX: find better regexp!
re_titleRef = re.compile(r'_(.+?(?: \([0-9\?]{4}(?:/[IVXLCDM]+)?\))?(?: \(mini\)| \(TV\)| \(V\)| \(VG\))?)_ \(qv\)')
# FIXME: doesn't match persons with ' in the name.
re_nameRef = re.compile(r"'([^']+?)' \(qv\)")
# XXX: good choice? Are there characters with # in the name?
re_characterRef = re.compile(r"#([^']+?)# \(qv\)")
# Functions used to filter the text strings.
def modNull(s, titlesRefs, namesRefs, charactersRefs):
"""Do nothing."""
return s
def modClearTitleRefs(s, titlesRefs, namesRefs, charactersRefs):
"""Remove titles references."""
return re_titleRef.sub(r'\1', s)
def modClearNameRefs(s, titlesRefs, namesRefs, charactersRefs):
"""Remove names references."""
return re_nameRef.sub(r'\1', s)
def modClearCharacterRefs(s, titlesRefs, namesRefs, charactersRefs):
"""Remove characters references"""
return re_characterRef.sub(r'\1', s)
def modClearRefs(s, titlesRefs, namesRefs, charactersRefs):
"""Remove titles, names and characters references."""
s = modClearTitleRefs(s, {}, {}, {})
s = modClearCharacterRefs(s, {}, {}, {})
return modClearNameRefs(s, {}, {}, {})
def modifyStrings(o, modFunct, titlesRefs, namesRefs, charactersRefs):
"""Modify a string (or string values in a dictionary or strings
in a list), using the provided modFunct function and titlesRefs
namesRefs and charactersRefs references dictionaries."""
# Notice that it doesn't go any deeper than the first two levels in a list.
if isinstance(o, (unicode, str)):
return modFunct(o, titlesRefs, namesRefs, charactersRefs)
elif isinstance(o, (list, tuple, dict)):
_stillorig = 1
if isinstance(o, (list, tuple)): keys = xrange(len(o))
else: keys = o.keys()
for i in keys:
v = o[i]
if isinstance(v, (unicode, str)):
if _stillorig:
o = copy(o)
_stillorig = 0
o[i] = modFunct(v, titlesRefs, namesRefs, charactersRefs)
elif isinstance(v, (list, tuple)):
modifyStrings(o[i], modFunct, titlesRefs, namesRefs,
charactersRefs)
return o
def date_and_notes(s):
"""Parse (birth|death) date and notes; returns a tuple in the
form (date, notes)."""
s = s.strip()
if not s: return (u'', u'')
notes = u''
if s[0].isdigit() or s.split()[0].lower() in ('c.', 'january', 'february',
'march', 'april', 'may', 'june',
'july', 'august', 'september',
'october', 'november',
'december', 'ca.', 'circa',
'????,'):
i = s.find(',')
if i != -1:
notes = s[i+1:].strip()
s = s[:i]
else:
notes = s
s = u''
if s == '????': s = u''
return s, notes
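# Illustrative examples (made-up strings):
#
#   date_and_notes(u'7 July 1940, Liverpool, England, UK')
#       ->  (u'7 July 1940', u'Liverpool, England, UK')
#   date_and_notes(u'circa 1930')  ->  (u'circa 1930', u'')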
class RolesList(list):
"""A list of Person or Character instances, used for the currentRole
property."""
def __unicode__(self):
return u' / '.join([unicode(x) for x in self])
def __str__(self):
# FIXME: does it make sense at all? Return a unicode doesn't
# seem right, in __str__.
return u' / '.join([unicode(x).encode('utf8') for x in self])
# Replace & with &amp;, but only if it's not already part of a charref.
#_re_amp = re.compile(r'(&)(?!\w+;)', re.I)
#_re_amp = re.compile(r'(?<=\W)&(?=[^a-zA-Z0-9_#])')
_re_amp = re.compile(r'&(?![^a-zA-Z0-9_#]{1,5};)')
def escape4xml(value):
"""Escape some chars that can't be present in a XML value."""
if isinstance(value, int):
value = str(value)
    value = _re_amp.sub('&amp;', value)
    value = value.replace('"', '&quot;').replace("'", '&apos;')
    value = value.replace('<', '&lt;').replace('>', '&gt;')
if isinstance(value, unicode):
value = value.encode('ascii', 'xmlcharrefreplace')
return value
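# A small sketch of escape4xml in action (the input value is an arbitrary example):
#   >>> escape4xml('AT&T <"mobile">')
#   'AT&amp;T &lt;&quot;mobile&quot;&gt;'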
def _refsToReplace(value, modFunct, titlesRefs, namesRefs, charactersRefs):
"""Return three lists - for movie titles, persons and characters names -
with two items tuples: the first item is the reference once escaped
by the user-provided modFunct function, the second is the same
reference un-escaped."""
mRefs = []
for refRe, refTemplate in [(re_titleRef, u'_%s_ (qv)'),
(re_nameRef, u"'%s' (qv)"),
(re_characterRef, u'#%s# (qv)')]:
theseRefs = []
for theRef in refRe.findall(value):
# refTemplate % theRef values don't change for a single
# _Container instance, so this is a good candidate for a
# cache or something - even if it's so rarely used that...
# Moreover, it can grow - ia.update(...) - and change if
# modFunct is modified.
goodValue = modFunct(refTemplate % theRef, titlesRefs, namesRefs,
charactersRefs)
# Prevents problems with crap in plain text data files.
# We should probably exclude invalid chars and string that
# are too long in the re_*Ref expressions.
if '_' in goodValue or len(goodValue) > 128:
continue
toReplace = escape4xml(goodValue)
# Only the 'value' portion is replaced.
replaceWith = goodValue.replace(theRef, escape4xml(theRef))
theseRefs.append((toReplace, replaceWith))
mRefs.append(theseRefs)
return mRefs
def _handleTextNotes(s):
"""Split text::notes strings."""
ssplit = s.split('::', 1)
if len(ssplit) == 1:
return s
return u'%s<notes>%s</notes>' % (ssplit[0], ssplit[1])
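# For instance (hypothetical value), u'Writer::uncredited' would become
# u'Writer<notes>uncredited</notes>', while strings without '::' pass through
# unchanged.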
def _normalizeValue(value, withRefs=False, modFunct=None, titlesRefs=None,
namesRefs=None, charactersRefs=None):
"""Replace some chars that can't be present in a XML text."""
# XXX: use s.encode(encoding, 'xmlcharrefreplace') ? Probably not
# a great idea: after all, returning a unicode is safe.
if isinstance(value, (unicode, str)):
if not withRefs:
value = _handleTextNotes(escape4xml(value))
else:
# Replace references that were accidentally escaped.
replaceLists = _refsToReplace(value, modFunct, titlesRefs,
namesRefs, charactersRefs)
value = modFunct(value, titlesRefs or {}, namesRefs or {},
charactersRefs or {})
value = _handleTextNotes(escape4xml(value))
for replaceList in replaceLists:
for toReplace, replaceWith in replaceList:
value = value.replace(toReplace, replaceWith)
else:
value = unicode(value)
return value
def _tag4TON(ton, addAccessSystem=False, _containerOnly=False):
"""Build a tag for the given _Container instance;
both open and close tags are returned."""
tag = ton.__class__.__name__.lower()
what = 'name'
if tag == 'movie':
value = ton.get('long imdb title') or ton.get('title', '')
what = 'title'
else:
value = ton.get('long imdb name') or ton.get('name', '')
value = _normalizeValue(value)
extras = u''
crl = ton.currentRole
if crl:
if not isinstance(crl, list):
crl = [crl]
for cr in crl:
crTag = cr.__class__.__name__.lower()
crValue = cr['long imdb name']
crValue = _normalizeValue(crValue)
crID = cr.getID()
if crID is not None:
extras += u'<current-role><%s id="%s">' \
u'<name>%s</name></%s>' % (crTag, crID,
crValue, crTag)
else:
extras += u'<current-role><%s><name>%s</name></%s>' % \
(crTag, crValue, crTag)
if cr.notes:
extras += u'<notes>%s</notes>' % _normalizeValue(cr.notes)
extras += u'</current-role>'
theID = ton.getID()
if theID is not None:
beginTag = u'<%s id="%s"' % (tag, theID)
if addAccessSystem and ton.accessSystem:
beginTag += ' access-system="%s"' % ton.accessSystem
if not _containerOnly:
beginTag += u'><%s>%s</%s>' % (what, value, what)
else:
beginTag += u'>'
else:
if not _containerOnly:
beginTag = u'<%s><%s>%s</%s>' % (tag, what, value, what)
else:
beginTag = u'<%s>' % tag
beginTag += extras
if ton.notes:
beginTag += u'<notes>%s</notes>' % _normalizeValue(ton.notes)
return (beginTag, u'</%s>' % tag)
TAGS_TO_MODIFY = {
'movie.parents-guide': ('item', True),
'movie.number-of-votes': ('item', True),
'movie.soundtrack.item': ('item', True),
'movie.quotes': ('quote', False),
'movie.quotes.quote': ('line', False),
'movie.demographic': ('item', True),
'movie.episodes': ('season', True),
'movie.episodes.season': ('episode', True),
'person.merchandising-links': ('item', True),
'person.genres': ('item', True),
'person.quotes': ('quote', False),
'person.keywords': ('item', True),
'character.quotes': ('item', True),
'character.quotes.item': ('quote', False),
'character.quotes.item.quote': ('line', False)
}
_allchars = string.maketrans('', '')
_keepchars = _allchars.translate(_allchars, string.ascii_lowercase + '-' +
string.digits)
def _tagAttr(key, fullpath):
"""Return a tuple with a tag name and a (possibly empty) attribute,
applying the conversions specified in TAGS_TO_MODIFY and checking
that the tag is safe for a XML document."""
attrs = {}
_escapedKey = escape4xml(key)
if fullpath in TAGS_TO_MODIFY:
tagName, useTitle = TAGS_TO_MODIFY[fullpath]
if useTitle:
attrs['key'] = _escapedKey
elif not isinstance(key, unicode):
if isinstance(key, str):
tagName = unicode(key, 'ascii', 'ignore')
else:
strType = str(type(key)).replace("<type '", "").replace("'>", "")
attrs['keytype'] = strType
tagName = unicode(key)
else:
tagName = key
if isinstance(key, int):
attrs['keytype'] = 'int'
origTagName = tagName
tagName = tagName.lower().replace(' ', '-')
tagName = str(tagName).translate(_allchars, _keepchars)
if origTagName != tagName:
if 'key' not in attrs:
attrs['key'] = _escapedKey
if (not tagName) or tagName[0].isdigit() or tagName[0] == '-':
# This is a fail-safe: we should never be here, since unpredictable
# keys must be listed in TAGS_TO_MODIFY.
        # This will probably break the DTD/schema, but at least it will
# produce a valid XML.
tagName = 'item'
_utils_logger.error('invalid tag: %s [%s]' % (_escapedKey, fullpath))
attrs['key'] = _escapedKey
return tagName, u' '.join([u'%s="%s"' % i for i in attrs.items()])
def _seq2xml(seq, _l=None, withRefs=False, modFunct=None,
titlesRefs=None, namesRefs=None, charactersRefs=None,
_topLevel=True, key2infoset=None, fullpath=''):
"""Convert a sequence or a dictionary to a list of XML
unicode strings."""
if _l is None:
_l = []
if isinstance(seq, dict):
for key in seq:
value = seq[key]
if isinstance(key, _Container):
# Here we're assuming that a _Container is never a top-level
# key (otherwise we should handle key2infoset).
openTag, closeTag = _tag4TON(key)
# So that fullpath will contains something meaningful.
tagName = key.__class__.__name__.lower()
else:
tagName, attrs = _tagAttr(key, fullpath)
openTag = u'<%s' % tagName
if attrs:
openTag += ' %s' % attrs
if _topLevel and key2infoset and key in key2infoset:
openTag += u' infoset="%s"' % key2infoset[key]
if isinstance(value, int):
openTag += ' type="int"'
elif isinstance(value, float):
openTag += ' type="float"'
openTag += u'>'
closeTag = u'</%s>' % tagName
_l.append(openTag)
_seq2xml(value, _l, withRefs, modFunct, titlesRefs,
namesRefs, charactersRefs, _topLevel=False,
fullpath='%s.%s' % (fullpath, tagName))
_l.append(closeTag)
elif isinstance(seq, (list, tuple)):
tagName, attrs = _tagAttr('item', fullpath)
beginTag = u'<%s' % tagName
if attrs:
beginTag += u' %s' % attrs
#beginTag += u'>'
closeTag = u'</%s>' % tagName
for item in seq:
if isinstance(item, _Container):
_seq2xml(item, _l, withRefs, modFunct, titlesRefs,
namesRefs, charactersRefs, _topLevel=False,
fullpath='%s.%s' % (fullpath,
item.__class__.__name__.lower()))
else:
openTag = beginTag
if isinstance(item, int):
openTag += ' type="int"'
elif isinstance(item, float):
openTag += ' type="float"'
openTag += u'>'
_l.append(openTag)
_seq2xml(item, _l, withRefs, modFunct, titlesRefs,
namesRefs, charactersRefs, _topLevel=False,
fullpath='%s.%s' % (fullpath, tagName))
_l.append(closeTag)
else:
if isinstance(seq, _Container):
_l.extend(_tag4TON(seq))
else:
# Text, ints, floats and the like.
_l.append(_normalizeValue(seq, withRefs=withRefs,
modFunct=modFunct,
titlesRefs=titlesRefs,
namesRefs=namesRefs,
charactersRefs=charactersRefs))
return _l
_xmlHead = u"""<?xml version="1.0"?>
<!DOCTYPE %s SYSTEM "http://imdbpy.sf.net/dtd/imdbpy{VERSION}.dtd">
"""
_xmlHead = _xmlHead.replace('{VERSION}',
VERSION.replace('.', '').split('dev')[0][:2])
class _Container(object):
"""Base class for Movie, Person, Character and Company classes."""
# The default sets of information retrieved.
default_info = ()
# Aliases for some not-so-intuitive keys.
keys_alias = {}
# List of keys to modify.
keys_tomodify_list = ()
# Function used to compare two instances of this class.
cmpFunct = None
# Regular expression used to build the 'full-size (headshot|cover url)'.
_re_fullsizeURL = re.compile(r'\._V1\._SX(\d+)_SY(\d+)_')
def __init__(self, myID=None, data=None, notes=u'',
currentRole=u'', roleID=None, roleIsPerson=False,
accessSystem=None, titlesRefs=None, namesRefs=None,
charactersRefs=None, modFunct=None, *args, **kwds):
"""Initialize a Movie, Person, Character or Company object.
*myID* -- your personal identifier for this object.
*data* -- a dictionary used to initialize the object.
*notes* -- notes for the person referred in the currentRole
attribute; e.g.: '(voice)' or the alias used in the
movie credits.
*accessSystem* -- a string representing the data access system used.
*currentRole* -- a Character instance representing the current role
or duty of a person in this movie, or a Person
object representing the actor/actress who played
a given character in a Movie. If a string is
                         passed, an object is automatically built.
*roleID* -- if available, the characterID/personID of the currentRole
object.
*roleIsPerson* -- when False (default) the currentRole is assumed
to be a Character object, otherwise a Person.
*titlesRefs* -- a dictionary with references to movies.
*namesRefs* -- a dictionary with references to persons.
*charactersRefs* -- a dictionary with references to characters.
*modFunct* -- function called returning text fields.
"""
self.reset()
self.accessSystem = accessSystem
self.myID = myID
if data is None: data = {}
self.set_data(data, override=1)
self.notes = notes
if titlesRefs is None: titlesRefs = {}
self.update_titlesRefs(titlesRefs)
if namesRefs is None: namesRefs = {}
self.update_namesRefs(namesRefs)
if charactersRefs is None: charactersRefs = {}
self.update_charactersRefs(charactersRefs)
self.set_mod_funct(modFunct)
self.keys_tomodify = {}
for item in self.keys_tomodify_list:
self.keys_tomodify[item] = None
self._roleIsPerson = roleIsPerson
if not roleIsPerson:
from imdb.Character import Character
self._roleClass = Character
else:
from imdb.Person import Person
self._roleClass = Person
self.currentRole = currentRole
if roleID:
self.roleID = roleID
self._init(*args, **kwds)
def _get_roleID(self):
"""Return the characterID or personID of the currentRole object."""
if not self.__role:
return None
if isinstance(self.__role, list):
return [x.getID() for x in self.__role]
return self.currentRole.getID()
def _set_roleID(self, roleID):
"""Set the characterID or personID of the currentRole object."""
if not self.__role:
# XXX: needed? Just ignore it? It's probably safer to
# ignore it, to prevent some bugs in the parsers.
#raise IMDbError,"Can't set ID of an empty Character/Person object."
pass
if not self._roleIsPerson:
if not isinstance(roleID, (list, tuple)):
self.currentRole.characterID = roleID
else:
for index, item in enumerate(roleID):
self.__role[index].characterID = item
else:
if not isinstance(roleID, (list, tuple)):
self.currentRole.personID = roleID
else:
for index, item in enumerate(roleID):
self.__role[index].personID = item
roleID = property(_get_roleID, _set_roleID,
doc="the characterID or personID of the currentRole object.")
def _get_currentRole(self):
"""Return a Character or Person instance."""
if self.__role:
return self.__role
return self._roleClass(name=u'', accessSystem=self.accessSystem,
modFunct=self.modFunct)
def _set_currentRole(self, role):
"""Set self.currentRole to a Character or Person instance."""
if isinstance(role, (unicode, str)):
if not role:
self.__role = None
else:
self.__role = self._roleClass(name=role, modFunct=self.modFunct,
accessSystem=self.accessSystem)
elif isinstance(role, (list, tuple)):
self.__role = RolesList()
for item in role:
if isinstance(item, (unicode, str)):
self.__role.append(self._roleClass(name=item,
accessSystem=self.accessSystem,
modFunct=self.modFunct))
else:
self.__role.append(item)
if not self.__role:
self.__role = None
else:
self.__role = role
currentRole = property(_get_currentRole, _set_currentRole,
doc="The role of a Person in a Movie" + \
" or the interpreter of a Character in a Movie.")
def _init(self, **kwds): pass
def reset(self):
"""Reset the object."""
self.data = {}
self.myID = None
self.notes = u''
self.titlesRefs = {}
self.namesRefs = {}
self.charactersRefs = {}
self.modFunct = modClearRefs
self.current_info = []
self.infoset2keys = {}
self.key2infoset = {}
self.__role = None
self._reset()
def _reset(self): pass
def clear(self):
"""Reset the dictionary."""
self.data.clear()
self.notes = u''
self.titlesRefs = {}
self.namesRefs = {}
self.charactersRefs = {}
self.current_info = []
self.infoset2keys = {}
self.key2infoset = {}
self.__role = None
self._clear()
def _clear(self): pass
def get_current_info(self):
"""Return the current set of information retrieved."""
return self.current_info
def update_infoset_map(self, infoset, keys, mainInfoset):
"""Update the mappings between infoset and keys."""
if keys is None:
keys = []
if mainInfoset is not None:
theIS = mainInfoset
else:
theIS = infoset
self.infoset2keys[theIS] = keys
for key in keys:
self.key2infoset[key] = theIS
def set_current_info(self, ci):
"""Set the current set of information retrieved."""
# XXX:Remove? It's never used and there's no way to update infoset2keys.
self.current_info = ci
def add_to_current_info(self, val, keys=None, mainInfoset=None):
"""Add a set of information to the current list."""
if val not in self.current_info:
self.current_info.append(val)
self.update_infoset_map(val, keys, mainInfoset)
def has_current_info(self, val):
"""Return true if the given set of information is in the list."""
return val in self.current_info
def set_mod_funct(self, modFunct):
"""Set the fuction used to modify the strings."""
if modFunct is None: modFunct = modClearRefs
self.modFunct = modFunct
def update_titlesRefs(self, titlesRefs):
"""Update the dictionary with the references to movies."""
self.titlesRefs.update(titlesRefs)
def get_titlesRefs(self):
"""Return the dictionary with the references to movies."""
return self.titlesRefs
def update_namesRefs(self, namesRefs):
"""Update the dictionary with the references to names."""
self.namesRefs.update(namesRefs)
def get_namesRefs(self):
"""Return the dictionary with the references to names."""
return self.namesRefs
def update_charactersRefs(self, charactersRefs):
"""Update the dictionary with the references to characters."""
self.charactersRefs.update(charactersRefs)
def get_charactersRefs(self):
"""Return the dictionary with the references to characters."""
return self.charactersRefs
def set_data(self, data, override=0):
"""Set the movie data to the given dictionary; if 'override' is
        set, the previous data is removed, otherwise the two dictionaries
are merged.
"""
if not override:
self.data.update(data)
else:
self.data = data
def getID(self):
"""Return movieID, personID, characterID or companyID."""
raise NotImplementedError('override this method')
def __cmp__(self, other):
"""Compare two Movie, Person, Character or Company objects."""
# XXX: raise an exception?
if self.cmpFunct is None: return -1
if not isinstance(other, self.__class__): return -1
return self.cmpFunct(other)
def __hash__(self):
"""Hash for this object."""
# XXX: does it always work correctly?
theID = self.getID()
if theID is not None and self.accessSystem not in ('UNKNOWN', None):
# Handle 'http' and 'mobile' as they are the same access system.
acs = self.accessSystem
if acs in ('mobile', 'httpThin'):
acs = 'http'
# There must be some indication of the kind of the object, too.
s4h = '%s:%s[%s]' % (self.__class__.__name__, theID, acs)
else:
s4h = repr(self)
return hash(s4h)
def isSame(self, other):
"""Return True if the two represent the same object."""
if not isinstance(other, self.__class__): return 0
if hash(self) == hash(other): return 1
return 0
def __len__(self):
"""Number of items in the data dictionary."""
return len(self.data)
def getAsXML(self, key, _with_add_keys=True):
"""Return a XML representation of the specified key, or None
        if empty. If _with_add_keys is False, dynamically generated
keys are excluded."""
# Prevent modifyStrings in __getitem__ to be called; if needed,
# it will be called by the _normalizeValue function.
origModFunct = self.modFunct
self.modFunct = modNull
# XXX: not totally sure it's a good idea, but could prevent
# problems (i.e.: the returned string always contains
# a DTD valid tag, and not something that can be only in
# the keys_alias map).
key = self.keys_alias.get(key, key)
if (not _with_add_keys) and (key in self._additional_keys()):
self.modFunct = origModFunct
return None
try:
withRefs = False
if key in self.keys_tomodify and \
origModFunct not in (None, modNull):
withRefs = True
value = self.get(key)
if value is None:
return None
tag = self.__class__.__name__.lower()
return u''.join(_seq2xml({key: value}, withRefs=withRefs,
modFunct=origModFunct,
titlesRefs=self.titlesRefs,
namesRefs=self.namesRefs,
charactersRefs=self.charactersRefs,
key2infoset=self.key2infoset,
fullpath=tag))
finally:
self.modFunct = origModFunct
def asXML(self, _with_add_keys=True):
"""Return a XML representation of the whole object.
        If _with_add_keys is False, dynamically generated keys are excluded."""
beginTag, endTag = _tag4TON(self, addAccessSystem=True,
_containerOnly=True)
resList = [beginTag]
for key in self.keys():
value = self.getAsXML(key, _with_add_keys=_with_add_keys)
if not value:
continue
resList.append(value)
resList.append(endTag)
head = _xmlHead % self.__class__.__name__.lower()
return head + u''.join(resList)
def _getitem(self, key):
"""Handle special keys."""
return None
def __getitem__(self, key):
"""Return the value for a given key, checking key aliases;
a KeyError exception is raised if the key is not found.
"""
value = self._getitem(key)
if value is not None: return value
# Handle key aliases.
key = self.keys_alias.get(key, key)
rawData = self.data[key]
if key in self.keys_tomodify and \
self.modFunct not in (None, modNull):
try:
return modifyStrings(rawData, self.modFunct, self.titlesRefs,
self.namesRefs, self.charactersRefs)
except RuntimeError, e:
# Symbian/python 2.2 has a poor regexp implementation.
import warnings
warnings.warn('RuntimeError in '
"imdb.utils._Container.__getitem__; if it's not "
"a recursion limit exceeded and we're not running "
"in a Symbian environment, it's a bug:\n%s" % e)
return rawData
def __setitem__(self, key, item):
"""Directly store the item with the given key."""
self.data[key] = item
def __delitem__(self, key):
"""Remove the given section or key."""
# XXX: how to remove an item of a section?
del self.data[key]
def _additional_keys(self):
"""Valid keys to append to the data.keys() list."""
return []
def keys(self):
"""Return a list of valid keys."""
return self.data.keys() + self._additional_keys()
def items(self):
"""Return the items in the dictionary."""
return [(k, self.get(k)) for k in self.keys()]
# XXX: is this enough?
def iteritems(self): return self.data.iteritems()
def iterkeys(self): return self.data.iterkeys()
def itervalues(self): return self.data.itervalues()
def values(self):
"""Return the values in the dictionary."""
return [self.get(k) for k in self.keys()]
def has_key(self, key):
"""Return true if a given section is defined."""
try:
self.__getitem__(key)
except KeyError:
return 0
return 1
# XXX: really useful???
# consider also that this will confuse people who meant to
# call ia.update(movieObject, 'data set') instead.
def update(self, dict):
self.data.update(dict)
def get(self, key, failobj=None):
"""Return the given section, or default if it's not found."""
try:
return self.__getitem__(key)
except KeyError:
return failobj
def setdefault(self, key, failobj=None):
if not self.has_key(key):
self[key] = failobj
return self[key]
def pop(self, key, *args):
return self.data.pop(key, *args)
def popitem(self):
return self.data.popitem()
def __repr__(self):
"""String representation of an object."""
raise NotImplementedError('override this method')
def __str__(self):
"""Movie title or person name."""
raise NotImplementedError('override this method')
def __contains__(self, key):
raise NotImplementedError('override this method')
def append_item(self, key, item):
"""The item is appended to the list identified by the given key."""
self.data.setdefault(key, []).append(item)
def set_item(self, key, item):
"""Directly store the item with the given key."""
self.data[key] = item
def __nonzero__(self):
"""Return true if self.data contains something."""
if self.data: return 1
return 0
def __deepcopy__(self, memo):
raise NotImplementedError('override this method')
def copy(self):
"""Return a deep copy of the object itself."""
return deepcopy(self)
def flatten(seq, toDescend=(list, dict, tuple), yieldDictKeys=0,
onlyKeysType=(_Container,), scalar=None):
"""Iterate over nested lists and dictionaries; toDescend is a list
or a tuple of types to be considered non-scalar; if yieldDictKeys is
true, also dictionaries' keys are yielded; if scalar is not None, only
items of the given type(s) are yielded."""
if scalar is None or isinstance(seq, scalar):
yield seq
if isinstance(seq, toDescend):
if isinstance(seq, (dict, _Container)):
if yieldDictKeys:
# Yield also the keys of the dictionary.
for key in seq.iterkeys():
for k in flatten(key, toDescend=toDescend,
yieldDictKeys=yieldDictKeys,
onlyKeysType=onlyKeysType, scalar=scalar):
if onlyKeysType and isinstance(k, onlyKeysType):
yield k
for value in seq.itervalues():
for v in flatten(value, toDescend=toDescend,
yieldDictKeys=yieldDictKeys,
onlyKeysType=onlyKeysType, scalar=scalar):
yield v
elif not isinstance(seq, (str, unicode, int, float)):
for item in seq:
for i in flatten(item, toDescend=toDescend,
yieldDictKeys=yieldDictKeys,
onlyKeysType=onlyKeysType, scalar=scalar):
yield i
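# A minimal sketch of how flatten() can be used (the nested value is only an
# illustration):
#   >>> list(flatten([1, [2, [3, 4]], {'a': 5}], scalar=int))
#   [1, 2, 3, 4, 5]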
| gpl-3.0 | 2,076,879,400,132,659,500 | -264,680,269,353,519,740 | 37.550254 | 155 | 0.552978 | false |
Hellowlol/PyTunes | modules/newznab.py | 1 | 9273 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cherrypy
import pytunes
import math
from pytunes.proxy import get_image
from urllib2 import urlopen, quote
from json import loads
import logging
class Newznab:
def __init__(self):
self.logger = logging.getLogger('modules.newznab')
pytunes.MODULES.append({
'name': 'Newznab Search',
'id': 'newznab',
'fields': [
{'type':'bool', 'label':'Enable', 'name':'newznab_enable'},
{'type':'text', 'label':'Menu name', 'name':'newznab_name', 'placeholder':''},
#{'type':'select',
# 'label':'Default NZB Client',
# 'name':'default_nzb_id',
# 'options':[],
# 'desc':'Only Enabled Clients Will Show'
#},
{'type':'text', 'label':'Console Category', 'name':'newznab_console', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'Movies Category', 'name':'newznab_movies', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'Audio Category', 'name':'newznab_audio', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'PC Category', 'name':'newznab_pc', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'TV Category', 'name':'newznab_tv', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'XXX Category', 'name':'newznab_xxx', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'Books Category', 'name':'newznab_books', 'desc':'From Sabnzbd Configuration'},
{'type':'text', 'label':'Other Category', 'name':'newznab_other', 'desc':'From Sabnzbd Configuration'}
]})
pytunes.MODULES.append({
'name': 'Newznab Servers',
'id': 'newznab_update_server',
'action': '%ssettings/setnewzserver' % pytunes.WEBDIR,
#'test': pytunes.WEBDIR + 'newznab/ping',
'fields': [
{'type':'select',
'label':'Newznab Servers',
'name':'newznab_server_id',
'options':[
{'name':'New', 'value':0}
]},
{'type':'text',
'label':'Name',
'name':'newznab_server_name'},
{'type':'text', 'label':'Host', 'name':'newznab_server_host'},
{'type':'text', 'label':'Apikey', 'name':'newznab_server_apikey'},
{'type':'bool', 'label':'Use SSL', 'name':'newznab_server_ssl'}
]})
@cherrypy.expose()
def index(self, query='', **kwargs):
return pytunes.LOOKUP.get_template('newznab.html').render(query=query, scriptname='newznab')
"""
    NOT IMPLEMENTED
@cherrypy.expose()
@cherrypy.tools.json_out()
def ping(self, newznab_host, newznab_apikey, **kwargs):
self.logger.debug("Pinging newznab-host")
return 1
"""
@cherrypy.expose()
def thumb(self, url, h=None, w=None, o=100):
if url.startswith('rageid'):
settings = pytunes.settings
host = settings.get('newznab_host', '').replace('http://', '').replace('https://', '')
ssl = 's' if settings.get('newznab_ssl', 0) else ''
url = 'http%s://%s/covers/tv/%s.jpg' % (ssl, host, url[6:])
return get_image(url, h, w, o)
@cherrypy.expose()
def getcategories(self, **kwargs):
self.logger.debug("Fetching available categories")
ret = ''
try:
settings = pytunes.settings
self.current = settings.get_current_newznab_host()
host = self.current.host.replace('http://', '').replace('https://', '')
ssl = '' if self.current.ssl == '0' else 's'
apikey = self.current.apikey
url = 'http%s://%s/api?t=caps&o=xml' % (ssl, host)
self.logger.debug("Fetching Cat information from: %s" % url)
caps = urlopen(url, timeout=10).read()
lines = caps.split('\n')
opt_line = '<option value="%s">%s</option>'
for line in lines:
if 'category' in line and 'genre' not in line and not '/cat' in line:
junk,id,name = line.strip().split(' ')
id = id.split('"')[1]
main_name = name.split('"')[1]
ret += opt_line % (id, main_name)
if 'subcat' in line:
subcat = line.strip().split(' name')
id = subcat[0].split('"')[1]
name = '%s > %s' % (main_name, subcat[1].split('"')[1])
ret += opt_line % (id, name)
except:
self.logger.error('Unable to fetch categories from: %s' % url)
return ret
@cherrypy.expose()
def search(self, q='', cat='', **kwargs):
ret = ''
row = '<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>'
settings = pytunes.settings
sab_cat = {
'1000':settings.get('newznab_console', ''),
'2000':settings.get('newznab_movies', ''),
'3000':settings.get('newznab_audio', ''),
'4000':settings.get('newznab_pc', ''),
'5000':settings.get('newznab_tv', ''),
'6000':settings.get('newznab_xxx', ''),
'7000':settings.get('newznab_books', ''),
'8000':settings.get('newznab_other', '')
}
if cat:
cat = '&cat=%s' % cat
res = self.fetch('search&q=%s%s&extended=1' % (quote(q), cat))
#put in staticvars
link = "<a href='/newznab/AddNzbFromUrl?nzb_url=%s&nzb_category=%s' class='ajax-link' title='Download' cat='%s'><i class='icon-download-alt'></i></a>"
try:
results = res['channel']['item']
except:
results = res
grabs = '0'
for each in results:
files = str(each['attr'][4]['@attributes']['value'])
grabs = str(each['attr'][6]['@attributes']['value'])
category = each['category']
title = each['title']
cat = sab_cat[str(each['attr'][0]['@attributes']['value'])]
num = int(each['enclosure']['@attributes']['length'])
for x in [' bytes',' KB',' MB',' GB']:
if num < 1024.0:
size = "%3.2f%s" % (num, x)
break
num /= 1024.0
dl = link % (quote(each['link']), cat, cat)
ret += row % (title, category, size, files, grabs, dl)
return ret
@cherrypy.expose()
@cherrypy.tools.json_out()
def AddNzbFromUrl(self, nzb_url, nzb_category=''):
self.logger.debug("Adding nzb from url")
if nzb_category:
nzb_category = '&cat=%s' % nzb_category
return self.send('&mode=addurl&name=%s%s' % (quote(nzb_url), nzb_category))
def fetch(self, cmd):
try:
settings = pytunes.settings
self.current = settings.get_current_newznab_host()
host = self.current.host.replace('http://', '').replace('https://', '')
ssl = 's' if settings.get('newznab_ssl') == 'on' else ''
apikey = self.current.apikey
            url = 'http%s://%s/api?o=json&apikey=%s&t=%s' % (ssl, host, apikey, cmd)
self.logger.debug("Fetching information from: %s" % url)
return loads(urlopen(url, timeout=30).read())
except Exception, e:
self.logger.debug("Exception%s: " % str(e))
self.logger.error("Unable to fetch information from: newznab %s" % str(e))
def send(self, link):
try:
host = pytunes.settings.get('sabnzbd_host', '')
port = str(pytunes.settings.get('sabnzbd_port', ''))
apikey = pytunes.settings.get('sabnzbd_apikey', '')
sabnzbd_basepath = pytunes.settings.get('sabnzbd_basepath', '/sabnzbd/')
ssl = 's' if pytunes.settings.get('sabnzbd_ssl', 0) else ''
if(sabnzbd_basepath == ""):
sabnzbd_basepath = "/sabnzbd/"
if not(sabnzbd_basepath.endswith('/')):
sabnzbd_basepath += "/"
url = 'http%s://%s:%s%sapi?output=json&apikey=%s%s' % (ssl, host, port, sabnzbd_basepath, apikey, link)
self.logger.debug("Sending NZB to: %s: " % url)
return loads(urlopen(url, timeout=10).read())
except:
self.logger.error("Cannot contact sabnzbd")
return
#Future use...use staticvars
@cherrypy.expose()
def GetClients(self):
nzbclients = ''
if pytunes.settings.get('nzbget_enable', ''):
nzbclients += '<option id="nzbget">NZBget</option>'
if pytunes.settings.get('sabnzbd_enable', ''):
nzbclients += '<option id="sabnzbd">Sabnzbd+</option>'
if not nzbclients:
nzbclients = '<option>No Clients Enabled</option>'
return nzbclients
| gpl-3.0 | -6,709,094,554,784,388,000 | 5,849,650,385,015,720,000 | 43.234146 | 158 | 0.502211 | false |
3dfxmadscientist/cbss-server | addons/base_vat/__openerp__.py | 125 | 2928 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'VAT Number Validation',
'version': '1.0',
'category': 'Hidden/Dependency',
'description': """
VAT validation for Partner's VAT numbers.
=========================================
After installing this module, values entered in the VAT field of Partners will
be validated for all supported countries. The country is inferred from the
2-letter country code that prefixes the VAT number, e.g. ``BE0477472701``
will be validated using the Belgian rules.
There are two different levels of VAT number validation:
--------------------------------------------------------
* By default, a simple off-line check is performed using the known validation
rules for the country, usually a simple check digit. This is quick and
always available, but allows numbers that are perhaps not truly allocated,
or not valid anymore.
* When the "VAT VIES Check" option is enabled (in the configuration of the user's
Company), VAT numbers will be instead submitted to the online EU VIES
database, which will truly verify that the number is valid and currently
allocated to a EU company. This is a little bit slower than the simple
off-line check, requires an Internet connection, and may not be available
all the time. If the service is not available or does not support the
requested country (e.g. for non-EU countries), a simple check will be performed
instead.
Supported countries currently include EU countries, and a few non-EU countries
such as Chile, Colombia, Mexico, Norway or Russia. For unsupported countries,
only the country code will be validated.
""",
'author': 'OpenERP SA',
'depends': ['account'],
'website': 'http://www.openerp.com',
'data': ['base_vat_view.xml'],
'installable': True,
'auto_install': False,
'images': ['images/1_partner_vat.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -6,514,886,090,016,711,000 | -8,282,388,581,802,079,000 | 44.75 | 85 | 0.652322 | false |
freelawproject/recap-server | settings.py | 1 | 1377 | """Settings are derived by compiling any files ending in .py in the settings
directory, in alphabetical order.
This results in the following concept:
- default settings are in 10-public.py (this should contain most settings)
- custom settings are in 05-private.py (an example of this file is here for
you)
- any overrides to public settings can go in 20-private.py (you'll need to
create this)
"""
from __future__ import with_statement
import os
import glob
import sys
def _generate_secret_key(file_path):
import random
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
def random_char():
return chars[int(len(chars)*random.random())]
rand_str = ''.join(random_char() for i in range(64))
with open(file_path, 'w') as f:
f.write('SECRET_KEY=%s\n' % repr(rand_str))
ROOT_PATH = os.path.dirname(__file__)
# Try importing the SECRET_KEY from the file secret_key.py. If it doesn't exist,
# there is an import error, and the key is generated and written to the file.
try:
from secret_key import SECRET_KEY
except ImportError:
_generate_secret_key(os.path.join(ROOT_PATH, 'secret_key.py'))
from secret_key import SECRET_KEY
# Load the conf files.
conf_files = glob.glob(os.path.join(
os.path.dirname(__file__), 'settings', '*.py'))
conf_files.sort()
for f in conf_files:
execfile(os.path.abspath(f))
| gpl-3.0 | 9,116,316,685,373,156,000 | 1,069,371,775,039,052,200 | 31.023256 | 80 | 0.697168 | false |
laiqiqi886/kbengine | kbe/res/scripts/common/Lib/ast.py | 91 | 12034 | """
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
def parse(source, filename='<unknown>', mode='exec'):
"""
Parse the source into an AST node.
Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
"""
return compile(source, filename, mode, PyCF_ONLY_AST)
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, bytes, numbers, tuples, lists, dicts,
sets, booleans, and None.
"""
if isinstance(node_or_string, str):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.body
def _convert(node):
if isinstance(node, (Str, Bytes)):
return node.s
elif isinstance(node, Num):
return node.n
elif isinstance(node, Tuple):
return tuple(map(_convert, node.elts))
elif isinstance(node, List):
return list(map(_convert, node.elts))
elif isinstance(node, Set):
return set(map(_convert, node.elts))
elif isinstance(node, Dict):
return dict((_convert(k), _convert(v)) for k, v
in zip(node.keys, node.values))
elif isinstance(node, NameConstant):
return node.value
elif isinstance(node, UnaryOp) and \
isinstance(node.op, (UAdd, USub)) and \
isinstance(node.operand, (Num, UnaryOp, BinOp)):
operand = _convert(node.operand)
if isinstance(node.op, UAdd):
return + operand
else:
return - operand
elif isinstance(node, BinOp) and \
isinstance(node.op, (Add, Sub)) and \
isinstance(node.right, (Num, UnaryOp, BinOp)) and \
isinstance(node.left, (Num, UnaryOp, BinOp)):
left = _convert(node.left)
right = _convert(node.right)
if isinstance(node.op, Add):
return left + right
else:
return left - right
raise ValueError('malformed node or string: ' + repr(node))
return _convert(node_or_string)
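# Illustrative usage (example literals are arbitrary):
#   >>> literal_eval("{'answer': 40 + 2}")
#   {'answer': 42}
#   >>> literal_eval("(1, 2.5, None)")
#   (1, 2.5, None)
# Anything that is not a literal structure (names, calls, ...) raises ValueError.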
def dump(node, annotate_fields=True, include_attributes=False):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node):
if isinstance(node, AST):
fields = [(a, _format(b)) for a, b in iter_fields(node)]
rv = '%s(%s' % (node.__class__.__name__, ', '.join(
('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)
))
if include_attributes and node._attributes:
rv += fields and ', ' or ' '
rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
for a in node._attributes)
return rv + ')'
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
def copy_location(new_node, old_node):
"""
Copy source location (`lineno` and `col_offset` attributes) from
*old_node* to *new_node* if possible, and return *new_node*.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
When you compile a node tree with compile(), the compiler expects lineno and
col_offset attributes for every node that supports them. This is rather
tedious to fill in for generated nodes, so this helper adds these attributes
recursively where not already set, by setting them to the values of the
parent node. It works recursively starting at *node*.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
def increment_lineno(node, n=1):
"""
Increment the line number of each node in the tree starting at *node* by *n*.
This is useful to "move code" to a different location in a file.
"""
for child in walk(node):
if 'lineno' in child._attributes:
child.lineno = getattr(child, 'lineno', 0) + n
return node
def iter_fields(node):
"""
Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
that is present on *node*.
"""
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
def iter_child_nodes(node):
"""
Yield all direct child nodes of *node*, that is, all fields that are nodes
and all items of fields that are lists of nodes.
"""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_docstring(node, clean=True):
"""
Return the docstring for the given node or None if no docstring can
be found. If the node provided does not have docstrings a TypeError
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Expr) and \
isinstance(node.body[0].value, Str):
if clean:
import inspect
return inspect.cleandoc(node.body[0].value.s)
return node.body[0].value.s
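# Small example (the source snippet is made up):
#   >>> node = parse('def f():\n    "Say hi."\n')
#   >>> get_docstring(node.body[0])
#   'Say hi.'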
def walk(node):
"""
Recursively yield all descendant nodes in the tree starting at *node*
(including *node* itself), in no specified order. This is useful if you
only want to modify nodes in place and don't care about the context.
"""
from collections import deque
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
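# For example (illustrative), collecting every number literal in a tree:
#   >>> tree = parse('x = 1 + 2')
#   >>> sorted(n.n for n in walk(tree) if isinstance(n, Num))
#   [1, 2]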
class NodeVisitor(object):
"""
A node visitor base class that walks the abstract syntax tree and calls a
visitor function for every node found. This function may return a value
which is forwarded by the `visit` method.
This class is meant to be subclassed, with the subclass adding visitor
methods.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `visit` method. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
"""
def visit(self, node):
"""Visit a node."""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
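# A minimal visitor subclass, shown as a sketch of the intended usage (the
# class and source text below are illustrative, not part of this module):
#
#   class NameCollector(NodeVisitor):
#       def __init__(self):
#           self.names = []
#       def visit_Name(self, node):
#           self.names.append(node.id)
#           self.generic_visit(node)
#
#   collector = NameCollector()
#   collector.visit(parse('a = b + c'))
#   assert collector.names == ['a', 'b', 'c']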
class NodeTransformer(NodeVisitor):
"""
A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
allows modification of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor methods to replace or remove the old node. If the return value of
the visitor method is ``None``, the node will be removed from its location,
otherwise it is replaced with the return value. The return value may be the
original node in which case no replacement takes place.
Here is an example transformer that rewrites all occurrences of name lookups
(``foo``) to ``data['foo']``::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes you must
either transform the child nodes yourself or call the :meth:`generic_visit`
method for the node first.
For nodes that were part of a collection of statements (that applies to all
statement nodes), the visitor may also return a list of nodes rather than
just a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
| lgpl-3.0 | 1,438,913,190,463,373,300 | 6,025,998,684,444,138,000 | 37.203175 | 81 | 0.60703 | false |
gfreed/android_external_chromium-org | tools/metrics/histograms/pretty_print.py | 53 | 12009 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the histograms.xml file, alphabetizing tags, wrapping text
at 80 chars, enforcing standard attribute ordering, and standardizing
indentation.
This is quite a bit more complicated than just calling tree.toprettyxml();
we need additional customization, like special attribute ordering in tags
and wrapping text nodes, so we implement our own full custom XML pretty-printer.
"""
from __future__ import with_statement
import diffutil
import json
import logging
import shutil
import sys
import textwrap
import xml.dom.minidom
WRAP_COLUMN = 80
# Desired order for tag attributes; attributes listed here will appear first,
# and in the same order as in these lists.
# { tag_name: [attribute_name, ...] }
ATTRIBUTE_ORDER = {
'enum': ['name', 'type'],
'histogram': ['name', 'enum', 'units'],
'int': ['value', 'label'],
'fieldtrial': ['name', 'separator', 'ordering'],
'group': ['name', 'label'],
'affected-histogram': ['name'],
'with-group': ['name'],
}
# Tag names for top-level nodes whose children we don't want to indent.
TAGS_THAT_DONT_INDENT = [
'histogram-configuration',
'histograms',
'fieldtrials',
'enums'
]
# Extra vertical spacing rules for special tag names.
# {tag_name: (newlines_after_open, newlines_before_close, newlines_after_close)}
TAGS_THAT_HAVE_EXTRA_NEWLINE = {
'histogram-configuration': (2, 1, 1),
'histograms': (2, 1, 1),
'fieldtrials': (2, 1, 1),
'enums': (2, 1, 1),
'histogram': (1, 1, 1),
'enum': (1, 1, 1),
'fieldtrial': (1, 1, 1),
}
# Tags that we allow to be squished into a single line for brevity.
TAGS_THAT_ALLOW_SINGLE_LINE = [
'summary',
'int',
]
# Tags whose children we want to alphabetize. The key is the parent tag name,
# and the value is a pair of the tag name of the children we want to sort,
# and a key function that maps each child node to the desired sort key.
ALPHABETIZATION_RULES = {
'histograms': ('histogram', lambda n: n.attributes['name'].value.lower()),
'enums': ('enum', lambda n: n.attributes['name'].value.lower()),
'enum': ('int', lambda n: int(n.attributes['value'].value)),
'fieldtrials': ('fieldtrial', lambda n: n.attributes['name'].value.lower()),
'fieldtrial': ('affected-histogram',
lambda n: n.attributes['name'].value.lower()),
}
class Error(Exception):
pass
def LastLineLength(s):
"""Returns the length of the last line in s.
Args:
s: A multi-line string, including newlines.
Returns:
The length of the last line in s, in characters.
"""
if s.rfind('\n') == -1: return len(s)
return len(s) - s.rfind('\n') - len('\n')
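# Example (illustrative): LastLineLength('foo\nlonger line') measures only
# 'longer line', while a string without newlines is measured whole.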
def XmlEscape(s):
"""XML-escapes the given string, replacing magic characters (&<>") with their
escaped equivalents."""
s = s.replace("&", "&").replace("<", "<")
s = s.replace("\"", """).replace(">", ">")
return s
def PrettyPrintNode(node, indent=0):
"""Pretty-prints the given XML node at the given indent level.
Args:
node: The minidom node to pretty-print.
indent: The current indent level.
Returns:
The pretty-printed string (including embedded newlines).
Raises:
Error if the XML has unknown tags or attributes.
"""
# Handle the top-level document node.
if node.nodeType == xml.dom.minidom.Node.DOCUMENT_NODE:
return '\n'.join([PrettyPrintNode(n) for n in node.childNodes])
# Handle text nodes.
if node.nodeType == xml.dom.minidom.Node.TEXT_NODE:
# Wrap each paragraph in the text to fit in the 80 column limit.
wrapper = textwrap.TextWrapper()
wrapper.initial_indent = ' ' * indent
wrapper.subsequent_indent = ' ' * indent
wrapper.break_on_hyphens = False
wrapper.break_long_words = False
wrapper.width = WRAP_COLUMN
text = XmlEscape(node.data)
# Remove any common indent.
text = textwrap.dedent(text.strip('\n'))
lines = text.split('\n')
# Split the text into paragraphs at blank line boundaries.
paragraphs = [[]]
for l in lines:
if len(l.strip()) == 0 and len(paragraphs[-1]) > 0:
paragraphs.append([])
else:
paragraphs[-1].append(l)
# Remove trailing empty paragraph if present.
if len(paragraphs) > 0 and len(paragraphs[-1]) == 0:
paragraphs = paragraphs[:-1]
# Wrap each paragraph and separate with two newlines.
return '\n\n'.join([wrapper.fill('\n'.join(p)) for p in paragraphs])
# Handle element nodes.
if node.nodeType == xml.dom.minidom.Node.ELEMENT_NODE:
newlines_after_open, newlines_before_close, newlines_after_close = (
TAGS_THAT_HAVE_EXTRA_NEWLINE.get(node.tagName, (1, 1, 0)))
# Open the tag.
s = ' ' * indent + '<' + node.tagName
# Calculate how much space to allow for the '>' or '/>'.
closing_chars = 1
if not node.childNodes:
closing_chars = 2
# Pretty-print the attributes.
attributes = node.attributes.keys()
if attributes:
# Reorder the attributes.
if not node.tagName in ATTRIBUTE_ORDER:
unrecognized_attributes = attributes;
else:
unrecognized_attributes = (
[a for a in attributes if not a in ATTRIBUTE_ORDER[node.tagName]])
attributes = (
[a for a in ATTRIBUTE_ORDER[node.tagName] if a in attributes])
for a in unrecognized_attributes:
logging.error(
'Unrecognized attribute "%s" in tag "%s"' % (a, node.tagName))
if unrecognized_attributes:
raise Error()
for a in attributes:
value = XmlEscape(node.attributes[a].value)
# Replace sequences of whitespace with single spaces.
words = value.split()
a_str = ' %s="%s"' % (a, ' '.join(words))
# Start a new line if the attribute will make this line too long.
if LastLineLength(s) + len(a_str) + closing_chars > WRAP_COLUMN:
s += '\n' + ' ' * (indent + 3)
# Output everything up to the first quote.
s += ' %s="' % (a)
value_indent_level = LastLineLength(s)
# Output one word at a time, splitting to the next line where necessary.
column = value_indent_level
for i, word in enumerate(words):
# This is slightly too conservative since not every word will be
# followed by the closing characters...
if i > 0 and (column + len(word) + 1 + closing_chars > WRAP_COLUMN):
s = s.rstrip() # remove any trailing whitespace
s += '\n' + ' ' * value_indent_level
column = value_indent_level
s += word + ' '
column += len(word) + 1
s = s.rstrip() # remove any trailing whitespace
s += '"'
s = s.rstrip() # remove any trailing whitespace
# Pretty-print the child nodes.
if node.childNodes:
s += '>'
# Calculate the new indent level for child nodes.
new_indent = indent
if node.tagName not in TAGS_THAT_DONT_INDENT:
new_indent += 2
child_nodes = node.childNodes
# Recursively pretty-print the child nodes.
child_nodes = [PrettyPrintNode(n, indent=new_indent) for n in child_nodes]
child_nodes = [c for c in child_nodes if len(c.strip()) > 0]
# Determine whether we can fit the entire node on a single line.
close_tag = '</%s>' % node.tagName
space_left = WRAP_COLUMN - LastLineLength(s) - len(close_tag)
if (node.tagName in TAGS_THAT_ALLOW_SINGLE_LINE and
len(child_nodes) == 1 and len(child_nodes[0].strip()) <= space_left):
s += child_nodes[0].strip()
else:
s += '\n' * newlines_after_open + '\n'.join(child_nodes)
s += '\n' * newlines_before_close + ' ' * indent
s += close_tag
else:
s += '/>'
s += '\n' * newlines_after_close
return s
# Handle comment nodes.
if node.nodeType == xml.dom.minidom.Node.COMMENT_NODE:
return '<!--%s-->\n' % node.data
# Ignore other node types. This could be a processing instruction (<? ... ?>)
# or cdata section (<![CDATA[...]]!>), neither of which are legal in the
# histograms XML at present.
logging.error('Ignoring unrecognized node data: %s' % node.toxml())
raise Error()
def unsafeAppendChild(parent, child):
"""Append child to parent's list of children, ignoring the possibility that it
is already in another node's childNodes list. Requires that the previous
parent of child is discarded (to avoid non-tree DOM graphs).
This can provide a significant speedup as O(n^2) operations are removed (in
particular, each child insertion avoids the need to traverse the old parent's
entire list of children)."""
child.parentNode = None
parent.appendChild(child)
child.parentNode = parent
def TransformByAlphabetizing(node):
"""Transform the given XML by alphabetizing specific node types according to
the rules in ALPHABETIZATION_RULES.
Args:
node: The minidom node to transform.
Returns:
The minidom node, with children appropriately alphabetized. Note that the
transformation is done in-place, i.e. the original minidom tree is modified
directly.
"""
if node.nodeType != xml.dom.minidom.Node.ELEMENT_NODE:
for c in node.childNodes: TransformByAlphabetizing(c)
return node
# Element node with a tag name that we alphabetize the children of?
if node.tagName in ALPHABETIZATION_RULES:
# Put subnodes in a list of node,key pairs to allow for custom sorting.
subtag, key_function = ALPHABETIZATION_RULES[node.tagName]
subnodes = []
last_key = -1
for c in node.childNodes:
if (c.nodeType == xml.dom.minidom.Node.ELEMENT_NODE and
c.tagName == subtag):
last_key = key_function(c)
# Subnodes that we don't want to rearrange use the last node's key,
# so they stay in the same relative position.
subnodes.append( (c, last_key) )
# Sort the subnode list.
subnodes.sort(key=lambda pair: pair[1])
# Re-add the subnodes, transforming each recursively.
while node.firstChild:
node.removeChild(node.firstChild)
for (c, _) in subnodes:
unsafeAppendChild(node, TransformByAlphabetizing(c))
return node
# Recursively handle other element nodes and other node types.
for c in node.childNodes: TransformByAlphabetizing(c)
return node
def PrettyPrint(raw_xml):
"""Pretty-print the given XML.
Args:
xml: The contents of the histograms XML file, as a string.
Returns:
The pretty-printed version.
"""
tree = xml.dom.minidom.parseString(raw_xml)
tree = TransformByAlphabetizing(tree)
return PrettyPrintNode(tree)
def main():
logging.basicConfig(level=logging.INFO)
presubmit = ('--presubmit' in sys.argv)
logging.info('Loading histograms.xml...')
with open('histograms.xml', 'rb') as f:
xml = f.read()
# Check there are no CR ('\r') characters in the file.
if '\r' in xml:
logging.info('DOS-style line endings (CR characters) detected - these are '
'not allowed. Please run dos2unix histograms.xml')
sys.exit(1)
logging.info('Pretty-printing...')
try:
pretty = PrettyPrint(xml)
except Error:
logging.error('Aborting parsing due to fatal errors.')
sys.exit(1)
if xml == pretty:
logging.info('histograms.xml is correctly pretty-printed.')
sys.exit(0)
if presubmit:
logging.info('histograms.xml is not formatted correctly; run '
'pretty_print.py to fix.')
sys.exit(1)
if not diffutil.PromptUserToAcceptDiff(
xml, pretty,
'Is the prettified version acceptable?'):
logging.error('Aborting')
return
logging.info('Creating backup file histograms.before.pretty-print.xml')
shutil.move('histograms.xml', 'histograms.before.pretty-print.xml')
logging.info('Writing new histograms.xml file')
with open('histograms.xml', 'wb') as f:
f.write(pretty)
if __name__ == '__main__':
main()
| bsd-3-clause | 4,759,108,933,910,486,000 | 9,174,966,515,046,391,000 | 32.544693 | 80 | 0.654925 | false |
chrismeyersfsu/ansible | lib/ansible/plugins/filter/ipaddr.py | 19 | 19242 | # (c) 2014, Maciej Delmanowski <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from functools import partial
import types
try:
import netaddr
except ImportError:
# in this case, we'll make the filters return error messages (see bottom)
netaddr = None
else:
class mac_linux(netaddr.mac_unix):
pass
mac_linux.word_fmt = '%.2x'
from ansible import errors
# ---- IP address and network query helpers ----
def _empty_ipaddr_query(v, vtype):
# We don't have any query to process, so just check what type the user
# expects, and return the IP address in a correct format
if v:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
def _6to4_query(v, vtype, value):
if v.version == 4:
if v.size == 1:
ipconv = str(v.ip)
elif v.size > 1:
if v.ip != v.network:
ipconv = str(v.ip)
else:
ipconv = False
if ipaddr(ipconv, 'public'):
numbers = list(map(int, ipconv.split('.')))
try:
return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
except:
return False
elif v.version == 6:
if vtype == 'address':
if ipaddr(str(v), '2002::/16'):
return value
elif vtype == 'network':
if v.ip != v.network:
if ipaddr(str(v.ip), '2002::/16'):
return value
else:
return False
def _ip_query(v):
if v.size == 1:
return str(v.ip)
if v.size > 1:
# /31 networks in netaddr have no broadcast address
if v.ip != v.network or not v.broadcast:
return str(v.ip)
def _gateway_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _bool_ipaddr_query(v):
if v:
return True
def _broadcast_query(v):
if v.size > 1:
return str(v.broadcast)
def _cidr_query(v):
return str(v)
def _cidr_lookup_query(v, iplist, value):
try:
if v in iplist:
return value
except:
return False
def _host_query(v):
if v.size == 1:
return str(v)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _hostmask_query(v):
return str(v.hostmask)
def _int_query(v, vtype):
if vtype == 'address':
return int(v.ip)
elif vtype == 'network':
return str(int(v.ip)) + '/' + str(int(v.prefixlen))
def _ipv4_query(v, value):
if v.version == 6:
try:
return str(v.ipv4())
except:
return False
else:
return value
def _ipv6_query(v, value):
if v.version == 4:
return str(v.ipv6())
else:
return value
def _link_local_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v.version == 4:
if ipaddr(str(v_ip), '169.254.0.0/24'):
return value
elif v.version == 6:
if ipaddr(str(v_ip), 'fe80::/10'):
return value
def _loopback_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v_ip.is_loopback():
return value
def _multicast_query(v, value):
if v.is_multicast():
return value
def _net_query(v):
if v.size > 1:
if v.ip == v.network:
return str(v.network) + '/' + str(v.prefixlen)
def _netmask_query(v):
return str(v.netmask)
def _network_query(v):
if v.size > 1:
return str(v.network)
def _prefix_query(v):
return int(v.prefixlen)
def _private_query(v, value):
if v.is_private():
return value
def _public_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v_ip.is_unicast() and not v_ip.is_private() and \
not v_ip.is_loopback() and not v_ip.is_netmask() and \
not v_ip.is_hostmask():
return value
def _revdns_query(v):
v_ip = netaddr.IPAddress(str(v.ip))
return v_ip.reverse_dns
def _size_query(v):
return v.size
def _subnet_query(v):
return str(v.cidr)
def _type_query(v):
if v.size == 1:
return 'address'
if v.size > 1:
if v.ip != v.network:
return 'address'
else:
return 'network'
def _unicast_query(v, value):
if v.is_unicast():
return value
def _version_query(v):
return v.version
def _wrap_query(v, vtype, value):
if v.version == 6:
if vtype == 'address':
return '[' + str(v.ip) + ']'
elif vtype == 'network':
return '[' + str(v.ip) + ']/' + str(v.prefixlen)
else:
return value
# ---- HWaddr query helpers ----
def _bare_query(v):
v.dialect = netaddr.mac_bare
return str(v)
def _bool_hwaddr_query(v):
if v:
return True
def _int_hwaddr_query(v):
return int(v)
def _cisco_query(v):
v.dialect = netaddr.mac_cisco
return str(v)
def _empty_hwaddr_query(v, value):
if v:
return value
def _linux_query(v):
v.dialect = mac_linux
return str(v)
def _postgresql_query(v):
v.dialect = netaddr.mac_pgsql
return str(v)
def _unix_query(v):
v.dialect = netaddr.mac_unix
return str(v)
def _win_query(v):
v.dialect = netaddr.mac_eui48
return str(v)
# ---- IP address and network filters ----
def ipaddr(value, query = '', version = False, alias = 'ipaddr'):
''' Check if string is an IP address or network and filter it '''
query_func_extra_args = {
'': ('vtype',),
'6to4': ('vtype', 'value'),
'cidr_lookup': ('iplist', 'value'),
'int': ('vtype',),
'ipv4': ('value',),
'ipv6': ('value',),
'link-local': ('value',),
'loopback': ('value',),
'lo': ('value',),
'multicast': ('value',),
'private': ('value',),
'public': ('value',),
'unicast': ('value',),
'wrap': ('vtype', 'value'),
}
query_func_map = {
'': _empty_ipaddr_query,
'6to4': _6to4_query,
'address': _ip_query,
'address/prefix': _gateway_query,
'bool': _bool_ipaddr_query,
'broadcast': _broadcast_query,
'cidr': _cidr_query,
'cidr_lookup': _cidr_lookup_query,
'gateway': _gateway_query,
'gw': _gateway_query,
'host': _host_query,
'host/prefix': _gateway_query,
'hostmask': _hostmask_query,
'hostnet': _gateway_query,
'int': _int_query,
'ip': _ip_query,
'ipv4': _ipv4_query,
'ipv6': _ipv6_query,
'link-local': _link_local_query,
'lo': _loopback_query,
'loopback': _loopback_query,
'multicast': _multicast_query,
'net': _net_query,
'netmask': _netmask_query,
'network': _network_query,
'prefix': _prefix_query,
'private': _private_query,
'public': _public_query,
'revdns': _revdns_query,
'router': _gateway_query,
'size': _size_query,
'subnet': _subnet_query,
'type': _type_query,
'unicast': _unicast_query,
'v4': _ipv4_query,
'v6': _ipv6_query,
'version': _version_query,
'wrap': _wrap_query,
}
vtype = None
if not value:
return False
elif value == True:
return False
# Check if value is a list and parse each element
elif isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, str(query), version):
_ret.append(ipaddr(element, str(query), version))
if _ret:
return _ret
else:
return list()
# Check if value is a number and convert it to an IP address
elif str(value).isdigit():
# We don't know what IP version to assume, so let's check IPv4 first,
# then IPv6
try:
if ((not version) or (version and version == 4)):
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = int(value)
v.prefixlen = 32
elif version and version == 6:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# IPv4 didn't work the first time, so it definitely has to be IPv6
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# The value is too big for IPv6. Are you a nanobot?
except:
return False
# We got an IP address, let's mark it as such
value = str(v)
vtype = 'address'
# value has not been recognized, check if it's a valid IP string
else:
try:
v = netaddr.IPNetwork(value)
# value is a valid IP string, check if user specified
# CIDR prefix or just an IP address, this will indicate default
# output format
try:
address, prefix = value.split('/')
vtype = 'network'
except:
vtype = 'address'
# value hasn't been recognized, maybe it's a numerical CIDR?
except:
try:
address, prefix = value.split('/')
address.isdigit()
address = int(address)
prefix.isdigit()
prefix = int(prefix)
# It's not numerical CIDR, give up
except:
return False
# It is something, so let's try and build a CIDR from the parts
try:
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv4 CIDR
except:
try:
v = netaddr.IPNetwork('::/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv6 CIDR. Give up.
except:
return False
# We have a valid CIDR, so let's write it in correct format
value = str(v)
vtype = 'network'
# We have a query string but it's not in the known query types. Check if
# that string is a valid subnet, if so, we can check later if given IP
# address/network is inside that specific subnet
try:
### ?? 6to4 and link-local were True here before. Should they still?
if query and (query not in query_func_map or query == 'cidr_lookup') and ipaddr(query, 'network'):
iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
query = 'cidr_lookup'
except:
pass
    # This code checks if value matches the IP version the user wants, i.e. if
# it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()")
# If version does not match, return False
if version and v.version != version:
return False
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
try:
float(query)
if v.size == 1:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
elif v.size > 1:
try:
return str(v[query]) + '/' + str(v.prefixlen)
except:
return False
else:
return value
except:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
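# Illustrative template usage (a hedged sketch, not from the original source;
# exact results depend on the installed python-netaddr version):
#
#   {{ '192.168.0.1/24' | ipaddr }}             -> '192.168.0.1/24'
#   {{ '192.168.0.1/24' | ipaddr('address') }}  -> '192.168.0.1'
#   {{ '192.168.0.0/24' | ipaddr('network') }}  -> '192.168.0.0'
#   {{ ['192.168.0.1', 'foo'] | ipaddr }}       -> ['192.168.0.1']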
def ipwrap(value, query = ''):
try:
if isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, query, version = False, alias = 'ipwrap'):
_ret.append(ipaddr(element, 'wrap'))
else:
_ret.append(element)
return _ret
else:
_ret = ipaddr(value, query, version = False, alias = 'ipwrap')
if _ret:
return ipaddr(_ret, 'wrap')
else:
return value
except:
return value
def ipv4(value, query = ''):
return ipaddr(value, query, version = 4, alias = 'ipv4')
def ipv6(value, query = ''):
return ipaddr(value, query, version = 6, alias = 'ipv6')
# Split given subnet into smaller subnets or find out the biggest subnet of
# a given IP address with given CIDR prefix
# Usage:
#
# - address or address/prefix | ipsubnet
# returns CIDR subnet of a given input
#
# - address/prefix | ipsubnet(cidr)
# returns number of possible subnets for given CIDR prefix
#
# - address/prefix | ipsubnet(cidr, index)
# returns new subnet with given CIDR prefix
#
# - address | ipsubnet(cidr)
# returns biggest subnet with given CIDR prefix that address belongs to
#
# - address | ipsubnet(cidr, index)
# returns next indexed subnet which contains given address
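#
# Illustrative examples (hedged; outputs assume python-netaddr semantics):
#
#   {{ '192.168.0.0/24' | ipsubnet(26) }}    -> '4'               (count of /26 subnets)
#   {{ '192.168.0.0/24' | ipsubnet(26, 1) }} -> '192.168.0.64/26' (second /26 subnet)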
def ipsubnet(value, query = '', index = 'x'):
''' Manipulate IPv4/IPv6 subnets '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return str(value)
elif str(query).isdigit():
vsize = ipaddr(v, 'size')
query = int(query)
try:
float(index)
index = int(index)
if vsize > 1:
try:
return str(list(value.subnet(query))[index])
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[index])
except:
return False
except:
if vsize > 1:
try:
return str(len(list(value.subnet(query))))
except:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[0])
except:
return False
return False
# Returns the nth host within a network described by value.
# Usage:
#
# - address or address/prefix | nthhost(nth)
# returns the nth host within the given network
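#
# Illustrative example (hedged): {{ '10.0.0.0/8' | nthhost(305) }} -> 10.0.1.49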
def nthhost(value, query=''):
''' Get the nth host within a given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return False
try:
nth = int(query)
if value.size > nth:
return value[nth]
except ValueError:
return False
return False
# Returns the SLAAC address within a network for a given HW/MAC address.
# Usage:
#
# - prefix | slaac(mac)
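#
# Illustrative example (hedged; the prefix and MAC below are made up):
#   {{ 'fdcf:1894:23b5:d38c::/64' | slaac('c2:31:b3:83:bf:2b') }}
#   returns the EUI-64 derived address inside that /64 prefix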
def slaac(value, query = ''):
''' Get the SLAAC address within given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
if ipaddr(value, 'version') != 6:
return False
value = netaddr.IPNetwork(v)
except:
return False
if not query:
return False
try:
mac = hwaddr(query, alias = 'slaac')
eui = netaddr.EUI(mac)
except:
return False
return eui.ipv6(value.network)
# ---- HWaddr / MAC address filters ----
def hwaddr(value, query = '', alias = 'hwaddr'):
''' Check if string is a HW/MAC address and filter it '''
query_func_extra_args = {
'': ('value',),
}
query_func_map = {
'': _empty_hwaddr_query,
'bare': _bare_query,
'bool': _bool_hwaddr_query,
'int': _int_hwaddr_query,
'cisco': _cisco_query,
'eui48': _win_query,
'linux': _linux_query,
'pgsql': _postgresql_query,
'postgresql': _postgresql_query,
'psql': _postgresql_query,
'unix': _unix_query,
'win': _win_query,
}
try:
v = netaddr.EUI(value)
except:
if query and query != 'bool':
raise errors.AnsibleFilterError(alias + ': not a hardware address: %s' % value)
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
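# Illustrative examples (hedged; formatting comes from python-netaddr dialects):
#   {{ '1a:2b:3c:4d:5e:6f' | hwaddr('cisco') }} -> '1a2b.3c4d.5e6f'
#   {{ '1a:2b:3c:4d:5e:6f' | hwaddr('bare') }}  -> '1A2B3C4D5E6F'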
def macaddr(value, query = ''):
return hwaddr(value, query, alias = 'macaddr')
def _need_netaddr(f_name, *args, **kwargs):
raise errors.AnsibleFilterError('The {0} filter requires python-netaddr be'
' installed on the ansible controller'.format(f_name))
def ip4_hex(arg):
''' Convert an IPv4 address to Hexadecimal notation '''
numbers = list(map(int, arg.split('.')))
return '{:02x}{:02x}{:02x}{:02x}'.format(*numbers)
# ---- Ansible filters ----
class FilterModule(object):
''' IP address and network manipulation filters '''
filter_map = {
# IP addresses and networks
'ipaddr': ipaddr,
'ipwrap': ipwrap,
'ipv4': ipv4,
'ipv6': ipv6,
'ipsubnet': ipsubnet,
'nthhost': nthhost,
'slaac': slaac,
'ip4_hex': ip4_hex,
# MAC / HW addresses
'hwaddr': hwaddr,
'macaddr': macaddr
}
def filters(self):
if netaddr:
return self.filter_map
else:
# Need to install python-netaddr for these filters to work
return dict((f, partial(_need_netaddr, f)) for f in self.filter_map)
| gpl-3.0 | 3,888,740,059,127,878,700 | 2,857,256,568,020,880,000 | 26.254958 | 106 | 0.527128 | false |
mxamin/youtube-dl | youtube_dl/extractor/criterion.py | 1 | 1284 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class CriterionIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?criterion\.com/films/(?P<id>[0-9]+)-.+'
_TEST = {
'url': 'http://www.criterion.com/films/184-le-samourai',
'md5': 'bc51beba55685509883a9a7830919ec3',
'info_dict': {
'id': '184',
'ext': 'mp4',
'title': 'Le Samouraï',
'description': 'md5:a2b4b116326558149bef81f76dcbb93f',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
final_url = self._search_regex(
r'so.addVariable\("videoURL", "(.+?)"\)\;', webpage, 'video url')
title = self._og_search_title(webpage)
description = self._html_search_meta('description', webpage)
thumbnail = self._search_regex(
r'so.addVariable\("thumbnailURL", "(.+?)"\)\;',
webpage, 'thumbnail url')
return {
'id': video_id,
'url': final_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
| unlicense | -7,744,789,118,403,734,000 | -8,478,020,459,735,086,000 | 30.292683 | 77 | 0.535464 | false |
qedsoftware/commcare-hq | custom/opm/constants.py | 1 | 1732 | from corehq.apps.fixtures.models import FixtureDataItem
from corehq.util.quickcache import quickcache
DOMAIN = 'opm'
PREG_REG_XMLNS = "http://openrosa.org/formdesigner/D127C457-3E15-4F5E-88C3-98CD1722C625"
VHND_XMLNS = "http://openrosa.org/formdesigner/ff5de10d75afda15cddb3b00a0b1e21d33a50d59"
BIRTH_PREP_XMLNS = "http://openrosa.org/formdesigner/50378991-FEC3-408D-B4A5-A264F3B52184"
DELIVERY_XMLNS = "http://openrosa.org/formdesigner/492F8F0E-EE7D-4B28-B890-7CDA5F137194"
CHILD_FOLLOWUP_XMLNS = "http://openrosa.org/formdesigner/C90C2C1F-3B34-47F3-B3A3-061EAAC1A601"
CFU1_XMLNS = "http://openrosa.org/formdesigner/d642dd328514f2af92c093d414d63e5b2670b9c"
CFU2_XMLNS = "http://openrosa.org/formdesigner/9ef423bba8595a99976f0bc9532617841253a7fa"
CFU3_XMLNS = "http://openrosa.org/formdesigner/f15b9f8fb92e2552b1885897ece257609ed16649"
GROWTH_MONITORING_XMLNS= "http://openrosa.org/formdesigner/F1356F3F-C695-491F-9277-7F9B5522200C"
CLOSE_FORM = "http://openrosa.org/formdesigner/41A1B3E0-C1A4-41EA-AE90-71A328F0D8FD"
CHILDREN_FORMS = [CFU1_XMLNS, CFU2_XMLNS, CFU3_XMLNS, CHILD_FOLLOWUP_XMLNS]
OPM_XMLNSs = [PREG_REG_XMLNS, VHND_XMLNS, BIRTH_PREP_XMLNS, DELIVERY_XMLNS,
CHILD_FOLLOWUP_XMLNS, CFU1_XMLNS, CFU2_XMLNS, CFU3_XMLNS,
GROWTH_MONITORING_XMLNS, CLOSE_FORM]
# TODO Move these to a cached fixtures lookup
MONTH_AMT = 250
TWO_YEAR_AMT = 2000
THREE_YEAR_AMT = 3000
@quickcache([], timeout=30 * 60)
def get_fixture_data():
fixtures = FixtureDataItem.get_indexed_items(DOMAIN, 'condition_amounts', 'condition')
return dict((k, int(fixture['rs_amount'])) for k, fixture in fixtures.items())
class InvalidRow(Exception):
"""
Raise this in the row constructor to skip row
"""
| bsd-3-clause | 7,705,814,574,614,277,000 | -1,376,278,335,824,819,700 | 44.578947 | 96 | 0.769053 | false |
SANBI-SA/tools-iuc | data_managers/data_manager_humann2_database_downloader/data_manager/data_manager_humann2_download.py | 9 | 5204 | #!/usr/bin/env python
#
# Data manager for reference data for the 'humann2' Galaxy tools
import datetime
import json
import optparse
import os
import shutil
import subprocess
import sys
HUMANN2_REFERENCE_DATA = {
"full": "Full",
"DEMO": "Demo",
"uniref50_diamond": "Full UniRef50",
"uniref50_ec_filtered_diamond": "EC-filtered UniRef50",
"uniref50_GO_filtered_rapsearch2": "GO filtered UniRef50 for rapsearch2",
"uniref90_diamond": "Full UniRef90",
"uniref90_ec_filtered_diamond": "EC-filtered UniRef90",
"DEMO_diamond": "Demo"
}
# Utility functions for interacting with Galaxy JSON
def read_input_json(jsonfile):
"""Read the JSON supplied from the data manager tool
Returns a tuple (param_dict,extra_files_path)
'param_dict' is an arbitrary dictionary of parameters
input into the tool; 'extra_files_path' is the path
to a directory where output files must be put for the
receiving data manager to pick them up.
NB the directory pointed to by 'extra_files_path'
doesn't exist initially, it is the job of the script
to create it if necessary.
"""
params = json.loads(open(jsonfile).read())
return (params['param_dict'],
params['output_data'][0]['extra_files_path'])
# Utility functions for creating data table dictionaries
#
# Example usage:
# >>> d = create_data_tables_dict()
# >>> add_data_table(d,'my_data')
# >>> add_data_table_entry(d,'my_data',dict(dbkey='hg19',value='human'))
# >>> add_data_table_entry(d,'my_data',dict(dbkey='mm9',value='mouse'))
# >>> print str(json.dumps(d))
def create_data_tables_dict():
"""Return a dictionary for storing data table information
Returns a dictionary that can be used with 'add_data_table'
and 'add_data_table_entry' to store information about a
data table. It can be converted to JSON to be sent back to
the data manager.
"""
d = {}
d['data_tables'] = {}
return d
def add_data_table(d, table):
"""Add a data table to the data tables dictionary
Creates a placeholder for a data table called 'table'.
"""
d['data_tables'][table] = []
def add_data_table_entry(d, table, entry):
"""Add an entry to a data table
Appends an entry to the data table 'table'. 'entry'
should be a dictionary where the keys are the names of
columns in the data table.
Raises an exception if the named data table doesn't
exist.
"""
try:
d['data_tables'][table].append(entry)
except KeyError:
raise Exception("add_data_table_entry: no table '%s'" % table)
def download_humann2_db(data_tables, table_name, database, build, target_dir):
"""Download HUMAnN2 database
    Downloads the requested build of the given database with the
    'humann2_databases' command and adds a reference to the downloaded
    files to the named data table.
The 'data_tables' dictionary should have been created using
the 'create_data_tables_dict' and 'add_data_table' functions.
Arguments:
data_tables: a dictionary containing the data table info
table_name: name of the table
database: database to download (chocophlan or uniref)
build: build of the database to download
target_dir: directory to put copy or link to the data file
"""
value = "%s-%s-%s" % (database, build, datetime.date.today().isoformat())
db_target_dir = os.path.join(target_dir, database)
build_target_dir = os.path.join(db_target_dir, build)
cmd = "humann2_databases --download %s %s %s --update-config no" % (
database,
build,
db_target_dir)
subprocess.check_call(cmd, shell=True)
shutil.move(os.path.join(db_target_dir, database), build_target_dir)
add_data_table_entry(
data_tables,
table_name,
dict(
dbkey=build,
value=value,
name=HUMANN2_REFERENCE_DATA[build],
path=build_target_dir))
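# Illustrative sketch of a resulting data table entry (hedged; values made up):
#   {"dbkey": "full", "value": "chocophlan-full-2017-01-01",
#    "name": "Full", "path": "<target_dir>/chocophlan/full"}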
if __name__ == "__main__":
print("Starting...")
# Read command line
parser = optparse.OptionParser(description='Download HUMAnN2 database')
parser.add_option('--database', help="Database name")
parser.add_option('--build', help="Build of the database")
options, args = parser.parse_args()
print("args : %s" % args)
# Check for JSON file
if len(args) != 1:
sys.stderr.write("Need to supply JSON file name")
sys.exit(1)
jsonfile = args[0]
# Read the input JSON
params, target_dir = read_input_json(jsonfile)
# Make the target directory
print("Making %s" % target_dir)
os.mkdir(target_dir)
# Set up data tables dictionary
data_tables = create_data_tables_dict()
if options.database == "chocophlan":
table_name = 'humann2_nucleotide_database'
else:
table_name = 'humann2_protein_database'
add_data_table(data_tables, table_name)
# Fetch data from specified data sources
download_humann2_db(
data_tables,
table_name,
options.database,
options.build,
target_dir)
# Write output JSON
print("Outputting JSON")
print(str(json.dumps(data_tables)))
open(jsonfile, 'wb').write(json.dumps(data_tables))
print("Done.")
| mit | -2,866,012,387,882,455,000 | 758,114,004,133,121,000 | 28.568182 | 78 | 0.656226 | false |
tonioo/modoboa | modoboa/lib/u2u_decode.py | 1 | 2282 | # -*- coding: utf-8 -*-
"""
Unstructured rfc2047 header to unicode.
A stupid (and not accurate) answer to https://bugs.python.org/issue1079.
"""
from __future__ import unicode_literals
import re
from email.header import decode_header, make_header
from email.utils import parseaddr
from django.utils.encoding import smart_text
# check spaces between encoded_words (and strip them)
sre = re.compile(r"\?=[ \t]+=\?")
# re pat for MIME encoded_word (without trailing spaces)
mre = re.compile(r"=\?[^?]*?\?[bq]\?[^?\t]*?\?=", re.I)
# re to detect encoded ASCII characters
ascii_re = re.compile(r"=[\dA-F]{2,3}", re.I)
def clean_spaces(m):
"""Replace unencoded spaces in string.
:param str m: a match object
:return: the cleaned string
"""
return m.group(0).replace(" ", "=20")
def clean_non_printable_char(m):
"""Strip non printable characters."""
code = int(m.group(0)[1:], 16)
if code < 20:
return ""
return m.group(0)
def decode_mime(m):
"""Substitute matching encoded_word with unicode equiv."""
h = decode_header(clean_spaces(m))
try:
u = smart_text(make_header(h))
except (LookupError, UnicodeDecodeError):
return m.group(0)
return u
def clean_header(header):
"""Clean header function."""
header = "".join(header.splitlines())
header = sre.sub("?==?", header)
return ascii_re.sub(clean_non_printable_char, header)
def u2u_decode(s):
"""utility function for (final) decoding of mime header
note: resulting string is in one line (no \n within)
note2: spaces between enc_words are stripped (see RFC2047)
"""
return mre.sub(decode_mime, clean_header(s)).strip(" \r\t\n")
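# Illustrative example (a hedged sketch, not part of the original module):
#   u2u_decode("=?utf-8?q?Caf=C3=A9?=") -> u"Café"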
def decode_address(value):
"""Special function for address decoding.
    We need dedicated processing because RFC 1342 explicitly says an
    address MUST NOT contain an encoded-word:
These are the ONLY locations where an encoded-word may appear. In
particular, an encoded-word MUST NOT appear in any portion of an
"address". In addition, an encoded-word MUST NOT be used in a
Received header field.
"""
phrase, address = parseaddr(clean_header(value))
if phrase:
phrase = mre.sub(decode_mime, phrase)
return phrase, address
| isc | 7,916,478,409,102,813,000 | 7,128,462,162,486,009,000 | 26.493976 | 72 | 0.660824 | false |
klothe/tablib | tablib/packages/yaml/dumper.py | 542 | 2719 |
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
from emitter import *
from serializer import *
from representer import *
from resolver import *
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
SafeRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class Dumper(Emitter, Serializer, Representer, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
| mit | -1,351,192,226,354,084,000 | 8,406,362,687,038,625,000 | 42.854839 | 73 | 0.623391 | false |
overdrive3000/skytools | python/skytools/scripting.py | 3 | 36632 |
"""Useful functions and classes for database scripts.
"""
import errno
import logging
import logging.config
import logging.handlers
import optparse
import os
import select
import signal
import sys
import time
import skytools
import skytools.skylog
try:
import skytools.installer_config
default_skylog = skytools.installer_config.skylog
except ImportError:
default_skylog = 0
__pychecker__ = 'no-badexcept'
__all__ = ['BaseScript', 'UsageError', 'daemonize', 'DBScript']
class UsageError(Exception):
"""User induced error."""
#
# daemon mode
#
def daemonize():
"""Turn the process into daemon.
Goes background and disables all i/o.
"""
# launch new process, kill parent
pid = os.fork()
if pid != 0:
os._exit(0)
# start new session
os.setsid()
# stop i/o
fd = os.open("/dev/null", os.O_RDWR)
os.dup2(fd, 0)
os.dup2(fd, 1)
os.dup2(fd, 2)
if fd > 2:
os.close(fd)
#
# Pidfile locking+cleanup & daemonization combined
#
def run_single_process(runnable, daemon, pidfile):
"""Run runnable class, possibly daemonized, locked on pidfile."""
# check if another process is running
if pidfile and os.path.isfile(pidfile):
if skytools.signal_pidfile(pidfile, 0):
print("Pidfile exists, another process running?")
sys.exit(1)
else:
print("Ignoring stale pidfile")
# daemonize if needed
if daemon:
daemonize()
# clean only own pidfile
own_pidfile = False
try:
if pidfile:
data = str(os.getpid())
skytools.write_atomic(pidfile, data)
own_pidfile = True
runnable.run()
finally:
if own_pidfile:
try:
os.remove(pidfile)
except: pass
#
# logging setup
#
_log_config_done = 0
_log_init_done = {}
def _load_log_config(fn, defs):
"""Fixed fileConfig."""
# Work around fileConfig default behaviour to disable
# not only old handlers on load (which slightly makes sense)
# but also old logger objects (which does not make sense).
if sys.hexversion >= 0x2060000:
logging.config.fileConfig(fn, defs, False)
else:
logging.config.fileConfig(fn, defs)
root = logging.getLogger()
for lg in root.manager.loggerDict.values():
lg.disabled = 0
def _init_log(job_name, service_name, cf, log_level, is_daemon):
"""Logging setup happens here."""
global _log_init_done, _log_config_done
got_skylog = 0
use_skylog = cf.getint("use_skylog", default_skylog)
# if non-daemon, avoid skylog if script is running on console.
# set use_skylog=2 to disable.
if not is_daemon and use_skylog == 1:
if os.isatty(sys.stdout.fileno()):
use_skylog = 0
# load logging config if needed
if use_skylog and not _log_config_done:
# python logging.config braindamage:
        # cannot specify external classes without such a hack
logging.skylog = skytools.skylog
skytools.skylog.set_service_name(service_name, job_name)
# load general config
flist = cf.getlist('skylog_locations',
['skylog.ini', '~/.skylog.ini', '/etc/skylog.ini'])
for fn in flist:
fn = os.path.expanduser(fn)
if os.path.isfile(fn):
defs = {'job_name': job_name, 'service_name': service_name}
_load_log_config(fn, defs)
got_skylog = 1
break
_log_config_done = 1
if not got_skylog:
sys.stderr.write("skylog.ini not found!\n")
sys.exit(1)
# avoid duplicate logging init for job_name
log = logging.getLogger(job_name)
if job_name in _log_init_done:
return log
_log_init_done[job_name] = 1
# tune level on root logger
root = logging.getLogger()
root.setLevel(log_level)
# compatibility: specify ini file in script config
def_fmt = '%(asctime)s %(process)s %(levelname)s %(message)s'
def_datefmt = '' # None
logfile = cf.getfile("logfile", "")
if logfile:
fstr = cf.get('logfmt_file', def_fmt)
fstr_date = cf.get('logdatefmt_file', def_datefmt)
if log_level < logging.INFO:
fstr = cf.get('logfmt_file_verbose', fstr)
fstr_date = cf.get('logdatefmt_file_verbose', fstr_date)
fmt = logging.Formatter(fstr, fstr_date)
size = cf.getint('log_size', 10*1024*1024)
num = cf.getint('log_count', 3)
hdlr = logging.handlers.RotatingFileHandler(
logfile, 'a', size, num)
hdlr.setFormatter(fmt)
root.addHandler(hdlr)
# if skylog.ini is disabled or not available, log at least to stderr
if not got_skylog:
fstr = cf.get('logfmt_console', def_fmt)
fstr_date = cf.get('logdatefmt_console', def_datefmt)
if log_level < logging.INFO:
fstr = cf.get('logfmt_console_verbose', fstr)
fstr_date = cf.get('logdatefmt_console_verbose', fstr_date)
hdlr = logging.StreamHandler()
fmt = logging.Formatter(fstr, fstr_date)
hdlr.setFormatter(fmt)
root.addHandler(hdlr)
return log
class BaseScript(object):
"""Base class for service scripts.
Handles logging, daemonizing, config, errors.
Config template::
## Parameters for skytools.BaseScript ##
# how many seconds to sleep between work loops
        # if missing or 0, then instead of sleeping, the script will exit
loop_delay = 1.0
# where to log
logfile = ~/log/%(job_name)s.log
# where to write pidfile
pidfile = ~/pid/%(job_name)s.pid
# per-process name to use in logging
#job_name = %(config_name)s
# whether centralized logging should be used
# search-path [ ./skylog.ini, ~/.skylog.ini, /etc/skylog.ini ]
# 0 - disabled
# 1 - enabled, unless non-daemon on console (os.isatty())
# 2 - always enabled
#use_skylog = 0
# where to find skylog.ini
#skylog_locations = skylog.ini, ~/.skylog.ini, /etc/skylog.ini
        # how many seconds to sleep after catching an exception
#exception_sleep = 20
"""
service_name = None
job_name = None
cf = None
cf_defaults = {}
pidfile = None
# >0 - sleep time if work() requests sleep
# 0 - exit if work requests sleep
# <0 - run work() once [same as looping=0]
loop_delay = 1.0
# 0 - run work() once
# 1 - run work() repeatedly
looping = 1
# result from last work() call:
# 1 - there is probably more work, don't sleep
# 0 - no work, sleep before calling again
# -1 - exception was thrown
work_state = 1
# setup logger here, this allows override by subclass
log = logging.getLogger('skytools.BaseScript')
def __init__(self, service_name, args):
"""Script setup.
User class should override work() and optionally __init__(), startup(),
reload(), reset(), shutdown() and init_optparse().
NB: In case of daemon, __init__() and startup()/work()/shutdown() will be
run in different processes. So nothing fancy should be done in __init__().
@param service_name: unique name for script.
It will be also default job_name, if not specified in config.
@param args: cmdline args (sys.argv[1:]), but can be overridden
"""
self.service_name = service_name
self.go_daemon = 0
self.need_reload = 0
self.exception_count = 0
self.stat_dict = {}
self.log_level = logging.INFO
# parse command line
parser = self.init_optparse()
self.options, self.args = parser.parse_args(args)
# check args
if self.options.version:
self.print_version()
sys.exit(0)
if self.options.daemon:
self.go_daemon = 1
if self.options.quiet:
self.log_level = logging.WARNING
if self.options.verbose > 1:
self.log_level = skytools.skylog.TRACE
elif self.options.verbose:
self.log_level = logging.DEBUG
self.cf_override = {}
if self.options.set:
for a in self.options.set:
k, v = a.split('=', 1)
self.cf_override[k.strip()] = v.strip()
if self.options.ini:
self.print_ini()
sys.exit(0)
# read config file
self.reload()
# init logging
_init_log(self.job_name, self.service_name, self.cf, self.log_level, self.go_daemon)
# send signal, if needed
if self.options.cmd == "kill":
self.send_signal(signal.SIGTERM)
elif self.options.cmd == "stop":
self.send_signal(signal.SIGINT)
elif self.options.cmd == "reload":
self.send_signal(signal.SIGHUP)
def print_version(self):
service = self.service_name
if getattr(self, '__version__', None):
service += ' version %s' % self.__version__
print '%s, Skytools version %s' % (service, skytools.__version__)
def print_ini(self):
"""Prints out ini file from doc string of the script of default for dbscript
Used by --ini option on command line.
"""
# current service name
print("[%s]\n" % self.service_name)
# walk class hierarchy
bases = [self.__class__]
while len(bases) > 0:
parents = []
for c in bases:
for p in c.__bases__:
if p not in parents:
parents.append(p)
doc = c.__doc__
if doc:
self._print_ini_frag(doc)
bases = parents
def _print_ini_frag(self, doc):
# use last '::' block as config template
pos = doc and doc.rfind('::\n') or -1
if pos < 0:
return
doc = doc[pos+2 : ].rstrip()
doc = skytools.dedent(doc)
# merge overrided options into output
for ln in doc.splitlines():
vals = ln.split('=', 1)
if len(vals) != 2:
print(ln)
continue
k = vals[0].strip()
v = vals[1].strip()
if k and k[0] == '#':
print(ln)
k = k[1:]
if k in self.cf_override:
print('%s = %s' % (k, self.cf_override[k]))
elif k in self.cf_override:
if v:
print('#' + ln)
print('%s = %s' % (k, self.cf_override[k]))
else:
print(ln)
print('')
def load_config(self):
"""Loads and returns skytools.Config instance.
By default it uses first command-line argument as config
file name. Can be overridden.
"""
if len(self.args) < 1:
print("need config file, use --help for help.")
sys.exit(1)
conf_file = self.args[0]
return skytools.Config(self.service_name, conf_file,
user_defs = self.cf_defaults,
override = self.cf_override)
def init_optparse(self, parser = None):
"""Initialize a OptionParser() instance that will be used to
parse command line arguments.
Note that it can be overridden both directions - either DBScript
will initialize an instance and pass it to user code or user can
initialize and then pass to DBScript.init_optparse().
@param parser: optional OptionParser() instance,
where DBScript should attach its own arguments.
@return: initialized OptionParser() instance.
"""
if parser:
p = parser
else:
p = optparse.OptionParser()
p.set_usage("%prog [options] INI")
# generic options
p.add_option("-q", "--quiet", action="store_true",
help = "log only errors and warnings")
p.add_option("-v", "--verbose", action="count",
help = "log verbosely")
p.add_option("-d", "--daemon", action="store_true",
help = "go background")
p.add_option("-V", "--version", action="store_true",
help = "print version info and exit")
p.add_option("", "--ini", action="store_true",
help = "display sample ini file")
p.add_option("", "--set", action="append",
help = "override config setting (--set 'PARAM=VAL')")
# control options
g = optparse.OptionGroup(p, 'control running process')
g.add_option("-r", "--reload",
action="store_const", const="reload", dest="cmd",
help = "reload config (send SIGHUP)")
g.add_option("-s", "--stop",
action="store_const", const="stop", dest="cmd",
help = "stop program safely (send SIGINT)")
g.add_option("-k", "--kill",
action="store_const", const="kill", dest="cmd",
help = "kill program immediately (send SIGTERM)")
p.add_option_group(g)
return p
def send_signal(self, sig):
if not self.pidfile:
self.log.warning("No pidfile in config, nothing to do")
elif os.path.isfile(self.pidfile):
alive = skytools.signal_pidfile(self.pidfile, sig)
if not alive:
self.log.warning("pidfile exists, but process not running")
else:
self.log.warning("No pidfile, process not running")
sys.exit(0)
def set_single_loop(self, do_single_loop):
"""Changes whether the script will loop or not."""
if do_single_loop:
self.looping = 0
else:
self.looping = 1
def _boot_daemon(self):
run_single_process(self, self.go_daemon, self.pidfile)
def start(self):
"""This will launch main processing thread."""
if self.go_daemon:
if not self.pidfile:
self.log.error("Daemon needs pidfile")
sys.exit(1)
self.run_func_safely(self._boot_daemon)
def stop(self):
"""Safely stops processing loop."""
self.looping = 0
def reload(self):
"Reload config."
# avoid double loading on startup
if not self.cf:
self.cf = self.load_config()
else:
self.cf.reload()
            self.log.info("Config reloaded")
self.job_name = self.cf.get("job_name")
self.pidfile = self.cf.getfile("pidfile", '')
self.loop_delay = self.cf.getfloat("loop_delay", self.loop_delay)
self.exception_sleep = self.cf.getfloat("exception_sleep", 20)
self.exception_quiet = self.cf.getlist("exception_quiet", [])
self.exception_grace = self.cf.getfloat("exception_grace", 5*60)
self.exception_reset = self.cf.getfloat("exception_reset", 15*60)
def hook_sighup(self, sig, frame):
"Internal SIGHUP handler. Minimal code here."
self.need_reload = 1
last_sigint = 0
def hook_sigint(self, sig, frame):
"Internal SIGINT handler. Minimal code here."
self.stop()
t = time.time()
if t - self.last_sigint < 1:
self.log.warning("Double ^C, fast exit")
sys.exit(1)
self.last_sigint = t
def stat_get(self, key):
"""Reads a stat value."""
try:
value = self.stat_dict[key]
except KeyError:
value = None
return value
def stat_put(self, key, value):
"""Sets a stat value."""
self.stat_dict[key] = value
def stat_increase(self, key, increase = 1):
"""Increases a stat value."""
try:
self.stat_dict[key] += increase
except KeyError:
self.stat_dict[key] = increase
def send_stats(self):
"Send statistics to log."
res = []
for k, v in self.stat_dict.items():
res.append("%s: %s" % (k, v))
if len(res) == 0:
return
logmsg = "{%s}" % ", ".join(res)
self.log.info(logmsg)
self.stat_dict = {}
def reset(self):
"Something bad happened, reset all state."
pass
def run(self):
"Thread main loop."
# run startup, safely
self.run_func_safely(self.startup)
while 1:
# reload config, if needed
if self.need_reload:
self.reload()
self.need_reload = 0
# do some work
work = self.run_once()
if not self.looping or self.loop_delay < 0:
break
# remember work state
self.work_state = work
# should sleep?
if not work:
if self.loop_delay > 0:
self.sleep(self.loop_delay)
if not self.looping:
break
else:
break
# run shutdown, safely?
self.shutdown()
def run_once(self):
state = self.run_func_safely(self.work, True)
# send stats that was added
self.send_stats()
return state
last_func_fail = None
def run_func_safely(self, func, prefer_looping = False):
"Run users work function, safely."
try:
r = func()
if self.last_func_fail and time.time() > self.last_func_fail + self.exception_reset:
self.last_func_fail = None
# set exception count to 0 after success
self.exception_count = 0
return r
except UsageError, d:
self.log.error(str(d))
sys.exit(1)
except MemoryError, d:
try: # complex logging may not succeed
self.log.exception("Job %s out of memory, exiting" % self.job_name)
except MemoryError:
self.log.fatal("Out of memory")
sys.exit(1)
except SystemExit, d:
self.send_stats()
if prefer_looping and self.looping and self.loop_delay > 0:
self.log.info("got SystemExit(%s), exiting" % str(d))
self.reset()
raise d
except KeyboardInterrupt, d:
self.send_stats()
if prefer_looping and self.looping and self.loop_delay > 0:
self.log.info("got KeyboardInterrupt, exiting")
self.reset()
sys.exit(1)
except Exception, d:
try: # this may fail too
self.send_stats()
except:
pass
if self.last_func_fail is None:
self.last_func_fail = time.time()
emsg = str(d).rstrip()
self.reset()
self.exception_hook(d, emsg)
# reset and sleep
self.reset()
if prefer_looping and self.looping and self.loop_delay > 0:
# increase exception count & sleep
self.exception_count += 1
self.sleep_on_exception()
return -1
sys.exit(1)
def sleep(self, secs):
"""Make script sleep for some amount of time."""
try:
time.sleep(secs)
except IOError, ex:
if ex.errno != errno.EINTR:
raise
def sleep_on_exception(self):
"""Make script sleep for some amount of time when an exception occurs.
        To implement more advanced exception sleeping, like exponential backoff, you
can override this method. Also note that you can use self.exception_count
to track the number of consecutive exceptions.
"""
self.sleep(self.exception_sleep)
def _is_quiet_exception(self, ex):
return ((self.exception_quiet == ["ALL"] or ex.__class__.__name__ in self.exception_quiet)
and self.last_func_fail and time.time() < self.last_func_fail + self.exception_grace)
def exception_hook(self, det, emsg):
"""Called on after exception processing.
Can do additional logging.
@param det: exception details
@param emsg: exception msg
"""
lm = "Job %s crashed: %s" % (self.job_name, emsg)
if self._is_quiet_exception(det):
self.log.warning(lm)
else:
self.log.exception(lm)
def work(self):
"""Here should user's processing happen.
Return value is taken as boolean - if true, the next loop
starts immediately. If false, DBScript sleeps for a loop_delay.
"""
raise Exception("Nothing implemented?")
def startup(self):
"""Will be called just before entering main loop.
        In case of daemon, it will be called in the same process as work(),
unlike __init__().
"""
self.started = time.time()
# set signals
if hasattr(signal, 'SIGHUP'):
signal.signal(signal.SIGHUP, self.hook_sighup)
if hasattr(signal, 'SIGINT'):
signal.signal(signal.SIGINT, self.hook_sigint)
def shutdown(self):
"""Will be called just after exiting main loop.
        In case of daemon, it will be called in the same process as work(),
unlike __init__().
"""
pass
# define some aliases (short-cuts / backward compatibility cruft)
stat_add = stat_put # Old, deprecated function.
stat_inc = stat_increase
##
## DBScript
##
#: how old connections need to be closed
DEF_CONN_AGE = 20*60 # 20 min
class DBScript(BaseScript):
"""Base class for database scripts.
Handles database connection state.
Config template::
## Parameters for skytools.DBScript ##
# default lifetime for database connections (in seconds)
#connection_lifetime = 1200
"""
def __init__(self, service_name, args):
"""Script setup.
User class should override work() and optionally __init__(), startup(),
reload(), reset() and init_optparse().
NB: in case of daemon, the __init__() and startup()/work() will be
run in different processes. So nothing fancy should be done in __init__().
@param service_name: unique name for script.
It will be also default job_name, if not specified in config.
@param args: cmdline args (sys.argv[1:]), but can be overridden
"""
self.db_cache = {}
self._db_defaults = {}
self._listen_map = {} # dbname: channel_list
BaseScript.__init__(self, service_name, args)
def connection_hook(self, dbname, conn):
pass
def set_database_defaults(self, dbname, **kwargs):
self._db_defaults[dbname] = kwargs
def add_connect_string_profile(self, connstr, profile):
"""Add extra profile info to connect string.
"""
if profile:
extra = self.cf.get("%s_extra_connstr" % profile, '')
if extra:
connstr += ' ' + extra
return connstr
def get_database(self, dbname, autocommit = 0, isolation_level = -1,
cache = None, connstr = None, profile = None):
"""Load cached database connection.
User must not store it permanently somewhere,
as all connections will be invalidated on reset.
"""
max_age = self.cf.getint('connection_lifetime', DEF_CONN_AGE)
if not cache:
cache = dbname
params = {}
defs = self._db_defaults.get(cache, {})
params.update(defs)
if isolation_level >= 0:
params['isolation_level'] = isolation_level
elif autocommit:
params['isolation_level'] = 0
elif params.get('autocommit', 0):
params['isolation_level'] = 0
elif not 'isolation_level' in params:
params['isolation_level'] = skytools.I_READ_COMMITTED
if not 'max_age' in params:
params['max_age'] = max_age
if cache in self.db_cache:
dbc = self.db_cache[cache]
if connstr is None:
connstr = self.cf.get(dbname, '')
if connstr:
connstr = self.add_connect_string_profile(connstr, profile)
dbc.check_connstr(connstr)
else:
if not connstr:
connstr = self.cf.get(dbname)
connstr = self.add_connect_string_profile(connstr, profile)
# connstr might contain password, it is not a good idea to log it
filtered_connstr = connstr
pos = connstr.lower().find('password')
if pos >= 0:
filtered_connstr = connstr[:pos] + ' [...]'
self.log.debug("Connect '%s' to '%s'" % (cache, filtered_connstr))
dbc = DBCachedConn(cache, connstr, params['max_age'], setup_func = self.connection_hook)
self.db_cache[cache] = dbc
clist = []
if cache in self._listen_map:
clist = self._listen_map[cache]
return dbc.get_connection(params['isolation_level'], clist)
def close_database(self, dbname):
"""Explicitly close a cached connection.
Next call to get_database() will reconnect.
"""
if dbname in self.db_cache:
dbc = self.db_cache[dbname]
dbc.reset()
del self.db_cache[dbname]
def reset(self):
"Something bad happened, reset all connections."
for dbc in self.db_cache.values():
dbc.reset()
self.db_cache = {}
BaseScript.reset(self)
def run_once(self):
state = BaseScript.run_once(self)
# reconnect if needed
for dbc in self.db_cache.values():
dbc.refresh()
return state
def exception_hook(self, d, emsg):
"""Log database and query details from exception."""
curs = getattr(d, 'cursor', None)
conn = getattr(curs, 'connection', None)
cname = getattr(conn, 'my_name', None)
if cname:
# Properly named connection
cname = d.cursor.connection.my_name
sql = getattr(curs, 'query', None) or '?'
if len(sql) > 200: # avoid logging londiste huge batched queries
sql = sql[:60] + " ..."
lm = "Job %s got error on connection '%s': %s. Query: %s" % (
self.job_name, cname, emsg, sql)
if self._is_quiet_exception(d):
self.log.warning(lm)
else:
self.log.exception(lm)
else:
BaseScript.exception_hook(self, d, emsg)
def sleep(self, secs):
"""Make script sleep for some amount of time."""
fdlist = []
for dbname in self._listen_map.keys():
if dbname not in self.db_cache:
continue
fd = self.db_cache[dbname].fileno()
if fd is None:
continue
fdlist.append(fd)
if not fdlist:
return BaseScript.sleep(self, secs)
try:
if hasattr(select, 'poll'):
p = select.poll()
for fd in fdlist:
p.register(fd, select.POLLIN)
p.poll(int(secs * 1000))
else:
select.select(fdlist, [], [], secs)
except select.error, d:
self.log.info('wait canceled')
def _exec_cmd(self, curs, sql, args, quiet = False, prefix = None):
"""Internal tool: Run SQL on cursor."""
if self.options.verbose:
self.log.debug("exec_cmd: %s" % skytools.quote_statement(sql, args))
_pfx = ""
if prefix:
_pfx = "[%s] " % prefix
curs.execute(sql, args)
ok = True
rows = curs.fetchall()
for row in rows:
try:
code = row['ret_code']
msg = row['ret_note']
except KeyError:
self.log.error("Query does not conform to exec_cmd API:")
self.log.error("SQL: %s" % skytools.quote_statement(sql, args))
self.log.error("Row: %s" % repr(row.copy()))
sys.exit(1)
level = code / 100
if level == 1:
self.log.debug("%s%d %s" % (_pfx, code, msg))
elif level == 2:
if quiet:
self.log.debug("%s%d %s" % (_pfx, code, msg))
else:
self.log.info("%s%s" % (_pfx, msg,))
elif level == 3:
self.log.warning("%s%s" % (_pfx, msg,))
else:
self.log.error("%s%s" % (_pfx, msg,))
self.log.debug("Query was: %s" % skytools.quote_statement(sql, args))
ok = False
return (ok, rows)
def _exec_cmd_many(self, curs, sql, baseargs, extra_list, quiet = False, prefix=None):
"""Internal tool: Run SQL on cursor multiple times."""
ok = True
rows = []
for a in extra_list:
(tmp_ok, tmp_rows) = self._exec_cmd(curs, sql, baseargs + [a], quiet, prefix)
if not tmp_ok:
ok = False
rows += tmp_rows
return (ok, rows)
def exec_cmd(self, db_or_curs, q, args, commit = True, quiet = False, prefix = None):
"""Run SQL on db with code/value error handling."""
if hasattr(db_or_curs, 'cursor'):
db = db_or_curs
curs = db.cursor()
else:
db = None
curs = db_or_curs
(ok, rows) = self._exec_cmd(curs, q, args, quiet, prefix)
if ok:
if commit and db:
db.commit()
return rows
else:
if db:
db.rollback()
if self.options.verbose:
raise Exception("db error")
# error is already logged
sys.exit(1)
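    # Hedged usage note: exec_cmd() expects the called database function to
    # return rows with 'ret_code' and 'ret_note' columns, e.g. (made-up names):
    #
    #   rows = self.exec_cmd(db, "select * from mymod.do_thing(%s)", [arg])
    #
    # 1xx/2xx codes are logged as debug/info, 3xx as warning, anything else
    # is logged as an error and the script exits.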
def exec_cmd_many(self, db_or_curs, sql, baseargs, extra_list,
commit = True, quiet = False, prefix = None):
"""Run SQL on db multiple times."""
if hasattr(db_or_curs, 'cursor'):
db = db_or_curs
curs = db.cursor()
else:
db = None
curs = db_or_curs
(ok, rows) = self._exec_cmd_many(curs, sql, baseargs, extra_list, quiet, prefix)
if ok:
if commit and db:
db.commit()
return rows
else:
if db:
db.rollback()
if self.options.verbose:
raise Exception("db error")
# error is already logged
sys.exit(1)
def execute_with_retry (self, dbname, stmt, args, exceptions = None):
""" Execute SQL and retry if it fails.
Return number of retries and current valid cursor, or raise an exception.
"""
sql_retry = self.cf.getbool("sql_retry", False)
sql_retry_max_count = self.cf.getint("sql_retry_max_count", 10)
sql_retry_max_time = self.cf.getint("sql_retry_max_time", 300)
sql_retry_formula_a = self.cf.getint("sql_retry_formula_a", 1)
sql_retry_formula_b = self.cf.getint("sql_retry_formula_b", 5)
sql_retry_formula_cap = self.cf.getint("sql_retry_formula_cap", 60)
elist = exceptions or tuple()
stime = time.time()
tried = 0
dbc = None
while True:
try:
if dbc is None:
if dbname not in self.db_cache:
self.get_database(dbname, autocommit=1)
dbc = self.db_cache[dbname]
if dbc.isolation_level != skytools.I_AUTOCOMMIT:
raise skytools.UsageError ("execute_with_retry: autocommit required")
else:
dbc.reset()
curs = dbc.get_connection(dbc.isolation_level).cursor()
curs.execute (stmt, args)
break
except elist, e:
if not sql_retry or tried >= sql_retry_max_count or time.time() - stime >= sql_retry_max_time:
raise
self.log.info("Job %s got error on connection %s: %s" % (self.job_name, dbname, e))
except:
raise
# y = a + bx , apply cap
y = sql_retry_formula_a + sql_retry_formula_b * tried
if sql_retry_formula_cap is not None and y > sql_retry_formula_cap:
y = sql_retry_formula_cap
tried += 1
self.log.info("Retry #%i in %i seconds ...", tried, y)
self.sleep(y)
return tried, curs
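    # Illustrative sketch (hedged): retry a statement on transient driver errors,
    # assuming psycopg2 as the driver and an autocommit connection named 'db':
    #
    #   tried, curs = self.execute_with_retry('db', "select 1", [],
    #                                         exceptions=(psycopg2.OperationalError,))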
def listen(self, dbname, channel):
"""Make connection listen for specific event channel.
Listening will be activated on next .get_database() call.
Basically this means that DBScript.sleep() will poll for events
on that db connection, so when event appears, script will be
woken up.
"""
if dbname not in self._listen_map:
self._listen_map[dbname] = []
clist = self._listen_map[dbname]
if channel not in clist:
clist.append(channel)
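    # Illustrative sketch (hedged; 'src_db' and 'my_event' are made-up names):
    #
    #   self.listen('src_db', 'my_event')    # queued until next get_database()
    #   db = self.get_database('src_db')     # LISTEN my_event is issued here
    #   self.sleep(60)                       # wakes early on NOTIFY my_event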
def unlisten(self, dbname, channel='*'):
"""Stop connection for listening on specific event channel.
Listening will stop on next .get_database() call.
"""
if dbname not in self._listen_map:
return
if channel == '*':
del self._listen_map[dbname]
return
clist = self._listen_map[dbname]
try:
clist.remove(channel)
except ValueError:
pass
class DBCachedConn(object):
"""Cache a db connection."""
def __init__(self, name, loc, max_age = DEF_CONN_AGE, verbose = False, setup_func=None, channels=[]):
self.name = name
self.loc = loc
self.conn = None
self.conn_time = 0
self.max_age = max_age
self.isolation_level = -1
self.verbose = verbose
self.setup_func = setup_func
self.listen_channel_list = []
def fileno(self):
if not self.conn:
return None
return self.conn.cursor().fileno()
def get_connection(self, isolation_level = -1, listen_channel_list = []):
# default isolation_level is READ COMMITTED
if isolation_level < 0:
isolation_level = skytools.I_READ_COMMITTED
# new conn?
if not self.conn:
self.isolation_level = isolation_level
self.conn = skytools.connect_database(self.loc)
self.conn.my_name = self.name
self.conn.set_isolation_level(isolation_level)
self.conn_time = time.time()
if self.setup_func:
self.setup_func(self.name, self.conn)
else:
if self.isolation_level != isolation_level:
raise Exception("Conflict in isolation_level")
self._sync_listen(listen_channel_list)
# done
return self.conn
def _sync_listen(self, new_clist):
if not new_clist and not self.listen_channel_list:
return
curs = self.conn.cursor()
for ch in self.listen_channel_list:
if ch not in new_clist:
curs.execute("UNLISTEN %s" % skytools.quote_ident(ch))
for ch in new_clist:
if ch not in self.listen_channel_list:
curs.execute("LISTEN %s" % skytools.quote_ident(ch))
if self.isolation_level != skytools.I_AUTOCOMMIT:
self.conn.commit()
self.listen_channel_list = new_clist[:]
def refresh(self):
if not self.conn:
return
#for row in self.conn.notifies():
# if row[0].lower() == "reload":
# self.reset()
# return
if not self.max_age:
return
if time.time() - self.conn_time >= self.max_age:
self.reset()
def reset(self):
if not self.conn:
return
# drop reference
conn = self.conn
self.conn = None
self.listen_channel_list = []
# close
try:
conn.close()
except: pass
def check_connstr(self, connstr):
"""Drop connection if connect string has changed.
"""
if self.loc != connstr:
self.reset()
| isc | 2,695,545,039,418,328,000 | -5,105,625,632,156,520,000 | 31.590747 | 110 | 0.542395 | false |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/django-1.5/django/contrib/localflavor/it/it_province.py | 110 | 2779 | # -*- coding: utf-8 -*
from __future__ import unicode_literals
PROVINCE_CHOICES = (
('AG', 'Agrigento'),
('AL', 'Alessandria'),
('AN', 'Ancona'),
('AO', 'Aosta'),
('AR', 'Arezzo'),
('AP', 'Ascoli Piceno'),
('AT', 'Asti'),
('AV', 'Avellino'),
('BA', 'Bari'),
('BT', 'Barletta-Andria-Trani'), # active starting from 2009
('BL', 'Belluno'),
('BN', 'Benevento'),
('BG', 'Bergamo'),
('BI', 'Biella'),
('BO', 'Bologna'),
('BZ', 'Bolzano/Bozen'),
('BS', 'Brescia'),
('BR', 'Brindisi'),
('CA', 'Cagliari'),
('CL', 'Caltanissetta'),
('CB', 'Campobasso'),
('CI', 'Carbonia-Iglesias'),
('CE', 'Caserta'),
('CT', 'Catania'),
('CZ', 'Catanzaro'),
('CH', 'Chieti'),
('CO', 'Como'),
('CS', 'Cosenza'),
('CR', 'Cremona'),
('KR', 'Crotone'),
('CN', 'Cuneo'),
('EN', 'Enna'),
('FM', 'Fermo'), # active starting from 2009
('FE', 'Ferrara'),
('FI', 'Firenze'),
('FG', 'Foggia'),
('FC', 'Forlì-Cesena'),
('FR', 'Frosinone'),
('GE', 'Genova'),
('GO', 'Gorizia'),
('GR', 'Grosseto'),
('IM', 'Imperia'),
('IS', 'Isernia'),
('SP', 'La Spezia'),
('AQ', 'L’Aquila'),
('LT', 'Latina'),
('LE', 'Lecce'),
('LC', 'Lecco'),
('LI', 'Livorno'),
('LO', 'Lodi'),
('LU', 'Lucca'),
('MC', 'Macerata'),
('MN', 'Mantova'),
('MS', 'Massa-Carrara'),
('MT', 'Matera'),
('VS', 'Medio Campidano'),
('ME', 'Messina'),
('MI', 'Milano'),
('MO', 'Modena'),
('MB', 'Monza e Brianza'), # active starting from 2009
('NA', 'Napoli'),
('NO', 'Novara'),
('NU', 'Nuoro'),
('OG', 'Ogliastra'),
('OT', 'Olbia-Tempio'),
('OR', 'Oristano'),
('PD', 'Padova'),
('PA', 'Palermo'),
('PR', 'Parma'),
('PV', 'Pavia'),
('PG', 'Perugia'),
('PU', 'Pesaro e Urbino'),
('PE', 'Pescara'),
('PC', 'Piacenza'),
('PI', 'Pisa'),
('PT', 'Pistoia'),
('PN', 'Pordenone'),
('PZ', 'Potenza'),
('PO', 'Prato'),
('RG', 'Ragusa'),
('RA', 'Ravenna'),
('RC', 'Reggio Calabria'),
('RE', 'Reggio Emilia'),
('RI', 'Rieti'),
('RN', 'Rimini'),
('RM', 'Roma'),
('RO', 'Rovigo'),
('SA', 'Salerno'),
('SS', 'Sassari'),
('SV', 'Savona'),
('SI', 'Siena'),
('SR', 'Siracusa'),
('SO', 'Sondrio'),
('TA', 'Taranto'),
('TE', 'Teramo'),
('TR', 'Terni'),
('TO', 'Torino'),
('TP', 'Trapani'),
('TN', 'Trento'),
('TV', 'Treviso'),
('TS', 'Trieste'),
('UD', 'Udine'),
('VA', 'Varese'),
('VE', 'Venezia'),
('VB', 'Verbano Cusio Ossola'),
('VC', 'Vercelli'),
('VR', 'Verona'),
('VV', 'Vibo Valentia'),
('VI', 'Vicenza'),
('VT', 'Viterbo'),
)
| lgpl-3.0 | 8,560,273,270,078,093,000 | -7,913,151,546,555,402,000 | 23.13913 | 64 | 0.420389 | false |
AnhellO/DAS_Sistemas | Ago-Dic-2017/Enrique Castillo/Ordinario/test/Lib/site-packages/django/contrib/gis/management/commands/ogrinspect.py | 20 | 5848 | import argparse
from django.contrib.gis import gdal
from django.core.management.base import BaseCommand, CommandError
from django.utils.inspect import get_func_args
class LayerOptionAction(argparse.Action):
"""
Custom argparse action for the `ogrinspect` `layer_key` keyword option
which may be an integer or a string.
"""
def __call__(self, parser, namespace, value, option_string=None):
try:
setattr(namespace, self.dest, int(value))
except ValueError:
setattr(namespace, self.dest, value)
class ListOptionAction(argparse.Action):
"""
Custom argparse action for `ogrinspect` keywords that require
a string list. If the string is 'True'/'true' then the option
value will be a boolean instead.
"""
def __call__(self, parser, namespace, value, option_string=None):
if value.lower() == 'true':
setattr(namespace, self.dest, True)
else:
setattr(namespace, self.dest, value.split(','))
class Command(BaseCommand):
help = (
'Inspects the given OGR-compatible data source (e.g., a shapefile) and outputs\n'
'a GeoDjango model with the given model name. For example:\n'
' ./manage.py ogrinspect zipcode.shp Zipcode'
)
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('data_source', help='Path to the data source.')
parser.add_argument('model_name', help='Name of the model to create.')
parser.add_argument(
'--blank', dest='blank',
action=ListOptionAction, default=False,
help='Use a comma separated list of OGR field names to add '
'the `blank=True` option to the field definition. Set to `true` '
'to apply to all applicable fields.',
)
parser.add_argument(
'--decimal', dest='decimal',
action=ListOptionAction, default=False,
help='Use a comma separated list of OGR float fields to '
'generate `DecimalField` instead of the default '
'`FloatField`. Set to `true` to apply to all OGR float fields.',
)
parser.add_argument(
'--geom-name', dest='geom_name', default='geom',
help='Specifies the model name for the Geometry Field (defaults to `geom`)'
)
parser.add_argument(
'--layer', dest='layer_key',
action=LayerOptionAction, default=0,
help='The key for specifying which layer in the OGR data '
'source to use. Defaults to 0 (the first layer). May be '
'an integer or a string identifier for the layer.',
)
parser.add_argument(
'--multi-geom', action='store_true', dest='multi_geom',
help='Treat the geometry in the data source as a geometry collection.',
)
parser.add_argument(
'--name-field', dest='name_field',
help='Specifies a field name to return for the __str__() method.',
)
parser.add_argument(
'--no-imports', action='store_false', dest='imports',
help='Do not include `from django.contrib.gis.db import models` statement.',
)
parser.add_argument(
'--null', dest='null', action=ListOptionAction, default=False,
help='Use a comma separated list of OGR field names to add '
'the `null=True` option to the field definition. Set to `true` '
'to apply to all applicable fields.',
)
parser.add_argument(
'--srid', dest='srid',
help='The SRID to use for the Geometry Field. If it can be '
'determined, the SRID of the data source is used.',
)
parser.add_argument(
'--mapping', action='store_true', dest='mapping',
help='Generate mapping dictionary for use with `LayerMapping`.',
)
def handle(self, *args, **options):
data_source, model_name = options.pop('data_source'), options.pop('model_name')
# Getting the OGR DataSource from the string parameter.
try:
ds = gdal.DataSource(data_source)
except gdal.GDALException as msg:
raise CommandError(msg)
# Returning the output of ogrinspect with the given arguments
# and options.
from django.contrib.gis.utils.ogrinspect import _ogrinspect, mapping
# Filter options to params accepted by `_ogrinspect`
ogr_options = {k: v for k, v in options.items()
if k in get_func_args(_ogrinspect) and v is not None}
output = [s for s in _ogrinspect(ds, model_name, **ogr_options)]
if options['mapping']:
# Constructing the keyword arguments for `mapping`, and
# calling it on the data source.
kwargs = {
'geom_name': options['geom_name'],
'layer_key': options['layer_key'],
'multi_geom': options['multi_geom'],
}
mapping_dict = mapping(ds, **kwargs)
# This extra legwork is so that the dictionary definition comes
# out in the same order as the fields in the model definition.
rev_mapping = {v: k for k, v in mapping_dict.items()}
output.extend(['', '', '# Auto-generated `LayerMapping` dictionary for %s model' % model_name,
'%s_mapping = {' % model_name.lower()])
output.extend(" '%s': '%s'," % (
rev_mapping[ogr_fld], ogr_fld) for ogr_fld in ds[options['layer_key']].fields
)
output.extend([" '%s': '%s'," % (options['geom_name'], mapping_dict[options['geom_name']]), '}'])
return '\n'.join(output) + '\n'
| mit | -2,393,042,198,363,644,000 | -8,895,938,618,617,639,000 | 42.969925 | 112 | 0.582763 | false |
almeidapaulopt/erpnext | erpnext/accounts/report/share_balance/share_balance.py | 19 | 1475 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
def execute(filters=None):
if not filters: filters = {}
if not filters.get("date"):
frappe.throw(_("Please select date"))
columns = get_columns(filters)
date = filters.get("date")
data = []
if not filters.get("shareholder"):
pass
else:
share_type, no_of_shares, rate, amount = 1, 2, 3, 4
all_shares = get_all_shares(filters.get("shareholder"))
for share_entry in all_shares:
row = False
for datum in data:
if datum[share_type] == share_entry.share_type:
datum[no_of_shares] += share_entry.no_of_shares
datum[amount] += share_entry.amount
if datum[no_of_shares] == 0:
datum[rate] = 0
else:
datum[rate] = datum[amount] / datum[no_of_shares]
row = True
break
# new entry
if not row:
row = [filters.get("shareholder"),
share_entry.share_type, share_entry.no_of_shares, share_entry.rate, share_entry.amount]
data.append(row)
return columns, data
def get_columns(filters):
columns = [
_("Shareholder") + ":Link/Shareholder:150",
_("Share Type") + "::90",
_("No of Shares") + "::90",
_("Average Rate") + ":Currency:90",
_("Amount") + ":Currency:90"
]
return columns
def get_all_shares(shareholder):
return frappe.get_doc('Shareholder', shareholder).share_balance
| gpl-3.0 | 7,503,120,952,566,602,000 | -1,238,827,089,450,735,900 | 24.431034 | 92 | 0.656949 | false |
zhuwenping/python-for-android | python-modules/twisted/twisted/internet/test/test_qtreactor.py | 59 | 1108 | # Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.
import sys
from twisted.trial import unittest
from twisted.python.runtime import platform
from twisted.python.util import sibpath
from twisted.internet.utils import getProcessOutputAndValue
skipWindowsNopywin32 = None
if platform.isWindows():
try:
import win32process
except ImportError:
skipWindowsNopywin32 = ("On windows, spawnProcess is not available "
"in the absence of win32process.")
class QtreactorTestCase(unittest.TestCase):
"""
Tests for L{twisted.internet.qtreactor}.
"""
def test_importQtreactor(self):
"""
Attempting to import L{twisted.internet.qtreactor} should raise an
C{ImportError} indicating that C{qtreactor} is no longer a part of
Twisted.
"""
sys.modules["qtreactor"] = None
from twisted.plugins.twisted_qtstub import errorMessage
try:
import twisted.internet.qtreactor
except ImportError, e:
self.assertEquals(str(e), errorMessage)
| apache-2.0 | 5,467,506,030,007,144,000 | -5,956,360,342,413,224,000 | 30.657143 | 76 | 0.680505 | false |
justinpotts/mozillians | vendor-local/lib/python/kombu/transport/SQS.py | 10 | 11233 | """
kombu.transport.SQS
===================
Amazon SQS transport.
:copyright: (c) 2010 - 2012 by Ask Solem
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import socket
import string
from Queue import Empty
from anyjson import loads, dumps
from boto import exception
from boto import sdb as _sdb
from boto import sqs as _sqs
from boto.sdb.domain import Domain
from boto.sdb.connection import SDBConnection
from boto.sqs.connection import SQSConnection
from boto.sqs.message import Message
from kombu.exceptions import StdChannelError
from kombu.utils import cached_property, uuid
from kombu.utils.encoding import safe_str
from . import virtual
# dots are replaced by dash, all other punctuation
# replaced by underscore.
CHARS_REPLACE_TABLE = dict((ord(c), 0x5f)
for c in string.punctuation if c not in '-_.')
CHARS_REPLACE_TABLE[0x2e] = 0x2d # '.' -> '-'
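# Example of the resulting translation (added for illustration, not part of
# the original module): passing "celery.direct@host" through this table, as
# entity_name() does below, yields "celery-direct_host".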
class Table(Domain):
"""Amazon SimpleDB domain describing the message routing table."""
# caches queues already bound, so we don't have to declare them again.
_already_bound = set()
def routes_for(self, exchange):
"""Iterator giving all routes for an exchange."""
return self.select("""WHERE exchange = '%s'""" % exchange)
def get_queue(self, queue):
"""Get binding for queue."""
qid = self._get_queue_id(queue)
if qid:
return self.get_item(qid)
def create_binding(self, queue):
"""Get binding item for queue.
Creates the item if it doesn't exist.
"""
item = self.get_queue(queue)
if item:
return item, item["id"]
id = uuid()
return self.new_item(id), id
def queue_bind(self, exchange, routing_key, pattern, queue):
if queue not in self._already_bound:
binding, id = self.create_binding(queue)
binding.update(exchange=exchange,
routing_key=routing_key or "",
pattern=pattern or "",
queue=queue or "",
id=id)
binding.save()
self._already_bound.add(queue)
def queue_delete(self, queue):
"""delete queue by name."""
self._already_bound.discard(queue)
item = self._get_queue_item(queue)
if item:
self.delete_item(item)
def exchange_delete(self, exchange):
"""Delete all routes for `exchange`."""
for item in self.routes_for(exchange):
self.delete_item(item["id"])
def get_item(self, item_name):
"""Uses `consistent_read` by default."""
# Domain is an old-style class, can't use super().
for consistent_read in (False, True):
item = Domain.get_item(self, item_name, consistent_read)
if item:
return item
def select(self, query='', next_token=None, consistent_read=True,
max_items=None):
"""Uses `consistent_read` by default."""
query = """SELECT * FROM `%s` %s""" % (self.name, query)
return Domain.select(self, query, next_token,
consistent_read, max_items)
def _try_first(self, query='', **kwargs):
for c in (False, True):
for item in self.select(query, consistent_read=c, **kwargs):
return item
def get_exchanges(self):
return list(set(i["exchange"] for i in self.select()))
def _get_queue_item(self, queue):
return self._try_first("""WHERE queue = '%s' limit 1""" % queue)
def _get_queue_id(self, queue):
item = self._get_queue_item(queue)
if item:
return item["id"]
class Channel(virtual.Channel):
Table = Table
default_region = "us-east-1"
domain_format = "kombu%(vhost)s"
_sdb = None
_sqs = None
_queue_cache = {}
_noack_queues = set()
def __init__(self, *args, **kwargs):
super(Channel, self).__init__(*args, **kwargs)
# SQS blows up when you try to create a new queue if one already
# exists with a different visibility_timeout, so this prepopulates
# the queue_cache to protect us from recreating
# queues that are known to already exist.
queues = self.sqs.get_all_queues()
for queue in queues:
self._queue_cache[queue.name] = queue
def basic_consume(self, queue, no_ack, *args, **kwargs):
if no_ack:
self._noack_queues.add(queue)
return super(Channel, self).basic_consume(queue, no_ack,
*args, **kwargs)
def basic_cancel(self, consumer_tag):
if consumer_tag in self._consumers:
queue = self._tag_to_queue[consumer_tag]
self._noack_queues.discard(queue)
return super(Channel, self).basic_cancel(consumer_tag)
def entity_name(self, name, table=CHARS_REPLACE_TABLE):
"""Format AMQP queue name into a legal SQS queue name."""
return unicode(safe_str(name)).translate(table)
def _new_queue(self, queue, **kwargs):
"""Ensures a queue exists in SQS."""
queue = self.queue_name_prefix + queue
try:
return self._queue_cache[queue]
except KeyError:
q = self._queue_cache[queue] = self.sqs.create_queue(
self.entity_name(queue),
self.visibility_timeout)
return q
def _queue_bind(self, *args):
"""Bind ``queue`` to ``exchange`` with routing key.
Route will be stored in SDB if so enabled.
"""
if self.supports_fanout:
self.table.queue_bind(*args)
def get_table(self, exchange):
"""Get routing table.
Retrieved from SDB if :attr:`supports_fanout`.
"""
if self.supports_fanout:
return [(r["routing_key"], r["pattern"], r["queue"])
for r in self.table.routes_for(exchange)]
return super(Channel, self).get_table(exchange)
def get_exchanges(self):
if self.supports_fanout:
return self.table.get_exchanges()
return super(Channel, self).get_exchanges()
def _delete(self, queue, *args):
"""delete queue by name."""
self._queue_cache.pop(queue, None)
if self.supports_fanout:
self.table.queue_delete(queue)
super(Channel, self)._delete(queue)
def exchange_delete(self, exchange, **kwargs):
"""Delete exchange by name."""
if self.supports_fanout:
self.table.exchange_delete(exchange)
super(Channel, self).exchange_delete(exchange, **kwargs)
def _has_queue(self, queue, **kwargs):
"""Returns True if ``queue`` has been previously declared."""
if self.supports_fanout:
return bool(self.table.get_queue(queue))
return super(Channel, self)._has_queue(queue)
def _put(self, queue, message, **kwargs):
"""Put message onto queue."""
q = self._new_queue(queue)
m = Message()
m.set_body(dumps(message))
q.write(m)
def _put_fanout(self, exchange, message, **kwargs):
"""Deliver fanout message to all queues in ``exchange``."""
for route in self.table.routes_for(exchange):
self._put(route["queue"], message, **kwargs)
def _get(self, queue):
"""Try to retrieve a single message off ``queue``."""
q = self._new_queue(queue)
rs = q.get_messages(1)
if rs:
m = rs[0]
payload = loads(rs[0].get_body())
if queue in self._noack_queues:
q.delete_message(m)
else:
payload["properties"]["delivery_info"].update({
"sqs_message": m, "sqs_queue": q, })
return payload
raise Empty()
def basic_ack(self, delivery_tag):
delivery_info = self.qos.get(delivery_tag).delivery_info
try:
queue = delivery_info["sqs_queue"]
except KeyError:
pass
else:
queue.delete_message(delivery_info["sqs_message"])
super(Channel, self).basic_ack(delivery_tag)
def _size(self, queue):
"""Returns the number of messages in a queue."""
return self._new_queue(queue).count()
def _purge(self, queue):
"""Deletes all current messages in a queue."""
q = self._new_queue(queue)
# SQS is slow at registering messages, so run for a few
# iterations to ensure messages are deleted.
size = 0
for i in xrange(10):
size += q.count()
if not size:
break
q.clear()
return size
def close(self):
super(Channel, self).close()
for conn in (self._sqs, self._sdb):
if conn:
try:
conn.close()
except AttributeError, exc: # FIXME ???
if "can't set attribute" not in str(exc):
raise
def _get_regioninfo(self, regions):
if self.region:
for _r in regions:
if _r.name == self.region:
return _r
def _aws_connect_to(self, fun, regions):
conninfo = self.conninfo
region = self._get_regioninfo(regions)
return fun(region=region,
aws_access_key_id=conninfo.userid,
aws_secret_access_key=conninfo.password,
port=conninfo.port)
def _next_delivery_tag(self):
return uuid() # See #73
@property
def sqs(self):
if self._sqs is None:
self._sqs = self._aws_connect_to(SQSConnection, _sqs.regions())
return self._sqs
@property
def sdb(self):
if self._sdb is None:
self._sdb = self._aws_connect_to(SDBConnection, _sdb.regions())
return self._sdb
@property
def table(self):
name = self.entity_name(self.domain_format % {
"vhost": self.conninfo.virtual_host})
d = self.sdb.get_object("CreateDomain", {"DomainName": name},
self.Table)
d.name = name
return d
@property
def conninfo(self):
return self.connection.client
@property
def transport_options(self):
return self.connection.client.transport_options
@cached_property
def visibility_timeout(self):
return self.transport_options.get("visibility_timeout")
@cached_property
def queue_name_prefix(self):
return self.transport_options.get("queue_name_prefix", '')
@cached_property
def supports_fanout(self):
return self.transport_options.get("sdb_persistence", False)
@cached_property
def region(self):
return self.transport_options.get("region") or self.default_region
class Transport(virtual.Transport):
Channel = Channel
polling_interval = 1
default_port = None
connection_errors = (exception.SQSError, socket.error)
channel_errors = (exception.SQSDecodeError, StdChannelError)
| bsd-3-clause | -6,196,694,190,026,007,000 | 5,854,606,732,382,690,000 | 31.278736 | 75 | 0.573667 | false |
huangkuan/hack | lib/gcloud/monitoring/test_query.py | 7 | 23503 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
PROJECT = 'my-project'
METRIC_TYPE = 'compute.googleapis.com/instance/uptime'
METRIC_LABELS = {'instance_name': 'instance-1'}
METRIC_LABELS2 = {'instance_name': 'instance-2'}
RESOURCE_TYPE = 'gce_instance'
RESOURCE_LABELS = {
'project_id': 'my-project',
'zone': 'us-east1-a',
'instance_id': '1234567890123456789',
}
RESOURCE_LABELS2 = {
'project_id': 'my-project',
'zone': 'us-east1-b',
'instance_id': '9876543210987654321',
}
METRIC_KIND = 'DELTA'
VALUE_TYPE = 'DOUBLE'
TS0 = '2016-04-06T22:05:00.042Z'
TS1 = '2016-04-06T22:05:01.042Z'
TS2 = '2016-04-06T22:05:02.042Z'
class TestAligner(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.monitoring.query import Aligner
return Aligner
def test_one(self):
self.assertTrue(hasattr(self._getTargetClass(), 'ALIGN_RATE'))
def test_names(self):
for name in self._getTargetClass().__dict__:
if not name.startswith('_'):
self.assertEqual(getattr(self._getTargetClass(), name), name)
class TestReducer(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.monitoring.query import Reducer
return Reducer
def test_one(self):
self.assertTrue(hasattr(self._getTargetClass(),
'REDUCE_PERCENTILE_99'))
def test_names(self):
for name in self._getTargetClass().__dict__:
if not name.startswith('_'):
self.assertEqual(getattr(self._getTargetClass(), name), name)
class TestQuery(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.monitoring.query import Query
return Query
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor_minimal(self):
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client)
self.assertEqual(query._client, client)
self.assertEqual(query._filter.metric_type,
self._getTargetClass().DEFAULT_METRIC_TYPE)
self.assertIsNone(query._start_time)
self.assertIsNone(query._end_time)
self.assertIsNone(query._per_series_aligner)
self.assertIsNone(query._alignment_period_seconds)
self.assertIsNone(query._cross_series_reducer)
self.assertEqual(query._group_by_fields, ())
def test_constructor_maximal(self):
import datetime
T1 = datetime.datetime(2016, 4, 7, 2, 30, 30)
DAYS, HOURS, MINUTES = 1, 2, 3
T0 = T1 - datetime.timedelta(days=DAYS, hours=HOURS, minutes=MINUTES)
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE,
end_time=T1,
days=DAYS, hours=HOURS, minutes=MINUTES)
self.assertEqual(query._client, client)
self.assertEqual(query._filter.metric_type, METRIC_TYPE)
self.assertEqual(query._start_time, T0)
self.assertEqual(query._end_time, T1)
self.assertIsNone(query._per_series_aligner)
self.assertIsNone(query._alignment_period_seconds)
self.assertIsNone(query._cross_series_reducer)
self.assertEqual(query._group_by_fields, ())
def test_constructor_default_end_time(self):
import datetime
from gcloud._testing import _Monkey
from gcloud.monitoring import query as MUT
MINUTES = 5
NOW, T0, T1 = [
datetime.datetime(2016, 4, 7, 2, 30, 30),
datetime.datetime(2016, 4, 7, 2, 25, 0),
datetime.datetime(2016, 4, 7, 2, 30, 0),
]
client = _Client(project=PROJECT, connection=_Connection())
with _Monkey(MUT, _UTCNOW=lambda: NOW):
query = self._makeOne(client, METRIC_TYPE, minutes=MINUTES)
self.assertEqual(query._start_time, T0)
self.assertEqual(query._end_time, T1)
def test_constructor_nonzero_duration_illegal(self):
import datetime
T1 = datetime.datetime(2016, 4, 7, 2, 30, 30)
client = _Client(project=PROJECT, connection=_Connection())
with self.assertRaises(ValueError):
self._makeOne(client, METRIC_TYPE, end_time=T1)
def test_execution_without_interval_illegal(self):
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
with self.assertRaises(ValueError):
list(query)
def test_metric_type(self):
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
self.assertEqual(query.metric_type, METRIC_TYPE)
def test_filter(self):
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
expected = 'metric.type = "{type}"'.format(type=METRIC_TYPE)
self.assertEqual(query.filter, expected)
def test_filter_by_group(self):
GROUP = '1234567'
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
query = query.select_group(GROUP)
expected = (
'metric.type = "{type}"'
' AND group.id = "{group}"'
).format(type=METRIC_TYPE, group=GROUP)
self.assertEqual(query.filter, expected)
def test_filter_by_projects(self):
PROJECT1, PROJECT2 = 'project-1', 'project-2'
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
query = query.select_projects(PROJECT1, PROJECT2)
expected = (
'metric.type = "{type}"'
' AND project = "{project1}" OR project = "{project2}"'
).format(type=METRIC_TYPE, project1=PROJECT1, project2=PROJECT2)
self.assertEqual(query.filter, expected)
def test_filter_by_resources(self):
ZONE_PREFIX = 'europe-'
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
query = query.select_resources(zone_prefix=ZONE_PREFIX)
expected = (
'metric.type = "{type}"'
' AND resource.label.zone = starts_with("{prefix}")'
).format(type=METRIC_TYPE, prefix=ZONE_PREFIX)
self.assertEqual(query.filter, expected)
def test_filter_by_metrics(self):
INSTANCE = 'my-instance'
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
query = query.select_metrics(instance_name=INSTANCE)
expected = (
'metric.type = "{type}"'
' AND metric.label.instance_name = "{instance}"'
).format(type=METRIC_TYPE, instance=INSTANCE)
self.assertEqual(query.filter, expected)
def test_request_parameters_minimal(self):
import datetime
T1 = datetime.datetime(2016, 4, 7, 2, 30, 0)
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
query = query.select_interval(end_time=T1)
actual = list(query._build_query_params())
expected = [
('filter', 'metric.type = "{type}"'.format(type=METRIC_TYPE)),
('interval.endTime', T1.isoformat() + 'Z'),
]
self.assertEqual(actual, expected)
def test_request_parameters_maximal(self):
import datetime
T0 = datetime.datetime(2016, 4, 7, 2, 0, 0)
T1 = datetime.datetime(2016, 4, 7, 2, 30, 0)
ALIGNER = 'ALIGN_DELTA'
MINUTES, SECONDS, PERIOD = 1, 30, '90s'
REDUCER = 'REDUCE_MEAN'
FIELD1, FIELD2 = 'resource.zone', 'metric.instance_name'
PAGE_SIZE = 100
PAGE_TOKEN = 'second-page-please'
client = _Client(project=PROJECT, connection=_Connection())
query = self._makeOne(client, METRIC_TYPE)
query = query.select_interval(start_time=T0, end_time=T1)
query = query.align(ALIGNER, minutes=MINUTES, seconds=SECONDS)
query = query.reduce(REDUCER, FIELD1, FIELD2)
actual = list(query._build_query_params(headers_only=True,
page_size=PAGE_SIZE,
page_token=PAGE_TOKEN))
expected = [
('filter', 'metric.type = "{type}"'.format(type=METRIC_TYPE)),
('interval.endTime', T1.isoformat() + 'Z'),
('interval.startTime', T0.isoformat() + 'Z'),
('aggregation.perSeriesAligner', ALIGNER),
('aggregation.alignmentPeriod', PERIOD),
('aggregation.crossSeriesReducer', REDUCER),
('aggregation.groupByFields', FIELD1),
('aggregation.groupByFields', FIELD2),
('view', 'HEADERS'),
('pageSize', PAGE_SIZE),
('pageToken', PAGE_TOKEN),
]
self.assertEqual(actual, expected)
def test_iteration(self):
import datetime
T0 = datetime.datetime(2016, 4, 6, 22, 5, 0)
T1 = datetime.datetime(2016, 4, 6, 22, 10, 0)
INTERVAL1 = {'startTime': TS0, 'endTime': TS1}
INTERVAL2 = {'startTime': TS1, 'endTime': TS2}
VALUE1 = 60 # seconds
VALUE2 = 60.001 # seconds
SERIES1 = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
'points': [
{'interval': INTERVAL2, 'value': {'doubleValue': VALUE1}},
{'interval': INTERVAL1, 'value': {'doubleValue': VALUE1}},
],
}
SERIES2 = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS2},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS2},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
'points': [
{'interval': INTERVAL2, 'value': {'doubleValue': VALUE2}},
{'interval': INTERVAL1, 'value': {'doubleValue': VALUE2}},
],
}
RESPONSE = {'timeSeries': [SERIES1, SERIES2]}
connection = _Connection(RESPONSE)
client = _Client(project=PROJECT, connection=connection)
query = self._makeOne(client, METRIC_TYPE)
query = query.select_interval(start_time=T0, end_time=T1)
response = list(query)
self.assertEqual(len(response), 2)
series1, series2 = response
self.assertEqual(series1.metric.labels, METRIC_LABELS)
self.assertEqual(series2.metric.labels, METRIC_LABELS2)
self.assertEqual(series1.resource.labels, RESOURCE_LABELS)
self.assertEqual(series2.resource.labels, RESOURCE_LABELS2)
self.assertEqual([p.value for p in series1.points], [VALUE1, VALUE1])
self.assertEqual([p.value for p in series2.points], [VALUE2, VALUE2])
self.assertEqual([p.end_time for p in series1.points], [TS1, TS2])
self.assertEqual([p.end_time for p in series2.points], [TS1, TS2])
expected_request = {
'method': 'GET',
'path': '/projects/{project}/timeSeries/'.format(project=PROJECT),
'query_params': [
('filter', 'metric.type = "{type}"'.format(type=METRIC_TYPE)),
('interval.endTime', T1.isoformat() + 'Z'),
('interval.startTime', T0.isoformat() + 'Z'),
],
}
request, = connection._requested
self.assertEqual(request, expected_request)
def test_iteration_paged(self):
import copy
import datetime
from gcloud.exceptions import NotFound
T0 = datetime.datetime(2016, 4, 6, 22, 5, 0)
T1 = datetime.datetime(2016, 4, 6, 22, 10, 0)
INTERVAL1 = {'startTime': TS0, 'endTime': TS1}
INTERVAL2 = {'startTime': TS1, 'endTime': TS2}
VALUE1 = 60 # seconds
VALUE2 = 60.001 # seconds
SERIES1 = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
'points': [
{'interval': INTERVAL2, 'value': {'doubleValue': VALUE1}},
{'interval': INTERVAL1, 'value': {'doubleValue': VALUE1}},
],
}
SERIES2_PART1 = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS2},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS2},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
'points': [
{'interval': INTERVAL2, 'value': {'doubleValue': VALUE2}},
],
}
SERIES2_PART2 = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS2},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS2},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
'points': [
{'interval': INTERVAL1, 'value': {'doubleValue': VALUE2}},
],
}
TOKEN = 'second-page-please'
RESPONSE1 = {'timeSeries': [SERIES1, SERIES2_PART1],
'nextPageToken': TOKEN}
RESPONSE2 = {'timeSeries': [SERIES2_PART2]}
connection = _Connection(RESPONSE1, RESPONSE2)
client = _Client(project=PROJECT, connection=connection)
query = self._makeOne(client, METRIC_TYPE)
query = query.select_interval(start_time=T0, end_time=T1)
response = list(query)
self.assertEqual(len(response), 2)
series1, series2 = response
self.assertEqual(series1.metric.labels, METRIC_LABELS)
self.assertEqual(series2.metric.labels, METRIC_LABELS2)
self.assertEqual(series1.resource.labels, RESOURCE_LABELS)
self.assertEqual(series2.resource.labels, RESOURCE_LABELS2)
self.assertEqual([p.value for p in series1.points], [VALUE1, VALUE1])
self.assertEqual([p.value for p in series2.points], [VALUE2, VALUE2])
self.assertEqual([p.end_time for p in series1.points], [TS1, TS2])
self.assertEqual([p.end_time for p in series2.points], [TS1, TS2])
expected_request1 = {
'method': 'GET',
'path': '/projects/{project}/timeSeries/'.format(project=PROJECT),
'query_params': [
('filter', 'metric.type = "{type}"'.format(type=METRIC_TYPE)),
('interval.endTime', T1.isoformat() + 'Z'),
('interval.startTime', T0.isoformat() + 'Z'),
],
}
expected_request2 = copy.deepcopy(expected_request1)
expected_request2['query_params'].append(('pageToken', TOKEN))
request1, request2 = connection._requested
self.assertEqual(request1, expected_request1)
self.assertEqual(request2, expected_request2)
with self.assertRaises(NotFound):
list(query)
def test_iteration_empty(self):
import datetime
T0 = datetime.datetime(2016, 4, 6, 22, 5, 0)
T1 = datetime.datetime(2016, 4, 6, 22, 10, 0)
connection = _Connection({})
client = _Client(project=PROJECT, connection=connection)
query = self._makeOne(client, METRIC_TYPE)
query = query.select_interval(start_time=T0, end_time=T1)
response = list(query)
self.assertEqual(len(response), 0)
expected_request = {
'method': 'GET',
'path': '/projects/{project}/timeSeries/'.format(project=PROJECT),
'query_params': [
('filter', 'metric.type = "{type}"'.format(type=METRIC_TYPE)),
('interval.endTime', T1.isoformat() + 'Z'),
('interval.startTime', T0.isoformat() + 'Z'),
],
}
request, = connection._requested
self.assertEqual(request, expected_request)
def test_iteration_headers_only(self):
import datetime
T0 = datetime.datetime(2016, 4, 6, 22, 5, 0)
T1 = datetime.datetime(2016, 4, 6, 22, 10, 0)
SERIES1 = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
}
SERIES2 = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS2},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS2},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
}
RESPONSE = {'timeSeries': [SERIES1, SERIES2]}
connection = _Connection(RESPONSE)
client = _Client(project=PROJECT, connection=connection)
query = self._makeOne(client, METRIC_TYPE)
query = query.select_interval(start_time=T0, end_time=T1)
response = list(query.iter(headers_only=True))
self.assertEqual(len(response), 2)
series1, series2 = response
self.assertEqual(series1.metric.labels, METRIC_LABELS)
self.assertEqual(series2.metric.labels, METRIC_LABELS2)
self.assertEqual(series1.resource.labels, RESOURCE_LABELS)
self.assertEqual(series2.resource.labels, RESOURCE_LABELS2)
self.assertEqual(series1.points, [])
self.assertEqual(series2.points, [])
expected_request = {
'method': 'GET',
'path': '/projects/{project}/timeSeries/'.format(project=PROJECT),
'query_params': [
('filter', 'metric.type = "{type}"'.format(type=METRIC_TYPE)),
('interval.endTime', T1.isoformat() + 'Z'),
('interval.startTime', T0.isoformat() + 'Z'),
('view', 'HEADERS'),
],
}
request, = connection._requested
self.assertEqual(request, expected_request)
class Test_Filter(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.monitoring.query import _Filter
return _Filter
def _makeOne(self, metric_type):
return self._getTargetClass()(metric_type)
def test_minimal(self):
obj = self._makeOne(METRIC_TYPE)
expected = 'metric.type = "{type}"'.format(type=METRIC_TYPE)
self.assertEqual(str(obj), expected)
def test_maximal(self):
obj = self._makeOne(METRIC_TYPE)
obj.group_id = '1234567'
obj.projects = 'project-1', 'project-2'
obj.select_resources(resource_type='some-resource',
resource_label='foo')
obj.select_metrics(metric_label_prefix='bar-')
expected = (
'metric.type = "{type}"'
' AND group.id = "1234567"'
' AND project = "project-1" OR project = "project-2"'
' AND resource.label.resource_label = "foo"'
' AND resource.type = "some-resource"'
' AND metric.label.metric_label = starts_with("bar-")'
).format(type=METRIC_TYPE)
self.assertEqual(str(obj), expected)
class Test__build_label_filter(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.monitoring.query import _build_label_filter
return _build_label_filter(*args, **kwargs)
def test_no_labels(self):
self.assertEqual(self._callFUT('resource'), '')
def test_label_is_none(self):
self.assertEqual(self._callFUT('resource', foo=None), '')
def test_metric_labels(self):
actual = self._callFUT(
'metric',
alpha_prefix='a-',
beta_gamma_suffix='-b',
delta_epsilon='xyz',
)
expected = (
'metric.label.alpha = starts_with("a-")'
' AND metric.label.beta_gamma = ends_with("-b")'
' AND metric.label.delta_epsilon = "xyz"'
)
self.assertEqual(actual, expected)
def test_resource_labels(self):
actual = self._callFUT(
'resource',
alpha_prefix='a-',
beta_gamma_suffix='-b',
delta_epsilon='xyz',
)
expected = (
'resource.label.alpha = starts_with("a-")'
' AND resource.label.beta_gamma = ends_with("-b")'
' AND resource.label.delta_epsilon = "xyz"'
)
self.assertEqual(actual, expected)
def test_raw_label_filters(self):
actual = self._callFUT(
'resource',
'resource.label.alpha = starts_with("a-")',
'resource.label.beta_gamma = ends_with("-b")',
'resource.label.delta_epsilon = "xyz"',
)
expected = (
'resource.label.alpha = starts_with("a-")'
' AND resource.label.beta_gamma = ends_with("-b")'
' AND resource.label.delta_epsilon = "xyz"'
)
self.assertEqual(actual, expected)
def test_resource_type(self):
actual = self._callFUT('resource', resource_type='foo')
expected = 'resource.type = "foo"'
self.assertEqual(actual, expected)
def test_resource_type_prefix(self):
actual = self._callFUT('resource', resource_type_prefix='foo-')
expected = 'resource.type = starts_with("foo-")'
self.assertEqual(actual, expected)
def test_resource_type_suffix(self):
actual = self._callFUT('resource', resource_type_suffix='-foo')
expected = 'resource.type = ends_with("-foo")'
self.assertEqual(actual, expected)
class Test__format_timestamp(unittest2.TestCase):
def _callFUT(self, timestamp):
from gcloud.monitoring.query import _format_timestamp
return _format_timestamp(timestamp)
def test_naive(self):
from datetime import datetime
TIMESTAMP = datetime(2016, 4, 5, 13, 30, 0)
timestamp = self._callFUT(TIMESTAMP)
self.assertEqual(timestamp, '2016-04-05T13:30:00Z')
def test_with_timezone(self):
from datetime import datetime
from gcloud._helpers import UTC
TIMESTAMP = datetime(2016, 4, 5, 13, 30, 0, tzinfo=UTC)
timestamp = self._callFUT(TIMESTAMP)
self.assertEqual(timestamp, '2016-04-05T13:30:00Z')
class _Connection(object):
def __init__(self, *responses):
self._responses = list(responses)
self._requested = []
def api_request(self, **kwargs):
from gcloud.exceptions import NotFound
self._requested.append(kwargs)
try:
return self._responses.pop(0)
except IndexError:
raise NotFound('miss')
class _Client(object):
def __init__(self, project, connection):
self.project = project
self.connection = connection
| apache-2.0 | -1,906,380,287,426,255,400 | 3,793,015,746,411,777,000 | 35.838558 | 78 | 0.589286 | false |
drax68/graphite-web | webapp/graphite/metrics/urls.py | 5 | 1063 | """Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from django.conf.urls import url
from . import views
urlpatterns = [
url('^index\.json$', views.index_json, name='metrics_index'),
url('^find/?$', views.find_view, name='metrics_find'),
url('^expand/?$', views.expand_view, name='metrics_expand'),
url('^get-metadata/?$', views.get_metadata_view,
name='metrics_get_metadata'),
url('^set-metadata/?$', views.set_metadata_view,
name='metrics_set_metadata'),
url('', views.find_view, name='metrics'),
]
| apache-2.0 | 8,269,570,256,702,655,000 | -6,823,010,634,912,964,000 | 38.37037 | 75 | 0.70461 | false |
pku9104038/edx-platform | cms/envs/dev.py | 2 | 6563 | """
This config file runs the simplest dev environment"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .common import *
from logsettings import get_logger_config
DEBUG = True
TEMPLATE_DEBUG = DEBUG
LOGGING = get_logger_config(ENV_ROOT / "log",
logging_env="dev",
tracking_filename="tracking.log",
dev_env=True,
debug=True)
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': GITHUB_REPO_ROOT,
'render_template': 'edxmako.shortcuts.render_to_string',
}
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.draft.DraftModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
'direct': {
'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
'split': {
'ENGINE': 'xmodule.modulestore.split_mongo.SplitMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
}
}
# cdodge: This is the specifier for the MongoDB (using GridFS) backed static content store
# This is for static content for courseware, not system static content (e.g. javascript, css, edX branding, etc)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': 'localhost',
'db': 'xcontent',
},
# allow for additional options that can be keyed on a name, e.g. 'trashcan'
'ADDITIONAL_OPTIONS': {
'trashcan': {
'bucket': 'trash_fs'
}
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ENV_ROOT / "db" / "edx.db",
}
}
LMS_BASE = "localhost:8000"
FEATURES['PREVIEW_LMS_BASE'] = "localhost:8000"
REPOS = {
'edx4edx': {
'branch': 'master',
'origin': '[email protected]:MITx/edx4edx.git',
},
'content-mit-6002x': {
'branch': 'master',
# 'origin': '[email protected]:MITx/6002x-fall-2012.git',
'origin': '[email protected]:MITx/content-mit-6002x.git',
},
'6.00x': {
'branch': 'master',
'origin': '[email protected]:MITx/6.00x.git',
},
'7.00x': {
'branch': 'master',
'origin': '[email protected]:MITx/7.00x.git',
},
'3.091x': {
'branch': 'master',
'origin': '[email protected]:MITx/3.091x.git',
},
}
CACHES = {
# This is the cache used for most things. Askbot will not work without a
# functioning cache -- it relies on caching to load its settings in places.
# In staging/prod envs, the sessions also live here.
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_loc_mem_cache',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
# The general cache is what you get if you use our util.cache. It's used for
# things like caching the course.xml file for different A/B test groups.
# We set it to be a DummyCache to force reloading of course.xml in dev.
# In staging environments, we would grab VERSION from data uploaded by the
# push process.
'general': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'KEY_PREFIX': 'general',
'VERSION': 4,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'mongo_metadata_inheritance': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/mongo_metadata_inheritance',
'TIMEOUT': 300,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'loc_cache': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
},
}
# Make the keyedcache startup warnings go away
CACHE_TIMEOUT = 0
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
################################ PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
################################# CELERY ######################################
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ DEBUG TOOLBAR #################################
INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo')
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
# Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and
# Django=1.3.1/1.4 where requests to views get duplicated (your method gets
# hit twice). So you can uncomment when you need to diagnose performance
# problems, but you shouldn't leave it on.
# 'debug_toolbar.panels.profiling.ProfilingDebugPanel',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False
}
# To see stacktraces for MongoDB queries, set this to True.
# Stacktraces slow down page loads drastically (for pages with lots of queries).
DEBUG_TOOLBAR_MONGO_STACKTRACES = False
# disable NPS survey in dev mode
FEATURES['STUDIO_NPS_SURVEY'] = False
# Enable URL that shows information about the status of various services
FEATURES['ENABLE_SERVICE_STATUS'] = True
############################# SEGMENT-IO ##################################
# If there's an environment variable set, grab it and turn on Segment.io
# Note that this is the Studio key. There is a separate key for the LMS.
import os
SEGMENT_IO_KEY = os.environ.get('SEGMENT_IO_KEY')
if SEGMENT_IO_KEY:
FEATURES['SEGMENT_IO'] = True
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=F0401
except ImportError:
pass
| agpl-3.0 | 4,703,454,100,849,937,000 | -8,763,584,624,481,611,000 | 32.65641 | 126 | 0.621362 | false |
imsplitbit/nova | nova/tests/virt/test_block_device.py | 2 | 18934 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import block_device
from nova.conductor import api as conductor_api
from nova import context
from nova.openstack.common import jsonutils
from nova import test
from nova.tests import matchers
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.volume import cinder
class TestDriverBlockDevice(test.NoDBTestCase):
driver_classes = {
'swap': driver_block_device.DriverSwapBlockDevice,
'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
'volume': driver_block_device.DriverVolumeBlockDevice,
'snapshot': driver_block_device.DriverSnapshotBlockDevice,
'image': driver_block_device.DriverImageBlockDevice
}
swap_bdm = block_device.BlockDeviceDict(
{'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'disk_bus': 'scsi',
'volume_size': 2,
'boot_index': -1})
swap_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2,
'disk_bus': 'scsi'}
swap_legacy_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2}
ephemeral_bdm = block_device.BlockDeviceDict(
{'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
'boot_index': -1})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
'disk_bus': 'scsi'}
ephemeral_legacy_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'virtual_name': 'ephemeral0',
'num': 0}
volume_bdm = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda1',
'source_type': 'volume',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 8,
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'guest_format': 'ext4',
'connection_info': '{"fake": "connection_info"}',
'delete_on_termination': False,
'boot_index': 0})
volume_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': 'ext4',
'boot_index': 0}
volume_legacy_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False}
snapshot_bdm = block_device.BlockDeviceDict(
{'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
snapshot_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
snapshot_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
image_bdm = block_device.BlockDeviceDict(
{'id': 5, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 1,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'image_id': 'fake-image-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
image_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
image_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = self.mox.CreateMock(cinder.API)
self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
self.db_api = self.mox.CreateMock(conductor_api.API)
self.context = context.RequestContext('fake_user',
'fake_project')
def test_driver_block_device_base_class(self):
self.base_class_transform_called = False
class DummyBlockDevice(driver_block_device.DriverBlockDevice):
_fields = set(['foo', 'bar'])
_legacy_fields = set(['foo', 'baz'])
def _transform(inst, bdm):
self.base_class_transform_called = True
dummy_device = DummyBlockDevice({'foo': 'foo_val', 'id': 42})
self.assertTrue(self.base_class_transform_called)
self.assertThat(dummy_device, matchers.DictMatches(
{'foo': None, 'bar': None}))
self.assertEqual(dummy_device.id, 42)
self.assertThat(dummy_device.legacy(), matchers.DictMatches(
{'foo': None, 'baz': None}))
self.assertRaises(driver_block_device._NotTransformable,
DummyBlockDevice, {'no_device': True})
def _test_driver_device(self, name):
test_bdm = self.driver_classes[name](
getattr(self, "%s_bdm" % name))
self.assertThat(test_bdm, matchers.DictMatches(
getattr(self, "%s_driver_bdm" % name)))
self.assertThat(test_bdm.legacy(),
matchers.DictMatches(
getattr(self, "%s_legacy_driver_bdm" % name)))
        # Make sure that all others raise _InvalidType
for other_name, cls in self.driver_classes.iteritems():
if other_name == name:
continue
self.assertRaises(driver_block_device._InvalidType,
cls,
getattr(self, '%s_bdm' % name))
def _test_driver_default_size(self, name):
size = 'swap_size' if name == 'swap' else 'size'
no_size_bdm = getattr(self, "%s_bdm" % name).copy()
no_size_bdm['volume_size'] = None
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
del no_size_bdm['volume_size']
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
def test_driver_swap_block_device(self):
self._test_driver_device("swap")
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
def test_driver_ephemeral_default_size(self):
self._test_driver_default_size('ephemeral')
def test_driver_volume_block_device(self):
self._test_driver_device("volume")
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
self.assertEqual(test_bdm.id, 3)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
self.assertEqual(test_bdm.volume_size, 8)
def test_driver_snapshot_block_device(self):
self._test_driver_device("snapshot")
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
self.assertEqual(test_bdm.id, 4)
self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
self.assertEqual(test_bdm.volume_size, 3)
def test_driver_image_block_device(self):
self._test_driver_device('image')
test_bdm = self.driver_classes['image'](
self.image_bdm)
self.assertEqual(test_bdm.id, 5)
self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
self.assertEqual(test_bdm.volume_size, 1)
def test_driver_image_block_device_destination_local(self):
self._test_driver_device('image')
bdm = self.image_bdm.copy()
bdm['destination_type'] = 'local'
self.assertRaises(driver_block_device._InvalidType,
self.driver_classes['image'], bdm)
def test_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
elevated_context = self.context.elevated()
self.stubs.Set(self.context, 'elevated',
lambda: elevated_context)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume = {'id': 'fake-volume-id-1'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {}}
expected_conn_info = {'data': {},
'serial': 'fake-volume-id-1'}
self.volume_api.get(self.context,
'fake-volume-id-1').AndReturn(volume)
self.volume_api.check_attach(self.context, volume,
instance=instance).AndReturn(None)
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
elevated_context, volume['id'],
connector).AndReturn(connection_info)
self.volume_api.attach(elevated_context, 'fake-volume-id-1',
'fake_uuid', '/dev/sda1').AndReturn(None)
self.db_api.block_device_mapping_update(elevated_context, 3,
{'connection_info': jsonutils.dumps(expected_conn_info)})
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver, self.db_api)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_refresh_connection(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {}}
expected_conn_info = {'data': {},
'serial': 'fake-volume-id-2'}
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
self.context, test_bdm.volume_id,
connector).AndReturn(connection_info)
self.db_api.block_device_mapping_update(self.context, 4,
{'connection_info': jsonutils.dumps(expected_conn_info)})
self.mox.ReplayAll()
test_bdm.refresh_connection_info(self.context, instance,
self.volume_api, self.virt_driver,
self.db_api)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_snapshot_attach_no_volume(self):
no_volume_snapshot = self.snapshot_bdm.copy()
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
snapshot = {'id': 'fake-snapshot-id-1'}
volume = {'id': 'fake-volume-id-2'}
wait_func = self.mox.CreateMockAnything()
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
self.volume_api.get_snapshot(self.context,
'fake-snapshot-id-1').AndReturn(snapshot)
self.volume_api.create(self.context, 3,
'', '', snapshot).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
self.db_api.block_device_mapping_update(
self.context, 4, {'volume_id': 'fake-volume-id-2'}).AndReturn(None)
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
        # Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
self.mox.StubOutWithMock(self.db_api,
'block_device_mapping_update')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_no_volume(self):
no_volume_image = self.image_bdm.copy()
no_volume_image['volume_id'] = None
test_bdm = self.driver_classes['image'](no_volume_image)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2'}
wait_func = self.mox.CreateMockAnything()
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
self.volume_api.create(self.context, 1,
'', '', image_id=image['id']).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
self.db_api.block_device_mapping_update(
self.context, 5, {'volume_id': 'fake-volume-id-2'}).AndReturn(None)
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_volume(self):
test_bdm = self.driver_classes['image'](
self.image_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
        # Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
self.mox.StubOutWithMock(self.db_api,
'block_device_mapping_update')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, self.db_api)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_convert_block_devices(self):
converted = driver_block_device._convert_block_devices(
self.driver_classes['volume'],
[self.volume_bdm, self.ephemeral_bdm])
self.assertEqual(converted, [self.volume_driver_bdm])
def test_legacy_block_devices(self):
test_snapshot = self.driver_classes['snapshot'](
self.snapshot_bdm)
block_device_mapping = [test_snapshot, test_snapshot]
legacy_bdm = driver_block_device.legacy_block_devices(
block_device_mapping)
self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
self.snapshot_legacy_driver_bdm])
# Test that the ephemerals work as expected
test_ephemerals = [self.driver_classes['ephemeral'](
self.ephemeral_bdm) for _ in xrange(2)]
expected = [self.ephemeral_legacy_driver_bdm.copy()
for _ in xrange(2)]
expected[0]['virtual_name'] = 'ephemeral0'
expected[0]['num'] = 0
expected[1]['virtual_name'] = 'ephemeral1'
expected[1]['num'] = 1
legacy_ephemerals = driver_block_device.legacy_block_devices(
test_ephemerals)
self.assertEqual(expected, legacy_ephemerals)
def test_get_swap(self):
swap = [self.swap_driver_bdm]
legacy_swap = [self.swap_legacy_driver_bdm]
no_swap = [self.volume_driver_bdm]
self.assertEqual(swap[0], driver_block_device.get_swap(swap))
self.assertEqual(legacy_swap[0],
driver_block_device.get_swap(legacy_swap))
self.assertEqual(no_swap, driver_block_device.get_swap(no_swap))
self.assertIsNone(driver_block_device.get_swap([]))
def test_is_implemented(self):
for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
self.ephemeral_bdm, self.snapshot_bdm):
self.assertTrue(driver_block_device.is_implemented(bdm))
local_image = self.image_bdm.copy()
local_image['destination_type'] = 'local'
self.assertFalse(driver_block_device.is_implemented(local_image))
| apache-2.0 | -434,573,927,574,315,900 | 5,873,798,770,963,434,000 | 38.610879 | 79 | 0.583501 | false |
tudarmstadt-lt/topicrawler | lt.lm/src/main/py/mr_ngram_count.py | 1 | 1297 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test:
cat data | map | sort | reduce
cat data | ./x.py -m | sort | ./x.py -r
hadoop jar /opt/cloudera/parcels/CDH/lib/hadoop-mapreduce/hadoop-streaming.jar \
-files x.py \
-mapper 'x.py -m' \
-reducer 'x.py -r' \
-input in \
-output out
@author: stevo
"""
from __future__ import print_function
from __future__ import division
import itertools as it
import sys
def readlines():
with sys.stdin as f:
for line in f:
if line.strip():
yield line
def mapper(lines):
for line in lines:
print('{}'.format(line.rstrip()))
def line2tuple(lines):
for line in lines:
splits = line.rstrip().split('\t')
yield splits
def reducer(lines, mincount=1):
for key, values in it.groupby(lines, lambda line : line.rstrip()):
num = reduce(lambda x, y: x + 1, values, 0)
if num >= mincount:
print('{}\t{}'.format(key, num))
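# Illustrative sketch (not part of the original job): emulates the
# "cat data | map | sort | reduce" pipeline from the module docstring
# in-process. The sample lines below are made up.
def _example_pipeline(sample=('b c', 'a b', 'a b')):
    # after sorting, identical lines are adjacent, so groupby can count them
    for key, values in it.groupby(sorted(sample)):
        print('{}\t{}'.format(key, sum(1 for _ in values)))
    # prints:
    #   a b 2
    #   b c 1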
if len(sys.argv) < 2:
raise Exception('specify mapper (-m) or reducer (-r) function')
t = sys.argv[1]
mincount = int(sys.argv[2]) if len(sys.argv) > 2 else 1
if '-m' == t:
mapper(readlines());
elif '-r' == t:
reducer(readlines(), mincount);
else:
raise Exception('specify mapper (-m) or reducer (-r) function') | apache-2.0 | -4,877,636,518,635,952,000 | 3,249,392,562,201,233,000 | 22.6 | 80 | 0.597533 | false |
Brocade-OpenSource/OpenStack-DNRM-Nova | nova/api/openstack/compute/contrib/migrations.py | 3 | 2622 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
XMLNS = "http://docs.openstack.org/compute/ext/migrations/api/v2.0"
ALIAS = "os-migrations"
def authorize(context, action_name):
action = 'migrations:%s' % action_name
extensions.extension_authorizer('compute', action)(context)
class MigrationsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('migrations')
elem = xmlutil.SubTemplateElement(root, 'migration',
selector='migrations')
elem.set('id')
elem.set('source_node')
elem.set('dest_node')
elem.set('source_compute')
elem.set('dest_compute')
elem.set('dest_host')
elem.set('status')
elem.set('instance_uuid')
elem.set('old_instance_type_id')
elem.set('new_instance_type_id')
elem.set('created_at')
elem.set('updated_at')
return xmlutil.MasterTemplate(root, 1)
class MigrationsController(object):
"""Controller for accessing migrations in OpenStack API."""
def __init__(self):
self.compute_api = compute.API()
@wsgi.serializers(xml=MigrationsTemplate)
def index(self, req):
"""Return all migrations in progress."""
context = req.environ['nova.context']
authorize(context, "index")
migrations = self.compute_api.get_migrations(context, req.GET)
return {'migrations': migrations}
class Migrations(extensions.ExtensionDescriptor):
"""Provide data on migrations."""
name = "Migrations"
alias = ALIAS
namespace = XMLNS
updated = "2013-05-30T00:00:00+00:00"
def get_resources(self):
resources = []
resource = extensions.ResourceExtension('os-migrations',
MigrationsController())
resources.append(resource)
return resources
| apache-2.0 | 3,734,471,494,399,624,000 | 1,855,518,477,323,094,300 | 33.051948 | 78 | 0.655606 | false |
ArcaniteSolutions/truffe2 | truffe2/truffe/management/commands/import_ndfs.py | 2 | 4367 | # -*- coding: utf-8 -*-
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand, CommandError
from django.utils.timezone import now
from accounting_core.models import CostCenter, AccountingYear, Account
from accounting_tools.models import ExpenseClaim, ExpenseClaimFile, ExpenseClaimLine, ExpenseClaimLogging, LinkedInfo
from app.ldaputils import get_attrs_of_sciper
from users.models import TruffeUser
import json
import os
import sys
class Command(BaseCommand):
""" Requirements : files in /media/uploads/_generic/ExpenseClaim/"""
help = 'Import notes de frais'
def handle(self, *args, **options):
data = json.loads(sys.stdin.read())
root_user = TruffeUser.objects.get(username=179189)
expenseclaim_ct = ContentType.objects.get(app_label="accounting_tools", model="expenseclaim")
status_mapping = {'1': '0_draft', '2': '2_agep_validable', '3': '4_archived'}
for ndf_data in data['data']:
try:
ay = AccountingYear.objects.get(name=ndf_data['accounting_year__name'])
except:
print u"AccountingYear not found !!", ndf_data['accounting_year__name']
ay = None
if ay:
try:
costcenter = CostCenter.objects.get(account_number=ndf_data['costcenter__account_number'], accounting_year=ay)
except:
print u"CostCenter not found !!", ndf_data['costcenter__account_number']
costcenter = None
if costcenter:
try:
user = TruffeUser.objects.get(username=ndf_data['creator_username'])
except TruffeUser.DoesNotExist:
print "Creation of user {!r}".format(ndf_data['creator_username'])
user = TruffeUser(username=ndf_data['creator_username'], is_active=True)
user.last_name, user.first_name, user.email = get_attrs_of_sciper(ndf_data['creator_username'])
user.save()
except Exception as e:
print "user is root_user", e
user = root_user
ndf, created = ExpenseClaim.objects.get_or_create(costcenter=costcenter, accounting_year=ay, user=user, status=status_mapping[ndf_data['status']],
comment=ndf_data['commentaire'], name=ndf_data['name'], nb_proofs=ndf_data['nb_just'])
if created:
ExpenseClaimLogging(who=user, what='imported', object=ndf).save()
print "+ {!r}".format(ndf.name)
if ndf_data['linked_info']:
linked, created = LinkedInfo.objects.get_or_create(object_id=ndf.pk, content_type=expenseclaim_ct, user_pk=user.pk, **ndf_data['linked_info'])
if created:
print " (I) {!r} {!r}".format(linked.first_name, linked.last_name)
for line_data in ndf_data['lines']:
account = Account.objects.get(account_number=line_data['account__account_number'], accounting_year=ay)
__, created = ExpenseClaimLine.objects.get_or_create(expense_claim=ndf, label=line_data['name'], account=account, proof=line_data['just'],
order=line_data['order'], value=line_data['amount'], value_ttc=line_data['amount'], tva=0)
if created:
print " (+) {!r}".format(line_data['name'])
for file_data in ndf_data['uploads']:
if not os.path.isfile(os.path.join('media', 'uploads', '_generic', 'ExpenseClaim', file_data.split('/')[-1])):
print " (!) Missing file {}".format(file_data)
else:
__, created = ExpenseClaimFile.objects.get_or_create(uploader=user, object=ndf, file=os.path.join('uploads', '_generic', 'ExpenseClaim', file_data.split('/')[-1]), defaults={'upload_date': now()})
if created:
print " (L) {!r}".format(file_data)
| bsd-2-clause | 5,222,903,545,355,257,000 | 3,076,442,124,151,485,000 | 52.91358 | 224 | 0.550263 | false |
pcsforeducation/incrowd | incrowd/notify/utils.py | 2 | 1296 | from __future__ import unicode_literals
import logging
from notify.models import Notification
logger = logging.getLogger(__name__)
def ping_filter(message, users, sending_user, notify_text, notify_type,
notify_id=None):
for user in users:
if username_in_message(message, user.username):
# Create notification
if user == sending_user:
continue
note = Notification(
text='{} {}: {}'.format(
sending_user.username, notify_text, message),
user=user,
from_user=sending_user,
type=notify_type,
identifier=notify_id)
note.save()
logger.info("Created notification for user {} from {}"
.format(note.user, note.from_user))
return message
def username_in_message(message, username):
message = message.lower()
username = username.lower()
# Check if @username in message. Edge case for username at the end of
# the message.
if '@' + username + ' ' in message.lower():
return True
try:
return (message.index('@' + username) ==
len(message.lower()) - len('@' + username))
except ValueError:
return False
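# Illustrative sketch (not part of the app): the match cases handled above.
def _example_mentions():
    assert username_in_message('hi @alice how are you', 'alice')  # mid-message
    assert username_in_message('thanks @alice', 'alice')          # end of message
    assert not username_in_message('alice is here', 'alice')      # no @ prefix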
| apache-2.0 | -3,591,543,909,887,145,500 | -3,884,786,745,302,170,600 | 30.609756 | 73 | 0.5625 | false |
Ziqi-Li/bknqgis | bokeh/bokeh/sphinxext/example_handler.py | 1 | 2905 | import sys
from ..application.handlers.code_runner import CodeRunner
from ..application.handlers.handler import Handler
from ..io import set_curdoc, curdoc
class ExampleHandler(Handler):
""" A stripped-down handler similar to CodeHandler but that does
some appropriate monkeypatching to
"""
_output_funcs = ['output_notebook', 'output_file', 'reset_output']
_io_funcs = ['show', 'save']
def __init__(self, source, filename):
super(ExampleHandler, self).__init__(self)
self._runner = CodeRunner(source, filename, [])
def modify_document(self, doc):
if self.failed:
return
module = self._runner.new_module()
sys.modules[module.__name__] = module
doc._modules.append(module)
old_doc = curdoc()
set_curdoc(doc)
        # keep the saved curdoc separate from the patched-out Document class
        old_io, old_doc_cls = self._monkeypatch()
try:
self._runner.run(module, lambda: None)
finally:
            self._unmonkeypatch(old_io, old_doc_cls)
set_curdoc(old_doc)
def _monkeypatch(self):
def _pass(*args, **kw): pass
def _add_root(obj, *args, **kw):
from bokeh.io import curdoc
curdoc().add_root(obj)
def _curdoc(*args, **kw):
return curdoc()
# these functions are transitively imported from io into plotting,
# so we have to patch them all. Assumption is that no other patching
# has occurred, i.e. we can just save the funcs being patched once,
# from io, and use those as the originals to replace everywhere
import bokeh.io as io
import bokeh.plotting as p
mods = [io, p]
# TODO (bev) restore when bkcharts package is ready (but remove at 1.0 release)
# import bkcharts as c
# mods.append(c)
old_io = {}
for f in self._output_funcs + self._io_funcs:
old_io[f] = getattr(io, f)
for mod in mods:
for f in self._output_funcs:
setattr(mod, f, _pass)
for f in self._io_funcs:
setattr(mod, f, _add_root)
import bokeh.document as d
old_doc = d.Document
d.Document = _curdoc
return old_io, old_doc
def _unmonkeypatch(self, old_io, old_doc):
import bokeh.io as io
import bokeh.plotting as p
mods = [io, p]
# TODO (bev) restore when bkcharts package is ready (but remove at 1.0 release)
# import bkcharts as c
# mods.append(c)
for mod in mods:
for f in old_io:
setattr(mod, f, old_io[f])
import bokeh.document as d
d.Document = old_doc
@property
def failed(self):
return self._runner.failed
@property
def error(self):
return self._runner.error
@property
def error_detail(self):
return self._runner.error_detail
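# Illustrative sketch (not part of bokeh): how a docs build might drive this
# handler. The source string and filename are hypothetical.
def _example_run_handler():
    from bokeh.document import Document
    source = "from bokeh.plotting import figure, show\nshow(figure())\n"
    handler = ExampleHandler(source, filename="example.py")
    doc = Document()
    # show() is patched to add_root(), so the figure ends up as a root of doc
    handler.modify_document(doc)
    return doc.roots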
| gpl-2.0 | 2,121,788,243,878,657,000 | 8,131,616,200,188,748,000 | 27.203883 | 87 | 0.578313 | false |
BurningNetel/ctf-manager | CTFmanager/tests/views/event/test_event.py | 1 | 6138 | import json
from django.core.urlresolvers import reverse
from CTFmanager.tests.views.base import ViewTestCase
class EventPageAJAXJoinEventTest(ViewTestCase):
""" Tests that a user can join an event
A user should be able to join upcoming events.
And get a response without the page reloading
"""
def get_valid_event_join_post(self):
event = self.create_event()
response = self.client.post(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
return _json, event
def test_POST_returns_expected_json_on_valid_post(self):
_json, event = self.get_valid_event_join_post()
self.assertEqual(200, _json['status_code'])
def test_POST_gives_correct_user_count(self):
_json, event = self.get_valid_event_join_post()
self.assertEqual(1, _json['members'])
def test_logout_POST_gives_401_and_negative(self):
self.client.logout()
_json, event = self.get_valid_event_join_post()
self.assertEqual(-1, _json['members'])
self.assertEqual(401, _json['status_code'])
def test_duplicate_POST_gives_304_and_negative(self):
_json, event = self.get_valid_event_join_post()
response = self.client.post(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(-1, _json['members'])
self.assertEqual(304, _json['status_code'])
def test_valid_DELETE_gives_valid_json(self):
event = self.create_event_join_user()
response = self.client.delete(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(200, _json['status_code'])
self.assertEqual(0, _json['members'])
def test_duplicate_DELETE_gives_304_and_negative(self):
event = self.create_event_join_user()
self.client.delete(reverse('event_join', args=[event.name]))
response = self.client.delete(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(304, _json['status_code'])
self.assertEqual(-1, _json['members'])
def test_logout_then_DELTE_gives_401_and_negative(self):
event = self.create_event_join_user()
self.client.logout()
response = self.client.delete(reverse('event_join', args=[event.name]))
_json = json.loads(response.content.decode())
self.assertEqual(401, _json['status_code'])
self.assertEqual(-1, _json['members'])
def create_event_join_user(self):
event = self.create_event()
event.join(self.user)
return event
class EventPageTest(ViewTestCase):
def test_events_page_requires_authentication(self):
self.client.logout()
response = self.client.get(reverse('events'))
self.assertRedirects(response, reverse('login') + '?next=' + reverse('events'))
def test_events_page_renders_events_template(self):
response = self.client.get(reverse('events'))
self.assertTemplateUsed(response, 'event/events.html')
def test_events_page_contains_new_event_button(self):
response = self.client.get(reverse('events'))
expected = 'id="btn_add_event" href="/events/new/">Add Event</a>'
self.assertContains(response, expected)
def test_events_page_displays_only_upcoming_events(self):
event_future = self.create_event("hatCTF", True)
event_past = self.create_event("RuCTF_2015", False)
response = self.client.get(reverse('events'))
_event = response.context['events']
self.assertEqual(_event[0], event_future)
self.assertEqual(len(_event), 1)
self.assertNotEqual(_event[0], event_past)
def test_events_page_has_correct_headers(self):
response = self.client.get(reverse('events'))
expected = 'Upcoming Events'
expected2 = 'Archive'
self.assertContains(response, expected)
self.assertContains(response, expected2)
def test_empty_events_set_shows_correct_message(self):
response = self.client.get(reverse('events'))
expected = 'No upcoming events!'
self.assertContains(response, expected)
def test_events_page_display_archive(self):
event_past = self.create_event('past_event', False)
response = self.client.get(reverse('events'))
archive = response.context['archive']
self.assertContains(response, '<table id="table_archive"')
self.assertContains(response, event_past.name)
self.assertEqual(archive[0], event_past)
def test_events_page_displays_error_message_when_nothing_in_archive(self):
response = self.client.get(reverse('events'))
archive = response.context['archive']
self.assertEqual(len(archive), 0)
self.assertContains(response, 'No past events!')
def test_event_page_displays_event_members_count(self):
event = self.create_event()
response = self.client.get(reverse('events'))
self.assertContains(response, '0 Participating')
event.members.add(self.user)
event.save()
response = self.client.get(reverse('events'))
self.assertContains(response, '1 Participating')
def test_event_page_displays_correct_button_text(self):
event = self.create_event()
response = self.client.get(reverse('events'))
self.assertContains(response, 'Join</button>')
event.join(self.user)
response = self.client.get(reverse('events'))
self.assertContains(response, 'Leave</button>')
def test_event_page_shows_username_in_popup(self):
event = self.create_event()
response = self.client.get(reverse('events'))
self.assertContains(response, self.user.username, 1)
self.assertContains(response, 'Nobody has joined yet!')
event.join(self.user)
response = self.client.get(reverse('events'))
self.assertContains(response, self.user.username, 2)
self.assertNotContains(response, 'Nobody has joined yet!') | gpl-3.0 | 4,454,293,392,298,463,700 | 7,679,653,440,372,594,000 | 38.352564 | 87 | 0.654774 | false |
crchemist/scioncc | src/pyon/core/test/test_thread.py | 2 | 3210 | #!/usr/bin/env python
__author__ = 'Adam R. Smith'
from pyon.core.thread import PyonThreadManager, PyonThread
from pyon.core.exception import ContainerError
from pyon.util.int_test import IonIntegrationTestCase
from pyon.util.unit_test import PyonTestCase
from unittest import SkipTest
from nose.plugins.attrib import attr
import time
@attr('UNIT', group='process')
class ProcessTest(PyonTestCase):
def setUp(self):
self.counter = 0
def increment(self, amount=1):
self.counter += amount
def test_proc(self):
self.counter = 0
proc = PyonThread(self.increment, 2)
proc.start()
self.assertEqual(self.counter, 0)
time.sleep(0.2)
proc.join()
self.assertGreaterEqual(self.counter, 2)
def test_supervisor(self):
self.counter = 0
sup = PyonThreadManager()
sup.start()
proc = sup.spawn(self.increment, amount=2)
self.assertEqual(self.counter, 0)
time.sleep(0.2)
sup.join_children()
self.assertGreaterEqual(self.counter, 2)
def test_supervisor_shutdown(self):
""" Test shutdown joining/forcing with timeouts. """
sup = PyonThreadManager()
sup.start()
import gevent
#Note: commented MM 7/2015. time.sleep seems not monkey-patched on Ubuntu?
#self.assertIs(time.sleep, gevent.hub.sleep)
# Test that it takes at least the given timeout to join_children, but not much more
proc_sleep_secs, proc_count = 0.01, 5
[sup.spawn(time.sleep, seconds=proc_sleep_secs) for i in xrange(5)]
elapsed = sup.shutdown(2*proc_sleep_secs)
# MM, 1/12: Ok, I loosened the timing boundaries. Do the tests still work?
# Enabled 0.2s of slack for all tests
self.assertLess(elapsed - proc_sleep_secs, 0.2)
# this could be trouble
self.assertLess(elapsed, 0.2 + proc_sleep_secs*3)
# Test that a small timeout forcibly shuts down without waiting
wait_secs = 0.0001
[sup.spawn(time.sleep, seconds=proc_sleep_secs) for i in xrange(5)]
elapsed = sup.shutdown(wait_secs)
self.assertLess(elapsed - wait_secs, 0.2)
# this could be trouble too
self.assertLess(elapsed, 0.2 + proc_sleep_secs)
# Test that no timeout waits until all finished
[sup.spawn(time.sleep, seconds=proc_sleep_secs) for i in xrange(5)]
elapsed = sup.shutdown()
self.assertLess(elapsed - proc_sleep_secs, 0.2)
def test_ensure_ready(self):
# GreenProcess by default will signal ready immediately, but we can still pass it through to make sure it's ok
sup = PyonThreadManager()
sup.start()
proc = sup.spawn(self.increment, amount=5)
sup.ensure_ready(proc)
self.assertEqual(self.counter, 5)
def test_ensure_ready_failed_proc(self):
# yes the error we print is intentional and annoying, sorry
def failboat():
self.increment(5, 1) # too many params, will fail
sup = PyonThreadManager()
sup.start()
proc = sup.spawn(failboat)
self.assertRaises(ContainerError, sup.ensure_ready, proc)
| bsd-2-clause | -7,219,191,683,320,618,000 | 4,765,358,092,648,057,000 | 31.424242 | 118 | 0.643302 | false |
Allow2CEO/browser-ios | brave/node_modules/bloom-filter-cpp/vendor/depot_tools/third_party/gsutil/pkg_util.py | 51 | 2026 | #!/usr/bin/env python
# Utilities to facilitate maintaining one master list of package contents
# in MANIFEST.in and allow us to import that list into various packaging
# tools (e.g. rpmbuid and setup.py).
# Define the file in which we maintain package contents. Rather than
# hard-coding our package contents, to ease maintenance we read the
# manifest file to obtain the list of files and directories to include.
MANIFEST_IN = 'MANIFEST.in'
# Define input and output files for customizing the rpm package spec.
SPEC_IN = 'gsutil.spec.in'
SPEC_OUT = 'gsutil.spec'
# Root of rpmbuild tree for file enumeration in gsutil.spec file.
RPM_ROOT = '%{_datadir}/%{name}/'
def parse_manifest(files, dirs):
'''Parse contents of manifest file and append results to passed lists
of files and directories.
'''
f = open(MANIFEST_IN, 'r')
for line in f:
line = line.strip()
# Skip empty or comment lines.
if (len(line) <= 0) or (line[0] == '#'):
continue
tokens = line.split()
    if len(tokens) > 0:
if tokens[0] == 'include':
files.extend(tokens[1:])
elif tokens[0] == 'recursive-include' and tokens[2] == '*':
dirs.append(tokens[1])
else:
err = 'Unsupported type ' + tokens[0] + ' in ' + MANIFEST_IN + ' file.'
raise Exception(err)
f.close()
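# Illustrative sketch (not part of the original script): for manifest lines
# "include LICENSE README" and "recursive-include gslib *", the spec
# generation block below would emit the following entries (names made up).
def _example_spec_lines():
  files, dirs = ['LICENSE', 'README'], ['gslib']
  lines = [RPM_ROOT + f + '\n' for f in files]
  lines += [RPM_ROOT + d + '/\n' for d in dirs]
  return lines  # e.g. ['%{_datadir}/%{name}/LICENSE\n', ..., '%{_datadir}/%{name}/gslib/\n']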
# When executed as a separate script, create a dynamically generated rpm
# spec file. Otherwise, when loaded as a module by another script, no
# specific actions are taken, other than making utility functions available
# to the loading script.
if __name__ == '__main__':
# Running as main so generate a new rpm spec file.
files = []
dirs = []
parse_manifest(files, dirs)
fin = open(SPEC_IN, 'r')
fout = open(SPEC_OUT, 'w')
for line in fin:
if line.strip() == '###FILES_GO_HERE###':
for file in files:
fout.write(RPM_ROOT + file + '\n')
for dir in dirs:
fout.write(RPM_ROOT + dir + '/\n')
else:
fout.write(line)
fout.close()
fin.close()
| mpl-2.0 | 1,089,122,709,149,984,600 | 7,293,036,056,248,832,000 | 32.766667 | 79 | 0.649062 | false |
Jay-Jay-D/LeanSTP | Algorithm.Python/ScheduledUniverseSelectionModelRegressionAlgorithm.py | 2 | 5460 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Orders import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Alphas import *
from QuantConnect.Algorithm.Framework.Portfolio import *
from QuantConnect.Algorithm.Framework.Selection import *
from datetime import datetime, timedelta
### <summary>
### Regression algorithm for testing ScheduledUniverseSelectionModel scheduling functions.
### </summary>
class ScheduledUniverseSelectionModelRegressionAlgorithm(QCAlgorithm):
'''Regression algorithm for testing ScheduledUniverseSelectionModel scheduling functions.'''
def Initialize(self):
self.UniverseSettings.Resolution = Resolution.Hour
self.SetStartDate(2017, 1, 1)
self.SetEndDate(2017, 2, 1)
# selection will run on mon/tues/thurs at 00:00/06:00/12:00/18:00
self.SetUniverseSelection(ScheduledUniverseSelectionModel(
self.DateRules.Every(DayOfWeek.Monday, DayOfWeek.Tuesday, DayOfWeek.Thursday),
self.TimeRules.Every(timedelta(hours = 12)),
self.SelectSymbols
))
self.SetAlpha(ConstantAlphaModel(InsightType.Price, InsightDirection.Up, timedelta(1)))
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
# some days of the week have different behavior the first time -- less securities to remove
self.seenDays = []
def SelectSymbols(self, dateTime):
symbols = []
weekday = dateTime.weekday()
if weekday == 0 or weekday == 1:
symbols.append(Symbol.Create('SPY', SecurityType.Equity, Market.USA))
elif weekday == 2:
# given the date/time rules specified in Initialize, this symbol will never be selected (not invoked on wednesdays)
symbols.append(Symbol.Create('AAPL', SecurityType.Equity, Market.USA))
else:
symbols.append(Symbol.Create('IBM', SecurityType.Equity, Market.USA))
if weekday == 1 or weekday == 3:
symbols.append(Symbol.Create('EURUSD', SecurityType.Forex, Market.FXCM))
elif weekday == 4:
# given the date/time rules specified in Initialize, this symbol will never be selected (every 6 hours never lands on hour==1)
symbols.append(Symbol.Create('EURGBP', SecurityType.Forex, Market.FXCM))
else:
symbols.append(Symbol.Create('NZDUSD', SecurityType.Forex, Market.FXCM))
return symbols
def OnSecuritiesChanged(self, changes):
self.Log("{}: {}".format(self.Time, changes))
weekday = self.Time.weekday()
if weekday == 0:
self.ExpectAdditions(changes, 'SPY', 'NZDUSD')
if weekday not in self.seenDays:
self.seenDays.append(weekday)
self.ExpectRemovals(changes, None)
else:
self.ExpectRemovals(changes, 'EURUSD', 'IBM')
if weekday == 1:
self.ExpectAdditions(changes, 'EURUSD')
if weekday not in self.seenDays:
self.seenDays.append(weekday)
self.ExpectRemovals(changes, 'NZDUSD')
else:
self.ExpectRemovals(changes, 'NZDUSD')
if weekday == 2 or weekday == 4:
# selection function not invoked on wednesdays (2) or friday (4)
self.ExpectAdditions(changes, None)
self.ExpectRemovals(changes, None)
if weekday == 3:
self.ExpectAdditions(changes, "IBM")
self.ExpectRemovals(changes, "SPY")
def OnOrderEvent(self, orderEvent):
self.Log("{}: {}".format(self.Time, orderEvent))
def ExpectAdditions(self, changes, *tickers):
if tickers is None and changes.AddedSecurities.Count > 0:
raise Exception("{}: Expected no additions: {}".format(self.Time, self.Time.weekday()))
for ticker in tickers:
if ticker is not None and ticker not in [s.Symbol.Value for s in changes.AddedSecurities]:
raise Exception("{}: Expected {} to be added: {}".format(self.Time, ticker, self.Time.weekday()))
def ExpectRemovals(self, changes, *tickers):
if tickers is None and changes.RemovedSecurities.Count > 0:
raise Exception("{}: Expected no removals: {}".format(self.Time, self.Time.weekday()))
for ticker in tickers:
if ticker is not None and ticker not in [s.Symbol.Value for s in changes.RemovedSecurities]:
raise Exception("{}: Expected {} to be removed: {}".format(self.Time, ticker, self.Time.weekday()))
| apache-2.0 | 5,560,401,031,332,531,000 | 3,583,951,113,918,060,000 | 42.31746 | 138 | 0.674973 | false |
RT-Thread/rt-thread | bsp/stm32/stm32f107-uc-eval/rtconfig.py | 28 | 4023 | import os
# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M3 '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' -std=c99'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M3'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M3'
AFLAGS += ' --fpu None'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
def dist_handle(BSP_ROOT, dist_dir):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT, dist_dir)
| apache-2.0 | 7,724,228,605,285,090,000 | 9,099,746,476,724,039,000 | 25.82 | 152 | 0.561521 | false |
jvoegele/picard | picard/ui/collectionmenu.py | 2 | 3181 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2013 Michael Wiencek
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import locale
from PyQt4 import QtCore, QtGui
from picard.collection import user_collections, load_user_collections
class CollectionMenu(QtGui.QMenu):
def __init__(self, albums, *args):
QtGui.QMenu.__init__(self, *args)
self.ids = set(a.id for a in albums)
self.update_collections()
def update_collections(self):
self.clear()
for id, collection in sorted(user_collections.iteritems(),
key=lambda (k, v):
(locale.strxfrm(v.name.encode('utf-8')), k)):
action = QtGui.QWidgetAction(self)
action.setDefaultWidget(CollectionCheckBox(self, collection))
self.addAction(action)
self.addSeparator()
self.refresh_action = self.addAction(_("Refresh List"))
def refresh_list(self):
self.refresh_action.setEnabled(False)
load_user_collections(self.update_collections)
def mouseReleaseEvent(self, event):
# Not using self.refresh_action.triggered because it closes the menu
if self.actionAt(event.pos()) == self.refresh_action and self.refresh_action.isEnabled():
self.refresh_list()
class CollectionCheckBox(QtGui.QCheckBox):
def __init__(self, menu, collection):
self.menu = menu
self.collection = collection
QtGui.QCheckBox.__init__(self, self.label())
releases = collection.releases & menu.ids
if len(releases) == len(menu.ids):
self.setCheckState(QtCore.Qt.Checked)
elif not releases:
self.setCheckState(QtCore.Qt.Unchecked)
else:
self.setCheckState(QtCore.Qt.PartiallyChecked)
def nextCheckState(self):
ids = self.menu.ids
if ids & self.collection.pending:
return
diff = ids - self.collection.releases
if diff:
self.collection.add_releases(diff, self.updateText)
self.setCheckState(QtCore.Qt.Checked)
else:
self.collection.remove_releases(ids & self.collection.releases, self.updateText)
self.setCheckState(QtCore.Qt.Unchecked)
def updateText(self):
self.setText(self.label())
def label(self):
c = self.collection
return ungettext("%s (%i release)", "%s (%i releases)", c.size) % (c.name, c.size)
| gpl-2.0 | -2,770,563,809,871,662,000 | -940,760,698,361,805,400 | 35.988372 | 97 | 0.653568 | false |
ctb/cvxpy | cvxpy/atoms/affine/upper_tri.py | 11 | 2344 | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.atoms.affine.affine_atom import AffAtom
import cvxpy.utilities as u
import cvxpy.interface as intf
import cvxpy.lin_ops.lin_utils as lu
import numpy as np
class upper_tri(AffAtom):
"""The vectorized strictly upper triagonal entries.
"""
def __init__(self, expr):
super(upper_tri, self).__init__(expr)
@AffAtom.numpy_numeric
def numeric(self, values):
"""Vectorize the upper triagonal entries.
"""
value = np.zeros(self.size[0])
count = 0
for i in range(values[0].shape[0]):
for j in range(values[0].shape[1]):
if i < j:
value[count] = values[0][i, j]
count += 1
return value
def validate_arguments(self):
"""Checks that the argument is a square matrix.
"""
if not self.args[0].size[0] == self.args[0].size[1]:
raise ValueError(
"Argument to upper_tri must be a square matrix."
)
def shape_from_args(self):
"""A vector.
"""
rows, cols = self.args[0].size
return u.Shape(rows*(cols-1)//2, 1)
@staticmethod
def graph_implementation(arg_objs, size, data=None):
"""Vectorized strictly upper triagonal entries.
Parameters
----------
arg_objs : list
LinExpr for each argument.
size : tuple
The size of the resulting expression.
data :
Additional data required by the atom.
Returns
-------
tuple
(LinOp for objective, list of constraints)
"""
return (lu.upper_tri(arg_objs[0]), [])
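# Illustrative sketch (not part of cvxpy): the entry order numeric() produces
# for a 3x3 input is row-major over the strict upper triangle.
def _example_entry_order():
    A = np.arange(9).reshape(3, 3)  # [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    return [A[i, j] for i in range(3) for j in range(3) if i < j]  # [1, 2, 5]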
| gpl-3.0 | 8,306,884,462,635,710,000 | -2,111,215,970,158,969,900 | 29.441558 | 68 | 0.612628 | false |
Kraymer/beets | beetsplug/bucket.py | 13 | 8178 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Provides the %bucket{} function for path formatting.
"""
from __future__ import division, absolute_import, print_function
from datetime import datetime
import re
import string
from six.moves import zip
from itertools import tee
from beets import plugins, ui
ASCII_DIGITS = string.digits + string.ascii_lowercase
class BucketError(Exception):
pass
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def span_from_str(span_str):
"""Build a span dict from the span string representation.
"""
def normalize_year(d, yearfrom):
"""Convert string to a 4 digits year
"""
if yearfrom < 100:
raise BucketError(u"%d must be expressed on 4 digits" % yearfrom)
# if two digits only, pick closest year that ends by these two
# digits starting from yearfrom
if d < 100:
if (d % 100) < (yearfrom % 100):
d = (yearfrom - yearfrom % 100) + 100 + d
else:
d = (yearfrom - yearfrom % 100) + d
return d
years = [int(x) for x in re.findall('\d+', span_str)]
if not years:
raise ui.UserError(u"invalid range defined for year bucket '%s': no "
u"year found" % span_str)
try:
years = [normalize_year(x, years[0]) for x in years]
except BucketError as exc:
raise ui.UserError(u"invalid range defined for year bucket '%s': %s" %
(span_str, exc))
res = {'from': years[0], 'str': span_str}
if len(years) > 1:
res['to'] = years[-1]
return res
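# Illustrative examples (not part of the plugin): two-digit years in a span
# are resolved relative to the span's first year by normalize_year().
def _example_spans():
    assert span_from_str('1990-99') == {'from': 1990, 'to': 1999,
                                        'str': '1990-99'}
    assert span_from_str('1994-05')['to'] == 2005  # 05 -> 2005, not 1905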
def complete_year_spans(spans):
"""Set the `to` value of spans if empty and sort them chronologically.
"""
spans.sort(key=lambda x: x['from'])
for (x, y) in pairwise(spans):
if 'to' not in x:
x['to'] = y['from'] - 1
if spans and 'to' not in spans[-1]:
spans[-1]['to'] = datetime.now().year
def extend_year_spans(spans, spanlen, start=1900, end=2014):
"""Add new spans to given spans list so that every year of [start,end]
belongs to a span.
"""
extended_spans = spans[:]
for (x, y) in pairwise(spans):
# if a gap between two spans, fill the gap with as much spans of
# spanlen length as necessary
for span_from in range(x['to'] + 1, y['from'], spanlen):
extended_spans.append({'from': span_from})
# Create spans prior to declared ones
for span_from in range(spans[0]['from'] - spanlen, start, -spanlen):
extended_spans.append({'from': span_from})
# Create spans after the declared ones
for span_from in range(spans[-1]['to'] + 1, end, spanlen):
extended_spans.append({'from': span_from})
complete_year_spans(extended_spans)
return extended_spans
def build_year_spans(year_spans_str):
"""Build a chronologically ordered list of spans dict from unordered spans
stringlist.
"""
spans = []
for elem in year_spans_str:
spans.append(span_from_str(elem))
complete_year_spans(spans)
return spans
def str2fmt(s):
"""Deduces formatting syntax from a span string.
"""
regex = re.compile(r"(?P<bef>\D*)(?P<fromyear>\d+)(?P<sep>\D*)"
r"(?P<toyear>\d*)(?P<after>\D*)")
m = re.match(regex, s)
res = {'fromnchars': len(m.group('fromyear')),
'tonchars': len(m.group('toyear'))}
res['fmt'] = "%s%%s%s%s%s" % (m.group('bef'),
m.group('sep'),
'%s' if res['tonchars'] else '',
m.group('after'))
return res
def format_span(fmt, yearfrom, yearto, fromnchars, tonchars):
"""Return a span string representation.
"""
args = (str(yearfrom)[-fromnchars:])
if tonchars:
args = (str(yearfrom)[-fromnchars:], str(yearto)[-tonchars:])
return fmt % args
def extract_modes(spans):
"""Extract the most common spans lengths and representation formats
"""
rangelen = sorted([x['to'] - x['from'] + 1 for x in spans])
deflen = sorted(rangelen, key=rangelen.count)[-1]
reprs = [str2fmt(x['str']) for x in spans]
deffmt = sorted(reprs, key=reprs.count)[-1]
return deflen, deffmt
def build_alpha_spans(alpha_spans_str, alpha_regexs):
"""Extract alphanumerics from string and return sorted list of chars
[from...to]
"""
spans = []
for elem in alpha_spans_str:
if elem in alpha_regexs:
spans.append(re.compile(alpha_regexs[elem]))
else:
bucket = sorted([x for x in elem.lower() if x.isalnum()])
if bucket:
begin_index = ASCII_DIGITS.index(bucket[0])
end_index = ASCII_DIGITS.index(bucket[-1])
else:
raise ui.UserError(u"invalid range defined for alpha bucket "
u"'%s': no alphanumeric character found" %
elem)
spans.append(
re.compile(
"^[" + ASCII_DIGITS[begin_index:end_index + 1] +
ASCII_DIGITS[begin_index:end_index + 1].upper() + "]"
)
)
return spans
class BucketPlugin(plugins.BeetsPlugin):
def __init__(self):
super(BucketPlugin, self).__init__()
self.template_funcs['bucket'] = self._tmpl_bucket
self.config.add({
'bucket_year': [],
'bucket_alpha': [],
'bucket_alpha_regex': {},
'extrapolate': False
})
self.setup()
def setup(self):
"""Setup plugin from config options
"""
self.year_spans = build_year_spans(self.config['bucket_year'].get())
if self.year_spans and self.config['extrapolate']:
[self.ys_len_mode,
self.ys_repr_mode] = extract_modes(self.year_spans)
self.year_spans = extend_year_spans(self.year_spans,
self.ys_len_mode)
self.alpha_spans = build_alpha_spans(
self.config['bucket_alpha'].get(),
self.config['bucket_alpha_regex'].get()
)
def find_bucket_year(self, year):
"""Return bucket that matches given year or return the year
if no matching bucket.
"""
for ys in self.year_spans:
if ys['from'] <= int(year) <= ys['to']:
if 'str' in ys:
return ys['str']
else:
return format_span(self.ys_repr_mode['fmt'],
ys['from'], ys['to'],
self.ys_repr_mode['fromnchars'],
self.ys_repr_mode['tonchars'])
return year
def find_bucket_alpha(self, s):
"""Return alpha-range bucket that matches given string or return the
string initial if no matching bucket.
"""
for (i, span) in enumerate(self.alpha_spans):
if span.match(s):
return self.config['bucket_alpha'].get()[i]
return s[0].upper()
def _tmpl_bucket(self, text, field=None):
if not field and len(text) == 4 and text.isdigit():
field = 'year'
if field == 'year':
func = self.find_bucket_year
else:
func = self.find_bucket_alpha
return func(text)
| mit | 8,793,212,588,693,092,000 | -7,470,598,028,554,202,000 | 32.379592 | 78 | 0.56224 | false |
jeffmurphy/cif-router | poc/cif-router.py | 1 | 21349 | #!/usr/bin/python
#
#
# cif-router proof of concept
#
# cif-router [-p pubport] [-r routerport] [-m myname] [-h]
# -p default: 5556
# -r default: 5555
# -m default: cif-router
#
# cif-router is a zmq device with the following sockets:
# XPUB
# for republishing messages
# XSUB
# for subscribing to message feeds
# ROUTER
# for routing REQ/REP messages between clients
# also for accepting REQs from clients
# locally accepted types:
# REGISTER, UNREGISTER, LIST-CLIENTS
# locally generated replies:
# UNAUTHORIZED, OK, FAILED
#
# communication between router and clients is via CIF.msg passing
# the 'ControlStruct' portion of CIF.msg is used for communication
#
# a typical use case:
#
# cif-smrt's REQ connects to ROUTER and sends a REGISTER message with dst=cif-router
# cif-router's ROUTER responds with SUCCESS (if valid) or UNAUTHORIZED (if not valid)
# the apikey will be validated during this step
# cif-router's XSUB connects to cif-smrt's XPUB
# cif-smrt begins publishing CIF messages
# cif-router re-publishes the CIF messages to clients connected to cif-router's XPUB
# clients may be: cif-correlator, cif-db
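# Illustrative sketch (not part of cif-router): the client side of the
# REGISTER exchange described above, built with the same ControlType fields
# this file uses. The endpoint, client name and apikey are hypothetical.
def _example_register(router="tcp://127.0.0.1:5555",
                      name="cif-smrt", key="11111111-2222-3333-444444444444"):
    import zmq
    import control_pb2  # resolvable once the sys.path.append below has run
    req = zmq.Context().socket(zmq.REQ)
    req.connect(router)
    msg = control_pb2.ControlType()
    msg.version = msg.version                   # required field, keep default
    msg.type = control_pb2.ControlType.COMMAND
    msg.command = control_pb2.ControlType.REGISTER
    msg.src = name
    msg.dst = "cif-router"
    msg.apikey = key
    req.send(msg.SerializeToString())           # ROUTER sees [identity, '', payload]
    reply = control_pb2.ControlType()
    reply.ParseFromString(req.recv())
    return reply.status == control_pb2.ControlType.SUCCESS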
import sys
import zmq
import time
import datetime
import threading
import getopt
import json
import pprint
import struct
sys.path.append('/usr/local/lib/cif-protocol/pb-python/gen-py')
import msg_pb2
import feed_pb2
import RFC5070_IODEF_v1_pb2
import MAEC_v2_pb2
import control_pb2
import cifsupport
sys.path.append('../../libcif/lib')
from CIF.RouterStats import *
from CIF.CtrlCommands.Clients import *
from CIF.CtrlCommands.Ping import *
from CIFRouter.MiniClient import *
from CIF.CtrlCommands.ThreadTracker import ThreadTracker
myname = "cif-router"
def dosubscribe(client, m):
client = m.src
    if client in publishers:
print "dosubscribe: we've seen this client before. re-using old connection."
return control_pb2.ControlType.SUCCESS
elif clients.isregistered(client) == True:
if clients.apikey(client) == m.apikey:
print "dosubscribe: New publisher to connect to " + client
publishers[client] = time.time()
addr = m.iPublishRequest.ipaddress
port = m.iPublishRequest.port
print "dosubscribe: connect our xsub -> xpub on " + addr + ":" + str(port)
xsub.connect("tcp://" + addr + ":" + str(port))
return control_pb2.ControlType.SUCCESS
print "dosubscribe: iPublish from a registered client with a bad apikey: " + client + " " + m.apikey
print "dosubscribe: iPublish from a client who isnt registered: \"" + client + "\""
return control_pb2.ControlType.FAILED
def list_clients(client, apikey):
if clients.isregistered(client) == True and clients.apikey(client) == apikey:
return clients.asmessage()
return None
def make_register_reply(msgfrom, _apikey):
msg = control_pb2.ControlType()
msg.version = msg.version # required
msg.type = control_pb2.ControlType.REPLY
msg.command = control_pb2.ControlType.REGISTER
msg.dst = msgfrom
msg.src = "cif-router"
print "mrr " + _apikey
msg.apikey = _apikey
return msg
def make_unregister_reply(msgfrom, _apikey):
msg = control_pb2.ControlType()
msg.version = msg.version # required
msg.type = control_pb2.ControlType.REPLY
msg.command = control_pb2.ControlType.UNREGISTER
msg.dst = msgfrom
msg.src = "cif-router"
msg.apikey = _apikey
return msg
def make_msg_seq(msg):
_md5 = hashlib.md5()
_md5.update(msg.SerializeToString())
return _md5.digest()
def handle_miniclient_reply(socket, routerport, publisherport):
pending_registers = miniclient.pending_apikey_lookups()
print "pending_apikey_lookups: ", pending_registers
for apikey in pending_registers:
if apikey in register_wait_map:
reply_to = register_wait_map[apikey]
apikey_results = miniclient.get_pending_apikey(apikey)
print " send reply to: ", reply_to
msg = make_register_reply(reply_to['msgfrom'], apikey)
msg.status = control_pb2.ControlType.FAILED
if apikey_results != None:
if apikey_results.revoked == False:
if apikey_results.expires == 0 or apikey_results.expires >= time.time():
msg.registerResponse.REQport = routerport
msg.registerResponse.PUBport = publisherport
msg.status = control_pb2.ControlType.SUCCESS
clients.register(reply_to['msgfrom'], reply_to['from_zmqid'], apikey)
print " Register succeeded."
else:
print " Register failed: key expired"
else:
print " Register failed: key revoked"
else:
print " Register failed: unknown key"
msg.seq = reply_to['msgseq']
socket.send_multipart([reply_to['from_zmqid'], '', msg.SerializeToString()])
del register_wait_map[apikey]
elif apikey in unregister_wait_map:
reply_to = unregister_wait_map[apikey]
apikey_results = miniclient.get_pending_apikey(apikey)
print " send reply to: ", reply_to
msg = make_unregister_reply(reply_to['msgfrom'], apikey)
msg.status = control_pb2.ControlType.FAILED
if apikey_results != None:
if apikey_results.revoked == False:
if apikey_results.expires == 0 or apikey_results.expires >= time.time():
msg.status = control_pb2.ControlType.SUCCESS
clients.unregister(reply_to['msgfrom'])
print " Unregister succeeded."
else:
print " Unregister failed: key expired"
else:
print " Unregister failed: key revoked"
else:
print " Unregister failed: unknown key"
msg.seq = reply_to['msgseq']
socket.send_multipart([reply_to['from_zmqid'], '', msg.SerializeToString()])
del unregister_wait_map[apikey]
miniclient.remove_pending_apikey(apikey)
def myrelay(pubport):
relaycount = 0
print "[myrelay] Create XPUB socket on " + str(pubport)
xpub = context.socket(zmq.PUB)
xpub.bind("tcp://*:" + str(pubport))
while True:
try:
relaycount = relaycount + 1
m = xsub.recv()
_m = msg_pb2.MessageType()
_m.ParseFromString(m)
if _m.type == msg_pb2.MessageType.QUERY:
mystats.setrelayed(1, 'QUERY')
elif _m.type == msg_pb2.MessageType.REPLY:
mystats.setrelayed(1, 'REPLY')
elif _m.type == msg_pb2.MessageType.SUBMISSION:
mystats.setrelayed(1, 'SUBMISSION')
for bmt in _m.submissionRequest:
mystats.setrelayed(1, bmt.baseObjectType)
print "[myrelay] total:%d got:%d bytes" % (relaycount, len(m))
#print "[myrelay] got msg on our xsub socket: " , m
xpub.send(m)
except Exception as e:
print "[myrelay] invalid message received: ", e
def usage():
print "cif-router [-r routerport] [-p pubport] [-m myid] [-a myapikey] [-dn dbname] [-dk dbkey] [-h]"
print " routerport = 5555, pubport = 5556, myid = cif-router"
print " dbkey = a8fd97c3-9f8b-477b-b45b-ba06719a0088"
print " dbname = cif-db"
try:
opts, args = getopt.getopt(sys.argv[1:], 'p:r:m:h')
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
global mystats
global clients
global thread_tracker
context = zmq.Context()
clients = Clients()
mystats = RouterStats()
publishers = {}
routerport = 5555
publisherport = 5556
myid = "cif-router"
dbkey = 'a8fd97c3-9f8b-477b-b45b-ba06719a0088'
dbname = 'cif-db'
global apikey
apikey = 'a1fd11c1-1f1b-477b-b45b-ba06719a0088'
miniclient = None
miniclient_id = myid + "-miniclient"
register_wait_map = {}
unregister_wait_map = {}
for o, a in opts:
if o == "-r":
routerport = a
elif o == "-p":
publisherport = a
elif o == "-m":
myid = a
elif o == "-dk":
dbkey = a
elif o == "-dn":
dbname = a
elif o == "-a":
apikey = a
elif o == "-h":
usage()
sys.exit(2)
print "Create ROUTER socket on " + str(routerport)
global socket
socket = context.socket(zmq.ROUTER)
socket.bind("tcp://*:" + str(routerport))
socket.setsockopt(zmq.IDENTITY, myname)
poller = zmq.Poller()
poller.register(socket, zmq.POLLIN)
print "Create XSUB socket"
xsub = context.socket(zmq.SUB)
xsub.setsockopt(zmq.SUBSCRIBE, '')
print "Connect XSUB<->XPUB"
thread = threading.Thread(target=myrelay, args=(publisherport,))
thread.start()
while not thread.isAlive():
print "waiting for pubsub relay thread to become alive"
time.sleep(1)
thread_tracker = ThreadTracker(False)
thread_tracker.add(id=thread.ident, user='Router', host='localhost', state='Running', info="PUBSUB Relay")
print "Entering event loop"
try:
open_for_business = False
while True:
sockets_with_data_ready = dict(poller.poll(1000))
#print "[up " + str(int(mystats.getuptime())) + "s]: Wakeup: "
if miniclient != None:
if miniclient.pending() == True:
print "\tMiniclient has replies we need to handle."
handle_miniclient_reply(socket, routerport, publisherport)
if sockets_with_data_ready and sockets_with_data_ready.get(socket) == zmq.POLLIN:
print "[up " + str(int(mystats.getuptime())) + "s]: Got an inbound message"
rawmsg = socket.recv_multipart()
#print " Got ", rawmsg
msg = control_pb2.ControlType()
try:
msg.ParseFromString(rawmsg[2])
except Exception as e:
print "Received message isn't a protobuf: ", e
mystats.setbad()
else:
from_zmqid = rawmsg[0] # save the ZMQ identity of who sent us this message
#print "Got msg: "#, msg.seq
try:
cifsupport.versionCheck(msg)
except Exception as e:
print "\tReceived message has incompatible version: ", e
mystats.setbadversion(1, msg.version)
else:
if cifsupport.isControl(msg):
msgfrom = msg.src
msgto = msg.dst
msgcommand = msg.command
msgcommandtext = control_pb2._CONTROLTYPE_COMMANDTYPE.values_by_number[msg.command].name
msgid = msg.seq
if msgfrom != '' and msg.apikey != '':
if msgto == myname and msg.type == control_pb2.ControlType.REPLY:
print "\tREPLY for me: ", msgcommand
if msgcommand == control_pb2.ControlType.APIKEY_GET:
print "\tReceived a REPLY for an APIKEY_GET"
elif msgto == myname and msg.type == control_pb2.ControlType.COMMAND:
print "\tCOMMAND for me: ", msgcommandtext
mystats.setcontrols(1, msgcommandtext)
"""
For REGISTER:
We allow only the db to register with us while we are not
open_for_business. Once the DB registers, we are open_for_business
since we can then start validating apikeys. Until that time, we can
only validate the dbkey that is specified on the command line when
you launch this program.
"""
if msgcommand == control_pb2.ControlType.REGISTER:
print "\tREGISTER from: " + msgfrom
msg.status = control_pb2.ControlType.FAILED
msg.type = control_pb2.ControlType.REPLY
msg.seq = msgid
if msgfrom == miniclient_id and msg.apikey == apikey:
clients.register(msgfrom, from_zmqid, msg.apikey)
msg.status = control_pb2.ControlType.SUCCESS
msg.registerResponse.REQport = routerport
msg.registerResponse.PUBport = publisherport
print "\tMiniClient has registered."
socket.send_multipart([from_zmqid, '', msg.SerializeToString()])
elif msgfrom == dbname and msg.apikey == dbkey:
clients.register(msgfrom, from_zmqid, msg.apikey)
msg.status = control_pb2.ControlType.SUCCESS
msg.registerResponse.REQport = routerport
msg.registerResponse.PUBport = publisherport
open_for_business = True
print "\tDB has connected successfully. Sending reply to DB."
print "\tStarting embedded client"
miniclient = MiniClient(apikey, "127.0.0.1", "127.0.0.1:" + str(routerport), 5557, miniclient_id, thread_tracker, True)
socket.send_multipart([from_zmqid, '', msg.SerializeToString()])
elif open_for_business == True:
"""
                                        Since we need to wait for the DB to respond, we note this pending request and ask the miniclient
                                        to handle the lookup. We will poll the MC to see if the lookup has finished. The reply to the client
                                        will be sent from handle_miniclient_reply()
"""
miniclient.lookup_apikey(msg.apikey)
register_wait_map[msg.apikey] = {'msgfrom': msgfrom, 'from_zmqid': from_zmqid, 'msgseq': msg.seq}
else:
print "\tNot open_for_business yet. Go away."
elif msgcommand == control_pb2.ControlType.UNREGISTER:
"""
If the database unregisters, then we are not open_for_business any more.
"""
print "\tUNREGISTER from: " + msgfrom
if open_for_business == True:
if msgfrom == dbname and msg.apikey == dbkey:
print "\t\tDB unregistered. Closing for business."
open_for_business = False
clients.unregister(msgfrom)
msg.status = control_pb2.ControlType.SUCCESS
msg.seq = msgid
socket.send_multipart([ from_zmqid, '', msg.SerializeToString()])
else:
"""
Since we need to wait for the DB to response, we note this pending request, ask the miniclient
to handle the lookup. We will poll the MC to see if the lookup has finished. Reply to the client
will be sent from handle_miniclient_reply()
"""
miniclient.lookup_apikey(msg.apikey)
unregister_wait_map[msg.apikey] = {'msgfrom': msgfrom, 'from_zmqid': from_zmqid, 'msgseq': msg.seq}
elif msgcommand == control_pb2.ControlType.LISTCLIENTS:
print "\tLIST-CLIENTS for: " + msgfrom
if open_for_business == True:
rv = list_clients(msg.src, msg.apikey)
msg.seq = msgid
msg.status = msg.status | control_pb2.ControlType.FAILED
if rv != None:
msg.status = msg.status | control_pb2.ControlType.SUCCESS
msg.listClientsResponse.client.extend(rv.client)
msg.listClientsResponse.connectTimestamp.extend(rv.connectTimestamp)
socket.send_multipart( [ from_zmqid, '', msg.SerializeToString() ] )
elif msg.command == control_pb2.ControlType.STATS:
print "\tSTATS for: " + msgfrom
if open_for_business == True:
tmp = msg.dst
msg.dst = msg.src
msg.src = tmp
msg.status = control_pb2.ControlType.SUCCESS
msg.statsResponse.statsType = control_pb2.StatsResponse.ROUTER
msg.statsResponse.stats = mystats.asjson()
socket.send_multipart( [ from_zmqid, '', msg.SerializeToString() ] )
elif msg.command == control_pb2.ControlType.THREADS_LIST:
tmp = msg.dst
msg.dst = msg.src
msg.src = tmp
msg.status = control_pb2.ControlType.SUCCESS
thread_tracker.asmessage(msg.listThreadsResponse)
socket.send_multipart( [ from_zmqid, '', msg.SerializeToString() ] )
if msg.command == control_pb2.ControlType.PING:
c = Ping.makereply(msg)
socket.send_multipart( [ from_zmqid, '', c.SerializeToString() ] )
elif msgcommand == control_pb2.ControlType.IPUBLISH:
print "\tIPUBLISH from: " + msgfrom
if open_for_business == True:
rv = dosubscribe(from_zmqid, msg)
msg.status = rv
socket.send_multipart( [from_zmqid, '', msg.SerializeToString()] )
else:
print "\tCOMMAND for someone else: cmd=", msgcommandtext, "src=", msgfrom, " dst=", msgto
msgto_zmqid = clients.getzmqidentity(msgto)
if msgto_zmqid != None:
socket.send_multipart([msgto_zmqid, '', msg.SerializeToString()])
else:
print "\tUnknown message destination: ", msgto
else:
print "\tmsgfrom and/or msg.apikey is empty"
except KeyboardInterrupt:
print "Shut down."
if thread.isAlive():
try:
thread._Thread__stop()
except:
print(str(thread.getName()) + ' could not be terminated')
sys.exit(0)
| bsd-3-clause | -5,396,847,589,475,279,000 | 855,681,139,869,172,200 | 44.230932 | 161 | 0.492154 | false |
liyu1990/sklearn | sklearn/linear_model/stochastic_gradient.py | 31 | 50760 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (alpha * (t + t0)) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
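        For example, a raw decision value of 0.3 is mapped to a positive-class
        probability of (clip(0.3, -1, 1) + 1) / 2 = 0.65.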
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
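# Illustrative usage sketch (not part of scikit-learn itself): probability
# estimates are only defined for loss="log" and loss="modified_huber".  With
# the toy data from the SGDClassifier docstring above, this looks roughly like
#
#     clf = SGDClassifier(loss="log").fit(X, Y)
#     clf.predict_proba([[-0.8, -1]])     # array of shape (1, 2)
#
# while calling predict_proba on a model trained with the default hinge loss
# raises an AttributeError (see _check_proba above).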
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
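# Illustrative sketch (not part of scikit-learn): both estimators also support
# out-of-core learning through partial_fit.  Assuming a user-supplied iterator
# of (X_batch, y_batch) arrays and that the full set of labels is known up
# front:
#
#     clf = SGDClassifier(loss="log")
#     for X_batch, y_batch in batches:
#         clf.partial_fit(X_batch, y_batch, classes=np.array([0, 1]))
#
# The classes argument is required only on the first call (see
# BaseSGDClassifier.partial_fit above).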
| bsd-3-clause | -6,374,702,741,941,265,000 | -5,066,605,014,556,488,000 | 40.034762 | 79 | 0.55266 | false |
Timurdov/bionic | bionic/Lib/site-packages/pip-1.3.1-py2.7.egg/pip/backwardcompat/__init__.py | 63 | 3519 | """Stuff that differs in different Python versions"""
import os
import imp
import sys
import site
__all__ = ['WindowsError']
uses_pycache = hasattr(imp, 'cache_from_source')
class NeverUsedException(Exception):
"""this exception should never be raised"""
try:
WindowsError = WindowsError
except NameError:
WindowsError = NeverUsedException
try:
#new in Python 3.3
PermissionError = PermissionError
except NameError:
PermissionError = NeverUsedException
console_encoding = sys.__stdout__.encoding
if sys.version_info >= (3,):
from io import StringIO, BytesIO
from functools import reduce
from urllib.error import URLError, HTTPError
from queue import Queue, Empty
from urllib.request import url2pathname
from urllib.request import urlretrieve
from email import message as emailmessage
import urllib.parse as urllib
import urllib.request as urllib2
import configparser as ConfigParser
import xmlrpc.client as xmlrpclib
import urllib.parse as urlparse
import http.client as httplib
def cmp(a, b):
return (a > b) - (a < b)
def b(s):
return s.encode('utf-8')
def u(s):
return s.decode('utf-8')
def console_to_str(s):
try:
return s.decode(console_encoding)
except UnicodeDecodeError:
return s.decode('utf_8')
def fwrite(f, s):
f.buffer.write(b(s))
bytes = bytes
string_types = (str,)
raw_input = input
else:
from cStringIO import StringIO
from urllib2 import URLError, HTTPError
from Queue import Queue, Empty
from urllib import url2pathname, urlretrieve
from email import Message as emailmessage
import urllib
import urllib2
import urlparse
import ConfigParser
import xmlrpclib
import httplib
def b(s):
return s
def u(s):
return s
def console_to_str(s):
return s
def fwrite(f, s):
f.write(s)
bytes = str
string_types = (basestring,)
reduce = reduce
cmp = cmp
raw_input = raw_input
BytesIO = StringIO
from distutils.sysconfig import get_python_lib, get_python_version
#site.USER_SITE was created in py2.6
user_site = getattr(site, 'USER_SITE', None)
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x + [y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
def home_lib(home):
"""Return the lib dir under the 'home' installation scheme"""
if hasattr(sys, 'pypy_version_info'):
lib = 'site-packages'
else:
lib = os.path.join('lib', 'python')
return os.path.join(home, lib)
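# Illustrative example (not in the original module): on a POSIX CPython,
# home_lib("/opt/myapp") returns "/opt/myapp/lib/python"; on PyPy it returns
# "/opt/myapp/site-packages".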
## py25 has no builtin ssl module
## only >=py32 has ssl.match_hostname and ssl.CertificateError
try:
import ssl
try:
from ssl import match_hostname, CertificateError
except ImportError:
from pip.backwardcompat.ssl_match_hostname import match_hostname, CertificateError
except ImportError:
ssl = None
# patch for py25 socket to work with http://pypi.python.org/pypi/ssl/
import socket
if not hasattr(socket, 'create_connection'): # for Python 2.5
# monkey-patch socket module
from pip.backwardcompat.socket_create_connection import create_connection
socket.create_connection = create_connection
| apache-2.0 | 1,919,560,192,686,863,400 | 6,454,742,846,458,262,000 | 23.78169 | 90 | 0.666951 | false |
Bismarrck/tensorflow | tensorflow/python/saved_model/saved_model_test.py | 13 | 64354 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import main_op
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import saver_test_utils
from tensorflow.python.training import training
from tensorflow.python.util import compat
SAVED_MODEL_PATH = ("cc/saved_model/testdata/half_plus_two/00000123")
def tearDownModule():
file_io.delete_recursively(test.get_temp_dir())
class SavedModelTestBase(test.TestCase):
def _get_export_dir(self, label):
return os.path.join(test.get_temp_dir(), label)
def _init_and_validate_variable(self, sess, variable_name, variable_value):
v = variables.VariableV1(variable_value, name=variable_name)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(variable_value, self.evaluate(v))
def _build_asset_collection(self, asset_file_name, asset_file_contents,
asset_file_tensor_name, asset_subdir=""):
parent_dir = os.path.join(
compat.as_bytes(test.get_temp_dir()), compat.as_bytes(asset_subdir))
file_io.recursive_create_dir(parent_dir)
asset_filepath = os.path.join(
compat.as_bytes(parent_dir), compat.as_bytes(asset_file_name))
file_io.write_string_to_file(asset_filepath, asset_file_contents)
asset_file_tensor = constant_op.constant(
asset_filepath, name=asset_file_tensor_name)
ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, asset_file_tensor)
asset_collection = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
return asset_collection
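# The tests below share one round trip (sketch only, using the TF 1.x
# graph-mode APIs imported at the top of this file):
#
#     builder = saved_model_builder._SavedModelBuilder(export_dir)
#     with session.Session(graph=ops.Graph()) as sess:
#         ...  # build variables / signatures
#         builder.add_meta_graph_and_variables(sess, ["tag"])
#     builder.save()
#
#     with session.Session(graph=ops.Graph()) as sess:
#         loader.load(sess, ["tag"], export_dir)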
class SavedModelTest(SavedModelTestBase):
def _validate_assets(self,
export_dir,
asset_file_def,
expected_asset_file_name,
expected_asset_file_contents,
expected_asset_tensor_name,
asset_id=0):
assets_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes(expected_asset_file_name))
actual_asset_contents = file_io.read_file_to_string(assets_path)
self.assertEqual(expected_asset_file_contents,
compat.as_text(actual_asset_contents))
self.assertEqual(expected_asset_file_name,
asset_file_def[asset_id].filename)
self.assertEqual(expected_asset_tensor_name,
asset_file_def[asset_id].tensor_info.name)
def _validate_inputs_tensor_info_fail(self, builder, tensor_info):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def({
"foo_inputs": tensor_info
}, dict(), "foo")
self.assertRaises(
AssertionError,
builder.add_meta_graph_and_variables,
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
def _validate_inputs_tensor_info_accept(self, builder, tensor_info):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def({
"foo_inputs": tensor_info
}, dict(), "foo")
builder.add_meta_graph_and_variables(
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
def _validate_outputs_tensor_info_fail(self, builder, tensor_info):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def(
dict(), {"foo_outputs": tensor_info}, "foo")
self.assertRaises(
AssertionError,
builder.add_meta_graph_and_variables,
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
def _validate_outputs_tensor_info_accept(self, builder, tensor_info):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def(
dict(), {"foo_outputs": tensor_info}, "foo")
builder.add_meta_graph_and_variables(
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
def _validate_sig_def_keys(self, builder, valid_tensor_info, invalid_key):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def(
dict(), {"foo_key": valid_tensor_info}, "foo")
self.assertRaises(
KeyError,
builder.add_meta_graph_and_variables,
sess, ["foo"],
signature_def_map={invalid_key: foo_signature})
def testMaybeSavedModelDir(self):
base_path = test.test_src_dir_path("/python/saved_model")
self.assertFalse(loader.maybe_saved_model_directory(base_path))
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
self.assertTrue(loader.maybe_saved_model_directory(base_path))
base_path = "complete_garbage"
self.assertFalse(loader.maybe_saved_model_directory(base_path))
def testBadSavedModelFileFormat(self):
export_dir = self._get_export_dir("test_bad_saved_model_file_format")
# Attempt to load a SavedModel from an export directory that does not exist.
with self.session(graph=ops.Graph()) as sess:
with self.assertRaisesRegexp(IOError,
"SavedModel file does not exist at: %s" %
export_dir):
loader.load(sess, ["foo"], export_dir)
os.makedirs(export_dir)
# Write an invalid binary proto to saved_model.pb.
path_to_pb = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PB)
with open(path_to_pb, "w") as f:
f.write("invalid content")
with self.session(graph=ops.Graph()) as sess:
with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
constants.SAVED_MODEL_FILENAME_PB):
loader.load(sess, ["foo"], export_dir)
# Cleanup the directory and start again.
file_io.delete_recursively(export_dir)
os.makedirs(export_dir)
# Write an invalid text proto to saved_model.pbtxt
path_to_pbtxt = os.path.join(export_dir,
constants.SAVED_MODEL_FILENAME_PBTXT)
with open(path_to_pbtxt, "w") as f:
f.write("invalid content")
with self.session(graph=ops.Graph()) as sess:
with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
constants.SAVED_MODEL_FILENAME_PBTXT):
loader.load(sess, ["foo"], export_dir)
@test_util.run_deprecated_v1
def testVerifySessionGraphUsage(self):
export_dir = self._get_export_dir("test_verify_session_graph_usage")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
# Save the SavedModel to disk.
builder.save()
# Build a session and supply it to the load operation.
sess = session.Session(graph=ops.Graph())
loader.load(sess, [tag_constants.TRAINING], export_dir)
# Check the variable within the scope of the session and its graph.
with sess:
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
@test_util.run_deprecated_v1
def testSequence(self):
export_dir = self._get_export_dir("test_sequence")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Expect an assertion error since add_meta_graph_and_variables() should be
# invoked before any add_meta_graph() calls.
with self.session(graph=ops.Graph()) as sess:
self.assertRaises(AssertionError, builder.add_meta_graph, ["foo"])
# Expect an assertion error for multiple calls of
# add_meta_graph_and_variables() since weights should be saved exactly once.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, ["bar"])
self.assertRaises(AssertionError, builder.add_meta_graph_and_variables,
sess, ["baz"])
@test_util.run_deprecated_v1
def testTags(self):
export_dir = self._get_export_dir("test_tags")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Graph with a single variable. SavedModel invoked to:
# - add with weights.
# - a single tag (from predefined constants).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
# Graph that updates the single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
# - a single tag (from predefined constants).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 43)
builder.add_meta_graph([tag_constants.SERVING])
# Graph that updates the single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
# - multiple tags (from predefined constants).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 45)
builder.add_meta_graph([tag_constants.SERVING, tag_constants.GPU])
# Graph that updates the single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
# - multiple tags (from predefined constants for serving on TPU).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 45)
builder.add_meta_graph([tag_constants.SERVING, tag_constants.TPU])
# Graph that updates the single variable. SavedModel is invoked:
# - to add the model (weights are not updated).
# - multiple custom tags.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 44)
builder.add_meta_graph(["foo", "bar"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with a single predefined tag whose variables were saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.TRAINING], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Restore the graph with a single predefined tag whose variables were not
# saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Restore the graph with multiple predefined tags whose variables were not
# saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.SERVING, tag_constants.GPU], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Restore the graph with multiple predefined tags (for serving on TPU)
# whose variables were not saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.SERVING, tag_constants.TPU], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Restore the graph with multiple tags. Provide duplicate tags to test set
# semantics.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo", "bar", "foo"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Try restoring a graph with a non-existent tag. This should yield a runtime
# error.
with self.session(graph=ops.Graph()) as sess:
self.assertRaises(RuntimeError, loader.load, sess, ["INVALID"],
export_dir)
# Try restoring a graph where a subset of the tags match. Since tag matching
# for meta graph defs follows "all" semantics, this should yield a runtime
# error.
with self.session(graph=ops.Graph()) as sess:
self.assertRaises(RuntimeError, loader.load, sess, ["foo", "baz"],
export_dir)
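  # Summary of the tag checks above: every tag passed to loader.load() must
  # match the tag set of a saved MetaGraphDef ("all" semantics), loading with
  # an unknown tag or a partially-matching tag list raises a RuntimeError, and
  # duplicate tags in the request (["foo", "bar", "foo"]) are collapsed.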
@test_util.run_v1_only("b/120545219")
def testVariables(self):
export_dir = self._get_export_dir("test_variables")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Graph with two variables. SavedModel invoked to:
# - add with weights.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v1", 1)
self._init_and_validate_variable(sess, "v2", 2)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Graph with a single variable (subset of the variables from the previous
# graph whose weights were saved). SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v2", 3)
builder.add_meta_graph(["bar"])
# Graph with a single variable (disjoint set of variables from the previous
# graph whose weights were saved). SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v3", 4)
builder.add_meta_graph(["baz"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo", whose variables were saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertEqual(len(collection_vars), 2)
self.assertEqual(1, collection_vars[0].eval())
self.assertEqual(2, collection_vars[1].eval())
# Restore the graph with tag "bar", whose variables were not saved. Only the
# subset of the variables added to the graph will be restored with the
# checkpointed value.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertEqual(len(collection_vars), 1)
self.assertEqual(2, collection_vars[0].eval())
# Try restoring the graph with tag "baz", whose variables were not saved.
# Since this graph has a disjoint set of variables from the set that was
# saved, this should raise an error.
with self.session(graph=ops.Graph()) as sess:
self.assertRaises(errors.NotFoundError, loader.load, sess, ["baz"],
export_dir)
@test_util.run_deprecated_v1
def testGraphWithoutVariables(self):
export_dir = self._get_export_dir("test_graph_has_variables")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Graph with no variables.
with self.session(graph=ops.Graph()) as sess:
constant_5_name = constant_op.constant(5.0).name
builder.add_meta_graph_and_variables(sess, ["foo"])
# Second graph with no variables
with self.session(graph=ops.Graph()) as sess:
constant_6_name = constant_op.constant(6.0).name
builder.add_meta_graph(["bar"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo".
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
# Read the constant a from the graph.
a = ops.get_default_graph().get_tensor_by_name(constant_5_name)
b = constant_op.constant(6.0)
c = a * b
self.assertEqual(30.0, self.evaluate(c))
# Restore the graph with tag "bar".
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
# Read the constant a from the graph.
a = ops.get_default_graph().get_tensor_by_name(constant_6_name)
b = constant_op.constant(5.0)
c = a * b
self.assertEqual(30.0, self.evaluate(c))
@test_util.run_deprecated_v1
def testNoOverwrite(self):
export_dir = self._get_export_dir("test_no_overwrite")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Graph with a single variable. SavedModel invoked to:
# - add with weights.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Restore the graph with tag "foo", whose variables were saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# An attempt to create another builder with the same export directory should
# result in an assertion error.
self.assertRaises(AssertionError, saved_model_builder._SavedModelBuilder,
export_dir)
@test_util.run_deprecated_v1
def testSaveAsText(self):
export_dir = self._get_export_dir("test_astext")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Graph with a single variable. SavedModel invoked to:
# - add with weights.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Graph with the same single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 43)
builder.add_meta_graph(["bar"])
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Restore the graph with tag "foo", whose variables were saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Restore the graph with tag "bar", whose variables were not saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
@test_util.run_v1_only("b/120545219")
def testCollections(self):
export_dir = self._get_export_dir("test_collections")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Graph with a single variable added to a collection. SavedModel invoked to:
# - add with weights.
with self.session(graph=ops.Graph()) as sess:
v = variables.VariableV1(42, name="v")
ops.add_to_collection("foo_vars", v)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(42, self.evaluate(v))
builder.add_meta_graph_and_variables(sess, ["foo"])
# Graph with the same single variable added to a different collection.
# SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.session(graph=ops.Graph()) as sess:
v = variables.VariableV1(43, name="v")
ops.add_to_collection("bar_vars", v)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(43, self.evaluate(v))
builder.add_meta_graph(["bar"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo", whose variables were saved. The
# collection 'foo_vars' should contain a single element. The collection
# 'bar_vars' should not be found.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
collection_foo_vars = ops.get_collection("foo_vars")
self.assertEqual(len(collection_foo_vars), 1)
self.assertEqual(42, collection_foo_vars[0].eval())
self.assertEqual(len(ops.get_collection("bar_vars")), 0)
# Restore the graph with tag "bar", whose variables were not saved. The
# collection-def exported as part of the meta graph def is updated to
# reflect the new collection. The value of the variable in the
# collection-def corresponds to the saved value (from the previous graph
# with tag "foo").
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
collection_bar_vars = ops.get_collection("bar_vars")
self.assertEqual(len(collection_bar_vars), 1)
self.assertEqual(42, collection_bar_vars[0].eval())
self.assertEqual(len(ops.get_collection("foo_vars")), 0)
@test_util.run_deprecated_v1
def testSignatureDefs(self):
export_dir = self._get_export_dir("test_signature_defs")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Graph with a single variable and a single entry in the signature def map.
# SavedModel is invoked to add with weights.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build and populate an empty SignatureDef for testing.
foo_signature = signature_def_utils.build_signature_def(dict(),
dict(), "foo")
builder.add_meta_graph_and_variables(
sess, ["foo"], signature_def_map={"foo_key": foo_signature})
# Graph with the same single variable and multiple entries in the signature
# def map. No weights are saved by SavedModel.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 43)
# Build and populate a different SignatureDef for testing.
bar_signature = signature_def_utils.build_signature_def(dict(),
dict(), "bar")
# Also, build a different SignatureDef corresponding to "foo_key" defined
# in the previous graph.
foo_new_signature = signature_def_utils.build_signature_def(dict(),
dict(),
"foo_new")
builder.add_meta_graph(
["bar"],
signature_def_map={
"bar_key": bar_signature,
"foo_key": foo_new_signature
})
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo". The single entry in the SignatureDef map
# corresponding to "foo_key" should exist.
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
foo_signature = foo_graph.signature_def
self.assertEqual(len(foo_signature), 1)
self.assertEqual("foo", foo_signature["foo_key"].method_name)
# Restore the graph with tag "bar". The SignatureDef map should have two
# entries. One corresponding to "bar_key" and another corresponding to the
# new value of "foo_key".
with self.session(graph=ops.Graph()) as sess:
bar_graph = loader.load(sess, ["bar"], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
bar_signature = bar_graph.signature_def
self.assertEqual(len(bar_signature), 2)
self.assertEqual("bar", bar_signature["bar_key"].method_name)
self.assertEqual("foo_new", bar_signature["foo_key"].method_name)
def testSignatureDefValidationFails(self):
export_dir = self._get_export_dir("test_signature_def_validation_fail")
builder = saved_model_builder._SavedModelBuilder(export_dir)
tensor_without_encoding = meta_graph_pb2.TensorInfo()
tensor_without_encoding.dtype = types_pb2.DT_FLOAT
self._validate_inputs_tensor_info_fail(builder, tensor_without_encoding)
self._validate_outputs_tensor_info_fail(builder, tensor_without_encoding)
tensor_without_dtype = meta_graph_pb2.TensorInfo()
tensor_without_dtype.name = "x"
self._validate_inputs_tensor_info_fail(builder, tensor_without_dtype)
self._validate_outputs_tensor_info_fail(builder, tensor_without_dtype)
tensor_empty = meta_graph_pb2.TensorInfo()
self._validate_inputs_tensor_info_fail(builder, tensor_empty)
self._validate_outputs_tensor_info_fail(builder, tensor_empty)
valid_tensor_info = meta_graph_pb2.TensorInfo()
valid_tensor_info.name = "foo"
valid_tensor_info.dtype = types_pb2.DT_FLOAT
self._validate_sig_def_keys(builder, valid_tensor_info,
constants.INIT_OP_SIGNATURE_KEY)
self._validate_sig_def_keys(builder, valid_tensor_info,
constants.TRAIN_OP_SIGNATURE_KEY)
@test_util.run_deprecated_v1
def testSignatureDefValidationSucceedsWithName(self):
tensor_with_name = meta_graph_pb2.TensorInfo()
tensor_with_name.name = "foo"
tensor_with_name.dtype = types_pb2.DT_FLOAT
export_dir = self._get_export_dir("test_signature_def_validation_name_1")
builder = saved_model_builder._SavedModelBuilder(export_dir)
self._validate_inputs_tensor_info_accept(builder, tensor_with_name)
export_dir = self._get_export_dir("test_signature_def_validation_name_2")
builder = saved_model_builder._SavedModelBuilder(export_dir)
self._validate_outputs_tensor_info_accept(builder, tensor_with_name)
@test_util.run_deprecated_v1
def testSignatureDefValidationSucceedsWithCoo(self):
tensor_with_coo = meta_graph_pb2.TensorInfo()
# TODO(soergel) test validation of each of the fields of coo_sparse
tensor_with_coo.coo_sparse.values_tensor_name = "foo"
tensor_with_coo.dtype = types_pb2.DT_FLOAT
export_dir = self._get_export_dir("test_signature_def_validation_coo_1")
builder = saved_model_builder._SavedModelBuilder(export_dir)
self._validate_inputs_tensor_info_accept(builder, tensor_with_coo)
export_dir = self._get_export_dir("test_signature_def_validation_coo_2")
builder = saved_model_builder._SavedModelBuilder(export_dir)
self._validate_outputs_tensor_info_accept(builder, tensor_with_coo)
@test_util.run_deprecated_v1
def testAssets(self):
export_dir = self._get_export_dir("test_assets")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset collection.
ignored_filepath = os.path.join(
compat.as_bytes(test.get_temp_dir()), compat.as_bytes("ignored.txt"))
file_io.write_string_to_file(ignored_filepath, "will be ignored")
asset_list = self._build_asset_collection("hello42.txt", "foo bar baz",
"asset_file_tensor")
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_list=asset_list)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt",
"foo bar baz", "asset_file_tensor:0")
ignored_asset_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes("ignored.txt"))
self.assertFalse(file_io.file_exists(ignored_asset_path))
@test_util.run_deprecated_v1
def testAssetsNameCollisionDiffFile(self):
export_dir = self._get_export_dir("test_assets_name_collision_diff_file")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
asset_list = self._build_asset_collection(
"hello42.txt", "foo bar bak", "asset_file_tensor", asset_subdir="1")
asset_list = self._build_asset_collection(
"hello42.txt", "foo bar baz", "asset_file_tensor_1", asset_subdir="2")
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_list=asset_list)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt",
"foo bar bak", "asset_file_tensor:0")
self._validate_assets(
export_dir,
foo_graph.asset_file_def,
"hello42.txt_1",
"foo bar baz",
"asset_file_tensor_1:0",
asset_id=1)
@test_util.run_deprecated_v1
def testAssetsNameCollisionSameFilepath(self):
export_dir = self._get_export_dir("test_assets_name_collision_same_path")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
asset_list = self._build_asset_collection("hello42.txt", "foo bar baz",
"asset_file_tensor")
asset_list = self._build_asset_collection("hello42.txt", "foo bar baz",
"asset_file_tensor_1")
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_list=asset_list)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt",
"foo bar baz", "asset_file_tensor:0")
      # The second tensor should also be recorded, pointing to the same
      # asset file.
self._validate_assets(
export_dir,
foo_graph.asset_file_def,
"hello42.txt",
"foo bar baz",
"asset_file_tensor_1:0",
asset_id=1)
ignored_asset_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes("hello42.txt_1"))
self.assertFalse(file_io.file_exists(ignored_asset_path))
@test_util.run_deprecated_v1
def testAssetsNameCollisionSameFile(self):
export_dir = self._get_export_dir("test_assets_name_collision_same_file")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
asset_list = self._build_asset_collection(
"hello42.txt", "foo bar baz", "asset_file_tensor", asset_subdir="1")
asset_list = self._build_asset_collection(
"hello42.txt", "foo bar baz", "asset_file_tensor_1", asset_subdir="2")
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_list=asset_list)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt",
"foo bar baz", "asset_file_tensor:0")
      # The second tensor should also be recorded, pointing to the same
      # asset file.
self._validate_assets(
export_dir,
foo_graph.asset_file_def,
"hello42.txt",
"foo bar baz",
"asset_file_tensor_1:0",
asset_id=1)
ignored_asset_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes("hello42.txt_1"))
self.assertFalse(file_io.file_exists(ignored_asset_path))
@test_util.run_deprecated_v1
def testAssetsNameCollisionManyFiles(self):
export_dir = self._get_export_dir("test_assets_name_collision_many_files")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
for i in range(5):
idx = str(i)
asset_list = self._build_asset_collection(
"hello42.txt",
"foo bar baz " + idx,
"asset_file_tensor_" + idx,
asset_subdir=idx)
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_list=asset_list)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
for i in range(1, 5):
idx = str(i)
self._validate_assets(
export_dir,
foo_graph.asset_file_def,
"hello42.txt_" + idx,
"foo bar baz " + idx,
"asset_file_tensor_{}:0".format(idx),
asset_id=i)
self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt",
"foo bar baz 0", "asset_file_tensor_0:0")
@test_util.run_v1_only("b/120545219")
def testCustomInitOp(self):
export_dir = self._get_export_dir("test_main_op")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variables.VariableV1(1, name="v1")
ops.add_to_collection("v", v1)
v2 = variables.VariableV1(2, name="v2")
ops.add_to_collection("v", v2)
# Initialize another variable `v3` to 42.
v3 = variables.VariableV1(42, name="v3")
ops.add_to_collection("v", v3)
# Set up an assignment op to be run as part of the main_op.
with ops.control_dependencies([main_op.main_op()]):
add_v1_v2 = math_ops.add(v1._ref(), v2._ref())
custom_init_op = control_flow_ops.group(state_ops.assign(v3, add_v1_v2))
self.evaluate(custom_init_op)
builder.add_meta_graph_and_variables(
sess, ["foo"], init_op=custom_init_op)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(1, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
# Evaluates to the sum of the first two variables and assigned as part of
# the main_op, following a restore.
self.assertEqual(3, ops.get_collection("v")[2].eval())
@test_util.run_v1_only("b/120545219")
def testTrainOp(self):
export_dir = self._get_export_dir("test_train_op")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variables.VariableV1(1, name="v1")
ops.add_to_collection("v", v1)
v2 = variables.VariableV1(2, name="v2")
ops.add_to_collection("v", v2)
self.evaluate(variables.global_variables_initializer())
train_op = state_ops.assign_add(v1, v2)
self.evaluate(train_op)
builder.add_meta_graph_and_variables(sess, ["foo"], train_op=train_op)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
meta_graph_def = loader.load(sess, ["foo"], export_dir)
self.assertEqual(3, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
self.assertIsInstance(
loader_impl.get_train_op(meta_graph_def), ops.Tensor)
@test_util.run_v1_only("b/120545219")
def testTrainOpGroup(self):
export_dir = self._get_export_dir("test_train_op_group")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variables.VariableV1(1, name="v1")
ops.add_to_collection("v", v1)
v2 = variables.VariableV1(2, name="v2")
ops.add_to_collection("v", v2)
self.evaluate(variables.global_variables_initializer())
train_op = control_flow_ops.group()
self.evaluate(train_op)
builder.add_meta_graph_and_variables(sess, ["foo"], train_op=train_op)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
meta_graph_def = loader.load(sess, ["foo"], export_dir)
self.assertEqual(1, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
self.assertIsInstance(
loader_impl.get_train_op(meta_graph_def), ops.Operation)
@test_util.run_v1_only("b/120545219")
def testTrainOpAfterVariables(self):
export_dir = self._get_export_dir("test_train_op_after_variables")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variables.VariableV1(1, name="v1")
ops.add_to_collection("v", v1)
v2 = variables.VariableV1(2, name="v2")
ops.add_to_collection("v", v2)
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(sess, ["pre_foo"])
train_op = state_ops.assign_add(v1, v2)
self.evaluate(train_op)
builder.add_meta_graph(["foo"], train_op=train_op)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
meta_graph_def = loader.load(sess, ["foo"], export_dir)
self.assertIsInstance(
loader_impl.get_train_op(meta_graph_def), ops.Tensor)
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["pre_foo"], export_dir)
self.assertFalse(ops.get_collection(constants.TRAIN_OP_KEY))
@test_util.run_deprecated_v1
def testMultipleAssets(self):
export_dir = self._get_export_dir("test_multiple_assets")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset collection specific to `foo` graph.
asset_list = self._build_asset_collection("foo.txt", "content_foo",
"asset_file_tensor")
# Add the asset collection as part of the graph with tag "foo".
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_list=asset_list)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset collection specific to `bar` graph.
asset_list = self._build_asset_collection("bar.txt", "content_bar",
"asset_file_tensor")
# Add the asset collection as part of the graph with tag "bar".
builder.add_meta_graph(["bar"], assets_list=asset_list)
# Save the SavedModel to disk.
builder.save()
# Check assets restored for graph with tag "foo".
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_assets(export_dir, foo_graph.asset_file_def, "foo.txt",
"content_foo", "asset_file_tensor:0")
# Check assets restored for graph with tag "bar".
with self.session(graph=ops.Graph()) as sess:
bar_graph = loader.load(sess, ["bar"], export_dir)
self._validate_assets(export_dir, bar_graph.asset_file_def, "bar.txt",
"content_bar", "asset_file_tensor:0")
@test_util.run_deprecated_v1
def testDuplicateAssets(self):
export_dir = self._get_export_dir("test_duplicate_assets")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset collection with `foo.txt` that has `foo` specific
# content.
asset_list = self._build_asset_collection("foo.txt", "content_foo",
"asset_file_tensor")
# Add the asset collection as part of the graph with tag "foo".
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_list=asset_list)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset collection with `foo.txt` that has `bar` specific
# content.
asset_list = self._build_asset_collection("foo.txt", "content_bar",
"asset_file_tensor")
# Add the asset collection as part of the graph with tag "bar".
builder.add_meta_graph(["bar"], assets_list=asset_list)
# Save the SavedModel to disk.
builder.save()
# Check assets restored for graph with tag "foo".
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_assets(export_dir, foo_graph.asset_file_def, "foo.txt",
"content_foo", "asset_file_tensor:0")
# Check assets restored for graph with tag "bar".
with self.session(graph=ops.Graph()) as sess:
bar_graph = loader.load(sess, ["bar"], export_dir)
# Validate the assets for `bar` graph. `foo.txt` should contain the
# original contents corresponding to `foo` graph since an asset with the
      # same name across multiple graphs is only stored the first time.
self._validate_assets(export_dir, bar_graph.asset_file_def, "foo.txt",
"content_foo", "asset_file_tensor:0")
@test_util.run_v1_only("b/120545219")
def testOp(self):
export_dir = self._get_export_dir("test_op")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with session.Session(
graph=ops.Graph(),
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v1 = variables.VariableV1(1, name="v1")
with sess.graph.device("/cpu:1"):
v2 = variables.VariableV1(2, name="v2")
# v3 is an unsaved variable derived from v1 and v2. It is used to
# exercise the ability to run an init op when restoring a graph.
v3 = variables.VariableV1(1, name="v3", trainable=False, collections=[])
assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
init_op = control_flow_ops.group(assign_v3, name="init_op")
ops.add_to_collection("v", v1)
ops.add_to_collection("v", v2)
ops.add_to_collection("v", v3)
ops.add_to_collection("init_op", init_op)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk.
builder.save()
with session.Session(
graph=ops.Graph(),
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
loader.load(sess, ["foo"], export_dir)
# Validate variables, run the init op and verify result.
self.assertEqual(1, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
ops.get_collection("init_op")[0].run()
self.assertEqual(3, ops.get_collection("v")[2].eval())
def testCustomSaveable(self):
export_dir = self._get_export_dir("custom_saveable")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with session.Session(
graph=ops.Graph(),
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
# CheckpointedOp is a key-value table that can be saved across sessions.
      # The table registers itself in the SAVEABLE_OBJECTS collection.
v1 = saver_test_utils.CheckpointedOp(name="v1")
self.evaluate(variables.global_variables_initializer())
v1.insert("k1", 3.0).run()
# Once the table is restored, we can access it through this reference.
ops.add_to_collection("table_ref", v1.table_ref)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk.
builder.save()
with session.Session(
graph=ops.Graph(),
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
loader.load(sess, ["foo"], export_dir)
# Instantiate a wrapper object from the checkpointed reference.
v1 = saver_test_utils.CheckpointedOp(
name="v1", table_ref=ops.get_collection("table_ref")[0])
self.assertEqual(b"k1", v1.keys().eval())
self.assertEqual(3.0, v1.values().eval())
@test_util.run_deprecated_v1
def testCustomSaver(self):
export_dir = self._get_export_dir("test_custom_saver")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
variables.VariableV1(1, name="v1")
self.evaluate(variables.global_variables_initializer())
custom_saver = training.Saver(name="my_saver")
builder.add_meta_graph_and_variables(sess, ["tag"], saver=custom_saver)
# Save the SavedModel to disk.
builder.save()
with ops.Graph().as_default() as graph:
with self.session(graph=graph) as sess:
saved_graph = loader.load(sess, ["tag"], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue("my_saver/restore_all" in graph_ops)
self.assertFalse("save/restore_all" in graph_ops)
self.assertEqual(
saved_graph.saver_def.restore_op_name, "my_saver/restore_all")
@test_util.run_deprecated_v1
def testNoCustomSaver(self):
export_dir = self._get_export_dir("test_no_custom_saver")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
variables.VariableV1(1, name="v1")
self.evaluate(variables.global_variables_initializer())
training.Saver(name="my_saver")
builder.add_meta_graph_and_variables(sess, ["tag"])
# Save the SavedModel to disk.
builder.save()
with ops.Graph().as_default() as graph:
with self.session(graph=graph) as sess:
saved_graph = loader.load(sess, ["tag"], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue("my_saver/restore_all" in graph_ops)
self.assertTrue("save/restore_all" in graph_ops)
self.assertEqual(
saved_graph.saver_def.restore_op_name, "save/restore_all")
@test_util.run_deprecated_v1
def testMultipleCustomSavers(self):
export_dir = self._get_export_dir("test_multiple_custom_savers")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
variables.VariableV1(1, name="v1")
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(sess, ["tag_0"])
saver_1 = training.Saver()
builder.add_meta_graph(["tag_1"], saver=saver_1)
saver_2 = training.Saver()
builder.add_meta_graph(["tag_2"], saver=saver_2)
# Save the SavedModel to disk.
builder.save()
def _validate_custom_saver(tag_name, saver_name):
with ops.Graph().as_default() as graph:
with self.session(graph=graph) as sess:
saved_graph = loader.load(sess, [tag_name], export_dir)
self.assertEqual(
saved_graph.saver_def.restore_op_name,
saver_name)
_validate_custom_saver("tag_0", "save/restore_all")
_validate_custom_saver("tag_1", "save_1/restore_all")
_validate_custom_saver("tag_2", "save_2/restore_all")
@test_util.run_deprecated_v1
def testImportScope(self):
export_dir = self._get_export_dir("test_scoped_assets")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Build a SavedModel with a variable, an asset, and a constant tensor.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
asset_list = self._build_asset_collection("foo.txt", "content_foo",
"asset_file_tensor")
constant_op.constant("constant value", name="constant_tensor_name")
builder.add_meta_graph_and_variables(
sess, ["tag_name"], assets_list=asset_list)
# Save the asset file path for later comparison.
asset_file_path = asset_list[0].eval()
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
# Restore the SavedModel under an import_scope in a new graph/session.
graph_proto = loader.load(
sess, ["tag_name"], export_dir, import_scope="scope_name")
# The loaded variable tensor should be scoped, but its contents should be
# unchanged.
self.assertEqual(
"scope_name/v:0",
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].name)
self.assertEqual(
42,
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# The loaded asset tensor should be scoped, but the asset file path and
# contents should be unchanged.
asset_list = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
self.assertEqual(1, len(asset_list))
self.assertEqual(asset_file_path, asset_list[0].eval())
self.assertEqual("scope_name/asset_file_tensor:0", asset_list[0].name)
# The static asset data inside graph_proto.collection_def should not be
# scoped.
self._validate_assets(export_dir, graph_proto.asset_file_def, "foo.txt",
"content_foo", "asset_file_tensor:0")
# The constant tensor should be scoped, but its contents should be
# unchanged.
self.assertEqual(
compat.as_bytes("constant value"),
ops.get_default_graph().get_tensor_by_name(
"scope_name/constant_tensor_name:0").eval())
@test_util.run_deprecated_v1
def testClearDevices(self):
export_dir = self._get_export_dir("test_clear_devices")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Specify a device and save a variable.
ops.reset_default_graph()
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(
sess, [tag_constants.TRAINING], clear_devices=True)
# Save the SavedModel to disk.
builder.save()
# Restore the graph with a single predefined tag whose variables were saved
# without any device information.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.TRAINING], export_dir)
self.assertEqual(
42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
  # Tests the behavior of loading SavedModels that have missing attrs or attrs
# with incorrect types.
def testInconsistentConsumerDefaultAttrs(self):
export_dir = self._get_export_dir(
"test_strip_default_attrs_no_consumer_defaults")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Add a graph with a single variable and a test op with a defaultless
# float32 attr, "test_attr".
with session.Session(graph=ops.Graph()) as sess:
variables.VariableV1(1.0, dtype=dtypes.float64, name="var")
test_ops.test_attr(T=dtypes.float32, name="test_attr")
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Rewrite the SavedModel to remove the T attr from "test_attr".
saved_model_file = os.path.join(
export_dir, constants.SAVED_MODEL_FILENAME_PBTXT)
with open(saved_model_file) as f:
original_saved_model = f.read()
no_attr_saved_model = original_saved_model.replace("""
attr {
key: "T"
value {
type: DT_FLOAT
}
}""", "")
with open(saved_model_file, "w") as f:
f.write(no_attr_saved_model)
# Loading the SavedModel via the loader must fail because the SavedModel
# does not have any attr values for the "TestAttr" node, and there is no
# default specified in the TestAttr OpDef.
sess = session.Session(graph=ops.Graph())
with self.assertRaisesRegexp(
ValueError, "NodeDef missing attr 'T' from Op<name=TestAttr"):
loader.load(sess, ["foo"], export_dir)
# Rewrite the SavedModel to change the type of the T attr in "test_attr"
bad_type_saved_model = original_saved_model.replace("""
attr {
key: "T"
value {
type: DT_FLOAT
}
}""", """
attr {
key: "T"
value {
type: DT_DOUBLE
}
}""")
with open(saved_model_file, "w") as f:
f.write(bad_type_saved_model)
# Loading the SavedModel via the loader must fail because there is no
# OpKernel registered to handle T = double.
sess = session.Session(graph=ops.Graph())
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"No OpKernel was registered to support Op 'TestAttr' used by node "
"test_attr \\(defined at .*\\) with these attrs: \\[.*\\]\n"
"Registered devices:.*\n"
"Registered kernels:.*"
):
loader.load(sess, ["foo"], export_dir)
class SavedModelV1Test(SavedModelTestBase):
def _validate_asset_collection(self,
export_dir,
graph_collection_def,
expected_asset_file_name,
expected_asset_file_contents,
expected_asset_tensor_name,
asset_id=0):
assets_any = graph_collection_def[constants.ASSETS_KEY].any_list.value
asset = meta_graph_pb2.AssetFileDef()
assets_any[asset_id].Unpack(asset)
assets_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes(expected_asset_file_name))
actual_asset_contents = file_io.read_file_to_string(assets_path)
self.assertEqual(expected_asset_file_contents,
compat.as_text(actual_asset_contents))
self.assertEqual(expected_asset_file_name, asset.filename)
self.assertEqual(expected_asset_tensor_name, asset.tensor_info.name)
@test_util.run_deprecated_v1
def testWritingAssetsToCollection(self):
export_dir = self._get_export_dir("test_writing_assets_to_collection")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
# Build an asset list.
ignored_filepath = os.path.join(
compat.as_bytes(test.get_temp_dir()), compat.as_bytes("ignored.txt"))
file_io.write_string_to_file(ignored_filepath, "will be ignored")
asset_collection = self._build_asset_collection(
"hello42.txt", "foo bar baz", "asset_file_tensor")
builder.add_meta_graph_and_variables(
sess, ["foo"], assets_collection=asset_collection)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
foo_graph = loader.load(sess, ["foo"], export_dir)
self._validate_asset_collection(export_dir, foo_graph.collection_def,
"hello42.txt", "foo bar baz",
"asset_file_tensor:0")
ignored_asset_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes("ignored.txt"))
self.assertFalse(file_io.file_exists(ignored_asset_path))
@test_util.run_deprecated_v1
def testLegacyInitOpWithNonEmptyCollection(self):
export_dir = self._get_export_dir(
"test_legacy_init_op_with_non_empty_collection")
self._testInitOpsWithNonEmptyCollection(export_dir,
constants.LEGACY_INIT_OP_KEY)
@test_util.run_deprecated_v1
def testMainOpWithNonEmptyCollection(self):
export_dir = self._get_export_dir("test_main_op_with_non_empty_collection")
self._testInitOpsWithNonEmptyCollection(export_dir, constants.MAIN_OP_KEY)
def _testInitOpsWithNonEmptyCollection(self, export_dir, key):
builder = saved_model_builder.SavedModelBuilder(export_dir)
g = ops.Graph()
with self.session(graph=g) as sess:
# Initialize variable `v1` to 1.
v1 = variables.VariableV1(1, name="v1")
ops.add_to_collection("v", v1)
# Initialize another variable `v2` to 42.
v2 = variables.VariableV1(42, name="v2", trainable=False, collections=[])
ops.add_to_collection("v", v2)
# Set up an assignment op to be run as part of the init op.
assign_v2 = state_ops.assign(v2, v1)
init_op = control_flow_ops.group(assign_v2, name="init_op")
self.evaluate(variables.global_variables_initializer())
ops.add_to_collection(key, control_flow_ops.no_op())
# ValueError should be raised since the LEGACY_INIT_OP_KEY collection
# is not empty and we don't support multiple init ops.
with self.assertRaisesRegexp(ValueError, "Graph already contains"):
builder.add_meta_graph_and_variables(
sess, ["foo"], legacy_init_op=init_op)
# We shouldn't be able to add as MAIN_OP, either.
with self.assertRaisesRegexp(ValueError, "Graph already contains"):
builder.add_meta_graph_and_variables(sess, ["foo"], main_op=init_op)
def testStripDefaultAttrs(self):
export_dir = self._get_export_dir("test_strip_default_attrs")
builder = saved_model_builder.SavedModelBuilder(export_dir)
# Add a graph with two float32 variables and a Complex Op composing them
# with strip_default_attrs enabled.
with session.Session(graph=ops.Graph()) as sess:
real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(
sess, ["foo"], strip_default_attrs=True)
# Add a graph with the same float32 variables and a Complex Op composing
# them with strip_default_attrs disabled.
with session.Session(graph=ops.Graph()) as sess:
real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real")
imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag")
math_ops.complex(real_num, imag_num, name="complex")
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph(["bar"], strip_default_attrs=False)
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Loading graph "foo" via the loader must restore the defaults for the
# "Complex" node based on the "Complex" OpDef in the Op registry.
sess = session.Session(graph=ops.Graph())
meta_graph_def = loader.load(sess, ["foo"], export_dir)
complex_node = test_util.get_node_def_from_graph("complex",
meta_graph_def.graph_def)
self.assertIn("T", complex_node.attr)
self.assertIn("Tout", complex_node.attr)
# Load graph "foo" from disk as-is to verify default attrs are stripped.
saved_model_pb = loader_impl.parse_saved_model(export_dir)
self.assertIsNotNone(saved_model_pb)
meta_graph_foo_def = None
meta_graph_bar_def = None
for meta_graph_def in saved_model_pb.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set(["foo"]):
meta_graph_foo_def = meta_graph_def
elif set(meta_graph_def.meta_info_def.tags) == set(["bar"]):
meta_graph_bar_def = meta_graph_def
self.assertIsNotNone(meta_graph_foo_def)
self.assertIsNotNone(meta_graph_bar_def)
# "Complex" Op has 2 attributes with defaults:
# o "T" : float32. (input type)
# o "Tout" : complex64. (output type)
# "Complex" Op in graph "foo" shouldn't have attributes "T" and "Tout".
# Graph "foo" was saved with strip_default_attrs set to True.
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_foo_def.graph_def)
self.assertNotIn("T", node_def.attr)
self.assertNotIn("Tout", node_def.attr)
# "Complex" Op in graph "bar" must have attributes "T" and "Tout".
# Graph "bar" was saved with strip_default_attrs set to False.
node_def = test_util.get_node_def_from_graph("complex",
meta_graph_bar_def.graph_def)
self.assertIn("T", node_def.attr)
self.assertIn("Tout", node_def.attr)
@test_util.run_v1_only("b/120545219")
def testLegacyInitOp(self):
export_dir = self._get_export_dir("test_legacy_init_op")
builder = saved_model_builder.SavedModelBuilder(export_dir)
with self.session(graph=ops.Graph()) as sess:
# Add `v1` and `v2` variables to the graph.
v1 = variables.VariableV1(1, name="v1")
ops.add_to_collection("v", v1)
v2 = variables.VariableV1(2, name="v2")
ops.add_to_collection("v", v2)
# Initialize another variable `v3` to 42.
v3 = variables.VariableV1(42, name="v3", trainable=False, collections=[])
ops.add_to_collection("v", v3)
# Set up an assignment op to be run as part of the init_op.
assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
legacy_init_op = control_flow_ops.group(assign_v3, name="legacy_init_op")
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(
sess, ["foo"], legacy_init_op=legacy_init_op)
# Save the SavedModel to disk.
builder.save()
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(1, ops.get_collection("v")[0].eval())
self.assertEqual(2, ops.get_collection("v")[1].eval())
# Evaluates to the sum of the first two variables and assigned as part of
# the legacy_init_op, following a restore.
self.assertEqual(3, ops.get_collection("v")[2].eval())
if __name__ == "__main__":
test.main()
| apache-2.0 | 5,936,527,172,474,532,000 | -607,557,145,547,185,700 | 40.979126 | 80 | 0.652065 | false |
EmilianStankov/Viridis-Media-Player | source/playlist_tests.py | 1 | 1257 | import unittest
from playlist import Playlist, load_playlist_from_db
class TestPlaylist(unittest.TestCase):
"""Playlist tests"""
def setUp(self):
self.pl = Playlist("playlist", ["song_one", "song_two"])
self.pl.save_to_db()
def tearDown(self):
self.pl.delete_from_db()
def test_get_playlist_name(self):
self.assertEqual(self.pl.get_name(), "playlist")
def test_get_playlist_files(self):
self.assertEqual(self.pl.get_files(), ["song_one", "song_two"])
def test_add_new_file_to_playlist(self):
self.pl.add_file("song_three")
self.assertEqual(self.pl.get_files(),
["song_one", "song_two", "song_three"])
def test_remove_file_from_playlist(self):
self.pl.remove_file("song_one")
self.assertEqual(self.pl.get_files(), ["song_two"])
def test_remove_file_that_is_not_in_playlist(self):
        # Pass the callable and its argument separately so that assertRaises
        # can invoke it and catch the expected ValueError.
        self.assertRaises(ValueError, self.pl.remove_file, "song_three")
def test_load_playlist_from_database(self):
pl2 = load_playlist_from_db("playlist")
self.assertEqual(pl2.get_name(), "playlist")
self.assertEqual(pl2.get_files(), ["song_one", "song_two"])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 7,440,811,588,212,124,000 | 1,033,541,542,916,080,600 | 30.425 | 72 | 0.618934 | false |
fdouetteau/PyBabe | pybabe/pivot.py | 1 | 2935 |
try:
from collections import OrderedDict
except:
## 2.6 Fallback
from ordereddict import OrderedDict
from base import StreamHeader, StreamFooter, BabeBase
class OrderedDefaultdict(OrderedDict):
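    """An OrderedDict that builds missing values with a default factory,
    like collections.defaultdict, while preserving insertion order."""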
def __init__(self, *args, **kwargs):
newdefault = None
newargs = ()
if args:
newdefault = args[0]
if not (newdefault is None or callable(newdefault)):
raise TypeError('first argument must be callable or None')
newargs = args[1:]
self.default_factory = newdefault
super(self.__class__, self).__init__(*newargs, **kwargs)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self): # optional, for pickle support
args = self.default_factory if self.default_factory else tuple()
return type(self), args, None, None, self.items()
class OrderedSet(set):
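    """A set that also remembers the order in which elements were first added."""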
def __init__(self):
self.list = []
def add(self, elt):
if elt in self:
return
else:
super(OrderedSet, self).add(elt)
self.list.append(elt)
def __iter__(self):
return self.list.__iter__()
def pivot(stream, pivot, group):
"Create a pivot around field, grouping on identical value for 'group'"
groups = OrderedDefaultdict(dict)
pivot_values = OrderedSet()
header = None
group_n = map(StreamHeader.keynormalize, group)
for row in stream:
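        # The header is remembered, data rows are buffered per group key, and
        # the pivoted output is only emitted once the StreamFooter arrives.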
if isinstance(row, StreamHeader):
header = row
elif isinstance(row, StreamFooter):
            # Output header is: group fields + (one column per other field
            # for each pivot value, named "<field>-<value>").
            other_fields = [f for f in header.fields
                            if f not in group and f != pivot]
other_fields_k = map(StreamHeader.keynormalize, other_fields)
fields = group + [f + "-" + str(v)
for v in pivot_values.list for f in other_fields]
newheader = header.replace(fields=fields)
yield newheader
for _, row_dict in groups.iteritems():
## Create a line per group
mrow = row_dict.itervalues().next()
group_cols = [getattr(mrow, col) for col in group_n]
for v in pivot_values:
if v in row_dict:
mrow = row_dict[v]
group_cols.extend([getattr(mrow, col) for col in other_fields_k])
else:
group_cols.extend([None for col in other_fields])
yield group_cols
yield row
else:
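            # Data row: build a composite key from the group columns and
            # index the row by its pivot value.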
kgroup = ""
for f in group_n:
kgroup = kgroup + str(getattr(row, f))
groups[kgroup][getattr(row, pivot)] = row
pivot_values.add(getattr(row, pivot))
BabeBase.register("pivot", pivot)
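# Example of the transformation (hypothetical data): rows with fields
# ['shop', 'month', 'sales'] pivoted with pivot='month', group=['shop'] become
# one row per shop with columns 'sales-<month>' for every month value seen in
# the stream; combinations missing from the input are filled with None.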
| bsd-3-clause | -9,103,102,890,448,385,000 | -880,554,840,838,571,900 | 33.529412 | 90 | 0.560136 | false |
rbn42/stiler | config.py | 1 | 1027 | WinBorder = 2
LeftPadding = 15
BottomPadding = 15
TopPadding = BottomPadding
RightPadding = BottomPadding
NavigateAcrossWorkspaces = True # available in Unity7
TempFile = "/dev/shm/.stiler_db"
LockFile = "/dev/shm/.stiler.lock"
# This is the configuration that works for Unity7. If you are using a
# different desktop environment, close all windows and execute "wmctrl
# -lG" to find out all the applications that need to be excluded.
EXCLUDE_APPLICATIONS = ['<unknown>', 'x-nautilus-desktop', 'unity-launcher',
'unity-panel', 'Hud', 'unity-dash', 'Desktop',
'Docky',
'screenkey', 'XdndCollectionWindowImp']
# An alternative method to exclude applications.
EXCLUDE_WM_CLASS = ['wesnoth-1.12']
UNRESIZABLE_APPLICATIONS = ['Screenkey']
RESIZE_STEP = 50
MOVE_STEP = 50
MIN_WINDOW_WIDTH = 50
MIN_WINDOW_HEIGHT = 50
#NOFRAME_WMCLASS = ['Wine']
# In i3-wm's window tree, only one child of a node is allowed to split.
#MAX_KD_TREE_BRANCH = 1
MAX_KD_TREE_BRANCH = 2
| mit | -32,407,984,009,349,660 | -615,717,748,220,351,100 | 31.09375 | 76 | 0.685492 | false |
MattFaus/CrowdTube-Connector | lib/gdata-2.0.18/tests/gdata_tests/apps/emailsettings/live_client_test.py | 23 | 12853 | #!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'Claudio Cherubino <[email protected]>'
import unittest
import gdata.apps.emailsettings.client
import gdata.apps.emailsettings.data
import gdata.client
import gdata.data
import gdata.gauth
import gdata.test_config as conf
conf.options.register_option(conf.APPS_DOMAIN_OPTION)
conf.options.register_option(conf.TARGET_USERNAME_OPTION)
class EmailSettingsClientTest(unittest.TestCase):
def setUp(self):
self.client = gdata.apps.emailsettings.client.EmailSettingsClient(
domain='example.com')
if conf.options.get_value('runlive') == 'true':
self.client = gdata.apps.emailsettings.client.EmailSettingsClient(
domain=conf.options.get_value('appsdomain'))
if conf.options.get_value('ssl') == 'true':
self.client.ssl = True
conf.configure_client(self.client, 'EmailSettingsClientTest',
self.client.auth_service, True)
self.username = conf.options.get_value('appsusername').split('@')[0]
def tearDown(self):
conf.close_client(self.client)
def testClientConfiguration(self):
self.assertEqual('apps-apis.google.com', self.client.host)
self.assertEqual('2.0', self.client.api_version)
self.assertEqual('apps', self.client.auth_service)
if conf.options.get_value('runlive') == 'true':
self.assertEqual(self.client.domain, conf.options.get_value('appsdomain'))
else:
self.assertEqual(self.client.domain, 'example.com')
def testMakeEmailSettingsUri(self):
self.assertEqual('/a/feeds/emailsettings/2.0/%s/%s/%s' % (self.client.domain,
'abc', 'label'),
self.client.MakeEmailSettingsUri('abc', 'label'))
def testCreateDeleteLabel(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testCreateLabel')
new_label = self.client.CreateLabel(
username=conf.options.get_value('targetusername'),
name='status updates')
self.assert_(isinstance(new_label,
gdata.apps.emailsettings.data.EmailSettingsLabel))
self.assertEqual(new_label.name, 'status updates')
self.client.DeleteLabel(
username=conf.options.get_value('targetusername'),
label='status updates')
def testCreateFilter(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testCreateFilter')
new_filter = self.client.CreateFilter(
username=conf.options.get_value('targetusername'),
from_address='[email protected]',
has_the_word='project proposal', mark_as_read=True)
self.assert_(isinstance(new_filter,
gdata.apps.emailsettings.data.EmailSettingsFilter))
self.assertEqual(new_filter.from_address, '[email protected]')
self.assertEqual(new_filter.has_the_word, 'project proposal')
self.assertEqual(new_filter.mark_as_read, 'True')
new_filter = self.client.CreateFilter(
username=conf.options.get_value('targetusername'),
to_address='[email protected]',
label="announcements")
self.assert_(isinstance(new_filter,
gdata.apps.emailsettings.data.EmailSettingsFilter))
self.assertEqual(new_filter.to_address, '[email protected]')
self.assertEqual(new_filter.label, 'announcements')
new_filter = self.client.CreateFilter(
username=conf.options.get_value('targetusername'),
subject='urgent',
does_not_have_the_word='spam',
has_attachments=True,
archive=True)
self.assert_(isinstance(new_filter,
gdata.apps.emailsettings.data.EmailSettingsFilter))
self.assertEqual(new_filter.subject, 'urgent')
self.assertEqual(new_filter.does_not_have_the_word, 'spam')
self.assertEqual(new_filter.has_attachments, 'True')
self.assertEqual(new_filter.archive, 'True')
def testCreateSendAs(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testCreateSendAs')
new_sendas = self.client.CreateSendAs(
username=conf.options.get_value('targetusername'),
name='Sales', address=conf.options.get_value('appsusername'),
reply_to='[email protected]',
make_default=True)
self.assert_(isinstance(new_sendas,
gdata.apps.emailsettings.data.EmailSettingsSendAsAlias))
self.assertEqual(new_sendas.name, 'Sales')
self.assertEqual(new_sendas.address,
conf.options.get_value('appsusername'))
self.assertEqual(new_sendas.reply_to, '[email protected]')
self.assertEqual(new_sendas.make_default, 'True')
def testUpdateWebclip(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateWebclip')
new_webclip = self.client.UpdateWebclip(
username=conf.options.get_value('targetusername'),
enable=True)
self.assert_(isinstance(new_webclip,
gdata.apps.emailsettings.data.EmailSettingsWebClip))
self.assertEqual(new_webclip.enable, 'True')
new_webclip = self.client.UpdateWebclip(
username=conf.options.get_value('targetusername'),
enable=False)
self.assert_(isinstance(new_webclip,
gdata.apps.emailsettings.data.EmailSettingsWebClip))
self.assertEqual(new_webclip.enable, 'False')
def testUpdateForwarding(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateForwarding')
new_forwarding = self.client.UpdateForwarding(
username=conf.options.get_value('targetusername'),
enable=True,
forward_to=conf.options.get_value('appsusername'),
action='KEEP')
self.assert_(isinstance(new_forwarding,
gdata.apps.emailsettings.data.EmailSettingsForwarding))
self.assertEqual(new_forwarding.enable, 'True')
self.assertEqual(new_forwarding.forward_to,
conf.options.get_value('appsusername'))
self.assertEqual(new_forwarding.action, 'KEEP')
new_forwarding = self.client.UpdateForwarding(
username=conf.options.get_value('targetusername'),
enable=False)
self.assert_(isinstance(new_forwarding,
gdata.apps.emailsettings.data.EmailSettingsForwarding))
self.assertEqual(new_forwarding.enable, 'False')
def testUpdatePop(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdatePop')
new_pop = self.client.UpdatePop(
username=conf.options.get_value('targetusername'),
enable=True, enable_for='MAIL_FROM_NOW_ON', action='KEEP')
self.assert_(isinstance(new_pop,
gdata.apps.emailsettings.data.EmailSettingsPop))
self.assertEqual(new_pop.enable, 'True')
self.assertEqual(new_pop.enable_for, 'MAIL_FROM_NOW_ON')
self.assertEqual(new_pop.action, 'KEEP')
new_pop = self.client.UpdatePop(
username=conf.options.get_value('targetusername'),
enable=False)
self.assert_(isinstance(new_pop,
gdata.apps.emailsettings.data.EmailSettingsPop))
self.assertEqual(new_pop.enable, 'False')
def testUpdateImap(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateImap')
new_imap = self.client.UpdateImap(
username=conf.options.get_value('targetusername'),
enable=True)
self.assert_(isinstance(new_imap,
gdata.apps.emailsettings.data.EmailSettingsImap))
self.assertEqual(new_imap.enable, 'True')
new_imap = self.client.UpdateImap(
username=conf.options.get_value('targetusername'),
enable=False)
self.assert_(isinstance(new_imap,
gdata.apps.emailsettings.data.EmailSettingsImap))
self.assertEqual(new_imap.enable, 'False')
def testUpdateVacation(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateVacation')
new_vacation = self.client.UpdateVacation(
username=conf.options.get_value('targetusername'),
enable=True, subject='Out of office',
message='If urgent call me at 555-5555.',
start_date='2011-12-05', end_date='2011-12-06',
contacts_only=True, domain_only=False)
self.assert_(isinstance(new_vacation,
gdata.apps.emailsettings.data.EmailSettingsVacationResponder))
self.assertEqual(new_vacation.enable, 'True')
self.assertEqual(new_vacation.subject, 'Out of office')
self.assertEqual(new_vacation.message, 'If urgent call me at 555-5555.')
self.assertEqual(new_vacation.start_date, '2011-12-05')
self.assertEqual(new_vacation.end_date, '2011-12-06')
self.assertEqual(new_vacation.contacts_only, 'True')
self.assertEqual(new_vacation.domain_only, 'False')
new_vacation = self.client.UpdateVacation(
username=conf.options.get_value('targetusername'),
enable=False)
self.assert_(isinstance(new_vacation,
gdata.apps.emailsettings.data.EmailSettingsVacationResponder))
self.assertEqual(new_vacation.enable, 'False')
def testUpdateSignature(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateSignature')
new_signature = self.client.UpdateSignature(
username=conf.options.get_value('targetusername'),
signature='Regards, Joe')
self.assert_(isinstance(new_signature,
gdata.apps.emailsettings.data.EmailSettingsSignature))
self.assertEqual(new_signature.signature_value, 'Regards, Joe')
new_signature = self.client.UpdateSignature(
username=conf.options.get_value('targetusername'),
signature='')
self.assert_(isinstance(new_signature,
gdata.apps.emailsettings.data.EmailSettingsSignature))
self.assertEqual(new_signature.signature_value, '')
def testUpdateLanguage(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateLanguage')
new_language = self.client.UpdateLanguage(
username=conf.options.get_value('targetusername'),
language='es')
self.assert_(isinstance(new_language,
gdata.apps.emailsettings.data.EmailSettingsLanguage))
self.assertEqual(new_language.language_tag, 'es')
def testUpdateGeneral(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'testUpdateGeneral')
new_general = self.client.UpdateGeneralSettings(
username=conf.options.get_value('targetusername'),
page_size=25, arrows=True)
self.assert_(isinstance(new_general,
gdata.apps.emailsettings.data.EmailSettingsGeneral))
self.assertEqual(new_general.page_size, '25')
self.assertEqual(new_general.arrows, 'True')
new_general = self.client.UpdateGeneralSettings(
username=conf.options.get_value('targetusername'),
shortcuts=False, snippets=True, use_unicode=False)
self.assert_(isinstance(new_general,
gdata.apps.emailsettings.data.EmailSettingsGeneral))
self.assertEqual(new_general.shortcuts, 'False')
self.assertEqual(new_general.snippets, 'True')
self.assertEqual(new_general.use_unicode, 'False')
def suite():
return conf.build_suite([EmailSettingsClientTest])
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
| mit | -6,984,701,997,895,955,000 | 1,789,988,648,427,659,500 | 35.618234 | 81 | 0.704271 | false |