'''
InfUserExtensionsObject
'''
from Logger import StringTable as ST
from Logger import ToolError
import Logger.Log as Logger
from Library import GlobalData
from Library.Misc import Sdict
class InfUserExtensionItem():
def __init__(self,
Content = '',
UserId = '',
IdString = ''):
self.Content = Content
self.UserId = UserId
self.IdString = IdString
self.SupArchList = []
def SetContent(self, Content):
self.Content = Content
def GetContent(self):
return self.Content
def SetUserId(self, UserId):
self.UserId = UserId
def GetUserId(self):
return self.UserId
def SetIdString(self, IdString):
self.IdString = IdString
def GetIdString(self):
return self.IdString
def SetSupArchList(self, SupArchList):
self.SupArchList = SupArchList
def GetSupArchList(self):
return self.SupArchList
##
#
#
#
class InfUserExtensionObject():
def __init__(self):
self.UserExtension = Sdict()
def SetUserExtension(self, UserExtensionCont, IdContent=None, LineNo=None):
if not UserExtensionCont or UserExtensionCont == '':
return True
#
# IdContent is a list containing UserId and IdString.
# The general section header parser is called for this; if no
# IdString/UserId is defined, it returns 'COMMON'.
#
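# Each IdContentItem is a (UserId, IdString, Arch) triple, e.g. parsed from
# a section header such as [UserExtensions.TianoCore."ExtraFiles"]
# (hypothetical example).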
for IdContentItem in IdContent:
InfUserExtensionItemObj = InfUserExtensionItem()
if IdContentItem[0] == 'COMMON':
UserId = ''
else:
UserId = IdContentItem[0]
if IdContentItem[1] == 'COMMON':
IdString = ''
else:
IdString = IdContentItem[1]
#
# Fill UserExtensionObj members.
#
InfUserExtensionItemObj.SetUserId(UserId)
InfUserExtensionItemObj.SetIdString(IdString)
InfUserExtensionItemObj.SetContent(UserExtensionCont)
InfUserExtensionItemObj.SetSupArchList(IdContentItem[2])
for CheckItem in self.UserExtension:
if IdContentItem[0] == CheckItem[0] and IdContentItem[1] == CheckItem[1]:
if IdContentItem[2].upper() == 'COMMON' or CheckItem[2].upper() == 'COMMON':
#
# For COMMON ARCH type, do special check.
#
Logger.Error('InfParser',
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_UE_SECTION_DUPLICATE_ERROR%\
(IdContentItem[0] + '.' + IdContentItem[1] + '.' + IdContentItem[2]),
File=GlobalData.gINF_MODULE_NAME,
Line=LineNo,
ExtraData=None)
if IdContentItem in self.UserExtension:
#
# Each UserExtensions section header must have a unique set
# of UserId, IdString and Arch values.
# This means that the same UserId can be used in more than one
# section header, provided the IdString or Arch values are
# different. The same IdString values can be used in more than
# one section header if the UserId or Arch values are
# different. The same UserId and the same IdString can be used
# in a section header if the Arch values are different in each
# of the section headers.
#
Logger.Error('InfParser',
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_UE_SECTION_DUPLICATE_ERROR%\
(IdContentItem[0] + '.' + IdContentItem[1] + '.' + IdContentItem[2]),
File=GlobalData.gINF_MODULE_NAME,
Line=LineNo,
ExtraData=None)
else:
UserExtensionList = []
UserExtensionList.append(InfUserExtensionItemObj)
self.UserExtension[IdContentItem] = UserExtensionList
return True
def GetUserExtension(self):
return self.UserExtension
| {
"content_hash": "e5f80ed3000e8955ed208391cf2c2c94",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 98,
"avg_line_length": 37.88235294117647,
"alnum_prop": 0.5312777284826974,
"repo_name": "egraba/vbox_openbsd",
"id": "ac7e7db5cff0aa0153f0003dc1ee87d8b614686b",
"size": "5124",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "VirtualBox-5.0.0/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/Object/Parser/InfUserExtensionObject.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "88714"
},
{
"name": "Assembly",
"bytes": "4303680"
},
{
"name": "AutoIt",
"bytes": "2187"
},
{
"name": "Batchfile",
"bytes": "95534"
},
{
"name": "C",
"bytes": "192632221"
},
{
"name": "C#",
"bytes": "64255"
},
{
"name": "C++",
"bytes": "83842667"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "6041"
},
{
"name": "CSS",
"bytes": "26756"
},
{
"name": "D",
"bytes": "41844"
},
{
"name": "DIGITAL Command Language",
"bytes": "56579"
},
{
"name": "DTrace",
"bytes": "1466646"
},
{
"name": "GAP",
"bytes": "350327"
},
{
"name": "Groff",
"bytes": "298540"
},
{
"name": "HTML",
"bytes": "467691"
},
{
"name": "IDL",
"bytes": "106734"
},
{
"name": "Java",
"bytes": "261605"
},
{
"name": "JavaScript",
"bytes": "80927"
},
{
"name": "Lex",
"bytes": "25122"
},
{
"name": "Logos",
"bytes": "4941"
},
{
"name": "Makefile",
"bytes": "426902"
},
{
"name": "Module Management System",
"bytes": "2707"
},
{
"name": "NSIS",
"bytes": "177212"
},
{
"name": "Objective-C",
"bytes": "5619792"
},
{
"name": "Objective-C++",
"bytes": "81554"
},
{
"name": "PHP",
"bytes": "58585"
},
{
"name": "Pascal",
"bytes": "69941"
},
{
"name": "Perl",
"bytes": "240063"
},
{
"name": "PowerShell",
"bytes": "10664"
},
{
"name": "Python",
"bytes": "9094160"
},
{
"name": "QMake",
"bytes": "3055"
},
{
"name": "R",
"bytes": "21094"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Shell",
"bytes": "1460572"
},
{
"name": "SourcePawn",
"bytes": "4139"
},
{
"name": "TypeScript",
"bytes": "142342"
},
{
"name": "Visual Basic",
"bytes": "7161"
},
{
"name": "XSLT",
"bytes": "1034475"
},
{
"name": "Yacc",
"bytes": "22312"
}
],
"symlink_target": ""
} |
from prometheus_client import Counter, Histogram
from django_prometheus.utils import Time, TimeSince, PowersOf
requests_total = Counter(
'django_http_requests_before_middlewares_total',
'Total count of requests before middlewares run.')
responses_total = Counter(
'django_http_responses_before_middlewares_total',
'Total count of responses before middlewares run.')
requests_latency_before = Histogram(
'django_http_requests_latency_including_middlewares_seconds',
('Histogram of requests processing time (including middleware '
'processing time).'))
requests_unknown_latency_before = Counter(
'django_http_requests_unknown_latency_including_middlewares_total',
('Count of requests for which the latency was unknown (when computing '
'django_http_requests_latency_including_middlewares_seconds).'))
class PrometheusBeforeMiddleware(object):
"""Monitoring middleware that should run before other middlewares."""
def process_request(self, request):
requests_total.inc()
request.prometheus_before_middleware_event = Time()
def process_response(self, request, response):
responses_total.inc()
if hasattr(request, 'prometheus_before_middleware_event'):
requests_latency_before.observe(TimeSince(
request.prometheus_before_middleware_event))
else:
requests_unknown_latency_before.inc()
return response
requests_latency = Histogram(
'django_http_requests_latency_seconds',
'Histogram of requests processing time.')
requests_unknown_latency = Counter(
'django_http_requests_unknown_latency_total',
'Count of requests for which the latency was unknown.')
# Set in process_request
ajax_requests = Counter(
'django_http_ajax_requests_total',
'Count of AJAX requests.')
requests_by_method = Counter(
'django_http_requests_total_by_method',
'Count of requests by method.',
['method'])
requests_by_transport = Counter(
'django_http_requests_total_by_transport',
'Count of requests by transport.',
['transport'])
# Set in process_view
requests_by_view_transport_method = Counter(
'django_http_requests_total_by_view_transport_method',
'Count of requests by view, transport, method.',
['view', 'transport', 'method'])
requests_body_bytes = Histogram(
'django_http_requests_body_total_bytes',
'Histogram of requests by body size.',
buckets=PowersOf(2, 30))
# Set in process_template_response
responses_by_templatename = Counter(
'django_http_responses_total_by_templatename',
'Count of responses by template name.',
['templatename'])
# Set in process_response
responses_by_status = Counter(
'django_http_responses_total_by_status',
'Count of responses by status.',
['status'])
responses_body_bytes = Histogram(
'django_http_responses_body_total_bytes',
'Histogram of responses by body size.',
buckets=PowersOf(2, 30))
responses_by_charset = Counter(
'django_http_responses_total_by_charset',
'Count of responses by charset.',
['charset'])
responses_streaming = Counter(
'django_http_responses_streaming_total',
'Count of streaming responses.')
# Set in process_exception
exceptions_by_type = Counter(
'django_http_exceptions_total_by_type',
'Count of exceptions by object type.',
['type'])
exceptions_by_view = Counter(
'django_http_exceptions_total_by_view',
'Count of exceptions by view.',
['view_name'])
class PrometheusAfterMiddleware(object):
"""Monitoring middleware that should run after other middlewares."""
def _transport(self, request):
return 'https' if request.is_secure() else 'http'
def _method(self, request):
m = request.method
if m not in ('GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE',
'OPTIONS', 'CONNECT', 'PATCH'):
return '<invalid method>'
return m
def process_request(self, request):
transport = self._transport(request)
method = self._method(request)
requests_by_method.labels(method).inc()
requests_by_transport.labels(transport).inc()
if request.is_ajax():
ajax_requests.inc()
requests_body_bytes.observe(len(request.body))
request.prometheus_after_middleware_event = Time()
def process_view(self, request, view_func, *view_args, **view_kwargs):
transport = self._transport(request)
method = self._method(request)
if hasattr(request, 'resolver_match'):
name = request.resolver_match.view_name or '<unnamed view>'
requests_by_view_transport_method.labels(
name, transport, method).inc()
def process_template_response(self, request, response):
responses_by_templatename.labels(str(
response.template_name)).inc()
return response
def process_response(self, request, response):
responses_by_status.labels(str(response.status_code)).inc()
if hasattr(response, 'charset'):
responses_by_charset.labels(str(response.charset)).inc()
if hasattr(response, 'streaming') and response.streaming:
responses_streaming.inc()
if hasattr(response, 'content'):
responses_body_bytes.observe(len(response.content))
if hasattr(request, 'prometheus_after_middleware_event'):
requests_latency.observe(TimeSince(
request.prometheus_after_middleware_event))
else:
requests_unknown_latency.inc()
return response
def process_exception(self, request, exception):
exceptions_by_type.labels(type(exception).__name__).inc()
if hasattr(request, 'resolver_match'):
name = request.resolver_match.view_name or '<unnamed view>'
exceptions_by_view.labels(name).inc()
if hasattr(request, 'prometheus_after_middleware_event'):
requests_latency.observe(TimeSince(
request.prometheus_after_middleware_event))
else:
requests_unknown_latency.inc()
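# Usage sketch (an assumption, not part of this module): enable both middlewares
# in Django settings so PrometheusBeforeMiddleware runs first and
# PrometheusAfterMiddleware runs last. The dotted paths below assume this file
# is importable as django_prometheus.middleware (old-style MIDDLEWARE_CLASSES,
# matching the object-based middleware classes defined above).
#
# MIDDLEWARE_CLASSES = (
#     'django_prometheus.middleware.PrometheusBeforeMiddleware',
#     # ... the rest of your middleware stack ...
#     'django_prometheus.middleware.PrometheusAfterMiddleware',
# )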
| {
"content_hash": "06115c6e659bb1055d6e3c15b5a05769",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 75,
"avg_line_length": 39.66233766233766,
"alnum_prop": 0.6743614931237721,
"repo_name": "DingaGa/django-prometheus",
"id": "6196e4727997ca0b13eab6809bef2e6895459839",
"size": "6108",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_prometheus/middleware.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5116"
},
{
"name": "Python",
"bytes": "40140"
}
],
"symlink_target": ""
} |
import math
class Sensor_Velocidad:
def __init__(self, radio_, sensor_, nro_secciones):
self.radio = radio_
self.sensor = sensor_
self.nro_seccion = nro_secciones
def get_desplazamiento(self):
# Compute the wheel perimeter; the radius must be in meters.
perimetro = 2*math.pi*self.radio
distandia_seccion_recorrido = perimetro/self.nro_seccion
return distandia_seccion_recorrido
def get_angulo_seccion(self):
return (2*math.pi)/self.nro_seccion
def get_distancia(self, contador):
return contador*self.get_desplazamiento()
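# Usage sketch (an assumption, not part of the original file): a wheel of
# radius 0.05 m whose encoder disc is divided into 20 sections; `contador`
# is the number of section transitions counted by the sensor.
if __name__ == "__main__":
    sensor = Sensor_Velocidad(0.05, None, 20)
    print(sensor.get_desplazamiento())  # meters travelled per counted section
    print(sensor.get_distancia(40))     # meters travelled after 40 counts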
| {
"content_hash": "50540f937c6bacfb44b0a5195ef246d7",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 72,
"avg_line_length": 30.95,
"alnum_prop": 0.6639741518578353,
"repo_name": "GeorgEncinas/backtraking",
"id": "92bc78382f5ced30631efa4c188b56d5d3cbb0a7",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sensor/sensor_velocidad.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19456"
}
],
"symlink_target": ""
} |
import os
import platform
import subprocess
import lit.formats
import lit.util
# name: The name of this test suite.
config.name = 'Clang-Unit'
# suffixes: A list of file extensions to treat as test files.
config.suffixes = []
# test_source_root: The root path where tests are located.
# test_exec_root: The root path where tests should be run.
config.test_exec_root = os.path.join(config.clang_obj_root, 'unittests')
config.test_source_root = config.test_exec_root
# testFormat: The test format to use to interpret tests.
config.test_format = lit.formats.GoogleTest(config.llvm_build_mode, 'Tests')
# Propagate the temp directory. Windows requires this because it uses \Windows\
# if none of these are present.
if 'TMP' in os.environ:
config.environment['TMP'] = os.environ['TMP']
if 'TEMP' in os.environ:
config.environment['TEMP'] = os.environ['TEMP']
# Propagate path to symbolizer for ASan/MSan.
for symbolizer in ['ASAN_SYMBOLIZER_PATH', 'MSAN_SYMBOLIZER_PATH']:
if symbolizer in os.environ:
config.environment[symbolizer] = os.environ[symbolizer]
def find_shlibpath_var():
if platform.system() in ['Linux', 'FreeBSD', 'NetBSD', 'SunOS']:
yield 'LD_LIBRARY_PATH'
elif platform.system() == 'Darwin':
yield 'DYLD_LIBRARY_PATH'
elif platform.system() == 'Windows':
yield 'PATH'
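# Note: the for/else below uses the first variable yielded for this platform;
# the else branch runs only if the loop finishes without hitting break, i.e.
# when no shared-library path variable is known for the current platform.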
for shlibpath_var in find_shlibpath_var():
# in stand-alone builds, shlibdir is clang's build tree
# while llvm_libs_dir is installed LLVM (and possibly older clang)
shlibpath = os.path.pathsep.join(
(config.shlibdir,
config.llvm_libs_dir,
config.environment.get(shlibpath_var, '')))
config.environment[shlibpath_var] = shlibpath
break
else:
lit_config.warning("unable to inject shared library path on '{}'"
.format(platform.system()))
| {
"content_hash": "77303b9cabf0b17ae4b99a613bb89fb7",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 34.9811320754717,
"alnum_prop": 0.6957928802588996,
"repo_name": "youtube/cobalt_sandbox",
"id": "342b6928eceaf473a41488d434298bfe2d274465",
"size": "1921",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "third_party/llvm-project/clang/test/Unit/lit.cfg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
.. _tut_stats_cluster_source_rANOVA:
======================================================================
Repeated measures ANOVA on source data with spatio-temporal clustering
======================================================================
This example illustrates how to make use of the clustering functions
for arbitrary, self-defined contrasts beyond standard t-tests. In this
case we will test whether the differences in evoked responses between
stimulation modality (visual VS auditory) depend on the stimulus
location (left vs right) for a group of subjects (simulated here
using one subject's data). For this purpose we will compute an
interaction effect using a repeated measures ANOVA. The multiple
comparisons problem is addressed with a cluster-level permutation test
across space and time.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
import mne
from mne import (io, spatial_tris_connectivity, compute_morph_matrix,
grade_to_tris)
from mne.stats import (spatio_temporal_cluster_test, f_threshold_mway_rm,
f_mway_rm, summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for all channels, removing a bad one
# ------------------------------------------------
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
# we'll load all four conditions that make up the 'two ways' of our ANOVA
event_id = dict(l_aud=1, r_aud=2, l_vis=3, r_vis=4)
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
epochs.equalize_event_counts(event_id, copy=False)
###############################################################################
# Transform to source space
# -------------------------
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
inverse_operator = read_inverse_operator(fname_inv)
# we'll only use one hemisphere to speed up this example
# instead of a second vertex array we'll pass an empty array
sample_vertices = [inverse_operator['src'][0]['vertno'], np.array([], int)]
# Let's average and compute inverse, then resample to speed things up
conditions = []
for cond in ['l_aud', 'r_aud', 'l_vis', 'r_vis']: # order is important
evoked = epochs[cond].average()
evoked.resample(50, npad='auto')
condition = apply_inverse(evoked, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition.crop(0, None)
conditions.append(condition)
tmin = conditions[0].tmin
tstep = conditions[0].tstep
###############################################################################
# Transform to common cortical space
# ----------------------------------
#
# Normally you would read in estimates across several subjects and morph them
# to the same cortical space (e.g. fsaverage). For example purposes, we will
# simulate this by just having each "subject" have the same response (just
# noisy in source space) here.
#
# We'll only consider the left hemisphere in this tutorial.
n_vertices_sample, n_times = conditions[0].lh_data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 4) * 10
for ii, condition in enumerate(conditions):
X[:, :, :, ii] += condition.lh_data[:, :, np.newaxis]
###############################################################################
# It's a good idea to spatially smooth the data, and for visualization
# purposes, let's morph these to fsaverage, which is a grade 5 source space
# with vertices 0:10242 for each hemisphere. Usually you'd have to morph
# each subject's data separately (and you might want to use morph_data
# instead), but here since all estimates are on 'sample' we can use one
# morph matrix for all the heavy lifting.
fsave_vertices = [np.arange(10242), np.array([], int)] # right hemi is empty
morph_mat = compute_morph_matrix('sample', 'fsaverage', sample_vertices,
fsave_vertices, 20, subjects_dir)
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 4)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 4)
###############################################################################
# Now we need to prepare the group matrix for the ANOVA statistic. To make the
# clustering function work correctly with the ANOVA function X needs to be a
# list of multi-dimensional arrays (one per condition) of shape: samples
# (subjects) x time x space.
#
# First we permute dimensions, then split the array into a list of conditions
# and discard the empty dimension resulting from the split using numpy squeeze.
X = np.transpose(X, [2, 1, 0, 3])  # now subjects x time x space x conditions
X = [np.squeeze(x) for x in np.split(X, 4, axis=-1)]
###############################################################################
# Prepare function for arbitrary contrast
# ---------------------------------------
# As our ANOVA function is a multi-purpose tool we need to apply a few
# modifications to integrate it with the clustering function. This
# includes reshaping data, setting default arguments and processing
# the return values. For this reason we'll write a tiny dummy function.
#
# We will tell the ANOVA how to interpret the data matrix in terms of
# factors. This is done via the factor levels argument which is a list
# of the number of factor levels for each factor.
factor_levels = [2, 2]
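# With the condition order used above (l_aud, r_aud, l_vis, r_vis), factor A
# corresponds to stimulus modality (auditory vs. visual) and factor B to
# stimulus location (left vs. right); the levels of the last factor vary
# fastest.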
###############################################################################
# Finally we will pick the interaction effect by passing 'A:B'
# (this notation is borrowed from the R formula language).
# Without this, the main effects would also be returned.
effects = 'A:B'
# Tell the ANOVA not to compute p-values which we don't need for clustering
return_pvals = False
# a few more convenient bindings
n_times = X[0].shape[1]
n_conditions = 4
###############################################################################
# A stat_fun must deal with a variable number of input arguments.
#
# Inside the clustering function each condition will be passed as a flattened
# array, as required by the clustering procedure. The ANOVA, however, expects an
# input array of dimensions: subjects X conditions X observations (optional).
#
# The following function catches the list input and swaps the first and the
# second dimension, and finally calls ANOVA.
#
# Note: for further details on this ANOVA function, consider the
# corresponding
# :ref:`time frequency tutorial <tut_stats_cluster_sensor_rANOVA_tfr>`.
def stat_fun(*args):
return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=return_pvals)[0]
# get f-values only.
###############################################################################
# Compute clustering statistic
# ----------------------------
#
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal).
source_space = grade_to_tris(5)
# as we only have one hemisphere we only need half the connectivity
lh_source_space = source_space[source_space[:, 0] < 10242]
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(lh_source_space)
# Now let's actually do the clustering. Please relax, on a small
# notebook and one single thread only this will take a couple of minutes ...
pthresh = 0.0005
f_thresh = f_threshold_mway_rm(n_subjects, factor_levels, effects, pthresh)
# To speed things up a bit we will ...
n_permutations = 128 # ... run fewer permutations (reduces sensitivity)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=1,
threshold=f_thresh, stat_fun=stat_fun,
n_permutations=n_permutations,
buffer_size=None)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
# ----------------------
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# The brighter the color, the stronger the interaction between
# stimulus modality and stimulus location
brain = stc_all_cluster_vis.plot(subjects_dir=subjects_dir, colormap='mne',
time_label='Duration significant (ms)')
brain.set_data_time_index(0)
brain.show_view('lateral')
brain.save_image('cluster-lh.png')
brain.show_view('medial')
###############################################################################
# Finally, let's investigate interaction effect by reconstructing the time
# courses
inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds)][0] # first cluster
times = np.arange(X[0].shape[1]) * tstep * 1e3
plt.figure()
colors = ['y', 'b', 'g', 'purple']
event_ids = ['l_aud', 'r_aud', 'l_vis', 'r_vis']
for ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):
# extract time course at cluster vertices
condition = condition[:, :, inds_v]
# normally we would normalize values across subjects but
# here we use data from the same subject so we're good to just
# create average time series across subjects and vertices.
mean_tc = condition.mean(axis=2).mean(axis=0)
std_tc = condition.std(axis=2).std(axis=0)
plt.plot(times, mean_tc.T, color=color, label=eve_id)
plt.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',
alpha=0.5, label='')
ymin, ymax = mean_tc.min() - 5, mean_tc.max() + 5
plt.xlabel('Time (ms)')
plt.ylabel('Activation (F-values)')
plt.xlim(times[[0, -1]])
plt.ylim(ymin, ymax)
plt.fill_betweenx((ymin, ymax), times[inds_t[0]],
times[inds_t[-1]], color='orange', alpha=0.3)
plt.legend()
plt.title('Interaction between stimulus-modality and location.')
plt.show()
| {
"content_hash": "a460aa96970afa00aff4b8713b6cf63a",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 79,
"avg_line_length": 42.78091872791519,
"alnum_prop": 0.6292227636904271,
"repo_name": "ARudiuk/mne-python",
"id": "ef899f7c3266695692a87067dbd676d9d1ff37c7",
"size": "12107",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tutorials/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3769"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "5086775"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
import omniORB, _omnipy
from omniORB import CORBA, PortableServer
_0_CORBA = CORBA
_omnipy.checkVersion(4,2, __file__, 1)
try:
property
except NameError:
def property(*args):
return None
# #include "CosPersistencePID.idl"
import CosPersistencePID_idl
_0_CosPersistencePID = omniORB.openModule("CosPersistencePID")
_0_CosPersistencePID__POA = omniORB.openModule("CosPersistencePID__POA")
# #include "CosPersistencePDS.idl"
import CosPersistencePDS_idl
_0_CosPersistencePDS = omniORB.openModule("CosPersistencePDS")
_0_CosPersistencePDS__POA = omniORB.openModule("CosPersistencePDS__POA")
#
# Start of module "CosPersistencePDS_DA"
#
__name__ = "CosPersistencePDS_DA"
_0_CosPersistencePDS_DA = omniORB.openModule("CosPersistencePDS_DA", r"/usr/local/share/idl/omniORB/COS/CosPersistencePDS_DA.idl")
_0_CosPersistencePDS_DA__POA = omniORB.openModule("CosPersistencePDS_DA__POA", r"/usr/local/share/idl/omniORB/COS/CosPersistencePDS_DA.idl")
# typedef ... DAObjectID
class DAObjectID:
_NP_RepositoryId = "IDL:omg.org/CosPersistencePDS_DA/DAObjectID:1.0"
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_0_CosPersistencePDS_DA.DAObjectID = DAObjectID
_0_CosPersistencePDS_DA._d_DAObjectID = (omniORB.tcInternal.tv_string,0)
_0_CosPersistencePDS_DA._ad_DAObjectID = (omniORB.tcInternal.tv_alias, DAObjectID._NP_RepositoryId, "DAObjectID", (omniORB.tcInternal.tv_string,0))
_0_CosPersistencePDS_DA._tc_DAObjectID = omniORB.tcInternal.createTypeCode(_0_CosPersistencePDS_DA._ad_DAObjectID)
omniORB.registerType(DAObjectID._NP_RepositoryId, _0_CosPersistencePDS_DA._ad_DAObjectID, _0_CosPersistencePDS_DA._tc_DAObjectID)
del DAObjectID
# interface PID_DA
_0_CosPersistencePDS_DA._d_PID_DA = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosPersistencePDS_DA/PID_DA:1.0", "PID_DA")
omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/PID_DA:1.0"] = _0_CosPersistencePDS_DA._d_PID_DA
_0_CosPersistencePDS_DA.PID_DA = omniORB.newEmptyClass()
class PID_DA (_0_CosPersistencePID.PID):
_NP_RepositoryId = _0_CosPersistencePDS_DA._d_PID_DA[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosPersistencePDS_DA.PID_DA = PID_DA
_0_CosPersistencePDS_DA._tc_PID_DA = omniORB.tcInternal.createTypeCode(_0_CosPersistencePDS_DA._d_PID_DA)
omniORB.registerType(PID_DA._NP_RepositoryId, _0_CosPersistencePDS_DA._d_PID_DA, _0_CosPersistencePDS_DA._tc_PID_DA)
# PID_DA operations and attributes
PID_DA._d__get_oid = ((),(omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/DAObjectID:1.0"],),None)
PID_DA._d__set_oid = ((omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/DAObjectID:1.0"],),(),None)
# PID_DA object reference
class _objref_PID_DA (_0_CosPersistencePID._objref_PID):
_NP_RepositoryId = PID_DA._NP_RepositoryId
def __init__(self, obj):
_0_CosPersistencePID._objref_PID.__init__(self, obj)
def _get_oid(self, *args):
return self._obj.invoke("_get_oid", _0_CosPersistencePDS_DA.PID_DA._d__get_oid, args)
def _set_oid(self, *args):
return self._obj.invoke("_set_oid", _0_CosPersistencePDS_DA.PID_DA._d__set_oid, args)
oid = property(_get_oid, _set_oid)
omniORB.registerObjref(PID_DA._NP_RepositoryId, _objref_PID_DA)
_0_CosPersistencePDS_DA._objref_PID_DA = _objref_PID_DA
del PID_DA, _objref_PID_DA
# PID_DA skeleton
__name__ = "CosPersistencePDS_DA__POA"
class PID_DA (_0_CosPersistencePID__POA.PID):
_NP_RepositoryId = _0_CosPersistencePDS_DA.PID_DA._NP_RepositoryId
_omni_op_d = {"_get_oid": _0_CosPersistencePDS_DA.PID_DA._d__get_oid, "_set_oid": _0_CosPersistencePDS_DA.PID_DA._d__set_oid}
_omni_op_d.update(_0_CosPersistencePID__POA.PID._omni_op_d)
PID_DA._omni_skeleton = PID_DA
_0_CosPersistencePDS_DA__POA.PID_DA = PID_DA
omniORB.registerSkeleton(PID_DA._NP_RepositoryId, PID_DA)
del PID_DA
__name__ = "CosPersistencePDS_DA"
# interface DAObject
_0_CosPersistencePDS_DA._d_DAObject = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosPersistencePDS_DA/DAObject:1.0", "DAObject")
omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/DAObject:1.0"] = _0_CosPersistencePDS_DA._d_DAObject
_0_CosPersistencePDS_DA.DAObject = omniORB.newEmptyClass()
class DAObject :
_NP_RepositoryId = _0_CosPersistencePDS_DA._d_DAObject[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosPersistencePDS_DA.DAObject = DAObject
_0_CosPersistencePDS_DA._tc_DAObject = omniORB.tcInternal.createTypeCode(_0_CosPersistencePDS_DA._d_DAObject)
omniORB.registerType(DAObject._NP_RepositoryId, _0_CosPersistencePDS_DA._d_DAObject, _0_CosPersistencePDS_DA._tc_DAObject)
# DAObject operations and attributes
DAObject._d_dado_same = ((omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/DAObject:1.0"], ), (omniORB.tcInternal.tv_boolean, ), None)
DAObject._d_dado_oid = ((), (omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/DAObjectID:1.0"], ), None)
DAObject._d_dado_pid = ((), (omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/PID_DA:1.0"], ), None)
DAObject._d_dado_remove = ((), (), None)
DAObject._d_dado_free = ((), (), None)
# DAObject object reference
class _objref_DAObject (CORBA.Object):
_NP_RepositoryId = DAObject._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def dado_same(self, *args):
return self._obj.invoke("dado_same", _0_CosPersistencePDS_DA.DAObject._d_dado_same, args)
def dado_oid(self, *args):
return self._obj.invoke("dado_oid", _0_CosPersistencePDS_DA.DAObject._d_dado_oid, args)
def dado_pid(self, *args):
return self._obj.invoke("dado_pid", _0_CosPersistencePDS_DA.DAObject._d_dado_pid, args)
def dado_remove(self, *args):
return self._obj.invoke("dado_remove", _0_CosPersistencePDS_DA.DAObject._d_dado_remove, args)
def dado_free(self, *args):
return self._obj.invoke("dado_free", _0_CosPersistencePDS_DA.DAObject._d_dado_free, args)
omniORB.registerObjref(DAObject._NP_RepositoryId, _objref_DAObject)
_0_CosPersistencePDS_DA._objref_DAObject = _objref_DAObject
del DAObject, _objref_DAObject
# DAObject skeleton
__name__ = "CosPersistencePDS_DA__POA"
class DAObject (PortableServer.Servant):
_NP_RepositoryId = _0_CosPersistencePDS_DA.DAObject._NP_RepositoryId
_omni_op_d = {"dado_same": _0_CosPersistencePDS_DA.DAObject._d_dado_same, "dado_oid": _0_CosPersistencePDS_DA.DAObject._d_dado_oid, "dado_pid": _0_CosPersistencePDS_DA.DAObject._d_dado_pid, "dado_remove": _0_CosPersistencePDS_DA.DAObject._d_dado_remove, "dado_free": _0_CosPersistencePDS_DA.DAObject._d_dado_free}
DAObject._omni_skeleton = DAObject
_0_CosPersistencePDS_DA__POA.DAObject = DAObject
omniORB.registerSkeleton(DAObject._NP_RepositoryId, DAObject)
del DAObject
__name__ = "CosPersistencePDS_DA"
# interface DAObjectFactory
_0_CosPersistencePDS_DA._d_DAObjectFactory = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosPersistencePDS_DA/DAObjectFactory:1.0", "DAObjectFactory")
omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/DAObjectFactory:1.0"] = _0_CosPersistencePDS_DA._d_DAObjectFactory
_0_CosPersistencePDS_DA.DAObjectFactory = omniORB.newEmptyClass()
class DAObjectFactory :
_NP_RepositoryId = _0_CosPersistencePDS_DA._d_DAObjectFactory[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosPersistencePDS_DA.DAObjectFactory = DAObjectFactory
_0_CosPersistencePDS_DA._tc_DAObjectFactory = omniORB.tcInternal.createTypeCode(_0_CosPersistencePDS_DA._d_DAObjectFactory)
omniORB.registerType(DAObjectFactory._NP_RepositoryId, _0_CosPersistencePDS_DA._d_DAObjectFactory, _0_CosPersistencePDS_DA._tc_DAObjectFactory)
# DAObjectFactory operations and attributes
DAObjectFactory._d_create = ((), (omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/DAObject:1.0"], ), None)
# DAObjectFactory object reference
class _objref_DAObjectFactory (CORBA.Object):
_NP_RepositoryId = DAObjectFactory._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def create(self, *args):
return self._obj.invoke("create", _0_CosPersistencePDS_DA.DAObjectFactory._d_create, args)
omniORB.registerObjref(DAObjectFactory._NP_RepositoryId, _objref_DAObjectFactory)
_0_CosPersistencePDS_DA._objref_DAObjectFactory = _objref_DAObjectFactory
del DAObjectFactory, _objref_DAObjectFactory
# DAObjectFactory skeleton
__name__ = "CosPersistencePDS_DA__POA"
class DAObjectFactory (PortableServer.Servant):
_NP_RepositoryId = _0_CosPersistencePDS_DA.DAObjectFactory._NP_RepositoryId
_omni_op_d = {"create": _0_CosPersistencePDS_DA.DAObjectFactory._d_create}
DAObjectFactory._omni_skeleton = DAObjectFactory
_0_CosPersistencePDS_DA__POA.DAObjectFactory = DAObjectFactory
omniORB.registerSkeleton(DAObjectFactory._NP_RepositoryId, DAObjectFactory)
del DAObjectFactory
__name__ = "CosPersistencePDS_DA"
# interface DAObjectFactoryFinder
_0_CosPersistencePDS_DA._d_DAObjectFactoryFinder = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosPersistencePDS_DA/DAObjectFactoryFinder:1.0", "DAObjectFactoryFinder")
omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/DAObjectFactoryFinder:1.0"] = _0_CosPersistencePDS_DA._d_DAObjectFactoryFinder
_0_CosPersistencePDS_DA.DAObjectFactoryFinder = omniORB.newEmptyClass()
class DAObjectFactoryFinder :
_NP_RepositoryId = _0_CosPersistencePDS_DA._d_DAObjectFactoryFinder[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosPersistencePDS_DA.DAObjectFactoryFinder = DAObjectFactoryFinder
_0_CosPersistencePDS_DA._tc_DAObjectFactoryFinder = omniORB.tcInternal.createTypeCode(_0_CosPersistencePDS_DA._d_DAObjectFactoryFinder)
omniORB.registerType(DAObjectFactoryFinder._NP_RepositoryId, _0_CosPersistencePDS_DA._d_DAObjectFactoryFinder, _0_CosPersistencePDS_DA._tc_DAObjectFactoryFinder)
# DAObjectFactoryFinder operations and attributes
DAObjectFactoryFinder._d_find_factory = (((omniORB.tcInternal.tv_string,0), ), (omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/DAObjectFactory:1.0"], ), None)
# DAObjectFactoryFinder object reference
class _objref_DAObjectFactoryFinder (CORBA.Object):
_NP_RepositoryId = DAObjectFactoryFinder._NP_RepositoryId
def __init__(self, obj):
CORBA.Object.__init__(self, obj)
def find_factory(self, *args):
return self._obj.invoke("find_factory", _0_CosPersistencePDS_DA.DAObjectFactoryFinder._d_find_factory, args)
omniORB.registerObjref(DAObjectFactoryFinder._NP_RepositoryId, _objref_DAObjectFactoryFinder)
_0_CosPersistencePDS_DA._objref_DAObjectFactoryFinder = _objref_DAObjectFactoryFinder
del DAObjectFactoryFinder, _objref_DAObjectFactoryFinder
# DAObjectFactoryFinder skeleton
__name__ = "CosPersistencePDS_DA__POA"
class DAObjectFactoryFinder (PortableServer.Servant):
_NP_RepositoryId = _0_CosPersistencePDS_DA.DAObjectFactoryFinder._NP_RepositoryId
_omni_op_d = {"find_factory": _0_CosPersistencePDS_DA.DAObjectFactoryFinder._d_find_factory}
DAObjectFactoryFinder._omni_skeleton = DAObjectFactoryFinder
_0_CosPersistencePDS_DA__POA.DAObjectFactoryFinder = DAObjectFactoryFinder
omniORB.registerSkeleton(DAObjectFactoryFinder._NP_RepositoryId, DAObjectFactoryFinder)
del DAObjectFactoryFinder
__name__ = "CosPersistencePDS_DA"
# interface PDS_DA
_0_CosPersistencePDS_DA._d_PDS_DA = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosPersistencePDS_DA/PDS_DA:1.0", "PDS_DA")
omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/PDS_DA:1.0"] = _0_CosPersistencePDS_DA._d_PDS_DA
_0_CosPersistencePDS_DA.PDS_DA = omniORB.newEmptyClass()
class PDS_DA (_0_CosPersistencePDS.PDS):
_NP_RepositoryId = _0_CosPersistencePDS_DA._d_PDS_DA[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosPersistencePDS_DA.PDS_DA = PDS_DA
_0_CosPersistencePDS_DA._tc_PDS_DA = omniORB.tcInternal.createTypeCode(_0_CosPersistencePDS_DA._d_PDS_DA)
omniORB.registerType(PDS_DA._NP_RepositoryId, _0_CosPersistencePDS_DA._d_PDS_DA, _0_CosPersistencePDS_DA._tc_PDS_DA)
# PDS_DA operations and attributes
PDS_DA._d_get_data = ((), (omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/DAObject:1.0"], ), None)
PDS_DA._d_set_data = ((omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/DAObject:1.0"], ), (), None)
PDS_DA._d_lookup = ((omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/DAObjectID:1.0"], ), (omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/DAObject:1.0"], ), None)
PDS_DA._d_get_pid = ((), (omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/PID_DA:1.0"], ), None)
PDS_DA._d_get_object_pid = ((omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/DAObject:1.0"], ), (omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/PID_DA:1.0"], ), None)
PDS_DA._d_data_factories = ((), (omniORB.typeMapping["IDL:omg.org/CosPersistencePDS_DA/DAObjectFactoryFinder:1.0"], ), None)
# PDS_DA object reference
class _objref_PDS_DA (_0_CosPersistencePDS._objref_PDS):
_NP_RepositoryId = PDS_DA._NP_RepositoryId
def __init__(self, obj):
_0_CosPersistencePDS._objref_PDS.__init__(self, obj)
def get_data(self, *args):
return self._obj.invoke("get_data", _0_CosPersistencePDS_DA.PDS_DA._d_get_data, args)
def set_data(self, *args):
return self._obj.invoke("set_data", _0_CosPersistencePDS_DA.PDS_DA._d_set_data, args)
def lookup(self, *args):
return self._obj.invoke("lookup", _0_CosPersistencePDS_DA.PDS_DA._d_lookup, args)
def get_pid(self, *args):
return self._obj.invoke("get_pid", _0_CosPersistencePDS_DA.PDS_DA._d_get_pid, args)
def get_object_pid(self, *args):
return self._obj.invoke("get_object_pid", _0_CosPersistencePDS_DA.PDS_DA._d_get_object_pid, args)
def data_factories(self, *args):
return self._obj.invoke("data_factories", _0_CosPersistencePDS_DA.PDS_DA._d_data_factories, args)
omniORB.registerObjref(PDS_DA._NP_RepositoryId, _objref_PDS_DA)
_0_CosPersistencePDS_DA._objref_PDS_DA = _objref_PDS_DA
del PDS_DA, _objref_PDS_DA
# PDS_DA skeleton
__name__ = "CosPersistencePDS_DA__POA"
class PDS_DA (_0_CosPersistencePDS__POA.PDS):
_NP_RepositoryId = _0_CosPersistencePDS_DA.PDS_DA._NP_RepositoryId
_omni_op_d = {"get_data": _0_CosPersistencePDS_DA.PDS_DA._d_get_data, "set_data": _0_CosPersistencePDS_DA.PDS_DA._d_set_data, "lookup": _0_CosPersistencePDS_DA.PDS_DA._d_lookup, "get_pid": _0_CosPersistencePDS_DA.PDS_DA._d_get_pid, "get_object_pid": _0_CosPersistencePDS_DA.PDS_DA._d_get_object_pid, "data_factories": _0_CosPersistencePDS_DA.PDS_DA._d_data_factories}
_omni_op_d.update(_0_CosPersistencePDS__POA.PDS._omni_op_d)
PDS_DA._omni_skeleton = PDS_DA
_0_CosPersistencePDS_DA__POA.PDS_DA = PDS_DA
omniORB.registerSkeleton(PDS_DA._NP_RepositoryId, PDS_DA)
del PDS_DA
__name__ = "CosPersistencePDS_DA"
#
# End of module "CosPersistencePDS_DA"
#
__name__ = "CosPersistencePDS_DA_idl"
_exported_modules = ( "CosPersistencePDS_DA", )
# The end.
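# Client-side usage sketch (an assumption, not part of the generated stubs):
# resolve an object reference from a hypothetical stringified IOR and narrow
# it to the PDS_DA interface declared above.
#
# import sys
# from omniORB import CORBA
# import CosPersistencePDS_DA  # generated stub package for this IDL module
#
# orb = CORBA.ORB_init(sys.argv, CORBA.ORB_ID)
# obj = orb.string_to_object(ior_string)  # ior_string is hypothetical
# pds = obj._narrow(CosPersistencePDS_DA.PDS_DA)
# finder = pds.data_factories()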
| {
"content_hash": "6acfae76fea217b32678a5f68b8871b3",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 371,
"avg_line_length": 45.35014836795252,
"alnum_prop": 0.7401033828436825,
"repo_name": "amonmoce/corba_examples",
"id": "86951dc1324796d05b7da41d09b7b133fcd45ec5",
"size": "15408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "omniORBpy-4.2.1/build/python/COS/CosPersistencePDS_DA_idl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "621806"
},
{
"name": "C++",
"bytes": "9176868"
},
{
"name": "CSS",
"bytes": "1124"
},
{
"name": "Groff",
"bytes": "12904"
},
{
"name": "HTML",
"bytes": "1078062"
},
{
"name": "Java",
"bytes": "1838"
},
{
"name": "Makefile",
"bytes": "1832000"
},
{
"name": "Python",
"bytes": "4113588"
},
{
"name": "Shell",
"bytes": "124202"
},
{
"name": "TeX",
"bytes": "513468"
},
{
"name": "Yacc",
"bytes": "28999"
}
],
"symlink_target": ""
} |
import httplib2
import dateutil.parser
import dateutil.tz
from django.utils import simplejson
from django.utils.encoding import force_unicode
from django.conf import settings
from flowrepo import settings
from flowrepo.providers.utils.anyetree import etree
from flowrepo import default_header
DEFAULT_HTTP_HEADERS = {
"User-Agent" : default_header
}
#
# URL fetching sugar
#
def getxml(url, **kwargs):
"""Fetch and parse some XML. Returns an ElementTree"""
xml = fetch_resource(url, **kwargs)
return etree.fromstring(xml)
def getjson(url, **kwargs):
"""Fetch and parse some JSON. Returns the deserialized JSON."""
json = fetch_resource(url, **kwargs)
return simplejson.loads(json)
def fetch_resource(url, method="GET", body=None, username=None, password=None, headers=None):
h = httplib2.Http(timeout=15)
h.force_exception_to_status_code = True
if username is not None or password is not None:
h.add_credentials(username, password)
if headers is None:
headers = DEFAULT_HTTP_HEADERS.copy()
response, content = h.request(url, method, body, headers)
return content
#
# Date handling utils
#
def parsedate(s):
"""
Convert a string into a (local, naive) datetime object.
"""
dt = dateutil.parser.parse(s)
if dt.tzinfo:
dt = dt.astimezone(dateutil.tz.tzlocal()).replace(tzinfo=None)
return dt
def safeint(s):
"""Always returns an int. Returns 0 on failure."""
try:
return int(force_unicode(s))
except (ValueError, TypeError):
return 0
if settings.FLOWREPO_ADJUST_DATETIME:
try:
import pytz
except ImportError:
import logging
log = logging.getLogger('jellyroll.providers.utils')
log.error("Cannot import pytz package and consequently, all datetime objects will be naive. "
"In this particular case, e.g., all commit dates will be expressed in UTC.")
import datetime
import time
UTC = pytz.timezone('UTC')
LOCAL = pytz.timezone(settings.TIME_ZONE)
def utc_to_local_datetime(dt):
"""
Map datetime as UTC object to it's localtime counterpart.
"""
return dt.astimezone(LOCAL)
def utc_to_local_timestamp(ts, orig_tz=UTC):
"""
Convert a timestamp object into a tz-aware datetime object.
"""
timestamp = datetime.datetime.fromtimestamp(ts,tz=orig_tz)
return timestamp.astimezone(LOCAL)
def utc_to_local_timestruct(ts, orig_tz=UTC):
"""
Convert a timestruct object into a tz-aware datetime object.
"""
return utc_to_local_timestamp(time.mktime(ts),orig_tz)
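# Usage sketch (an assumption, not part of the original module): fetch and
# parse a hypothetical JSON feed, then normalise a timestamp field with the
# helpers defined above.
if __name__ == "__main__":
    data = getjson('http://example.com/feed.json')  # hypothetical URL
    updated = parsedate(data.get('updated', '2010-01-01T00:00:00Z'))
    count = safeint(data.get('count'))
    print(updated)
    print(count)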
| {
"content_hash": "db5419b2970eba2d603390bc294e6ded",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 101,
"avg_line_length": 28.166666666666668,
"alnum_prop": 0.6627218934911243,
"repo_name": "strogo/djpcms",
"id": "0676b67cb87a09ca8c2f94dd1b6c7f3a5fca5a6d",
"size": "2704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djpcms/contrib/flowrepo/providers/utils/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from CIM15.IEC61970.LoadModel.LoadGroup import LoadGroup
class ConformLoadGroup(LoadGroup):
"""A group of loads conforming to an allocation pattern.A group of loads conforming to an allocation pattern.
"""
def __init__(self, ConformLoadSchedules=None, EnergyConsumers=None, *args, **kw_args):
"""Initialises a new 'ConformLoadGroup' instance.
@param ConformLoadSchedules: The ConformLoadSchedules in the ConformLoadGroup.
@param EnergyConsumers: Conform loads assigned to this ConformLoadGroup.
"""
self._ConformLoadSchedules = []
self.ConformLoadSchedules = [] if ConformLoadSchedules is None else ConformLoadSchedules
self._EnergyConsumers = []
self.EnergyConsumers = [] if EnergyConsumers is None else EnergyConsumers
super(ConformLoadGroup, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["ConformLoadSchedules", "EnergyConsumers"]
_many_refs = ["ConformLoadSchedules", "EnergyConsumers"]
def getConformLoadSchedules(self):
"""The ConformLoadSchedules in the ConformLoadGroup.
"""
return self._ConformLoadSchedules
def setConformLoadSchedules(self, value):
for x in self._ConformLoadSchedules:
x.ConformLoadGroup = None
for y in value:
y._ConformLoadGroup = self
self._ConformLoadSchedules = value
ConformLoadSchedules = property(getConformLoadSchedules, setConformLoadSchedules)
def addConformLoadSchedules(self, *ConformLoadSchedules):
for obj in ConformLoadSchedules:
obj.ConformLoadGroup = self
def removeConformLoadSchedules(self, *ConformLoadSchedules):
for obj in ConformLoadSchedules:
obj.ConformLoadGroup = None
def getEnergyConsumers(self):
"""Conform loads assigned to this ConformLoadGroup.
"""
return self._EnergyConsumers
def setEnergyConsumers(self, value):
for x in self._EnergyConsumers:
x.LoadGroup = None
for y in value:
y._LoadGroup = self
self._EnergyConsumers = value
EnergyConsumers = property(getEnergyConsumers, setEnergyConsumers)
def addEnergyConsumers(self, *EnergyConsumers):
for obj in EnergyConsumers:
obj.LoadGroup = self
def removeEnergyConsumers(self, *EnergyConsumers):
for obj in EnergyConsumers:
obj.LoadGroup = None
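# Usage sketch (an assumption, not part of the original module): the add/remove
# helpers only set the back-reference on the objects passed in; appending to
# the group's own list is done by the real EnergyConsumer's LoadGroup property
# setter. `_StubConsumer` is a hypothetical stand-in, not a CIM class.
if __name__ == "__main__":
    class _StubConsumer(object):
        LoadGroup = None

    group = ConformLoadGroup()
    consumer = _StubConsumer()
    group.addEnergyConsumers(consumer)
    assert consumer.LoadGroup is group
    group.removeEnergyConsumers(consumer)
    assert consumer.LoadGroup is None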
| {
"content_hash": "40e73bf1da6402494fc7021918663e2b",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 113,
"avg_line_length": 35.16901408450704,
"alnum_prop": 0.6768121746095315,
"repo_name": "rwl/PyCIM",
"id": "c45331bc179e7ff2c18c32d53d4f234ccffb5f77",
"size": "3597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM15/IEC61970/LoadModel/ConformLoadGroup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
} |
import numpy
from numpy import asarray, tan, exp, ones, squeeze, sign, \
all, log, sqrt, pi, shape, array, minimum, where
from numpy import random
__all__ = ['anneal']
_double_min = numpy.finfo(float).min
_double_max = numpy.finfo(float).max
class base_schedule(object):
def __init__(self):
self.dwell = 20
self.learn_rate = 0.5
self.lower = -10
self.upper = 10
self.Ninit = 50
self.accepted = 0
self.tests = 0
self.feval = 0
self.k = 0
self.T = None
self.cvar = .05 # 5% coefficient of variation
def init(self, **options):
self.__dict__.update(options)
self.lower = asarray(self.lower)
self.lower = where(self.lower == numpy.NINF, -_double_max, self.lower)
self.upper = asarray(self.upper)
self.upper = where(self.upper == numpy.PINF, _double_max, self.upper)
self.k = 0
self.accepted = 0
self.feval = 0
self.tests = 0
def getstart_temp(self, best_state):
""" Find a matching starting temperature and starting parameters vector
i.e. find x0 such that func(x0) = T0.
Parameters
----------
best_state : _state
A _state object to store the function value and x0 found.
Returns
-------
x0 : array
The starting parameters vector.
"""
print("============================================================")
print("FINDING INITIAL TEMPERATURE WITH A COEFF OF VARIANCE OF", self.cvar)
print("============================================================")
assert self.dims is not None
lrange = self.lower
urange = self.upper
cvar = self.cvar
fmax = _double_min
fmin = _double_max
x0 = best_state.x
for _ in range(self.Ninit):
print("sampling T step:", _)
samp = squeeze(random.uniform(0, 1, size=self.dims)) - 0.5
samp = samp/0.5
varx0 = x0 * samp * cvar # random number within the cvar range
x0 = x0 + varx0
fval = self.func(x0, *self.args)
self.feval += 1
if fval > fmax:
fmax = fval
if fval < fmin:
fmin = fval
best_state.cost = fval
best_state.x = array(x0)
self.T0 = (fmax-fmin)*1.5
print("=================================")
print("SET INITIAL TEMPERATURE TO:", self.T0)
print("=================================")
return best_state.x
def accept_test(self, dE):
T = self.T
self.tests += 1
if dE < 0:
self.accepted += 1
return 1
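# Metropolis criterion for uphill moves: accept with probability
# exp(-dE / (boltzmann * T)).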
p = exp(-dE*1.0/self.boltzmann/T)
if (p > random.uniform(0.0, 1.0)):
self.accepted += 1
return 1
return 0
def update_guess(self, x0):
pass
def update_temp(self, x0):
pass
# A schedule due to Lester Ingber
class fast_sa(base_schedule):
def init(self, **options):
self.__dict__.update(options)
if self.m is None:
self.m = 1.0
if self.n is None:
self.n = 1.0
self.c = self.m * exp(-self.n * self.quench)
def update_guess(self, x0):
x0 = asarray(x0)
u = squeeze(random.uniform(0.0, 1.0, size=self.dims))
T = self.T
y = sign(u-0.5)*T*((1+1.0/T)**abs(2*u-1)-1.0)
xc = y*(self.upper - self.lower)
xnew = x0 + xc
return xnew
def update_temp(self):
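# Exponential cooling schedule: T_k = T0 * exp(-c * k**quench).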
self.T = self.T0*exp(-self.c * self.k**(self.quench))
self.k += 1
return
class cauchy_sa(base_schedule):
def update_guess(self, x0):
x0 = asarray(x0)
numbers = squeeze(random.uniform(-pi/2, pi/2, size=self.dims))
xc = self.learn_rate * self.T * tan(numbers)
xnew = x0 + xc
return xnew
def update_temp(self):
self.T = self.T0/(1+self.k)
self.k += 1
return
class boltzmann_sa(base_schedule):
def update_guess(self, x0):
std = minimum(sqrt(self.T)*ones(self.dims), (self.upper-self.lower)/3.0/self.learn_rate)
x0 = asarray(x0)
xc = squeeze(random.normal(0, 1.0, size=self.dims))
xnew = x0 + xc*std*self.learn_rate
return xnew
def update_temp(self):
self.k += 1
self.T = self.T0 / log(self.k+1.0)
return
class _state(object):
def __init__(self):
self.x = None
self.cost = None
# TODO:
# allow for general annealing temperature profile
# in that case use update given by alpha and omega and
# variation of all previous updates and temperature?
# Simulated annealing
def anneal(func, x0, args=(), schedule='fast', full_output=0,
T0=None, Tf=1e-12, maxeval=None, maxaccept=None, maxiter=400,
boltzmann=1.0, learn_rate=0.5, feps=1e-6, quench=1.0, m=1.0, n=1.0,
lower=-100, upper=100, dwell=50, cvar=0.05):
"""Minimize a function using simulated annealing.
Schedule is a schedule class implementing the annealing schedule.
Available ones are 'fast', 'cauchy', 'boltzmann'
Parameters
----------
func : callable f(x, *args)
Function to be optimized.
x0 : ndarray
Initial guess.
args : tuple
Extra parameters to `func`.
schedule : base_schedule
Annealing schedule to use (a class).
full_output : bool
Whether to return optional outputs.
T0 : float
Initial Temperature (estimated as 1.2 times the largest
cost-function deviation over random points in the range).
Tf : float
Final goal temperature.
maxeval : int
Maximum function evaluations.
maxaccept : int
Maximum changes to accept.
maxiter : int
Maximum cooling iterations.
learn_rate : float
Scale constant for adjusting guesses.
boltzmann : float
Boltzmann constant in acceptance test
(increase for less stringent test at each temperature).
feps : float
Stopping relative error tolerance for the function value in
last four coolings.
quench, m, n : float
Parameters to alter fast_sa schedule.
lower, upper : float or ndarray
Lower and upper bounds on `x`.
dwell : int
The number of times to search the space at each temperature.
Returns
-------
xmin : ndarray
Point giving smallest value found.
retval : int
Flag indicating stopping condition::
0 : Cooled to global optimum
1 : Cooled to final temperature
2 : Maximum function evaluations
3 : Maximum cooling iterations reached
4 : Maximum accepted query locations reached
Jmin : float
Minimum value of function found.
T : float
Final temperature.
feval : int
Number of function evaluations.
iters : int
Number of cooling iterations.
accept : int
Number of tests accepted.
"""
x0 = asarray(x0)
lower = asarray(lower)
upper = asarray(upper)
schedule = eval(schedule+'_sa()')
# initialize the schedule
schedule.init(dims=shape(x0),func=func,args=args,boltzmann=boltzmann,T0=T0,
learn_rate=learn_rate, lower=lower, upper=upper,
m=m, n=n, quench=quench, dwell=dwell, cvar=cvar)
current_state, last_state, best_state = _state(), _state(), _state()
if T0 is None:
best_state.x = x0
x0 = schedule.getstart_temp(best_state)
else:
best_state.x = None
best_state.cost = 300e8
last_state.x = asarray(x0).copy()
fval = func(x0,*args)
schedule.feval += 1
last_state.cost = fval
if last_state.cost < best_state.cost:
best_state.cost = fval
best_state.x = asarray(x0).copy()
schedule.T = schedule.T0
fqueue = [100, 300, 500, 700]
iters = 0
while 1:
for n in range(dwell):
current_state.x = schedule.update_guess(last_state.x)
current_state.cost = func(current_state.x,*args)
schedule.feval += 1
dE = current_state.cost - last_state.cost
if schedule.accept_test(dE):
last_state.x = current_state.x.copy()
last_state.cost = current_state.cost
if last_state.cost < best_state.cost:
best_state.x = last_state.x.copy()
best_state.cost = last_state.cost
schedule.update_temp()
iters += 1
# Stopping conditions
# 0) last saved values of f from each cooling step
# are all very similar (effectively cooled)
# 1) Tf is set and we are below it
# 2) maxeval is set and we are past it
# 3) maxiter is set and we are past it
# 4) maxaccept is set and we are past it
fqueue.append(squeeze(last_state.cost))
fqueue.pop(0)
af = asarray(fqueue)*1.0
if all(abs((af-af[0])/af[0]) < feps):
retval = 0
if abs(af[-1]-best_state.cost) > feps*10:
retval = 5
print("Warning: Cooled to %f at %s but this is not "
"the smallest point found."
% (squeeze(last_state.cost), squeeze(last_state.x)))
break
if (Tf is not None) and (schedule.T < Tf):
retval = 1
break
if (maxeval is not None) and (schedule.feval > maxeval):
retval = 2
break
if (iters > maxiter):
print("Warning: Maximum number of iterations exceeded.")
retval = 3
break
if (maxaccept is not None) and (schedule.accepted > maxaccept):
retval = 4
break
if full_output:
return best_state.x, best_state.cost, schedule.T, \
schedule.feval, iters, schedule.accepted, retval
else:
return best_state.x, retval
if __name__ == "__main__":
from numpy import cos
# minimum expected at ~-0.195
func = lambda x: cos(14.5*x-0.3) + (x+0.2)*x
print(anneal(func,1.0,full_output=1,upper=3.0,lower=-3.0,feps=1e-4,maxiter=2000,schedule='cauchy'))
print(anneal(func,1.0,full_output=1,upper=3.0,lower=-3.0,feps=1e-4,maxiter=2000,schedule='fast'))
print(anneal(func,1.0,full_output=1,upper=3.0,lower=-3.0,feps=1e-4,maxiter=2000,schedule='boltzmann'))
# minimum expected at ~[-0.195, -0.1]
func = lambda x: cos(14.5*x[0]-0.3) + (x[1]+0.2)*x[1] + (x[0]+0.2)*x[0]
print(anneal(func,[1.0, 1.0],full_output=1,upper=[3.0, 3.0],lower=[-3.0, -3.0],feps=1e-4,maxiter=2000,schedule='cauchy'))
print(anneal(func,[1.0, 1.0],full_output=1,upper=[3.0, 3.0],lower=[-3.0, -3.0],feps=1e-4,maxiter=2000,schedule='fast'))
print(anneal(func,[1.0, 1.0],full_output=1,upper=[3.0, 3.0],lower=[-3.0, -3.0],feps=1e-4,maxiter=2000,schedule='boltzmann'))
| {
"content_hash": "7236278ae03f3ea8ba1a2fa65093037e",
"timestamp": "",
"source": "github",
"line_count": 331,
"max_line_length": 128,
"avg_line_length": 33.368580060422964,
"alnum_prop": 0.5540968764146673,
"repo_name": "johnbachman/pysb",
"id": "46ecadac3ae287b2cf2de635d241088ff946932c",
"size": "11167",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pysb/anneal_mod.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "212"
},
{
"name": "Puppet",
"bytes": "3461"
},
{
"name": "Python",
"bytes": "1055948"
},
{
"name": "Ruby",
"bytes": "2124"
}
],
"symlink_target": ""
} |
"""Test config flow."""
from homeassistant.components.hassio.handler import HassioAPIError
from homeassistant.const import EVENT_HOMEASSISTANT_START
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, patch
async def test_hassio_discovery_startup(hass, aioclient_mock, hassio_client):
"""Test startup and discovery after event."""
aioclient_mock.get(
"http://127.0.0.1/discovery",
json={
"result": "ok",
"data": {
"discovery": [
{
"service": "mqtt",
"uuid": "test",
"addon": "mosquitto",
"config": {
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
},
}
]
},
},
)
aioclient_mock.get(
"http://127.0.0.1/addons/mosquitto/info",
json={"result": "ok", "data": {"name": "Mosquitto Test"}},
)
assert aioclient_mock.call_count == 0
with patch(
"homeassistant.components.mqtt.config_flow.FlowHandler.async_step_hassio",
return_value={"type": "abort"},
) as mock_mqtt:
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert aioclient_mock.call_count == 2
assert mock_mqtt.called
mock_mqtt.assert_called_with(
{
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
"addon": "Mosquitto Test",
}
)
async def test_hassio_discovery_startup_done(hass, aioclient_mock, hassio_client):
"""Test startup and discovery with hass discovery."""
aioclient_mock.post(
"http://127.0.0.1/supervisor/options",
json={"result": "ok", "data": {}},
)
aioclient_mock.get(
"http://127.0.0.1/discovery",
json={
"result": "ok",
"data": {
"discovery": [
{
"service": "mqtt",
"uuid": "test",
"addon": "mosquitto",
"config": {
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
},
}
]
},
},
)
aioclient_mock.get(
"http://127.0.0.1/addons/mosquitto/info",
json={"result": "ok", "data": {"name": "Mosquitto Test"}},
)
with patch(
"homeassistant.components.hassio.HassIO.update_hass_api",
return_value={"result": "ok"},
), patch(
"homeassistant.components.hassio.HassIO.get_info",
Mock(side_effect=HassioAPIError()),
), patch(
"homeassistant.components.mqtt.config_flow.FlowHandler.async_step_hassio",
return_value={"type": "abort"},
) as mock_mqtt:
await hass.async_start()
await async_setup_component(hass, "hassio", {})
await hass.async_block_till_done()
assert aioclient_mock.call_count == 2
assert mock_mqtt.called
mock_mqtt.assert_called_with(
{
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
"addon": "Mosquitto Test",
}
)
async def test_hassio_discovery_webhook(hass, aioclient_mock, hassio_client):
"""Test discovery webhook."""
aioclient_mock.get(
"http://127.0.0.1/discovery/testuuid",
json={
"result": "ok",
"data": {
"service": "mqtt",
"uuid": "test",
"addon": "mosquitto",
"config": {
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
},
},
},
)
aioclient_mock.get(
"http://127.0.0.1/addons/mosquitto/info",
json={"result": "ok", "data": {"name": "Mosquitto Test"}},
)
with patch(
"homeassistant.components.mqtt.config_flow.FlowHandler.async_step_hassio",
return_value={"type": "abort"},
) as mock_mqtt:
resp = await hassio_client.post(
"/api/hassio_push/discovery/testuuid",
json={"addon": "mosquitto", "service": "mqtt", "uuid": "testuuid"},
)
await hass.async_block_till_done()
assert resp.status == 200
assert aioclient_mock.call_count == 2
assert mock_mqtt.called
mock_mqtt.assert_called_with(
{
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
"addon": "Mosquitto Test",
}
)
| {
"content_hash": "d9fc7831b787738d6834bf25260caba9",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 82,
"avg_line_length": 32.68639053254438,
"alnum_prop": 0.45727733526430125,
"repo_name": "tboyce021/home-assistant",
"id": "3bb97a6662e1128cb8a912a24160c7215e5ab29a",
"size": "5524",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "tests/components/hassio/test_discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "28861968"
},
{
"name": "Shell",
"bytes": "4815"
}
],
"symlink_target": ""
} |
import heapq
from random import random
from warnings import warn
#--- PRIORITY QUEUE ----------------------------------------------------------------------------------
# Currently not in use.
INFINITY = 1e20
class priorityqueue(dict):
def push(self, e, w):
self[e] = w
def pop(self):
p, w = None, float(INFINITY)
for e in self:
if self[e] <= w: p, w = e, self[e]
if p: del self[p]
return p
#--- DEPTH-FIRST SEARCH ------------------------------------------------------------------------------
def depth_first_search(root, visit=lambda node: False, traversable=lambda node, edge: True):
""" Simple, multi-purpose depth-first search.
Visits all the nodes connected to the root, depth-first.
The visit function is called on each node.
    Recursion will stop if it returns True, and subsequently dfs() will return True.
The traversable function takes the current node and edge,
and returns True if we are allowed to follow this connection to the next node.
    For example, the traversable for directed edges is as follows:
lambda node, edge: node == edge.node1
Note: node._visited is expected to be False for all nodes.
"""
stop = visit(root)
root._visited = True
for node in root.links:
if stop: return True
if not traversable(root, root.links.edge(node)): continue
if not node._visited:
stop = depth_first_search(node, visit, traversable)
return stop
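# Illustrative usage sketch (editorial addition, not part of the original module;
# `root_node` stands for any node object from this graph library):
#
#   visited_ids = []
#   depth_first_search(root_node,
#       visit       = lambda node: visited_ids.append(node.id) or False,
#       traversable = lambda node, edge: node == edge.node1)   # follow directed edges only
#
# As the docstring notes, node._visited must be reset to False on every node
# before the search can be run again.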
#--- ADJACENCY LIST ----------------------------------------------------------------------------------
def adjacency(graph, directed=False, reversed=False, stochastic=False, heuristic=None):
""" An edge weight map indexed by node id's.
A dictionary indexed by node id1's in which each value is a
dictionary of connected node id2's linking to the edge weight.
If directed, edges go from id1 to id2, but not the other way.
If stochastic, all the weights for the neighbors of a given node sum to 1.
A heuristic can be a function that takes two node id's and returns
an additional cost for movement between the two nodes.
"""
v = {}
for n in graph.nodes:
v[n.id] = {}
for e in graph.edges:
id1 = e.node1.id
id2 = e.node2.id
if reversed:
id1, id2 = id2, id1
#if not v.has_key(id1): v[id1] = {}
#if not v.has_key(id2): v[id2] = {}
v[id1][id2] = 1.0 - e.weight*0.5
if heuristic:
v[id1][id2] += heuristic(id1, id2)
if not directed:
v[id2][id1] = v[id1][id2]
if stochastic:
for id1 in v:
d = sum(v[id1].values())
for id2 in v[id1]:
v[id1][id2] /= d
return v
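# Illustrative sketch of the structure adjacency() returns (editorial addition with
# hypothetical ids and weights, not part of the original module). For an undirected
# graph with edges a-b (weight 0.0) and b-c (weight 1.0):
#
#   {'a': {'b': 1.0},
#    'b': {'a': 1.0, 'c': 0.5},
#    'c': {'b': 0.5}}
#
# Each entry stores 1.0 - edge.weight*0.5, so heavier edges get a smaller
# traversal cost.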
#--- DIJKSTRA SHORTEST PATH --------------------------------------------------------------------------
def dijkstra_shortest_path(graph, id1, id2, heuristic=None, directed=False):
""" Dijkstra algorithm for finding shortest paths.
Connelly Barnes, http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466
Raises an IndexError between nodes on unconnected graphs.
"""
G = adjacency(graph, directed=directed, heuristic=heuristic)
start = id1
end = id2
# Flatten linked list of form [0,[1,[2,[]]]]
def flatten(L):
while len(L) > 0:
yield L[0]
L = L[1]
q = [(0, start, ())] # Heap of (cost, path_head, path_rest).
visited = set() # Visited vertices.
while True:
(cost1, v1, path) = heapq.heappop(q)
if v1 not in visited:
visited.add(v1)
if v1 == end:
return list(flatten(path))[::-1] + [v1]
path = (v1, path)
for (v2, cost2) in G[v1].iteritems():
if v2 not in visited:
heapq.heappush(q, (cost1 + cost2, v2, path))
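# Illustrative usage sketch (editorial addition with hypothetical node ids, not part
# of the original module):
#
#   path = dijkstra_shortest_path(graph, "a", "d")
#   # -> e.g. ["a", "b", "d"]: the cheapest chain of node ids from "a" to "d".
#
# If "a" and "d" are in different connected components the heap empties before the
# end node is reached and heappop() raises the IndexError mentioned in the docstring.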
#--- BRANDES BETWEENNESS CENTRALITY ------------------------------------------------------------------
def brandes_betweenness_centrality(graph, normalized=True, directed=False):
""" Betweenness centrality for nodes in the graph.
Betweenness centrality is a measure of the number of shortests paths that pass through a node.
Nodes in high-density areas will get a good score.
The algorithm is Brandes' betweenness centrality,
from NetworkX 0.35.1: Aric Hagberg, Dan Schult and Pieter Swart,
based on Dijkstra's algorithm for shortest paths modified from Eppstein.
https://networkx.lanl.gov/wiki
"""
G = graph.keys()
W = adjacency(graph, directed=directed)
betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G
for s in G:
S = []
P = {}
for v in G: P[v] = []
sigma = dict.fromkeys(G, 0) # sigma[v]=0 for v in G
D = {}
sigma[s] = 1
seen = { s: 0 }
Q = [] # use Q as heap with (distance, node id) tuples
heapq.heappush(Q, (0, s, s))
while Q:
(dist, pred, v) = heapq.heappop(Q)
if v in D: continue # already searched this node
sigma[v] = sigma[v] + sigma[pred] # count paths
S.append(v)
D[v] = seen[v]
for w in graph[v].links:
w = w.id
vw_dist = D[v] + W[v][w]
if w not in D and (w not in seen or vw_dist < seen[w]):
seen[w] = vw_dist
heapq.heappush(Q, (vw_dist, v, w))
P[w] = [v]
elif vw_dist == seen[w]: # handle equal paths
sigma[w] = sigma[w] + sigma[v]
P[w].append(v)
delta = dict.fromkeys(G,0)
while S:
w = S.pop()
for v in P[w]:
delta[v] = delta[v] + (float(sigma[v]) / float(sigma[w])) * (1.0 + delta[w])
if w != s:
betweenness[w] = betweenness[w] + delta[w]
#-----------------------------------
if normalized:
# Normalize between 0.0 and 1.0.
m = max(betweenness.values())
if m == 0: m = 1
else:
m = 1
betweenness = dict([(id, w/m) for id, w in betweenness.iteritems()])
return betweenness
#--- EIGENVECTOR CENTRALITY --------------------------------------------------------------------------
class NoConvergenceError(Exception): pass
def eigenvector_centrality(graph, normalized=True, reversed=True, rating={},
start=None, iterations=100, tolerance=0.0001):
""" Eigenvector centrality for nodes in the graph (like Google's PageRank).
Eigenvector centrality is a measure of the importance of a node in a directed network.
It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes.
Nodes with no incoming connections have a score of zero.
If you want to measure outgoing connections, reversed should be False.
The eigenvector calculation is done by the power iteration method.
It has no guarantee of convergence.
A starting vector for the power iteration can be given in the start dict.
You can adjust the importance of a node with the rating dictionary,
which links node id's to a score.
The algorithm is adapted from NetworkX, Aric Hagberg ([email protected]):
https://networkx.lanl.gov/attachment/ticket/119/eigenvector_centrality.py
"""
G = graph.keys()
W = adjacency (graph, directed=True, reversed=reversed)
def _normalize(x):
s = sum(x.values())
if s != 0: s = 1.0 / s
for k in x:
x[k] *= s
x = start
if x is None:
x = dict([(n, random()) for n in G])
_normalize(x)
# Power method: y = Ax multiplication.
for i in range(iterations):
x0 = x
x = dict.fromkeys(x0.keys(), 0)
for n in x:
for nbr in W[n]:
r = 1
if rating.has_key(n): r = rating[n]
x[n] += 0.01 + x0[nbr] * W[n][nbr] * r
_normalize(x)
e = sum([abs(x[n]-x0[n]) for n in x])
if e < len(graph.nodes) * tolerance:
if normalized:
# Normalize between 0.0 and 1.0.
m = max(x.values())
if m == 0: m = 1
x = dict([(id, w/m) for id, w in x.iteritems()])
return x
#raise NoConvergenceError
warn("node weight is 0 because eigenvector_centrality() did not converge.", Warning)
return dict([(n, 0) for n in G]) | {
"content_hash": "bef70711130478bbc77d3c1218b62f3c",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 102,
"avg_line_length": 34.21484375,
"alnum_prop": 0.5247174334969745,
"repo_name": "est/nodebox-gl",
"id": "9d9e38626e55b5053eba0582fc2313ef6b9b64e0",
"size": "8759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libraries/graph/proximity.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "998878"
},
{
"name": "Perl",
"bytes": "48202"
},
{
"name": "Python",
"bytes": "858930"
}
],
"symlink_target": ""
} |
"""Simple tool for inspecting nearest neighbors and analogies."""
from __future__ import print_function
import re
import sys
from getopt import GetoptError, getopt
from vecs import Vecs
try:
opts, args = getopt(sys.argv[1:], 'v:e:', ['vocab=', 'embeddings='])
except GetoptError as e:
print(e, file=sys.stderr)
sys.exit(2)
opt_vocab = 'vocab.txt'
opt_embeddings = None
for o, a in opts:
if o in ('-v', '--vocab'):
opt_vocab = a
if o in ('-e', '--embeddings'):
opt_embeddings = a
vecs = Vecs(opt_vocab, opt_embeddings)
while True:
sys.stdout.write('query> ')
sys.stdout.flush()
query = sys.stdin.readline().strip()
if not query:
break
parts = re.split(r'\s+', query)
if len(parts) == 1:
res = vecs.neighbors(parts[0])
elif len(parts) == 3:
vs = [vecs.lookup(w) for w in parts]
if any(v is None for v in vs):
print('not in vocabulary: %s' % (
', '.join(tok for tok, v in zip(parts, vs) if v is None)))
continue
res = vecs.neighbors(vs[2] - vs[0] + vs[1])
else:
print('use a single word to query neighbors, or three words for analogy')
continue
if not res:
continue
for word, sim in res[:20]:
print('%0.4f: %s' % (sim, word))
print()
| {
"content_hash": "3be717546faae80dd62fad8fa3f3c748",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 77,
"avg_line_length": 20.8,
"alnum_prop": 0.6121794871794872,
"repo_name": "derekjchow/models",
"id": "0fa828b66b7958ae06f115216cea0106ec029033",
"size": "1869",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "research/swivel/nearest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1523636"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33316"
},
{
"name": "Jupyter Notebook",
"bytes": "2831692"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "14201542"
},
{
"name": "Shell",
"bytes": "158255"
}
],
"symlink_target": ""
} |
import tomviz.operators
class BinaryThreshold(tomviz.operators.CancelableOperator):
def transform(self, dataset, lower_threshold=40.0, upper_threshold=255.0):
"""This filter computes a binary threshold on the data set and
stores the result in a child data set. It does not modify the dataset
passed in."""
# Initial progress
self.progress.value = 0
self.progress.maximum = 100
# Approximate percentage of work completed after each step in the
# transform
STEP_PCT = [20, 40, 75, 90, 100]
# Set up return value
returnValue = None
# Try imports to make sure we have everything that is needed
try:
self.progress.message = "Loading modules"
import itk
from tomviz import itkutils
except Exception as exc:
print("Could not import necessary module(s)")
raise exc
# Add a try/except around the ITK portion. ITK exceptions are
# passed up to the Python layer, so we can at least report what
        # went wrong with the script, e.g., unsupported image type.
try:
self.progress.value = STEP_PCT[0]
self.progress.message = "Converting data to ITK image"
# Get the ITK image
itk_image = itkutils.dataset_to_itk_image(dataset)
itk_input_image_type = type(itk_image)
self.progress.value = STEP_PCT[1]
self.progress.message = "Running filter"
# We change the output type to unsigned char 3D
            # (itk.Image.UC3) to save memory in the output label map
# representation.
itk_output_image_type = itk.Image.UC3
# ITK's BinaryThresholdImageFilter does the hard work
threshold_filter = itk.BinaryThresholdImageFilter[
itk_input_image_type, itk_output_image_type].New()
python_cast = itkutils.get_python_voxel_type(itk_image)
threshold_filter.SetLowerThreshold(python_cast(lower_threshold))
threshold_filter.SetUpperThreshold(python_cast(upper_threshold))
threshold_filter.SetInsideValue(1)
threshold_filter.SetOutsideValue(0)
threshold_filter.SetInput(itk_image)
itkutils.observe_filter_progress(self, threshold_filter,
STEP_PCT[2], STEP_PCT[3])
try:
threshold_filter.Update()
except RuntimeError:
return returnValue
self.progress.message = "Creating child data set"
# Set the output as a new child data object of the current data set
label_map_dataset = dataset.create_child_dataset()
itkutils.set_itk_image_on_dataset(threshold_filter.GetOutput(),
label_map_dataset)
self.progress.value = STEP_PCT[4]
returnValue = {
"thresholded_segmentation": label_map_dataset
}
except Exception as exc:
print("Problem encountered while running %s" %
self.__class__.__name__)
raise exc
return returnValue
| {
"content_hash": "cd474eb36ad938a5bfb8b1fb913e9b06",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 38.70238095238095,
"alnum_prop": 0.5933558904952322,
"repo_name": "OpenChemistry/tomviz",
"id": "de4e9b43e261ebd181c3167610f673596bb7d5a1",
"size": "3251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tomviz/python/BinaryThreshold.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "107"
},
{
"name": "C",
"bytes": "251"
},
{
"name": "C++",
"bytes": "2173416"
},
{
"name": "CMake",
"bytes": "47116"
},
{
"name": "Dockerfile",
"bytes": "7467"
},
{
"name": "Python",
"bytes": "552224"
},
{
"name": "Shell",
"bytes": "10796"
}
],
"symlink_target": ""
} |
import collections
import concurrent.futures
import copy
import datetime
import decimal
import functools
import hashlib
import itertools
import jinja2
import json
import os
import requests
from tarfile import ReadError
import time
from pathlib import PosixPath, WindowsPath
from contextlib import contextmanager
from dbt.exceptions import ConnectionException
from dbt.events.functions import fire_event
from dbt.events.types import RetryExternalCall
from dbt import flags
from enum import Enum
from typing_extensions import Protocol
from typing import (
Tuple, Type, Any, Optional, TypeVar, Dict, Union, Callable, List, Iterator,
Mapping, Iterable, AbstractSet, Set, Sequence
)
import dbt.exceptions
DECIMALS: Tuple[Type[Any], ...]
try:
import cdecimal # typing: ignore
except ImportError:
DECIMALS = (decimal.Decimal,)
else:
DECIMALS = (decimal.Decimal, cdecimal.Decimal)
class ExitCodes(int, Enum):
Success = 0
ModelError = 1
UnhandledError = 2
def coalesce(*args):
for arg in args:
if arg is not None:
return arg
return None
def get_profile_from_project(project):
target_name = project.get('target', {})
profile = project.get('outputs', {}).get(target_name, {})
return profile
def get_model_name_or_none(model):
if model is None:
name = '<None>'
elif isinstance(model, str):
name = model
elif isinstance(model, dict):
name = model.get('alias', model.get('name'))
elif hasattr(model, 'alias'):
name = model.alias
elif hasattr(model, 'name'):
name = model.name
else:
name = str(model)
return name
MACRO_PREFIX = 'dbt_macro__'
DOCS_PREFIX = 'dbt_docs__'
def get_dbt_macro_name(name):
if name is None:
raise dbt.exceptions.InternalException('Got None for a macro name!')
return f'{MACRO_PREFIX}{name}'
def get_dbt_docs_name(name):
if name is None:
raise dbt.exceptions.InternalException('Got None for a doc name!')
return f'{DOCS_PREFIX}{name}'
def get_materialization_macro_name(materialization_name, adapter_type=None,
with_prefix=True):
if adapter_type is None:
adapter_type = 'default'
name = f'materialization_{materialization_name}_{adapter_type}'
return get_dbt_macro_name(name) if with_prefix else name
def get_docs_macro_name(docs_name, with_prefix=True):
return get_dbt_docs_name(docs_name) if with_prefix else docs_name
def get_test_macro_name(test_name, with_prefix=True):
name = f'test_{test_name}'
return get_dbt_macro_name(name) if with_prefix else name
def split_path(path):
return path.split(os.sep)
def merge(*args):
if len(args) == 0:
return None
if len(args) == 1:
return args[0]
lst = list(args)
last = lst.pop(len(lst) - 1)
return _merge(merge(*lst), last)
def _merge(a, b):
to_return = a.copy()
to_return.update(b)
return to_return
# http://stackoverflow.com/questions/20656135/python-deep-merge-dictionary-data
def deep_merge(*args):
"""
>>> dbt.utils.deep_merge({'a': 1, 'b': 2, 'c': 3}, {'a': 2}, {'a': 3, 'b': 1}) # noqa
{'a': 3, 'b': 1, 'c': 3}
"""
if len(args) == 0:
return None
if len(args) == 1:
return copy.deepcopy(args[0])
lst = list(args)
last = copy.deepcopy(lst.pop(len(lst) - 1))
return _deep_merge(deep_merge(*lst), last)
def _deep_merge(destination, source):
if isinstance(source, dict):
for key, value in source.items():
deep_merge_item(destination, key, value)
return destination
def deep_merge_item(destination, key, value):
if isinstance(value, dict):
node = destination.setdefault(key, {})
destination[key] = deep_merge(node, value)
elif isinstance(value, tuple) or isinstance(value, list):
if key in destination:
destination[key] = list(value) + list(destination[key])
else:
destination[key] = value
else:
destination[key] = value
def _deep_map_render(
func: Callable[[Any, Tuple[Union[str, int], ...]], Any],
value: Any,
keypath: Tuple[Union[str, int], ...],
) -> Any:
atomic_types: Tuple[Type[Any], ...] = (int, float, str, type(None), bool)
ret: Any
if isinstance(value, list):
ret = [
_deep_map_render(func, v, (keypath + (idx,)))
for idx, v in enumerate(value)
]
elif isinstance(value, dict):
ret = {
k: _deep_map_render(func, v, (keypath + (str(k),)))
for k, v in value.items()
}
elif isinstance(value, atomic_types):
ret = func(value, keypath)
else:
container_types: Tuple[Type[Any], ...] = (list, dict)
ok_types = container_types + atomic_types
raise dbt.exceptions.DbtConfigError(
'in _deep_map_render, expected one of {!r}, got {!r}'
.format(ok_types, type(value))
)
return ret
def deep_map_render(
func: Callable[[Any, Tuple[Union[str, int], ...]], Any],
value: Any
) -> Any:
""" This function renders a nested dictionary derived from a yaml
file. It is used to render dbt_project.yml, profiles.yml, and
schema files.
It maps the function func() onto each non-container value in 'value'
recursively, returning a new value. As long as func does not manipulate
value, then deep_map_render will also not manipulate it.
value should be a value returned by `yaml.safe_load` or `json.load` - the
only expected types are list, dict, native python number, str, NoneType,
and bool.
func() will be called on numbers, strings, Nones, and booleans. Its first
parameter will be the value, and the second will be its keypath, an
iterable over the __getitem__ keys needed to get to it.
:raises: If there are cycles in the value, raises a
dbt.exceptions.RecursionException
"""
try:
return _deep_map_render(func, value, ())
except RuntimeError as exc:
if 'maximum recursion depth exceeded' in str(exc):
raise dbt.exceptions.RecursionException(
'Cycle detected in deep_map_render'
)
raise
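# Illustrative usage sketch for deep_map_render (editorial addition with hypothetical
# values, not part of the original module):
#
#   value = {"models": {"+enabled": "true", "tags": ["a", "b"]}}
#   rendered = deep_map_render(lambda v, keypath: str(v).upper(), value)
#   # func is called once per leaf value, e.g. with ("true", ("models", "+enabled"))
#   # and ("a", ("models", "tags", 0)); containers are rebuilt around the results.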
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__dict__ = self
def get_pseudo_test_path(node_name, source_path):
"schema tests all come from schema.yml files. fake a source sql file"
source_path_parts = split_path(source_path)
source_path_parts.pop() # ignore filename
suffix = ["{}.sql".format(node_name)]
pseudo_path_parts = source_path_parts + suffix
return os.path.join(*pseudo_path_parts)
def get_pseudo_hook_path(hook_name):
path_parts = ['hooks', "{}.sql".format(hook_name)]
return os.path.join(*path_parts)
def md5(string):
return hashlib.md5(string.encode('utf-8')).hexdigest()
def get_hash(model):
return hashlib.md5(model.unique_id.encode('utf-8')).hexdigest()
def get_hashed_contents(model):
return hashlib.md5(model.raw_sql.encode('utf-8')).hexdigest()
def flatten_nodes(dep_list):
return list(itertools.chain.from_iterable(dep_list))
class memoized:
'''Decorator. Caches a function's return value each time it is called. If
called later with the same arguments, the cached value is returned (not
reevaluated).
Taken from https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize'''
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.abc.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
'''Return the function's docstring.'''
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
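# Illustrative usage sketch (editorial addition with a hypothetical function, not part
# of the original module):
#
#   @memoized
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(30)  # computed once; later calls with the same argument come from self.cache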
K_T = TypeVar('K_T')
V_T = TypeVar('V_T')
def filter_null_values(input: Dict[K_T, Optional[V_T]]) -> Dict[K_T, V_T]:
return {k: v for k, v in input.items() if v is not None}
def add_ephemeral_model_prefix(s: str) -> str:
return '__dbt__cte__{}'.format(s)
def timestring() -> str:
"""Get the current datetime as an RFC 3339-compliant string"""
# isoformat doesn't include the mandatory trailing 'Z' for UTC.
return datetime.datetime.utcnow().isoformat() + 'Z'
class JSONEncoder(json.JSONEncoder):
"""A 'custom' json encoder that does normal json encoder things, but also
    handles `Decimal`s and `Undefined`s. Decimals can lose precision because
    they get converted to floats. `Undefined`s are serialized to an empty string.
"""
def default(self, obj):
if isinstance(obj, DECIMALS):
return float(obj)
if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
return obj.isoformat()
if isinstance(obj, jinja2.Undefined):
return ""
if hasattr(obj, 'to_dict'):
# if we have a to_dict we should try to serialize the result of
# that!
return obj.to_dict(omit_none=True)
return super().default(obj)
class ForgivingJSONEncoder(JSONEncoder):
def default(self, obj):
# let dbt's default JSON encoder handle it if possible, fallback to
# str()
try:
return super().default(obj)
except TypeError:
return str(obj)
class Translator:
def __init__(self, aliases: Mapping[str, str], recursive: bool = False):
self.aliases = aliases
self.recursive = recursive
def translate_mapping(
self, kwargs: Mapping[str, Any]
) -> Dict[str, Any]:
result: Dict[str, Any] = {}
for key, value in kwargs.items():
canonical_key = self.aliases.get(key, key)
if canonical_key in result:
dbt.exceptions.raise_duplicate_alias(
kwargs, self.aliases, canonical_key
)
result[canonical_key] = self.translate_value(value)
return result
def translate_sequence(self, value: Sequence[Any]) -> List[Any]:
return [self.translate_value(v) for v in value]
def translate_value(self, value: Any) -> Any:
if self.recursive:
if isinstance(value, Mapping):
return self.translate_mapping(value)
elif isinstance(value, (list, tuple)):
return self.translate_sequence(value)
return value
def translate(self, value: Mapping[str, Any]) -> Dict[str, Any]:
try:
return self.translate_mapping(value)
except RuntimeError as exc:
if 'maximum recursion depth exceeded' in str(exc):
raise dbt.exceptions.RecursionException(
'Cycle detected in a value passed to translate!'
)
raise
def translate_aliases(
kwargs: Dict[str, Any], aliases: Dict[str, str], recurse: bool = False,
) -> Dict[str, Any]:
"""Given a dict of keyword arguments and a dict mapping aliases to their
canonical values, canonicalize the keys in the kwargs dict.
If recurse is True, perform this operation recursively.
:return: A dict containing all the values in kwargs referenced by their
canonical key.
:raises: `AliasException`, if a canonical key is defined more than once.
"""
translator = Translator(aliases, recurse)
return translator.translate(kwargs)
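# Illustrative usage sketch (editorial addition with hypothetical keys, not part of
# the original module):
#
#   aliases = {"pre_hook": "pre-hook"}
#   translate_aliases({"pre_hook": "x", "tags": ["a"]}, aliases)
#   # -> {"pre-hook": "x", "tags": ["a"]}
#   # Passing both "pre_hook" and "pre-hook" would raise the duplicate-alias error.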
# Note that this only affects hologram json validation.
# It has no effect on mashumaro serialization.
def restrict_to(*restrictions):
"""Create the metadata for a restricted dataclass field"""
return {'restrict': list(restrictions)}
def coerce_dict_str(value: Any) -> Optional[Dict[str, Any]]:
"""For annoying mypy reasons, this helper makes dealing with nested dicts
easier. You get either `None` if it's not a Dict[str, Any], or the
Dict[str, Any] you expected (to pass it to dbtClassMixin.from_dict(...)).
"""
if (isinstance(value, dict) and all(isinstance(k, str) for k in value)):
return value
else:
return None
def _coerce_decimal(value):
if isinstance(value, DECIMALS):
return float(value)
return value
def lowercase(value: Optional[str]) -> Optional[str]:
if value is None:
return None
else:
return value.lower()
# some types need to make constants available to the jinja context as
# attributes, and regular properties only work with objects. maybe this should
# be handled by the RelationProxy?
class classproperty(object):
def __init__(self, func):
self.func = func
def __get__(self, obj, objtype):
return self.func(objtype)
def format_bytes(num_bytes):
for unit in ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB']:
if abs(num_bytes) < 1024.0:
return f"{num_bytes:3.1f} {unit}"
num_bytes /= 1024.0
num_bytes *= 1024.0
return f"{num_bytes:3.1f} {unit}"
def format_rows_number(rows_number):
for unit in ['', 'k', 'm', 'b', 't']:
if abs(rows_number) < 1000.0:
return f"{rows_number:3.1f}{unit}".strip()
rows_number /= 1000.0
rows_number *= 1000.0
return f"{rows_number:3.1f}{unit}".strip()
class ConnectingExecutor(concurrent.futures.Executor):
def submit_connected(self, adapter, conn_name, func, *args, **kwargs):
def connected(conn_name, func, *args, **kwargs):
with self.connection_named(adapter, conn_name):
return func(*args, **kwargs)
return self.submit(connected, conn_name, func, *args, **kwargs)
# a little concurrent.futures.Executor for single-threaded mode
class SingleThreadedExecutor(ConnectingExecutor):
def submit(*args, **kwargs):
# this basic pattern comes from concurrent.futures.Executor itself,
# but without handling the `fn=` form.
if len(args) >= 2:
self, fn, *args = args
elif not args:
raise TypeError(
"descriptor 'submit' of 'SingleThreadedExecutor' object needs "
"an argument"
)
else:
raise TypeError(
'submit expected at least 1 positional argument, '
'got %d' % (len(args) - 1)
)
fut = concurrent.futures.Future()
try:
result = fn(*args, **kwargs)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result(result)
return fut
@contextmanager
def connection_named(self, adapter, name):
yield
class MultiThreadedExecutor(
ConnectingExecutor,
concurrent.futures.ThreadPoolExecutor,
):
@contextmanager
def connection_named(self, adapter, name):
with adapter.connection_named(name):
yield
class ThreadedArgs(Protocol):
single_threaded: bool
class HasThreadingConfig(Protocol):
args: ThreadedArgs
threads: Optional[int]
def executor(config: HasThreadingConfig) -> ConnectingExecutor:
if config.args.single_threaded:
return SingleThreadedExecutor()
else:
return MultiThreadedExecutor(max_workers=config.threads)
def fqn_search(
root: Dict[str, Any], fqn: List[str]
) -> Iterator[Dict[str, Any]]:
"""Iterate into a nested dictionary, looking for keys in the fqn as levels.
Yield the level config.
"""
yield root
for level in fqn:
level_config = root.get(level, None)
if not isinstance(level_config, dict):
break
# This used to do a 'deepcopy',
# but it didn't seem to be necessary
yield level_config
root = level_config
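# Illustrative usage sketch (editorial addition with a hypothetical config, not part
# of the original module):
#
#   root = {"materialized": "view", "staging": {"materialized": "table"}}
#   list(fqn_search(root, ["staging", "stg_orders"]))
#   # -> [root, {"materialized": "table"}]
#   # "stg_orders" is not a dict key at the next level, so iteration stops there.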
StringMap = Mapping[str, Any]
StringMapList = List[StringMap]
StringMapIter = Iterable[StringMap]
class MultiDict(Mapping[str, Any]):
"""Implement the mapping protocol using a list of mappings. The most
recently added mapping "wins".
"""
def __init__(self, sources: Optional[StringMapList] = None) -> None:
super().__init__()
self.sources: StringMapList
if sources is None:
self.sources = []
else:
self.sources = sources
def add_from(self, sources: StringMapIter):
self.sources.extend(sources)
def add(self, source: StringMap):
self.sources.append(source)
def _keyset(self) -> AbstractSet[str]:
# return the set of keys
keys: Set[str] = set()
for entry in self._itersource():
keys.update(entry)
return keys
def _itersource(self) -> StringMapIter:
return reversed(self.sources)
def __iter__(self) -> Iterator[str]:
# we need to avoid duplicate keys
return iter(self._keyset())
def __len__(self):
return len(self._keyset())
def __getitem__(self, name: str) -> Any:
for entry in self._itersource():
if name in entry:
return entry[name]
raise KeyError(name)
def __contains__(self, name) -> bool:
return any((name in entry for entry in self._itersource()))
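# Illustrative usage sketch (editorial addition with hypothetical values, not part of
# the original module):
#
#   md = MultiDict([{"a": 1, "b": 2}])
#   md.add({"a": 10})
#   md["a"]   # -> 10: the most recently added source wins
#   md["b"]   # -> 2
#   len(md)   # -> 2 distinct keys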
def _connection_exception_retry(fn, max_attempts: int, attempt: int = 0):
"""Attempts to run a function that makes an external call, if the call fails
on a connection error, timeout or decompression issue, it will be tried up to 5 more times.
See https://github.com/dbt-labs/dbt-core/issues/4579 for context on this decompression issues
specifically.
"""
try:
return fn()
except (
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
requests.exceptions.ContentDecodingError,
ReadError,
) as exc:
if attempt <= max_attempts - 1:
fire_event(RetryExternalCall(attempt=attempt, max=max_attempts))
time.sleep(1)
            return _connection_exception_retry(fn, max_attempts, attempt + 1)  # propagate the retried call's result
else:
raise ConnectionException('External connection exception occurred: ' + str(exc))
# This is used to serialize the args in the run_results and in the logs.
# We do this separately because there are a few fields that don't serialize,
# i.e. PosixPath, WindowsPath, and types. It also includes args from both
# cli args and flags, which is more complete than just the cli args.
# If new args are added that are false by default (particularly in the
# global options) they should be added to the 'default_false_keys' list.
def args_to_dict(args):
var_args = vars(args).copy()
# update the args with the flags, which could also come from environment
# variables or user_config
flag_dict = flags.get_flag_dict()
var_args.update(flag_dict)
dict_args = {}
# remove args keys that clutter up the dictionary
for key in var_args:
if key == 'cls':
continue
if var_args[key] is None:
continue
# TODO: add more default_false_keys
default_false_keys = (
'debug', 'full_refresh', 'fail_fast', 'warn_error',
'single_threaded', 'log_cache_events', 'store_failures',
'use_experimental_parser',
)
if key in default_false_keys and var_args[key] is False:
continue
if key == 'vars' and var_args[key] == '{}':
continue
# this was required for a test case
if (isinstance(var_args[key], PosixPath) or
isinstance(var_args[key], WindowsPath)):
var_args[key] = str(var_args[key])
dict_args[key] = var_args[key]
return dict_args
| {
"content_hash": "4c183167e5e33c3c4b58de4ef4867b24",
"timestamp": "",
"source": "github",
"line_count": 658,
"max_line_length": 97,
"avg_line_length": 30.26595744680851,
"alnum_prop": 0.6260607582224454,
"repo_name": "analyst-collective/dbt",
"id": "e1f3fe537c1c230d1fb163e3327a4fdd45e611cc",
"size": "19915",
"binary": false,
"copies": "1",
"ref": "refs/heads/ct-117-readme_docs",
"path": "core/dbt/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "769"
},
{
"name": "Python",
"bytes": "284372"
},
{
"name": "Shell",
"bytes": "625"
}
],
"symlink_target": ""
} |
from numpy import *
from numpy.linalg import svd
from scipy.stats import norm as normal
from scipy import linalg as lin
import time
import itertools
import random
from learners import *
from cvxopt import matrix, solvers, spdiag
solvers.options['show_progress'] = False
solvers.options['maxiters'] = 2000
class HingeLossSGD(LinearLearner, PassiveSupervisedLearner):
def __init__(self, d, tau, tolerance = 10e-6):
LinearLearner.__init__(self, d, w = None)
self.tau = tau
self.tolerance = tolerance
gamma_0 = self.tau
self.rate = lambda t: gamma_0 * (1.0 + 0.1 * t)**(-1.0)
def batch_train(self, X, Y):
'''Given unlabeled training examples (one per row) in matrix X and their
associated (-1, +1) labels (one per row) in vector Y, returns a weight
vector w that determines a separating hyperplane, if one exists, using
        stochastic gradient descent on the hinge loss with margin parameter tau.'''
# Infer the sample size from the data
m = len(Y)
def hinge_loss(w, X, Y):
total = 0.0
for i in xrange(m):
total += max(0.0, 1.0 - Y[i] * dot(w, X[i]) / self.tau)
return total / m
w_star = ones(self.d)
w_star /= lin.norm(w_star, 2)
#print
#print 'w_star:', hinge_loss(w_star, X, Y)
t = 0
delta = -1
index = range(m)
# Pick starting weight vector randomly
self.w = normal.rvs(size = self.d)
#self.w = ones(self.d)
self.w /= lin.norm(self.w, 2)
end_loss = hinge_loss(self.w, X, Y)
# Proceed until the change in loss is small
while delta > self.tolerance or delta < 0.0:
start_loss = end_loss
# Randomize the order
random.shuffle(index)
# Iterate through data once (a single epoch)
for i in xrange(m):
t += 1
# If the margin is violated, make perceptron-like update
if Y[index[i]] * dot(self.w, X[index[i]]) < self.tau:
self.w += self.rate(t) * Y[index[i]] * X[index[i]] / self.tau
# If norm constraint is violated, normalize w
norm_w = lin.norm(self.w, 2)
if norm_w > 1.0:
self.w /= norm_w
# Check the change in loss over the epoch
end_loss = hinge_loss(self.w, X, Y)
delta = start_loss - end_loss
#print end_loss, delta, self.rate(t)
class HingeLossSGD2(LinearLearner, PassiveSupervisedLearner):
def __init__(self, d, tau, v, r, tolerance = 0.0001):
LinearLearner.__init__(self, d, w = None)
self.tau = tau
self.v = v
self.r = r
self.tolerance = tolerance
gamma_0 = self.tau
self.rate = lambda t: gamma_0 * (1.0 + 0.1 * t)**(-1.0)
def batch_train(self, X, Y):
'''Given unlabeled training examples (one per row) in matrix X and their
associated (-1, +1) labels (one per row) in vector Y, returns a weight
vector w that determines a separating hyperplane, if one exists, using
        stochastic gradient descent on the hinge loss, with iterates projected onto
        the ball of radius r around v.'''
# Infer the sample size from the data
m = len(Y)
def hinge_loss(w, X, Y):
total = 0.0
for i in xrange(m):
total += max(0.0, 1.0 - Y[i] * dot(w, X[i]) / self.tau)
return total / m
w_star = ones(self.d)
w_star /= lin.norm(w_star, 2)
#print
#print 'w_star:', hinge_loss(w_star, X, Y)
t = 0
delta = -1
index = range(m)
# Pick starting weight vector randomly
self.w = normal.rvs(size = self.d)
#self.w = ones(self.d)
self.w /= lin.norm(self.w, 2)
end_loss = hinge_loss(self.w, X, Y)
# Proceed until the change in loss is small
while delta > self.tolerance or delta < 0.0:
start_loss = end_loss
# Randomize the order
random.shuffle(index)
# Iterate through data once (a single epoch)
for i in xrange(m):
t += 1
# If the margin is violated, make perceptron-like update
if Y[index[i]] * dot(self.w, X[index[i]]) < self.tau:
self.w += self.rate(t) * Y[index[i]] * X[index[i]] / self.tau
# If norm constraint is violated, normalize w
norm_w = lin.norm(self.w, 2)
if norm_w > 1.0:
self.w /= norm_w
# If other constraint is violated, project w
vw = self.w - self.v
norm_vw = lin.norm(vw, 2)
if norm_vw > self.r:
self.w = self.v + self.r * vw / norm_vw
# Check the change in loss over the epoch
end_loss = hinge_loss(self.w, X, Y)
delta = start_loss - end_loss
#print end_loss, delta, self.rate(t)
class SVM(LinearLearner, PassiveSupervisedLearner):
def __init__(self, d):
LinearLearner.__init__(self, d, w = None)
def batch_train(self, X, Y):
'''Given unlabeled training examples (one per row) in matrix X and their
associated (-1, +1) labels (one per row) in vector Y, returns a weight
vector w that determines a separating hyperplane, if one exists, using
a support vector machine with standard linear kernel.'''
# Infer the sample size from the data
m = len(Y)
# Set up the appropriate matrices and call CVXOPT's quadratic programming
P = matrix(dot(X, X.T) * dot(Y, Y.T))
q = matrix(-ones(m))
G = matrix(-identity(m))
h = matrix(zeros(m))
alpha = solvers.qp(P, q, G, h)['x']
# Find the weight vector of the hyperplane from the Lagrange multipliers
self.w = dot(X.T, alpha * Y)
self.w = self.w.reshape((self.d,))
class soft_SVM(LinearLearner, PassiveSupervisedLearner):
def __init__(self, d, C):
LinearLearner.__init__(self, d, w = None)
self.C = C
def batch_train(self, X, Y):
'''Given unlabeled training examples (one per row) in matrix X and their
associated (-1, +1) labels (one per row) in vector Y, returns a weight
vector w that determines a separating hyperplane, if one exists, using
a support vector machine with standard linear kernel.'''
# Infer the sample size from the data
m = len(Y)
# Set up the appropriate matrices and call CVXOPT's quadratic programming
P = matrix(dot(X, X.T) * dot(Y, Y.T))
q = matrix(-ones(m))
G = matrix(vstack((-identity(m), identity(m))))
h = matrix(hstack((zeros(m), self.C * ones(m))))
alpha = solvers.qp(P, q, G, h)['x']
# Find the weight vector of the hyperplane from the Lagrange multipliers
self.w = dot(X.T, alpha * Y)
self.w = self.w.reshape((self.d,))
class LinearProgram(LinearLearner, PassiveSupervisedLearner):
def __init__(self, d):
LinearLearner.__init__(self, d, w = None)
def batch_train(self, X, Y):
'''Given unlabeled training examples (one per row) in matrix X and their
associated (-1, +1) labels (one per row) in vector Y, returns a weight
vector w that determines a separating hyperplane, if one exists, using
a linear program.'''
# Infer the dimension and sample size from the data
m = len(Y)
# Set up the appropriate matrices and call CVXOPT's linear programming
c = matrix(sign(normal.rvs(loc = 0, scale = 1.0, size = self.d)))
G = matrix(vstack([-Y * X, identity(self.d)]))
h = matrix(vstack([zeros((m, 1)), m**2*ones((self.d, 1))]))
self.w = solvers.lp(c, G, h)['x']
self.w = array(self.w).reshape((self.d,))
class Average(LinearLearner, PassiveSupervisedLearner):
def __init__(self, d):
LinearLearner.__init__(self, d, w = None)
def batch_train(self, X, Y):
self.w = (Y * X).sum(axis = 0)
self.w /= lin.norm(self.w, 2)
class BandSelection(LinearLearner, PassiveSupervisedLearner):
def __init__(self, d, num_iters):
self.num_iters = num_iters
self.bandparam = 0
self.radiusparam = 0
LinearLearner.__init__(self, d, w = None)
def param_calc(self, X, k, typeof):
sorteddistance = sorted(X)
#print sorteddistance
length = len(sorteddistance)
print "Range of distances in kernel space is"
print sorteddistance[0]
print sorteddistance[length-1]
ratio = 1
if typeof == "exp":
frac = pow(2, 1 - k) * ratio
elif typeof == "inv":
frac = pow(k, -1) * ratio
elif typeof == "lin":
frac = (1 - (k - 1) / (self.num_iters - 1.0)) * ratio
else:
raise ValueError
print
print frac
self.radiusparam = 2 * frac
print 'radius:', self.radiusparam
num_points = int(ceil(length * frac))
print 'points within band:', num_points
self.bandparam = sorteddistance[num_points - 1]
# print 'band:', self.bandparam
class PCA(LinearLearner, PassiveSupervisedLearner):
def __init__(self, d):
self.var = None
LinearLearner.__init__(self, d, w = None)
def pca_run(self, X):
n = mean(X, axis=0)
X -= n
Cov = cov(X.T)
eigenval, eigenvec = lin.eig(Cov)
idx = argsort(eigenval)[::-1]
eigenvec = eigenvec[:,idx]
eigenval = eigenval[idx]
self.w = eigenvec[0].real
self.w = self.w.reshape((self.d,))
return eigenval[0].real, eigenvec[0].real
def variance_calc(self, X):
X -= mean(X)
row = len(X)
total = 0
for i in range(row):
total += pow(X[i],2)
self.var = total/row
def train(self, X, Y):
# Getting initial values from PCA
        val, vec = self.pca_run(X)  # pca_run() only takes the data matrix
row,col = X.shape
comp = 10*row*log(row)/col
# Check value of first eigenvalue with (10*number of examples*log(number of examples)/dimensions), and iterating likewise
while val >= comp:
# Remove all values from X with greater than the eligible variance
for p in range(row):
print vec,X[p]
rem = pow(dot(vec,X[p]),2)
if rem >= (comp/row):
#c.remove(ids[p]) # Removal of outliers
                    X = delete(X, p, axis = 0)  # drop the outlier row, keep X two-dimensional
Y = delete(Y, p)
# Recalculate the PCA with the new arrays of X and Y
            val, vec = self.pca_run(X)
row,col = X.shape
comp = 10*row*log(row)/col
# Calculate w if first eigenvalue is less than the eligible variance
self.w = 1.0/row * dot(X.T,Y)
self.w = self.w.reshape((self.d,))
self.var = val
print self.w, self.var
class soft_SVM_q():
def __init__(self, d, q, C):
self.d = d
self.q = q
self.C = C # Smaller C makes margin more important
# Larger C makes hinge loss more important
self.w = None
def batch_train(self, X, Y):
'''
Given unlabeled training examples (one per row) in matrix X and their
associated (-1, +1) labels (one per row) in vector Y, returns a weight
vector w that determines a separating hyperplane, if one exists, using
a q-norm support vector machine with standard linear kernel.
'''
m = len(Y)
# First find a feasible solution and create the objective function
lp = soft_SVM(self.d, self.C)
lp.batch_train(X, Y)
s = 1.0 - dot(Y * X, lp.w)
s[s < 0.0] = 0.0
x_0 = hstack((lp.w, s))
F = make_soft_q_svm_primal_objective(self.d, m, self.q, self.C, x_0)
# Set up the appropriate matrices and call CVXOPT's convex programming
G_top = -hstack((Y * X, identity(m)))
G_bottom = -hstack((zeros((m, self.d)), identity(m)))
G_fix1 = hstack((identity(self.d), zeros((self.d, m))))
G_fix2 = -hstack((identity(self.d), zeros((self.d, m))))
G = matrix(vstack((G_top, G_bottom, G_fix1, G_fix2)))
h = matrix(hstack((-ones(m), zeros(m), 1e3 * ones(self.d), 1e3 * ones(self.d) )))
# Change solver options
solvers.options['maxiters'] = 100
solvers.options['abstol'] = 1e-3
solvers.options['reltol'] = 1e-2
result = solvers.cp(F, G, h)
# Reset solver options to defaults
solvers.options['maxiters'] = 2000
solvers.options['abstol'] = 1e-7
solvers.options['reltol'] = 1e-6
z = result['x']
self.w = array(z[:self.d]).reshape((self.d,))
def classify(self, x):
return sign(dot(self.w, x))
def margin(self, x):
return dot(self.w, x)
def make_soft_q_svm_primal_objective(n, m, q, C, x_0 = None):
if x_0 is None:
        x_0 = normal.rvs(loc = 0.0, scale = 0.1, size = n + m)  # 'r' was undefined; draw from the imported scipy.stats norm
# Choose normalization constant so objective function values starts at 10.0
w_0 = x_0[:n]
s_0 = x_0[n:]
scale = 1.0 / (sum(abs(w_0)**q) / q + C * sum(s_0))
x_0 = matrix(x_0.reshape((n + m, 1)))
def F(x = None, z = None):
# Case 1
if x is None and z is None:
return (0, x_0)
# Case 2 and 3
else:
w = x[:n]
s = x[n:]
abs_w = abs(w)
f = scale * (sum(abs_w**q) / q + C * sum(s))
Df_w = sign(w) * abs_w**(q - 1.0)
Df_s = C * ones((m, 1))
Df = scale * vstack((Df_w, Df_s))
Df = matrix(Df.reshape((1, n + m)))
# Case 2 only
if z is None:
return (f, Df)
# Case 3 only
else:
try:
H_w = scale * z * (q - 1.0) * abs_w**(q - 2.0)
except (ValueError, RuntimeWarning):
#print 'abs_w:', abs_w
#print 'power:', (q - 2.0)
H_w = scale * z * (q - 1.0) * (abs_w + 1e-20)**(q - 2.0)
H_s = zeros((m, 1))
diag_H = matrix(vstack((H_w, H_s)))
H = spdiag(diag_H)
return (f, Df, H)
return F
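# Note (editorial sketch of the CVXOPT convention used by F above, to the best of our
# understanding; see the cvxopt.solvers.cp documentation): called with no arguments,
# F() returns (k, x_0), where k is the number of nonlinear constraints beyond the
# objective and x_0 is a feasible starting point; F(x) returns the function values f
# and the Jacobian Df; F(x, z) additionally returns H, the Hessian of z'*f, which is
# why the code above wraps the diagonal second derivative in spdiag().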
class QP(LinearLearner, PassiveSupervisedLearner):
def __init__(self, d):
LinearLearner.__init__(self, d, w = None)
def train(self, X, Y, radius, normfac, prevw):
solvers.options['show_progress'] = False
# Reduce maxiters and tolerance to reasonable levels
solvers.options['maxiters'] = 200
solvers.options['abstol'] = 1e-2
solvers.options['feastol'] = 1e-2
row, col = X.shape
n = row + self.d
prevw = prevw.reshape((self.d, 1))
x_0 = matrix(0.0, (n, 1))
x_0[:row] = 1.0 - Y * dot(X, prevw) / normfac
x_0[row:] = prevw
        # x represents all the variables in an array, the first ones Ei and then each dimension of w, updated to 1/row
c = matrix(row*[1.0] + self.d*[0.0]) # the objective function represented as a sum of Ei
scale_factor = float(dot(c.T, x_0))
if scale_factor > 0.0:
c /= scale_factor
helper = matrix(array(row*[0.0] + self.d*[1.0]).reshape((n, 1)))
r2 = radius**2
def F(x = None, z = None):
if x is None:
return (2, x_0) # Ei starts from 1 and w starts from 1
w = x[row:]
diff = w - prevw
f = matrix(0.0, (2, 1))
f[0] = dot(diff.T, diff)[0] - r2 # the first non-linear constraint ||w-w[k-1]||^2 < r[k]^2
f[1] = dot(w.T, w)[0] - 1.0 # the second non-linear constraint ||w||^2 < 1
            Df = matrix(0.0, (2, n)) # creating the Df matrix, one row for each non-linear equation with variables as columns
Df[0, row:] = 2.0 * diff.T # derivative of first non-linear equation, populates a sparse matrix
Df[1, row:] = 2.0 * w.T # derivative of second non-linear equation, populates a sparse matrix
if z is None:
return f, Df
diag_H = 2.0 * z[0] + 2.0 * z[1] * helper # Each nonlinear constraint has second derivative 2I w.r.t. w and 0 w.r.t. eps
H = spdiag(diag_H)
return f, Df, H
# for linear inequalities
        G = matrix(0.0, (row*2, n)) # there are two linear constraints for Ei, and for each Ei the entire w
h = matrix(0.0, (row*2, 1))
for i in range(row):
G[i,i] = -1.0 # -Ei <= 0
G[row+i, i] = -1.0
h[row+i] = -1.0
for j in range(self.d):
G[row+i, row+j] = (-Y[i][0]/normfac)*X[i,j] # -Ei - yi/Tk(w.xi) <= -1
# solve and return w
sol = solvers.cpl(c, F, G, h)
self.w = sol['x'][row:]
self.w = array(self.w).reshape((self.d,))
#print
#print sol['status']
'''
print 'Radius wanted'
print radius
print 'Output of quadratic solver'
print self.w
print ' Norm of output of quadratic solver pre-normalization'
print sqrt(dot(self.w.T, self.w))
print ' Distance to the previous weight vector pre-normalization'
print sqrt(dot((self.w-prevw).T, (self.w-prevw)))
'''
self.w = self.w/sqrt(dot(self.w.T,self.w)) # Normalizing the vector output
'''
print 'Output of quadratic solver post -norm'
print self.w
print ' Norm of output of quadratic solver post-normalization'
print sqrt(dot(self.w.T, self.w))
print ' Distance to the previous weight vector post-normalization'
print sqrt(dot((self.w-prevw).T, (self.w-prevw)))
'''
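# Note (editorial sketch, not part of the original module): the convex program solved
# by QP.train above is, written out,
#
#   minimize    sum_i Ei
#   subject to  Ei >= 0                                   (linear rows of G, h)
#               y_i * dot(w, x_i) / normfac >= 1 - Ei     (linear rows of G, h)
#               ||w - prevw||^2 <= radius^2               (nonlinear constraint f[0])
#               ||w||^2 <= 1                              (nonlinear constraint f[1])
#
# i.e. hinge loss at margin normfac restricted to a ball around the previous weight
# vector, with the solution renormalized to unit length afterwards.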
class OutlierRemoval(LinearLearner, PassiveSupervisedLearner):
def __init__(self, d):
LinearLearner.__init__(self, d, w = None)
self.weightdist = None
def train(self, X, band, radius, normfac, prevw, bound):
# Set max iterations to 5000
max_iterations = 2000
out_itercount = 1
row, col = X.shape
# Calculate the variance limit in the data
# sigma = pow(radius,2) + lin.norm(prevw,2)
# Set q(x) to 1 for start
q = ones(row)
# Objective function for q(x)
def objectiveq(q, sep, X):
return sum(q * pow(dot(sep, X.T), 2)) / row
# Constraint on q(x)
def constraintq(q, bound):
# Repeat the following until convergence
while True:
# Threshold at 0 and 1
q[q < 0.0] = 0.0
q[q > 1.0] = 1.0
# Check the total weight
if sum(q) >= (1.0 - bound) * row - 0.01:
break
# Scale up the weights, but only increase those less than 1
else:
q[q < 1.0] *= 1.0 / sum(q[q < 1.0]) * ((1.0 - bound) * row - sum(q[q == 1.0]))
return q
# Starting the outer gradient descent loop for q(x)
end_obj = inf
diff = 1
# print
# print end_obj
start_outer = time.time()
while (diff > pow(10,-4) or diff < 0) and out_itercount < max_iterations:
start_obj = end_obj
# Use SVD to maximize over w
linsep, new_obj = constrained_variance_maximization(X, q, prevw, radius)
# update q
outer_rate = 0.1
w_dot_x_2 = pow(dot(linsep, X.T), 2)
q -= outer_rate * w_dot_x_2 / lin.norm(w_dot_x_2, 2)
# check constraints
q = constraintq(q, bound)
#print "the distribution weights"
# print q
# print min(q)
end_obj = objectiveq(q, linsep , X)
# print end_obj
diff = start_obj - end_obj
#print 'Start Obj and End Obj w.r.t to q ' + str(start_obj) + " " + str(end_obj)
#print('\n')
out_itercount = out_itercount + 1
# print out_itercount
end_outer = time.time()
#print " Total time for outer loop run " + str(end_outer - start_outer)
#print 'Optimal q satisfying all conditions is '
#print q
self.weightdist = q
def constrained_variance_maximization(X, q, u, r):
# X is n x d
# q is n x 1
# u is d x 1
# r is scalar
# Returns (w, val) where w maximizes sum_{i=1}^n q[i] * dot(w, x[i])^2
# subject to ||w|| = 1 and ||w - u|| <= r,
# and where val is the value of that maximum.
n, d = X.shape
q = q.reshape((n, 1))
u = u.reshape((d, 1))
Xq = sqrt(q) * X
XqT_Xq = dot(Xq.T, Xq)
# First check if the first principle component satisfies the constraints
left, diagonal, right = svd(XqT_Xq)
w1 = right[0].reshape((d, 1))
val1 = diagonal[0]
if lin.norm(u - w1, 2) <= r or lin.norm(u + w1, 2) <= r:
return w1.reshape((d,)), val1
# Now project the data
Xq_proj = Xq - dot(Xq, u) * tile(u.T, (n, 1))
# Find the first principle component of the projected data
left, diagonal, right = svd(dot(Xq_proj.T, Xq_proj))
v = right[0].reshape((d, 1))
# This should be close to zero
# assert abs(dot(u.T, v)) <= 0.01
# Construct the vector and the value in the original space
c1 = (1.0 + dot(u.T, u) - r**2) / 2.0
c2 = sqrt(1.0 - c1**2)
w = c1 * u + c2 * v
val = dot(dot(w.T, XqT_Xq), w)[0, 0]
# Check the result
# print
# print dot(dot(u.T, XqT_Xq), u)[0, 0]
# print val
# print val1
# print lin.norm(w, 2)
# print lin.norm(u - w, 2), r
# assert dot(dot(u.T, XqT_Xq), u) <= val <= val1
# assert 0.99 <= lin.norm(w, 2) <= 1.01
# assert lin.norm(u - w, 2) <= r + 0.01
return w.reshape((d,)), val
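# Note (editorial sketch, not part of the original module): when the top principal
# component violates the ball constraint, the code above builds w = c1*u + c2*v, where
# v is the top component of the data projected orthogonally to u. Assuming ||u|| = 1,
# the choices c1 = (1 + dot(u, u) - r^2) / 2 and c2 = sqrt(1 - c1^2) give
#
#   ||w||^2     = c1^2 + c2^2          = 1      (unit norm)
#   ||w - u||^2 = (c1 - 1)^2 + c2^2    = r^2    (on the boundary of the ball)
#
# so the constrained maximizer lies on the intersection of the unit sphere and the
# boundary of the ball around u.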
'''
class QPwithoutBandConstraint(LinearLearner, PassiveSupervisedLearner):
def __init__(self, d):
LinearLearner.__init__(self, d, w = None)
def train(self, X, Y, radius, normfac, prevw):
#Have commented out all the equations relating to the band constraint from this solver
solvers.options['show_progress'] = True
solvers.options['maxiters'] = 10000
row, col = X.shape
# x represents all the variables in an array, the first ones Ei and then each dimenstion of w, updated to 1/row
c = matrix(row*[1.0] + self.d*[0.0]) # the objective function represented as a sum of Ei
def F(x=None, z=None):
if x is None: return 1, matrix(row*[1.0] + self.d*[1.0]) # Ei starts from 1 and w starts from 1
f = matrix(0.0, (1,1))
#f[0] = sqrt(dot((x[row:].T-prevw),(x[row:].T-prevw).T))-radius # the first non-linear constraint ||w-w[k-1]||^2 < r[k]
f[0] = sqrt(dot(x[row:].T,x[row:])) -1 # the second non-linear constraint ||w||^2 <1
Df = matrix(0.0, (1,row+self.d)) # creating the Df martrix, one row for each non-linear equation with variables as columns
#Df[0,row:] = 2.0 * (x[row:].T-prevw[:]) # derivative of first non-linear equation, populates a sparse matrix
Df[0,row:] = 2.0 * x[row:].T # derivative of second non-linear equation, populates a sparse matrix
if z is None: return f, Df
secder = matrix(row*[0.0] + self.d*[2.0])
H = matrix(0.0, (row+self.d, row+self.d))
for i in range(self.d):
H[row+i,row+i] = z[0]*secder[row+i] # returns the second derivative, a sparse matrix
return f, Df, H
# for linear inequalities
G = matrix(0.0,(row*2, row+self.d)) # there are two linear constaints for Ei, and for each Ei the entire w
h = matrix(0.0, (row*2, 1))
for i in range(row):
G[i,i] = -1.0 # -Ei <= 0
G[row+i, i] = -1.0
h[row+i] = -1.0
for j in range(self.d):
G[row+i, row+j] = (-Y[i][0]/normfac)*X[i,j] # -Ei - yi/Tk(w.xi) <= -1
# solve and return w
sol = solvers.cpl(c, F, G, h)
self.w = sol['x'][row:]
self.w = array(self.w).reshape((self.d,))
print sol
print 'Radius wanted'
print radius
print 'Output of quadratic solver'
print self.w
print ' Norm of output of quadratic solver pre-normalization'
print sqrt(dot(self.w.T, self.w))
print ' Distance to the previous weight vector pre-normalization'
print sqrt(dot((self.w-prevw).T, (self.w-prevw)))
self.w = self.w/sqrt(dot(self.w.T,self.w)) # Normalizing the vector output
print 'Output of quadratic solver post -norm'
print self.w
print ' Norm of output of quadratic solver post-normalization'
print sqrt(dot(self.w.T, self.w))
print ' Distance to the previous weight vector post-normalization'
print sqrt(dot((self.w-prevw).T, (self.w-prevw)))
class QPwithoutNormConstraint(LinearLearner, PassiveSupervisedLearner):
def __init__(self, d):
LinearLearner.__init__(self, d, w = None)
def train(self, X, Y, radius, normfac, prevw):
#Have commented out all the equations relating to the norm constraint on W from this solver
#solvers.options['show_progress'] = True
#solvers.options['maxiters'] = 10000
row, col = X.shape
# x represents all the variables in an array, the first ones Ei and then each dimenstion of w, updated to 1/row
c = matrix(row*[1.0] + self.d*[0.0]) # the objective function represented as a sum of Ei
def F(x=None, z=None):
if x is None: return 1, matrix(row*[1.0] + self.d*[1.0]) # Ei starts from 1 and w starts from 1
f = matrix(0.0, (1,1))
f[0] = sqrt(dot((x[row:].T-prevw),(x[row:].T-prevw).T))-radius # the first non-linear constraint ||w-w[k-1]||^2 < r[k]
#f[0] = sqrt(dot(x[row:].T,x[row:])) -1 # the second non-linear constraint ||w||^2 <1
Df = matrix(0.0, (1,row+self.d)) # creating the Df martrix, one row for each non-linear equation with variables as columns
Df[0,row:] = 2.0 * (x[row:].T-prevw[:]) # derivative of first non-linear equation, populates a sparse matrix
#Df[0,row:] = 2.0 * x[row:].T # derivative of second non-linear equation, populates a sparse matrix
if z is None: return f, Df
secder = matrix(row*[0.0] + self.d*[2.0])
H = matrix(0.0, (row+self.d, row+self.d))
for i in range(self.d):
H[row+i,row+i] = z[0]*secder[row+i] # returns the second derivative, a sparse matrix
return f, Df, H
# for linear inequalities
G = matrix(0.0,(row*2, row+self.d)) # there are two linear constaints for Ei, and for each Ei the entire w
h = matrix(0.0, (row*2, 1))
for i in range(row):
G[i,i] = -1.0 # -Ei <= 0
G[row+i, i] = -1.0
h[row+i] = -1.0
for j in range(self.d):
G[row+i, row+j] = (-Y[i][0]/normfac)*X[i,j] # -Ei - yi/Tk(w.xi) <= -1
# solve and return w
sol = solvers.cpl(c, F, G, h)
self.w = sol['x'][row:]
#print self.w
self.w = array(self.w).reshape((self.d,))
self.w = self.w/sqrt(dot(self.w.T,self.w)) # Normalizing the vector output
#print sol
'''
#####################################################################################################################################
class KernelSVM(KernelLearner):
def __init__(self, d, kernel):
KernelLearner.__init__(self, d, kernel)
def batch_train(self, X, Y):
'''Given training examples (one per row) in matrix X and their
associated (-1, +1) labels (one per row) in vector Y, trains a hard-margin
support vector machine in the feature space induced by self.kernel by
solving the dual QP, and stores the weighted support vectors
(alpha_i * y_i, x_i) in self.support.'''
# Infer the sample size from the data
m = len(Y)
K = zeros((m,m))
for i in range(m):
for j in range(m):
K[i,j] = self.kernel(X[i],X[j])
# Set up the appropriate matrices and call CVXOPT's quadratic programming
P = matrix(K * dot(Y, Y.T))
q = matrix(-ones(m))
G = matrix(-identity(m))
h = matrix(zeros(m))
alpha = solvers.qp(P, q, G, h)['x']
#storing the required values in the KernelLearner.support variable
for i in range(m):
temp = alpha[i] * Y[i]
self.support.append([temp, X[i]])
class Kernel_soft_SVM(KernelLearner):
def __init__(self, d, C, kernel):
KernelLearner.__init__(self, d, kernel)
self.C = C
def batch_train(self, X, Y):
'''Given training examples (one per row) in matrix X and their
associated (-1, +1) labels (one per row) in vector Y, trains a soft-margin
support vector machine (box constraint 0 <= alpha_i <= C) in the feature
space induced by self.kernel by solving the dual QP, and stores the
weighted support vectors (alpha_i * y_i, x_i) in self.support.'''
# Infer the sample size from the data
m = len(Y)
K = zeros((m,m))
for i in range(m):
for j in range(m):
K[i,j] = self.kernel(X[i],X[j])
# Set up the appropriate matrices and call CVXOPT's quadratic programming
P = matrix(K * dot(Y, Y.T))
q = matrix(-ones(m))
G = matrix(vstack((-identity(m), identity(m))))
h = matrix(hstack((zeros(m), self.C * ones(m))))
alpha = solvers.qp(P, q, G, h)['x']
#storing the required values in the KernelLearner.support variable
for i in range(m):
temp = alpha[i] * Y[i]
self.support.append([temp, X[i]])
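# --------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module). It shows how the two batch kernel SVMs above could be exercised
# on a toy 2-D problem with a plain linear kernel. The shapes assumed here
# (X as an m x d numpy array, Y as an m x 1 array of +/-1 labels) are
# inferred from the indexing in batch_train and may differ from the rest of
# the project; CVXOPT must be installed for solvers.qp to run.
def _demo_batch_kernel_svms():
    import numpy as np
    linear_kernel = lambda a, b: float(np.dot(a, b))
    X = np.array([[1.0, 1.0], [2.0, 1.5], [-1.0, -1.0], [-2.0, -1.5]])
    Y = np.array([[1.0], [1.0], [-1.0], [-1.0]])
    hard = KernelSVM(2, linear_kernel)
    hard.batch_train(X, Y)            # fills hard.support with (alpha_i*y_i, x_i) pairs
    soft = Kernel_soft_SVM(2, 1.0, linear_kernel)
    soft.batch_train(X, Y)            # same, with the alphas capped at C = 1.0
    return hard.support, soft.support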
class KernelQP(KernelLearner, PassiveSupervisedLearner):
def __init__(self, d, kernel):
KernelLearner.__init__(self, d, kernel)
def train(self, X, Y, normfac):
solvers.options['show_progress'] = False
# Reduce maxiters and tolerance to reasonable levels
solvers.options['maxiters'] = 200
solvers.options['abstol'] = 1e-2
solvers.options['feastol'] = 1e-2
row, col = X.shape
P = matrix(0.0, (row,row))
# Calculating the Kernel Matrix
for i in range(row):
for j in range(row):
P[i,j] = Y[i] * self.kernel(X[i],X[j]) * Y[j] # It's a PSD matrix, so it's okay!
# A point in the solution space for objective
x_0 = matrix(0.5, (row, 1))
normarr = matrix(normfac, (1,row))
def F(x = None, z = None):
if x is None:
return (0, x_0) # Alpha's start from 0.5, first value is zero as there are zero non-linear objectives
term = matrix(sqrt(x.T * P * x))
f = matrix(term - normfac * sum(x)) # return the objective function
# first derivative
Df = (x.T * P)/term - normarr # normfac is subtracted for each alpha, so normarr is an array
if z is None:
return f, Df
term2 = matrix((P*x) * (P*x).T)
H = z[0] * (P/term - term2/pow(term,3)) # Second derivative of the objective function, is a symmetric matrix, so no need for spDiag ?
return f, Df, H
# for linear inequalities
G = matrix(0.0, (row*2, row)) # there are two linear constraints for Alpha
h = matrix(0.0, (row*2, 1))
for i in range(row):
G[i,i] = -1.0 # -Alpha <= 0
G[row+i, i] = 1.0 # Alpha <= 1
h[row+i] = 1.0
# solve and return w
sol = solvers.cp(F, G, h)
alpha = sol['x']
for i in range(row):
self.support.append([alpha[i] * Y[i], X[i]])
#print
#print sol
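# --------------------------------------------------------------------------
# Hedged verification sketch (added for illustration, not part of the
# original module). KernelQP.train above minimizes
#     f(a) = sqrt(a' P a) - normfac * sum(a)
# and supplies the gradient  P a / sqrt(a' P a) - normfac  and the Hessian
#     P / t - (P a)(P a)' / t^3,  with t = sqrt(a' P a),
# to CVXOPT. The helper below checks the gradient formula against finite
# differences on a random PSD matrix; the names, sizes and tolerances are
# illustrative assumptions only.
def _check_kernelqp_gradient(n=5, normfac=0.7, eps=1e-6):
    import numpy as np
    rng = np.random.RandomState(0)
    A = rng.rand(n, n)
    P = A.dot(A.T) + np.eye(n)                      # well-conditioned PSD matrix
    a = rng.rand(n) + 0.5
    f = lambda v: np.sqrt(v.dot(P).dot(v)) - normfac * v.sum()
    grad = P.dot(a) / np.sqrt(a.dot(P).dot(a)) - normfac
    num = np.array([(f(a + eps * np.eye(n)[i]) - f(a - eps * np.eye(n)[i])) / (2 * eps)
                    for i in range(n)])
    assert np.allclose(grad, num, atol=1e-5)
    return grad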
class KernelQPwithLinearBand(KernelLearner, PassiveSupervisedLearner):
def __init__(self, d, kernel):
KernelLearner.__init__(self, d, kernel)
def train(self, X, Y, normfac, radius, prevw):
# the weight vector w is represented as pairs (alpha_i * y_i, x_i); pass in only the required number of rows
solvers.options['show_progress'] = False
# Reduce maxiters and tolerance to reasonable levels
solvers.options['maxiters'] = 2000
solvers.options['abstol'] = 1e-2
solvers.options['feastol'] = 1e-2
row, col = X.shape
P = matrix(0.0, (row+1,row+1))
# Calculating the Kernel Matrix
# Kernel matrix will now include multiple kernel matrices
for i in range(row):
for j in range(row):
P[i,j] = Y[i] * self.kernel(X[i],X[j]) * Y[j] # It's a PSD matrix, so it's okay!
# Summing over the kernel values between current set of points and prevw
for i in range(row):
P[i,row] = normfac * Y[i] * sum( prevw[k][0] * self.kernel(prevw[k][1], X[i]) for k in range(len(prevw)) )
P[row,i] = P[i,row]
# summing over the kernel values of the entire prevw matrix
P[row, row] = pow(normfac,2) * sum( prevw[k][0] * self.kernel(prevw[k][1], prevw[r][1]) * prevw[r][0] for k,r in itertools.product(range(len(prevw)), range(len(prevw))) )
# A point in the solution space for objective
x_0 = matrix(0.5, (row+1, 1))
normarr = matrix([normfac]*row + [normfac*(1-pow(radius,2)/2)]).T
def F(x = None, z = None):
if x is None:
return (0, x_0) # Alpha's start from 0.5, first value is zero as there are zero non-linear objectives
term = matrix(sqrt(x.T * P * x))
f = matrix(term - normfac * sum(x[0:row]) - x[row] * normfac * (1-pow(radius,2)/2)) # return the objective function
# first derivative
Df = (x.T * P)/term - normarr # normfac is subtracted for each alpha, so normarr is an array
#print "Rank of Df"
#print linalg.matrix_rank(Df)
#print Df.size
#print "Rank of f"
#print linalg.matrix_rank(f)
if z is None:
return f, Df
term2 = matrix((P*x) * (P*x).T)
H = z[0] * (P/term - term2/pow(term,3)) # Second derivative of the objective function, is a symmetric matrix, so no need for spDiag ?
#print "Rank of hessian"
#print linalg.matrix_rank((P/term - term2/pow(term,3)))
#print "Size of hessian"
#print H.size
return f, Df, H
# for linear inequalities
G = matrix(0.0, (row*2 + 1, row +1)) # there are two linear constraints for Alpha, one for Beta
h = matrix(0.0, (row*2 +1, 1))
for i in range(row):
G[i,i] = -1.0 # -Alpha <= 0
G[row+i, i] = 1.0 # Alpha <= 1
h[row+i] = 1.0
G[row*2, row] = -1.0 # -Beta <= 0
#print "Rank of G"
#print linalg.matrix_rank(G)
#print "Rank of hessian"
#print linalg.matrix_rank(h)
# solve and return w
sol = solvers.cp(F, G, h)
#print sol
alpha = sol['x'][0:row]
beta = sol['x'][row]
row_prev = len(prevw)
templist = []
for i in range(row):
templist.append([alpha[i] * Y[i], X[i]])
# Add Beta * Tau_k to the previous support vectors and store in current support vectors
for i in range(row_prev):
templist.append([prevw[i][0] * beta * normfac, prevw[i][1]])
self.support = templist
#print
#print sol['x']
class StochasticDualCoordinateAscent(KernelLearner, ActiveBatchLearner):
def __init__(self, d, C, kernel):
KernelLearner.__init__(self, d, kernel)
self.C = C
def train(self, X, Y):
row, col = X.shape
alpha = zeros((row,1))
w = sum( Y[i]*X[i]*alpha[i] for i in range(row))
iter_local = 200
for k in range(iter_local):
i = random.randint(0, row-1)
G = Y[i] * sum( alpha[j] * Y[j] * self.kernel(X[j], X[i]) for j in range(row)) -1
if alpha[i] == 0:
PG = min(0, G)
elif alpha[i] == self.C:
PG = max(0, G)
else:
PG = G
kernel_temp = self.kernel(X[i], X[i])
if PG != 0:
alpha_temp = alpha[i]
alpha[i] = min(max(alpha[i] - G/kernel_temp, 0), self.C)
w = w + (alpha[i] - alpha_temp) * Y[i] * X[i]
for i in range(row):
self.support.append([Y[i]*alpha[i], X[i]])
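# --------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module). The dual coordinate ascent loop above repeatedly picks a random
# coordinate i, computes the gradient G of the dual objective in that
# coordinate, projects it onto the box 0 <= alpha_i <= C, and updates both
# alpha_i and the primal vector w. The toy call below assumes X is an
# m x d numpy array and Y a length-m array of +/-1 labels.
def _demo_sdca():
    import numpy as np
    X = np.array([[1.0, 1.0], [2.0, 1.5], [-1.0, -1.0], [-2.0, -1.5]])
    Y = np.array([1.0, 1.0, -1.0, -1.0])
    learner = StochasticDualCoordinateAscent(2, 1.0, lambda a, b: float(np.dot(a, b)))
    learner.train(X, Y)
    return learner.support            # list of (alpha_i * y_i, x_i) pairs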
| {
"content_hash": "ca88369f5e221d3e4bbd903c4774754d",
"timestamp": "",
"source": "github",
"line_count": 1060,
"max_line_length": 172,
"avg_line_length": 33.34905660377358,
"alnum_prop": 0.557001414427157,
"repo_name": "Kaushikpatnaik/Active-Learning-and-Best-Response-Dynamics",
"id": "8b9dd3ef0e5d2be5927701b3e23ec343cc853e32",
"size": "35350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "passive_learners.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "192878"
}
],
"symlink_target": ""
} |
"""Module for the application configuration"""
import os
class Config(object):
"""Common configurations"""
SQLALCHEMY_DATABASE_URI = os.getenv('db_url')
SQLALCHEMY_TRACK_MODIFICATIONS = False
CSRF_ENABLED = True
SECRET_KEY = os.getenv('secret')
SWAGGER_UI_DOC_EXPANSION = 'list'
class DevelopmentConfig(Config):
"""Development configurations"""
DEBUG = True
SQLALCHEMY_ECHO = True
class ProductionConfig(Config):
"""Production configurations"""
DEBUG = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
class TestingConfig(Config):
"""Testing configurations"""
TESTING = True
DEBUG = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = os.getenv('test_db') or 'sqlite:///:memory:'
app_config = {
'development': DevelopmentConfig,
'production': ProductionConfig,
'testing': TestingConfig
}
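# A minimal sketch (added for illustration, not part of the original module)
# of how app_config is typically consumed by a Flask application factory.
# The create_app-style function name and the config_name lookup are
# illustrative assumptions, not code from this repository.
def _example_create_app(config_name='development'):
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(app_config[config_name])
    return app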
| {
"content_hash": "fd89e8915564037c9af23a4d1621f82d",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 73,
"avg_line_length": 22.871794871794872,
"alnum_prop": 0.6894618834080718,
"repo_name": "machariamarigi/shopping_list_api",
"id": "aca86c34a1b04b008e56a649c2bba72442410a61",
"size": "892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "81069"
}
],
"symlink_target": ""
} |
from base import Task
from common import phases
from common.tools import log_check_call
from bootstrap import Bootstrap
from common.tasks import apt
import volume
class Format(Task):
description = 'Formatting the volume'
phase = phases.volume_preparation
@classmethod
def run(cls, info):
from base.fs.partitions.unformatted import UnformattedPartition
for partition in info.volume.partition_map.partitions:
if not isinstance(partition, UnformattedPartition):
partition.format()
class TuneVolumeFS(Task):
description = 'Tuning the bootstrap volume filesystem'
phase = phases.volume_preparation
predecessors = [Format]
@classmethod
def run(cls, info):
from base.fs.partitions.unformatted import UnformattedPartition
import re
# Disable the time based filesystem check
for partition in info.volume.partition_map.partitions:
if not isinstance(partition, UnformattedPartition):
if re.match('^ext[2-4]$', partition.filesystem) is not None:
log_check_call(['/sbin/tune2fs', '-i', '0', partition.device_path])
class AddXFSProgs(Task):
description = 'Adding `xfsprogs\' to the image packages'
phase = phases.preparation
predecessors = [apt.AddDefaultSources]
@classmethod
def run(cls, info):
info.packages.add('xfsprogs')
class CreateMountDir(Task):
description = 'Creating mountpoint for the root partition'
phase = phases.volume_mounting
@classmethod
def run(cls, info):
import os
info.root = os.path.join(info.workspace, 'root')
os.makedirs(info.root)
class MountRoot(Task):
description = 'Mounting the root partition'
phase = phases.volume_mounting
predecessors = [CreateMountDir]
@classmethod
def run(cls, info):
info.volume.partition_map.root.mount(destination=info.root)
class CreateBootMountDir(Task):
description = 'Creating mountpoint for the boot partition'
phase = phases.volume_mounting
predecessors = [MountRoot]
@classmethod
def run(cls, info):
import os.path
os.makedirs(os.path.join(info.root, 'boot'))
class MountBoot(Task):
description = 'Mounting the boot partition'
phase = phases.volume_mounting
predecessors = [CreateBootMountDir]
@classmethod
def run(cls, info):
p_map = info.volume.partition_map
p_map.root.add_mount(p_map.boot, 'boot')
class MountSpecials(Task):
description = 'Mounting special block devices'
phase = phases.os_installation
predecessors = [Bootstrap]
@classmethod
def run(cls, info):
root = info.volume.partition_map.root
root.add_mount('/dev', 'dev', ['--bind'])
root.add_mount('none', 'proc', ['--types', 'proc'])
root.add_mount('none', 'sys', ['--types', 'sysfs'])
root.add_mount('none', 'dev/pts', ['--types', 'devpts'])
class UnmountRoot(Task):
description = 'Unmounting the bootstrap volume'
phase = phases.volume_unmounting
successors = [volume.Detach]
@classmethod
def run(cls, info):
info.volume.partition_map.root.unmount()
class DeleteMountDir(Task):
description = 'Deleting mountpoint for the bootstrap volume'
phase = phases.volume_unmounting
predecessors = [UnmountRoot]
@classmethod
def run(cls, info):
import os
os.rmdir(info.root)
del info.root
class FStab(Task):
description = 'Adding partitions to the fstab'
phase = phases.system_modification
@classmethod
def run(cls, info):
import os.path
p_map = info.volume.partition_map
mount_points = [{'path': '/',
'partition': p_map.root,
'dump': '1',
'pass_num': '1',
}]
if hasattr(p_map, 'boot'):
mount_points.append({'path': '/boot',
'partition': p_map.boot,
'dump': '1',
'pass_num': '2',
})
if hasattr(p_map, 'swap'):
mount_points.append({'path': 'none',
'partition': p_map.swap,
'dump': '1',
'pass_num': '0',
})
fstab_lines = []
for mount_point in mount_points:
partition = mount_point['partition']
mount_opts = ['defaults']
fstab_lines.append('UUID={uuid} {mountpoint} {filesystem} {mount_opts} {dump} {pass_num}'
.format(uuid=partition.get_uuid(),
mountpoint=mount_point['path'],
filesystem=partition.filesystem,
mount_opts=','.join(mount_opts),
dump=mount_point['dump'],
pass_num=mount_point['pass_num']))
fstab_path = os.path.join(info.root, 'etc/fstab')
with open(fstab_path, 'w') as fstab:
fstab.write('\n'.join(fstab_lines))
fstab.write('\n')
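# Illustrative only (added, not part of the original task): with the format
# string above, a root ext4 partition and a separate ext2 boot partition
# would produce fstab lines of the form (UUIDs made up):
#
#   UUID=00000000-0000-0000-0000-000000000001 / ext4 defaults 1 1
#   UUID=00000000-0000-0000-0000-000000000002 /boot ext2 defaults 1 2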
| {
"content_hash": "1f55d6621d6aa194961a69a1695d0d72",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 92,
"avg_line_length": 28.023952095808383,
"alnum_prop": 0.6493589743589744,
"repo_name": "brianspeir/Vanilla",
"id": "4f79af5ce12ab2d2a7850a6c7103001eeb0a6f26",
"size": "4680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor/bootstrap-vz/common/tasks/filesystem.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os
import glob
import subprocess as sp
import unittest
import numpy as np
import numpy.testing as npt
# ========================================================================
#
# Function definitions
#
# ========================================================================
def runcode(workdir, deck, codedir, background=True):
"""Run the DG code (in background by default) given the input deck
If background is False, then wait for the process to finish and
give me a return code.
"""
cwd = os.getcwd()
os.chdir(workdir)
# Launch the code
log = open('logfile', "w")
proc = sp.Popen(codedir + '/main.py -d ' + deck,
shell=True, stdout=log, stderr=sp.PIPE)
retcode = 0
# If you don't want to send the process to the background
if (not background):
retcode = proc.wait()
log.flush()
os.chdir(cwd)
return retcode
# ========================================================================
def compare_with_golds(workdir):
"""Compare new solution to the gold results"""
# Test density
dat = np.loadtxt(os.path.join(workdir, 'rho0000000001.dat'), delimiter=',')
gold = np.loadtxt(os.path.join(
workdir, 'rho0000000001.gold'), delimiter=',')
npt.assert_array_almost_equal(dat, gold, decimal=10,
err_msg='Failed on density comparison',
verbose=True)
# Test momentum
dat = np.loadtxt(os.path.join(
workdir, 'rhou0000000001.dat'), delimiter=',')
gold = np.loadtxt(os.path.join(
workdir, 'rhou0000000001.gold'), delimiter=',')
npt.assert_array_almost_equal(dat, gold, decimal=10,
err_msg='Failed on momentum comparison')
# Test energy
dat = np.loadtxt(os.path.join(workdir, 'E0000000001.dat'), delimiter=',')
gold = np.loadtxt(os.path.join(workdir, 'E0000000001.gold'), delimiter=',')
npt.assert_array_almost_equal(dat, gold, decimal=10,
err_msg='Failed on energy comparison')
# ========================================================================
#
# Class definitions
#
# ========================================================================
class RegressionsTestCase(unittest.TestCase):
"""Regression tests for `main.py.`"""
# ========================================================================
# Set up
def setUp(self):
# Problem setup
self.regdir = os.path.dirname(os.path.realpath(__file__))
self.codedir = os.path.join(os.path.dirname(self.regdir), 'dg1d')
# ========================================================================
# Execute a test
def launch(self, workdir):
"""Execute the sequence of command to run a test"""
# Run code
[os.remove(f) for f in glob.glob(os.path.join(workdir, '*.dat'))]
runcode(workdir, 'deck.inp', self.codedir, False)
# Test with gold
compare_with_golds(workdir)
# If test passed, make a plot
self.runplot(workdir, False)
# ========================================================================
# Makes the plots
def runplot(self, workdir, background=True):
"""Run the plot tool on a problem directory
If background is False, then wait for the process to finish and
give me a return code.
"""
# Launch the plot
proc = sp.Popen(self.regdir + '/plot.py -f 1 -d ' + workdir,
shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
retcode = 0
# If you don't want to send the process to the background
if (not background):
retcode = proc.wait()
return retcode
# ========================================================================
# Sod shock tube
def test_sodtube(self):
"""Is the Sod shock tube problem correct?"""
workdir = self.regdir + '/sodtube'
self.launch(workdir)
# ========================================================================
# modified Sod shock tube
def test_modified_sodtube(self):
"""Is the modified Sod shock tube problem correct?"""
workdir = self.regdir + '/sodtube_modified'
self.launch(workdir)
# ========================================================================
# 123 problem
def test_123_problem(self):
"""Is the 123 problem correct?"""
workdir = self.regdir + '/123_problem'
self.launch(workdir)
# ========================================================================
# Left Woodward and Colella (blast wave)
def test_blast_wave(self):
"""Is the blast_wave problem correct?"""
workdir = self.regdir + '/blast_wave'
self.launch(workdir)
# ========================================================================
# collision of two strong shocks
def test_strong_shocks(self):
"""Is the collision of two strong shocks problem correct?"""
workdir = self.regdir + '/strong_shocks'
self.launch(workdir)
# ========================================================================
# stationary contact discontinuity
def test_stationary_contact(self):
"""Is the stationary contact discontinuity problem correct?"""
workdir = self.regdir + '/stationary_contact'
self.launch(workdir)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "4a96c21730949cc772233862c719f3b9",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 34,
"alnum_prop": 0.48602033405954975,
"repo_name": "marchdf/dg1d",
"id": "736de43707c89491b331488f939d41e08a136140",
"size": "5672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "regressions/regressions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "204"
},
{
"name": "Python",
"bytes": "241579"
}
],
"symlink_target": ""
} |
from write import write, writebr
import sys
IN_BROWSER = sys.platform in ['mozilla', 'ie6', 'opera', 'oldmoz', 'safari']
IN_JS = sys.platform in ['mozilla', 'ie6', 'opera', 'oldmoz',
'safari', 'spidermonkey', 'pyv8']
if IN_BROWSER:
from pyjamas.Timer import Timer
class UnitTest:
def __init__(self):
self.tests_completed=0
self.tests_failed=0
self.tests_passed=0
self.test_methods=[]
self.test_idx = None
# Synonyms for assertion methods
self.assertEqual = self.assertEquals = self.failUnlessEqual
self.assertNotEqual = self.assertNotEquals = self.failIfEqual
self.assertAlmostEqual = self.assertAlmostEquals = self.failUnlessAlmostEqual
self.assertNotAlmostEqual = self.assertNotAlmostEquals = self.failIfAlmostEqual
self.assertRaises = self.failUnlessRaises
self.assert_ = self.assertTrue = self.failUnless
self.assertFalse = self.failIf
def _run_test(self, test_method_name):
self.getTestMethods()
test_method=getattr(self, test_method_name)
self.current_test_name = test_method_name
self.setUp()
try:
try:
test_method()
except Exception,e:
self.fail("uncaught exception:" + str(e))
except:
self.fail("uncaught javascript exception")
self.tearDown()
self.current_test_name = None
def run(self):
self.getTestMethods()
if not IN_BROWSER:
for test_method_name in self.test_methods:
self._run_test(test_method_name)
self.displayStats()
if hasattr(self, "start_next_test"):
self.start_next_test()
return
self.test_idx = 0
Timer(10, self)
def onTimer(self, timer):
for i in range(1):
if self.test_idx >= len(self.test_methods):
self.displayStats()
self.test_idx = 'DONE'
self.start_next_test()
return
self._run_test(self.test_methods[self.test_idx])
self.test_idx += 1
timer.schedule(10)
def setUp(self):
pass
def tearDown(self):
pass
def getName(self):
return self.__class__.__name__
def getNameFmt(self, msg=""):
if self.getName():
if msg:
msg=" " + str(msg)
if self.current_test_name:
msg += " (%s) " % self.getCurrentTestID()
return self.getName() + msg + ": "
return ""
def getCurrentTestID(self):
return "%s/%i" % (self.current_test_name,self.tests_completed)
def getTestMethods(self):
self.test_methods=[]
for m in dir(self):
if self.isTestMethod(m):
self.test_methods.append(m)
def isTestMethod(self, method):
if callable(getattr(self, method)):
if method.find("test") == 0:
return True
return False
def fail(self, msg=None):
self.startTest()
self.tests_failed+=1
if not msg:
msg="assertion failed"
else:
msg = str(msg)
octothorp = msg.find("#")
has_bugreport = octothorp >= 0 and msg[octothorp+1].isdigit()
if has_bugreport:
name_fmt = "Known issue"
bg_colour="#ffc000"
fg_colour="#000000"
else:
bg_colour="#ff8080"
fg_colour="#000000"
name_fmt = "Test failed"
output="<table style='padding-left:20px;padding-right:20px;' cellpadding=2 width=100%><tr><td bgcolor='" + bg_colour + "'><font color='" + fg_colour + "'>"
write(output)
title="<b>" + self.getNameFmt(name_fmt) + "</b>"
write(title + msg)
output="</font></td></tr></table>"
output+= "\n"
write(output)
if sys.platform in ['mozilla', 'ie6', 'opera', 'oldmoz', 'safari']:
from __pyjamas__ import JS
JS("""if (typeof @{{!console}} != 'undefined') {
if (typeof @{{!console}}['error'] == 'function') @{{!console}}['error'](@{{msg}});
if (typeof @{{!console}}['trace'] == 'function') @{{!console}}['trace']();
}""")
return False
def startTest(self):
self.tests_completed+=1
def failIf(self, expr, msg=None):
self.startTest()
if expr:
return self.fail(msg)
def failUnless(self, expr, msg=None):
self.startTest()
if not expr:
return self.fail(msg)
def failUnlessRaises(self, excClass, callableObj, *args, **kwargs):
try:
callableObj(*args, **kwargs)
except excClass:
return
else:
if hasattr(excClass,'__name__'): excName = excClass.__name__
else: excName = str(excClass)
#raise self.failureException, "%s not raised" % excName
self.fail("%s not raised" % excName)
def failUnlessEqual(self, first, second, msg=None):
self.startTest()
if not first == second:
if not msg:
msg=repr(first) + " != " + repr(second)
return self.fail(msg)
def failIfEqual(self, first, second, msg=None):
self.startTest()
if first == second:
if not msg:
msg=repr(first) + " == " + repr(second)
return self.fail(msg)
def failUnlessAlmostEqual(self, first, second, places=7, msg=None):
self.startTest()
if round(second-first, places) != 0:
if not msg:
msg=repr(first) + " != " + repr(second) + " within " + repr(places) + " places"
return self.fail(msg)
def failIfAlmostEqual(self, first, second, places=7, msg=None):
self.startTest()
if round(second-first, places) == 0:
if not msg:
msg=repr(first) + " == " + repr(second) + " within " + repr(places) + " places"
return self.fail(msg)
# based on the Python standard library
def assertRaises(self, excClass, callableObj, *args, **kwargs):
"""
Fail unless an exception of class excClass is thrown
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
"""
self.startTest()
try:
callableObj(*args, **kwargs)
except excClass, exc:
return
else:
if hasattr(excClass, '__name__'):
excName = excClass.__name__
else:
excName = str(excClass)
self.fail("%s not raised" % excName)
def displayStats(self):
if self.tests_failed:
bg_colour="#ff0000"
fg_colour="#ffffff"
else:
bg_colour="#00ff00"
fg_colour="#000000"
tests_passed=self.tests_completed - self.tests_failed
if sys.platform in ['mozilla', 'ie6', 'opera', 'oldmoz', 'safari']:
output="<table cellpadding=4 width=100%><tr><td bgcolor='" + bg_colour + "'><font face='arial' size=4 color='" + fg_colour + "'><b>"
else:
output = ""
output+=self.getNameFmt() + "Passed %d " % tests_passed + "/ %d" % self.tests_completed + " tests"
if self.tests_failed:
output+=" (%d failed)" % self.tests_failed
if sys.platform in ['mozilla', 'ie6', 'opera', 'oldmoz', 'safari']:
output+="</b></font></td></tr></table>"
else:
output+= "\n"
write(output)
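# Hedged usage sketch (added for illustration, not part of the original
# module): a minimal test case written against the UnitTest class above.
# Methods whose names start with "test" are discovered by getTestMethods(),
# run() executes them, and displayStats() prints the pass/fail summary.
# The class name below is an illustrative assumption.
class _ExampleArithmeticTest(UnitTest):
    def testAddition(self):
        self.assertEqual(1 + 1, 2)
    def testDivisionByZeroRaises(self):
        self.assertRaises(ZeroDivisionError, lambda: 1 / 0)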
| {
"content_hash": "8b054f126d65388bd917fcc77a0e0e9e",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 163,
"avg_line_length": 33.49145299145299,
"alnum_prop": 0.5379609544468547,
"repo_name": "lancezlin/pyjs",
"id": "1ec3a4ecad58c06622355192287c8e8c8d5669db",
"size": "7837",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pyjs/lib/test/UnitTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4640"
},
{
"name": "Groff",
"bytes": "6633"
},
{
"name": "HTML",
"bytes": "10106"
},
{
"name": "JavaScript",
"bytes": "63385"
},
{
"name": "Makefile",
"bytes": "453"
},
{
"name": "Python",
"bytes": "5515374"
},
{
"name": "Shell",
"bytes": "4264"
}
],
"symlink_target": ""
} |
import core.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0018_openrole'),
('events', '0024_auto_20200319_1924'),
]
operations = [
migrations.CreateModel(
name='JobListingsEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('conference', models.SlugField(choices=[('pycontw-2016', 'PyCon Taiwan 2016'), ('pycontw-2017', 'PyCon Taiwan 2017'), ('pycontw-2018', 'PyCon Taiwan 2018'), ('pycontw-2019', 'PyCon Taiwan 2019'), ('pycontw-2020', 'PyCon Taiwan 2020')], default='pycontw-2020', verbose_name='conference')),
('location', models.CharField(blank=True, choices=[('2-all', 'All rooms'), ('3-r012', 'R0, R1, R2'), ('4-r0', 'R0'), ('5-r1', 'R1'), ('6-r2', 'R2'), ('1-r3', 'R3'), ('7-r4', 'R4')], db_index=True, max_length=6, null=True, verbose_name='location')),
('begin_time', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='begined_joblistingsevent_set', to='events.Time', verbose_name='begin time')),
('end_time', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ended_joblistingsevent_set', to='events.Time', verbose_name='end time')),
('sponsor', core.models.BigForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sponsors.Sponsor', verbose_name='sponsor')),
],
options={
'verbose_name': 'Job Listings',
'verbose_name_plural': 'Job Listings',
},
),
]
| {
"content_hash": "64af3bdfe12135de70c2914fa46ef8e5",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 305,
"avg_line_length": 61.241379310344826,
"alnum_prop": 0.6103603603603603,
"repo_name": "pycontw/pycontw2016",
"id": "3299df8c235666fe97b45cc77b9be92fe8a1b8ae",
"size": "1825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/events/migrations/0025_joblistingsevent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "236762"
},
{
"name": "HTML",
"bytes": "605550"
},
{
"name": "JavaScript",
"bytes": "24923"
},
{
"name": "Python",
"bytes": "479686"
},
{
"name": "Shell",
"bytes": "389"
}
],
"symlink_target": ""
} |
import os
import time
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError:
from sqlite3 import dbapi2 as sqlite
Error = sqlite.Error
class DB(object):
def __init__(self, path, text=None, timeout=0):
encpath = path
if isinstance(encpath, unicode):
encpath = path.encode("utf-8")
self._db = sqlite.connect(encpath, timeout=timeout)
if text:
self._db.text_factory = text
self._path = path
self.echo = os.environ.get("DBECHO")
self.mod = False
def execute(self, sql, *a, **ka):
s = sql.strip().lower()
# mark modified?
for stmt in "insert", "update", "delete":
if s.startswith(stmt):
self.mod = True
t = time.time()
if ka:
# execute("...where id = :id", id=5)
res = self._db.execute(sql, ka)
else:
# execute("...where id = ?", 5)
res = self._db.execute(sql, a)
if self.echo:
#print a, ka
print sql, "%0.3fms" % ((time.time() - t)*1000)
if self.echo == "2":
print a, ka
return res
def executemany(self, sql, l):
self.mod = True
t = time.time()
self._db.executemany(sql, l)
if self.echo:
print sql, "%0.3fms" % ((time.time() - t)*1000)
if self.echo == "2":
print l
def commit(self):
t = time.time()
self._db.commit()
if self.echo:
print "commit %0.3fms" % ((time.time() - t)*1000)
def executescript(self, sql):
self.mod = True
if self.echo:
print sql
self._db.executescript(sql)
def rollback(self):
self._db.rollback()
def scalar(self, *a, **kw):
res = self.execute(*a, **kw).fetchone()
if res:
return res[0]
return None
def all(self, *a, **kw):
return self.execute(*a, **kw).fetchall()
def first(self, *a, **kw):
c = self.execute(*a, **kw)
res = c.fetchone()
c.close()
return res
def list(self, *a, **kw):
return [x[0] for x in self.execute(*a, **kw)]
def close(self):
self._db.close()
def set_progress_handler(self, *args):
self._db.set_progress_handler(*args)
def __enter__(self):
self._db.execute("begin")
return self
def __exit__(self, exc_type, *args):
self._db.close()
def totalChanges(self):
return self._db.total_changes
def interrupt(self):
self._db.interrupt()
| {
"content_hash": "8e8edfb5614b036ffd0148eadb72c1dd",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 61,
"avg_line_length": 25.794117647058822,
"alnum_prop": 0.5043709616115546,
"repo_name": "bdunnette/omeka2anki",
"id": "c66813c24e9ce2b537025662c7c5f481c059952d",
"size": "2778",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "anki/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "744646"
}
],
"symlink_target": ""
} |
import sys
import os
import json, requests
import unittest
class Rest_API_tests(unittest.TestCase):
def setUp(self):
r = requests.get('http://169.254.169.254/latest/meta-data/public-ipv4')
self.ip_url = "http://"+r.text #'http://54.173.105.55'
def test_query1(self):
"""
Test Query 1:
Sends back a JSON payload with metadata from all the time series.
"""
url = self.ip_url+'/timeseries'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code<400
def test_query3(self):
"""
Test Query 3:
Connects to both the metadata Postgres table and the similarity database.
Sends back metadata and the timeseries itself in a JSON payload.
"""
url = self.ip_url+'/timeseries/12'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code<400
url = self.ip_url+'/timeseries/-12'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code==404
url = self.ip_url+'/timeseries/54353'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code==400
url = self.ip_url+'/timeseries/54.3'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code==404
def test_query4(self):
"""
Test Query 4:
Supports timeseries?mean_in=1.5,1.53 type queries and sends back only metadata.
For continuous variables only range queries are supported with
string mean_in=1.5,1.53 whereas for discrete variables(level here)
queries such as level_in=A,B,C or level=A are supported.
Only one query at a time is supported.
"""
url = self.ip_url+'/timeseries?mean_in=30,50'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code<400
url = self.ip_url+'/timeseries?std_in=3,5'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code<400
url = self.ip_url+'/timeseries?level_in=B,C'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code<400
url = self.ip_url+'/timeseries?level_in=D'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code<400
url = self.ip_url+'/timeseries?blarg_in=3,5'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code<400
url = self.ip_url+'/timeseries?id_in=1,5'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code<400
url = self.ip_url+'/timeseries?mean_in=-100,-45'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code<400
url = self.ip_url+'/timeseries?mean_in=-100.4,-45.2'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code<400
url = self.ip_url+'/timeseries?mean_in=-100.4--45.2'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code==400
def test_query5(self):
"""
Test Query 5:
Takes in an id querystring and uses it as an id into the
similarity database to find the timeseries that are similar.
Sends back the ids of the top N closest, where N is passed in as an
argument (default is 5).
"""
url = self.ip_url+'/simquery?id=5&?topn=8'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code<400
url = self.ip_url+'/simquery?id=20&topn=200'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code<400
print(r.text)
assert "null" in r.text
url = self.ip_url+'/simquery?id=5'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code<400
url = self.ip_url+'/simquery?id=-10'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code==400
url = self.ip_url+'/simquery?id=2000'
r = requests.get(url)
print("Status",r.status_code,url)
assert r.status_code==400
if __name__=='__main__':
try: # pragma: no cover
unittest.main() # pragma: no cover
except SystemExit as inst: # pragma: no cover
if inst.args[0] is True: # pragma: no cover
raise # pragma: no cover
| {
"content_hash": "a636a0314fea4a86f3f328b0ffa0ce20",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 83,
"avg_line_length": 36.56349206349206,
"alnum_prop": 0.58389407423486,
"repo_name": "slac207/cs207project",
"id": "e5b745736bdc4c73a06c41aedc706eed6373fbb9",
"size": "4620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "APIServer/test_get_requests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1416"
},
{
"name": "HTML",
"bytes": "4380"
},
{
"name": "JavaScript",
"bytes": "135139"
},
{
"name": "Jupyter Notebook",
"bytes": "59899"
},
{
"name": "Python",
"bytes": "274063"
},
{
"name": "Shell",
"bytes": "10378"
}
],
"symlink_target": ""
} |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import os
import sys
import re
from html.parser import HTMLParser
import urllib.parse as urlparse
reHyperlink = re.compile(r'<a(\s+.*?)>', re.I)
reAtt = re.compile(r"""(?:\s+([a-z]+)\s*=\s*("[^"]*"|'[^']?'|[^'"\s]+))+""", re.I)
# Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
reValidChar = re.compile("^[^\u0000-\u0008\u000B-\u000C\u000E-\u001F\uFFFE\uFFFF]*$")
# silly emacs: '
class FindHyperlinks(HTMLParser):
def __init__(self, baseURL):
HTMLParser.__init__(self)
self.stack = []
self.anchors = set()
self.links = []
self.baseURL = baseURL
self.printed = False
def handle_starttag(self, tag, attrs):
# NOTE: I don't think 'a' should be in here. But try debugging
# NumericRangeQuery.html. (Could be javadocs bug, it's a generic type...)
if tag not in ('link', 'meta', 'frame', 'br', 'wbr', 'hr', 'p', 'li', 'img', 'col', 'a', 'dt', 'dd'):
self.stack.append(tag)
if tag == 'a':
id = None
name = None
href = None
for attName, attValue in attrs:
if attName == 'name':
name = attValue
elif attName == 'href':
href = attValue
elif attName == 'id':
id = attValue
if name is not None:
assert href is None
if name in self.anchors:
if name in ('serializedForm',
'serialized_methods',
'readObject(java.io.ObjectInputStream)',
'writeObject(java.io.ObjectOutputStream)') \
and self.baseURL.endswith('/serialized-form.html'):
# Seems like a bug in Javadoc generation... you can't have
# same anchor name more than once...
pass
else:
self.printFile()
raise RuntimeError('anchor "%s" appears more than once' % name)
else:
self.anchors.add(name)
elif href is not None:
assert name is None
href = href.strip()
self.links.append(urlparse.urljoin(self.baseURL, href))
elif id is None:
raise RuntimeError('couldn\'t find an href nor name in link in %s: only got these attrs: %s' % (self.baseURL, attrs))
def handle_endtag(self, tag):
if tag in ('link', 'meta', 'frame', 'br', 'hr', 'p', 'li', 'img', 'col', 'a', 'dt', 'dd'):
return
if len(self.stack) == 0:
raise RuntimeError('%s %s:%s: saw </%s> no opening <%s>' % (self.baseURL, self.getpos()[0], self.getpos()[1], tag, self.stack[-1]))
if self.stack[-1] == tag:
self.stack.pop()
else:
raise RuntimeError('%s %s:%s: saw </%s> but expected </%s>' % (self.baseURL, self.getpos()[0], self.getpos()[1], tag, self.stack[-1]))
def printFile(self):
if not self.printed:
print()
print(' ' + self.baseURL)
self.printed = True
def parse(baseURL, html):
global failures
# look for broken unicode
if not reValidChar.match(html):
print(' WARNING: invalid characters detected in: %s' % baseURL)
failures = True
return [], []
parser = FindHyperlinks(baseURL)
try:
parser.feed(html)
parser.close()
except:
# TODO: Python's html.parser is now always lenient, which is no good for us: we want correct HTML in our javadocs
parser.printFile()
print(' WARNING: failed to parse %s:' % baseURL)
traceback.print_exc(file=sys.stdout)
failures = True
return [], []
#print ' %d links, %d anchors' % \
# (len(parser.links), len(parser.anchors))
return parser.links, parser.anchors
failures = False
def checkAll(dirName):
"""
Checks *.html (recursively) under this directory.
"""
global failures
# Find/parse all HTML files first
print()
print('Crawl/parse...')
allFiles = {}
if os.path.isfile(dirName):
root, fileName = os.path.split(dirName)
iter = ((root, [], [fileName]),)
else:
iter = os.walk(dirName)
for root, dirs, files in iter:
for f in files:
main, ext = os.path.splitext(f)
ext = ext.lower()
# maybe?:
# and main not in ('serialized-form'):
if ext in ('.htm', '.html') and \
not f.startswith('.#') and \
main not in ('deprecated-list',):
# Somehow even w/ java 7 generated javadocs,
# deprecated-list.html can fail to escape generics types
fullPath = os.path.join(root, f).replace(os.path.sep,'/')
fullPath = 'file:%s' % urlparse.quote(fullPath)
# parse and unparse the URL to "normalize" it
fullPath = urlparse.urlunparse(urlparse.urlparse(fullPath))
#print ' %s' % fullPath
allFiles[fullPath] = parse(fullPath, open('%s/%s' % (root, f), encoding='UTF-8').read())
# ... then verify:
print()
print('Verify...')
for fullPath, (links, anchors) in allFiles.items():
#print fullPath
printed = False
for link in links:
origLink = link
# TODO: use urlparse?
idx = link.find('#')
if idx != -1:
anchor = link[idx+1:]
link = link[:idx]
else:
anchor = None
# remove any whitespace from the middle of the link
link = ''.join(link.split())
idx = link.find('?')
if idx != -1:
link = link[:idx]
# TODO: normalize path sep for windows...
if link.startswith('http://') or link.startswith('https://'):
# don't check external links
if link.find('lucene.apache.org/java/docs/mailinglists.html') != -1:
# OK
pass
elif link == 'http://lucene.apache.org/core/':
# OK
pass
elif re.match("^https?://lucene.apache.org/core/[^/]+/index.html", link):
# OK
pass
elif link == 'http://lucene.apache.org/solr/':
# OK
pass
elif link == 'http://lucene.apache.org/solr/resources.html':
# OK
pass
elif link.find('lucene.apache.org/java/docs/discussion.html') != -1:
# OK
pass
elif link.find('lucene.apache.org/core/discussion.html') != -1:
# OK
pass
elif link.find('lucene.apache.org/solr/mirrors-solr-latest-redir.html') != -1:
# OK
pass
elif link.find('lucene.apache.org/solr/guide/') != -1:
# OK
pass
elif link.find('lucene.apache.org/solr/downloads.html') != -1:
# OK
pass
elif (link.find('svn.apache.org') != -1
or link.find('lucene.apache.org') != -1)\
and os.path.basename(fullPath) != 'Changes.html':
if not printed:
printed = True
print()
print(fullPath)
print(' BAD EXTERNAL LINK: %s' % link)
elif link.startswith('mailto:'):
if link.find('@lucene.apache.org') == -1 and link.find('@apache.org') != -1:
if not printed:
printed = True
print()
print(fullPath)
print(' BROKEN MAILTO (?): %s' % link)
elif link.startswith('javascript:'):
# ok...?
pass
elif 'org/apache/solr/client/solrj/beans/Field.html' in link:
# see LUCENE-4011: this is a javadocs bug for constants
# on annotations it seems?
pass
elif link.startswith('file:'):
if link not in allFiles:
filepath = urlparse.unquote(urlparse.urlparse(link).path)
if not (os.path.exists(filepath) or os.path.exists(filepath[1:])):
if not printed:
printed = True
print()
print(fullPath)
print(' BROKEN LINK: %s' % link)
elif anchor is not None and anchor not in allFiles[link][1]:
if not printed:
printed = True
print()
print(fullPath)
print(' BROKEN ANCHOR: %s' % origLink)
else:
if not printed:
printed = True
print()
print(fullPath)
print(' BROKEN URL SCHEME: %s' % origLink)
failures = failures or printed
return failures
if __name__ == '__main__':
if checkAll(sys.argv[1]):
print()
print('Broken javadocs links were found! Common root causes:')
# please feel free to add to this list
print('* A typo of some sort for manually created links.')
print('* Public methods referencing non-public classes in their signature.')
sys.exit(1)
sys.exit(0)
| {
"content_hash": "a076be9ecd5773afa03bd42d5c6db107",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 158,
"avg_line_length": 33.581227436823106,
"alnum_prop": 0.5765426789937648,
"repo_name": "apache/solr",
"id": "768a74195e416409cfae3667450141fba75c3358",
"size": "9302",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "dev-tools/scripts/checkJavadocLinks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "509"
},
{
"name": "Batchfile",
"bytes": "91853"
},
{
"name": "CSS",
"bytes": "234034"
},
{
"name": "Emacs Lisp",
"bytes": "73"
},
{
"name": "HTML",
"bytes": "326277"
},
{
"name": "Handlebars",
"bytes": "7549"
},
{
"name": "Java",
"bytes": "35849436"
},
{
"name": "JavaScript",
"bytes": "17639"
},
{
"name": "Python",
"bytes": "219385"
},
{
"name": "Shell",
"bytes": "279599"
},
{
"name": "XSLT",
"bytes": "35107"
}
],
"symlink_target": ""
} |
"""Functional test for moving_averages.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import moving_averages
from tensorflow.python.training import saver as saver_lib
class MovingAveragesTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testAssignMovingAverageWithoutZeroDebias(self):
var = variables.Variable([10.0, 11.0])
val = constant_op.constant([1.0, 2.0], dtypes.float32)
decay = 0.25
if context.executing_eagerly():
self.assertAllClose([10.0, 11.0], self.evaluate(var))
assign = moving_averages.assign_moving_average(
var, val, decay, zero_debias=False)
self.assertAllClose(
[10.0 * 0.25 + 1.0 * (1.0 - 0.25), 11.0 * 0.25 + 2.0 * (1.0 - 0.25)],
self.evaluate(var))
else:
assign = moving_averages.assign_moving_average(
var, val, decay, zero_debias=False)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([10.0, 11.0], self.evaluate(var))
assign.op.run()
self.assertAllClose(
[10.0 * 0.25 + 1.0 * (1.0 - 0.25), 11.0 * 0.25 + 2.0 * (1.0 - 0.25)],
self.evaluate(var))
@test_util.run_in_graph_and_eager_modes
def testAssignMovingAverage(self):
var = variables.Variable([0.0, 0.0])
val = constant_op.constant([1.0, 2.0], dtypes.float32)
decay = 0.25
if context.executing_eagerly():
self.assertAllClose([0.0, 0.0], self.evaluate(var))
assign = moving_averages.assign_moving_average(var, val, decay)
self.assertAllClose(
[1.0 * (1.0 - 0.25) / (1 - 0.25), 2.0 * (1.0 - 0.25) / (1 - 0.25)],
self.evaluate(var))
else:
assign = moving_averages.assign_moving_average(var, val, decay)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([0.0, 0.0], self.evaluate(var))
assign.op.run()
self.assertAllClose(
[1.0 * (1.0 - 0.25) / (1 - 0.25), 2.0 * (1.0 - 0.25) / (1 - 0.25)],
self.evaluate(var))
@test_util.deprecated_graph_mode_only
def testAssignMovingAverageNewNamingMultipleCalls(self):
with variable_scope.variable_scope("scope1") as vs1:
with variable_scope.variable_scope("scope2"):
var = variables.Variable(1.0, name="Var")
moving_averages.assign_moving_average(var, 0.0, 0.99)
moving_averages.assign_moving_average(var, 0.0, 0.99)
expected_names = ["scope1/scope2/Var:0",
"scope1/scope2/scope1/scope2/Var/biased:0",
"scope1/scope2/scope1/scope2/Var/local_step:0",
"scope1/scope2/scope1/scope2/Var/biased_1:0",
"scope1/scope2/scope1/scope2/Var/local_step_1:0"]
actual_names = [v.name for v in vs1.global_variables()]
self.assertSetEqual(set(expected_names), set(actual_names))
@test_util.deprecated_graph_mode_only
def testAssignMovingAverageNewNamingMultipleCallsWithReuse(self):
with variable_scope.variable_scope("scope1") as vs1:
var = variable_scope.get_variable("Var", shape=[])
moving_averages.assign_moving_average(var, 0.0, 0.99)
moving_averages.assign_moving_average(var, 0.0, 0.99)
with variable_scope.variable_scope(vs1, reuse=True):
var = variable_scope.get_variable("Var", shape=[])
moving_averages.assign_moving_average(var, 0.0, 0.99)
moving_averages.assign_moving_average(var, 0.0, 0.99)
@test_util.deprecated_graph_mode_only
def testWeightedMovingAverage(self):
with self.cached_session() as sess:
decay = 0.5
weight = array_ops.placeholder(dtypes.float32, [])
val = array_ops.placeholder(dtypes.float32, [])
wma = moving_averages.weighted_moving_average(val, decay, weight)
self.evaluate(variables.global_variables_initializer())
# Get the first weighted moving average.
val_1 = 3.0
weight_1 = 4.0
wma_array = sess.run(wma, feed_dict={val: val_1, weight: weight_1})
numerator_1 = val_1 * weight_1 * (1.0 - decay)
denominator_1 = weight_1 * (1.0 - decay)
self.assertAllClose(numerator_1 / denominator_1, wma_array)
# Get the second weighted moving average.
val_2 = 11.0
weight_2 = 22.0
wma_array = sess.run(wma, feed_dict={val: val_2, weight: weight_2})
numerator_2 = numerator_1 * decay + val_2 * weight_2 * (1.0 - decay)
denominator_2 = denominator_1 * decay + weight_2 * (1.0 - decay)
self.assertAllClose(numerator_2 / denominator_2, wma_array)
@test_util.deprecated_graph_mode_only
def testWeightedMovingAverageBfloat16(self):
with self.cached_session() as sess:
decay = 0.5
weight = array_ops.placeholder(dtypes.bfloat16, [])
val = array_ops.placeholder(dtypes.bfloat16, [])
wma = moving_averages.weighted_moving_average(val, decay, weight)
self.evaluate(variables.global_variables_initializer())
# Get the first weighted moving average.
val_1 = 3.0
weight_1 = 4.0
wma_array = sess.run(wma, feed_dict={val: val_1, weight: weight_1})
numerator_1 = val_1 * weight_1 * (1.0 - decay)
denominator_1 = weight_1 * (1.0 - decay)
self.assertAllClose(numerator_1 / denominator_1, wma_array)
# Get the second weighted moving average.
val_2 = 11.0
weight_2 = 22.0
wma_array = sess.run(wma, feed_dict={val: val_2, weight: weight_2})
numerator_2 = numerator_1 * decay + val_2 * weight_2 * (1.0 - decay)
denominator_2 = denominator_1 * decay + weight_2 * (1.0 - decay)
self.assertAllClose(
dtypes._np_bfloat16(numerator_2 / denominator_2), wma_array)
def _Repeat(value, dim):
if dim == 1:
return value
return [value] * dim
class ExponentialMovingAverageTest(test.TestCase):
def _CheckDecay(self, ema, actual_decay, dim):
def _Scale(dk, steps):
if ema._zero_debias:
return 1 - dk**steps
else:
return 1
tens = _Repeat(10.0, dim)
thirties = _Repeat(30.0, dim)
var0 = variables.Variable(tens, name="v0")
var1 = variables.Variable(thirties, name="v1")
self.evaluate(variables.global_variables_initializer())
# Note that tensor2 is not a Variable but just a plain Tensor resulting
# from the sum operation.
tensor2 = var0 + var1
update = ema.apply([var0, var1, tensor2])
avg0 = ema.average(var0)
avg1 = ema.average(var1)
avg2 = ema.average(tensor2)
self.assertItemsEqual([var0, var1], variables.moving_average_variables())
self.assertNotIn(avg0, variables.trainable_variables())
self.assertNotIn(avg1, variables.trainable_variables())
self.assertNotIn(avg2, variables.trainable_variables())
self.evaluate(variables.global_variables_initializer())
self.assertEqual("v0/ExponentialMovingAverage:0", avg0.name)
self.assertEqual("v1/ExponentialMovingAverage:0", avg1.name)
self.assertEqual("add/ExponentialMovingAverage:0", avg2.name)
# Check initial values.
self.assertAllClose(tens, self.evaluate(var0))
self.assertAllClose(thirties, self.evaluate(var1))
self.assertAllClose(_Repeat(10.0 + 30.0, dim), self.evaluate(tensor2))
# Check that averages are initialized correctly.
self.assertAllClose(tens, self.evaluate(avg0))
self.assertAllClose(thirties, self.evaluate(avg1))
# Note that averages of Tensor's initialize to zeros_like since no value
# of the Tensor is known because the Op has not been run (yet).
self.assertAllClose(_Repeat(0.0, dim), self.evaluate(avg2))
# Update the averages and check.
self.evaluate(update)
dk = actual_decay
expected = _Repeat(10.0 * dk + 10.0 * (1 - dk), dim)
self.assertAllClose(expected, self.evaluate(avg0))
expected = _Repeat(30.0 * dk + 30.0 * (1 - dk), dim)
self.assertAllClose(expected, self.evaluate(avg1))
expected = _Repeat(0.0 * dk + (10.0 + 30.0) * (1 - dk) / _Scale(dk, 1), dim)
self.assertAllClose(expected, self.evaluate(avg2))
# Again, update the averages and check.
self.evaluate(update)
expected = _Repeat((10.0 * dk + 10.0 * (1 - dk)) * dk + 10.0 * (1 - dk),
dim)
self.assertAllClose(expected, self.evaluate(avg0))
expected = _Repeat((30.0 * dk + 30.0 * (1 - dk)) * dk + 30.0 * (1 - dk),
dim)
self.assertAllClose(expected, self.evaluate(avg1))
expected = _Repeat(((0.0 * dk + (10.0 + 30.0) * (1 - dk)) * dk +
(10.0 + 30.0) * (1 - dk)) / _Scale(dk, 2), dim)
self.assertAllClose(expected, self.evaluate(avg2))
@test_util.deprecated_graph_mode_only
def testAverageVariablesNoNumUpdates_Scalar(self):
ema = moving_averages.ExponentialMovingAverage(0.25)
self._CheckDecay(ema, actual_decay=0.25, dim=1)
@test_util.deprecated_graph_mode_only
def testAverageVariablesNoNumUpdates_Scalar_Debias(self):
ema = moving_averages.ExponentialMovingAverage(0.25, zero_debias=True)
self._CheckDecay(ema, actual_decay=0.25, dim=1)
@test_util.deprecated_graph_mode_only
def testAverageVariablesNoNumUpdates_Vector(self):
ema = moving_averages.ExponentialMovingAverage(0.25)
self._CheckDecay(ema, actual_decay=0.25, dim=5)
@test_util.deprecated_graph_mode_only
def testAverageVariablesNoNumUpdates_Vector_Debias(self):
ema = moving_averages.ExponentialMovingAverage(0.25, zero_debias=True)
self._CheckDecay(ema, actual_decay=0.25, dim=5)
@test_util.deprecated_graph_mode_only
def testAverageVariablesNumUpdates_Scalar(self):
# With num_updates 1, the decay applied is 0.1818
ema = moving_averages.ExponentialMovingAverage(0.25, num_updates=1)
self._CheckDecay(ema, actual_decay=0.181818, dim=1)
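  # Worked arithmetic (added for illustration, not part of the original test):
  # when num_updates is supplied, ExponentialMovingAverage uses the decay
  # min(decay, (1 + num_updates) / (10 + num_updates)). With decay=0.25 and
  # num_updates=1 that is min(0.25, 2/11) = 2/11 ~= 0.181818, which is the
  # actual_decay checked in these num_updates tests.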
@test_util.deprecated_graph_mode_only
def testAverageVariablesNumUpdates_Scalar_Debias(self):
# With num_updates 1, the decay applied is 0.1818
ema = moving_averages.ExponentialMovingAverage(
0.25, num_updates=1, zero_debias=True)
self._CheckDecay(ema, actual_decay=0.181818, dim=1)
@test_util.deprecated_graph_mode_only
def testAverageVariablesNumUpdates_Vector(self):
# With num_updates 1, the decay applied is 0.1818
ema = moving_averages.ExponentialMovingAverage(0.25, num_updates=1)
self._CheckDecay(ema, actual_decay=0.181818, dim=5)
@test_util.deprecated_graph_mode_only
def testAverageVariablesNumUpdates_Vector_Debias(self):
# With num_updates 1, the decay applied is 0.1818
ema = moving_averages.ExponentialMovingAverage(
0.25, num_updates=1, zero_debias=True)
self._CheckDecay(ema, actual_decay=0.181818, dim=5)
@test_util.deprecated_graph_mode_only
def testAverageVariablesWithControlDeps(self):
v0 = variables.Variable(0, name="v0")
add_to_v0 = v0.assign_add(1)
v1 = variables.Variable([10.0], name="v1")
assign_to_v1 = v1.assign([20.0])
ema = moving_averages.ExponentialMovingAverage(0.25)
with ops.control_dependencies([add_to_v0]):
ema_op = ema.apply([v1])
# the moving average of v1 should not have any control inputs
v1_avg = ema.average(v1)
self.assertEqual([], v1_avg.initializer.control_inputs)
self.assertEqual([], v1_avg.value().op.control_inputs)
self.assertEqual([], v1_avg.value().op.control_inputs)
# We should be able to initialize v1_avg before v0.
self.evaluate(v1_avg.initializer)
self.evaluate(v0.initializer)
self.assertEqual([10.0], self.evaluate(v1_avg))
# running ema_op should add to v0 (in addition to updating v1_avg)
self.evaluate(assign_to_v1)
self.evaluate(ema_op)
self.assertEqual(1, self.evaluate(v0))
self.assertEqual([17.5], self.evaluate(v1_avg))
def testBasicEager(self):
v0 = variables.Variable(1.0)
v1 = variables.Variable(2.0)
ema = moving_averages.ExponentialMovingAverage(0.25)
op = ema.apply([v0, v1])
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.evaluate(op)
self.evaluate(v0.assign(2.0))
self.evaluate(v1.assign(4.0))
self.evaluate(ema.apply([v0, v1]))
self.assertAllEqual(self.evaluate(ema.average(v0)), 1.75)
self.assertAllEqual(self.evaluate(ema.average(v1)), 3.5)
def averageVariablesNamesHelper(self, zero_debias):
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(30.0, name="v1")
# Add a non-trainable variable.
v2 = variables.Variable(20.0, name="v2", trainable=False)
tensor2 = v0 + v1
ema = moving_averages.ExponentialMovingAverage(
0.25, zero_debias=zero_debias, name="foo")
self.assertEqual("foo", ema.name)
self.assertEqual("v0/foo", ema.average_name(v0))
self.assertEqual("v1/foo", ema.average_name(v1))
self.assertEqual("add/foo", ema.average_name(tensor2))
ema.apply([v0, v1, tensor2])
vars_to_restore = ema.variables_to_restore()
# vars_to_restore should contain the following:
# {v0/foo : v0,
# v1/foo : v1,
# add/foo : add/foo,
# v2 : v2}
expected_names = [
ema.average_name(v0),
ema.average_name(v1),
ema.average_name(tensor2), v2.op.name
]
if zero_debias:
# vars_to_restore should also contain the following:
# {add/foo/biased: add/foo/biased,
# add/foo/local_step: add/foo/local_step}
expected_names += [
ema.average_name(tensor2) + "/biased",
ema.average_name(tensor2) + "/local_step"
]
self.assertEqual(sorted(expected_names), sorted(vars_to_restore.keys()))
self.assertEqual(ema.average(v0).op.name, ema.average_name(v0))
self.assertEqual(ema.average(v1).op.name, ema.average_name(v1))
self.assertEqual(ema.average(tensor2).op.name, ema.average_name(tensor2))
@test_util.deprecated_graph_mode_only
def testAverageVariablesNames(self):
self.averageVariablesNamesHelper(zero_debias=True)
@test_util.deprecated_graph_mode_only
def testAverageVariablesNamesNoDebias(self):
self.averageVariablesNamesHelper(zero_debias=False)
@test_util.deprecated_graph_mode_only
def averageVariablesNamesRespectScopeHelper(self, zero_debias):
# See discussion on #2740.
with variable_scope.variable_scope("scope1"):
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(30.0, name="v1")
# Add a non-trainable variable.
v2 = variables.Variable(20.0, name="v2", trainable=False)
tensor2 = v0 + v1
with variable_scope.variable_scope("scope2"):
ema = moving_averages.ExponentialMovingAverage(
0.25, zero_debias=zero_debias, name="foo")
self.assertEqual("scope2/scope1/v0/foo", ema.average_name(v0))
self.assertEqual("scope2/scope1/v1/foo", ema.average_name(v1))
self.assertEqual("scope2/scope1/add/foo", ema.average_name(tensor2))
ema.apply([v0, v1, tensor2])
vars_to_restore = ema.variables_to_restore()
# `vars_to_restore` should contain the following:
# {scope2/scope1/v0/foo : v0,
# scope2/scope1/v1/foo : v1,
# scope2/scope1/add/foo : add/foo,
# scope1/v2 : v2}
expected_names = [
ema.average_name(v0),
ema.average_name(v1),
ema.average_name(tensor2), v2.op.name
]
if zero_debias:
# `vars_to_restore` should also contain the following:
# {scope2/scope2/scope1/add/foo/biased: add/foo/biased,
# scope2/scope2/scope1/add/foo/local_step: add/foo/local_step}
sc = "scope2/"
expected_names += [
sc + ema.average_name(tensor2) + "/biased",
sc + ema.average_name(tensor2) + "/local_step"
]
self.assertEqual(sorted(expected_names), sorted(vars_to_restore.keys()))
self.assertEqual(ema.average(v0).op.name, ema.average_name(v0))
self.assertEqual(ema.average(v1).op.name, ema.average_name(v1))
self.assertEqual(ema.average(tensor2).op.name, ema.average_name(tensor2))
@test_util.deprecated_graph_mode_only
def testAverageVariablesNamesRespectScope(self):
self.averageVariablesNamesRespectScopeHelper(zero_debias=True)
@test_util.deprecated_graph_mode_only
def testAverageVariablesNamesRespectScopeNoDebias(self):
self.averageVariablesNamesRespectScopeHelper(zero_debias=False)
@test_util.deprecated_graph_mode_only
def testSubsetAverageVariablesNames(self):
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(30.0, name="v1")
# Add a non-trainable variable.
v2 = variables.Variable(20.0, name="v2", trainable=False)
tensor2 = v0 + v1
ema = moving_averages.ExponentialMovingAverage(0.25, name="foo_avg")
self.assertEqual("v0/foo_avg", ema.average_name(v0))
self.assertEqual("v1/foo_avg", ema.average_name(v1))
self.assertEqual("add/foo_avg", ema.average_name(tensor2))
vars_to_restore = ema.variables_to_restore([v0, tensor2])
# vars_to_restore should contain the following:
# {v0/foo_avg : v0,
# add/foo_avg : add
# v1 : v1,
# v2 : v2}
self.assertEqual(
sorted(vars_to_restore.keys()),
sorted([
ema.average_name(v0),
ema.average_name(tensor2), v1.op.name, v2.op.name
]))
ema.apply([v0, v1, tensor2])
self.assertEqual(ema.average(v0).op.name, ema.average_name(v0))
self.assertEqual(ema.average(v1).op.name, ema.average_name(v1))
self.assertEqual(ema.average(tensor2).op.name, ema.average_name(tensor2))
@test_util.deprecated_graph_mode_only
def testAverageVariablesDeviceAssignment(self):
with ops.device("/job:dev_v0"):
v0 = variables.Variable(10.0, name="v0")
with ops.device("/job:dev_v1"):
v1 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="v1",
container="",
shared_name="")
v1.set_shape([1])
tensor2 = v0 + v1
ema = moving_averages.ExponentialMovingAverage(0.25, name="foo_avg")
with ops.device("/job:default"):
ema.apply([v0, v1, tensor2])
self.assertDeviceEqual("/job:dev_v0", ema.average(v0).device)
self.assertDeviceEqual("/job:dev_v1", ema.average(v1).device)
# However, the colocation property is maintained.
self.assertEqual([b"loc:@v1"], ema.average(v1).op.colocation_groups())
self.assertDeviceEqual("/job:default", ema.average(tensor2).device)
def _ExportAndImportGraph(self, graph):
"""Export and import graph into a new graph."""
meta_graph = saver_lib.export_meta_graph(
graph=graph, collection_list=graph.get_all_collection_keys())
graph_copy = ops.Graph()
with graph_copy.as_default():
_ = saver_lib.import_meta_graph(meta_graph)
return graph_copy
@test_util.deprecated_graph_mode_only
def testImportedGraphVariablesToRestore(self):
g = ops.Graph()
with g.as_default():
variables.Variable(10.0, name="v")
# Export and import the graph into a new graph.
g_copy = self._ExportAndImportGraph(g)
with g_copy.as_default():
ema = moving_averages.ExponentialMovingAverage(0.25, name="foo_avg")
vars_to_restore = ema.variables_to_restore()
# There should only be one variable in vars_to_restore. This is important
# to check because when importing from a GraphDef, TF makes duplicate
# python Variable objects referring to the same underlying variable. We
# need to be sure that two variables referring to the same variable don't
# both get added to vars_to_restore.
self.assertEqual(len(vars_to_restore), 1)
self.assertIn("v/foo_avg", vars_to_restore)
@test_util.deprecated_graph_mode_only
def testCopyXlaSharding(self):
ema = moving_averages.ExponentialMovingAverage(0.25, name="foo_avg")
v = variables.Variable(_Repeat(10.0, 2), name="v")
self.assertIsNone(xla_sharding.get_tensor_sharding(v))
v = xla_sharding.mesh_split(v, np.array([0, 1]), [0], use_sharding_op=False)
self.assertIsNotNone(xla_sharding.get_tensor_sharding(v))
self.evaluate(variables.global_variables_initializer())
ema.apply([v])
avg = ema.average(v)
self.assertEqual(
xla_sharding.get_tensor_sharding(v),
xla_sharding.get_tensor_sharding(avg))
if __name__ == "__main__":
test.main()
| {
"content_hash": "4ecc9525b34261e52f3be0ec5ae0bdbc",
"timestamp": "",
"source": "github",
"line_count": 499,
"max_line_length": 80,
"avg_line_length": 41.72745490981964,
"alnum_prop": 0.6709249831908558,
"repo_name": "sarvex/tensorflow",
"id": "6e277dd9c9dcfe9b908caf96d1b1a7dbee8e012e",
"size": "21511",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "tensorflow/python/training/moving_averages_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148184"
},
{
"name": "C++",
"bytes": "6224499"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "650478"
},
{
"name": "Java",
"bytes": "53519"
},
{
"name": "JavaScript",
"bytes": "6659"
},
{
"name": "Jupyter Notebook",
"bytes": "777935"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "61743"
},
{
"name": "Python",
"bytes": "3474762"
},
{
"name": "Shell",
"bytes": "45640"
},
{
"name": "TypeScript",
"bytes": "283668"
}
],
"symlink_target": ""
} |
"""Example of decorators."""
from flask import Flask
from flask_apscheduler import APScheduler
class Config:
"""App configuration."""
SCHEDULER_API_ENABLED = True
scheduler = APScheduler()
# interval examples
@scheduler.task("interval", id="do_job_1", seconds=30, misfire_grace_time=900)
def job1():
"""Sample job 1."""
print("Job 1 executed")
# cron examples
@scheduler.task("cron", id="do_job_2", minute="*")
def job2():
"""Sample job 2."""
print("Job 2 executed")
@scheduler.task("cron", id="do_job_3", week="*", day_of_week="sun")
def job3():
"""Sample job 3."""
print("Job 3 executed")
if __name__ == "__main__":
app = Flask(__name__)
app.config.from_object(Config())
# it is also possible to enable the API directly
# scheduler.api_enabled = True # noqa: E800
scheduler.init_app(app)
scheduler.start()
app.run()
| {
"content_hash": "95caee740f3b88d5b48d189500ebd4dc",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 19.88888888888889,
"alnum_prop": 0.6256983240223464,
"repo_name": "viniciuschiele/flask-apscheduler",
"id": "5fb003bde5266f75648704661fbe8d98ca61db52",
"size": "895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/decorated.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "47195"
}
],
"symlink_target": ""
} |
import datetime
import json
class Range:
def __init__(self, time):
self.startTime = time
self.endTime = time
self.count = 0
def inc(self):
self.count += 1
def label(self):
return self.startTime.strftime("%H:%M") + " - " + self.endTime.strftime("%H:%M")
class External:
KEY_10KM = '10548 m'
KEY_21KM = '21097 m'
KEY_42KM = '42195 m'
def __init__(self):
self.ranges = []
self.results = []
def request(self, key, gender):
with open('data/2014.json') as data_file:
data = json.load(data_file)
if gender == 'ALL':
self.results += data['data'][key]['M']
self.results += data['data'][key]['F']
elif gender == 'M' or gender == 'F':
self.results = data['data'][key][gender]
def calculate(self, step, number, hour, minute):
step_number = number * step
base = datetime.datetime(1900, 1, 1, hour, minute)
for i in range(0, step_number, step):
r = Range(base + datetime.timedelta(minutes=i))
self.ranges.append(r)
for man in self.results:
t = datetime.datetime.strptime(man[9], "%H:%M:%S.%f")
for n in self.ranges:
n.endTime = n.startTime + datetime.timedelta(minutes=step)
if n.startTime < t < n.endTime:
n.inc()
break
def print(self):
for r in self.ranges:
print(r.label())
print(r.count)
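# Example (not part of the original module): a minimal usage sketch. It assumes the
# data/2014.json file that request() reads is present on disk.
if __name__ == '__main__':
    ext = External()
    ext.request(External.KEY_42KM, 'ALL')   # all marathon finishers
    ext.calculate(10, 30, 2, 30)            # 30 buckets of 10 minutes, starting at 02:30
    for r in ext.ranges:
        print(r.label(), r.count)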
| {
"content_hash": "8de5c3ba744a4523d8f172a1fe20b337",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 88,
"avg_line_length": 26.913793103448278,
"alnum_prop": 0.5118513773222293,
"repo_name": "Leprechaunz/chisinau-marathon-2014",
"id": "14fe328a5dcd9e0c7da9b06daa805632b9e61a47",
"size": "1561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "188"
},
{
"name": "HTML",
"bytes": "1934"
},
{
"name": "JavaScript",
"bytes": "567"
},
{
"name": "Python",
"bytes": "2454"
}
],
"symlink_target": ""
} |
from lxml import etree
from caldav.lib.namespace import nsmap, nsmap2
from caldav.lib.python_utilities import isPython3, to_unicode
class BaseElement(object):
children = None
tag = None
value = None
attributes = None
def __init__(self, name=None, value=None):
self.children = []
self.attributes = {}
value = to_unicode(value)
self.value = None
if name is not None:
self.attributes['name'] = name
if value is not None:
self.value = value
def __add__(self, other):
return self.append(other)
def __str__(self):
utf8 = etree.tostring(self.xmlelement(), encoding="utf-8",
xml_declaration=True, pretty_print=True)
if isPython3():
return str(utf8, 'utf-8')
return utf8
def xmlelement(self):
root = etree.Element(self.tag, nsmap=nsmap2)
if self.value is not None:
root.text = self.value
if len(self.attributes) > 0:
for k in list(self.attributes.keys()):
root.set(k, self.attributes[k])
self.xmlchildren(root)
return root
def xmlchildren(self, root):
for c in self.children:
root.append(c.xmlelement())
def append(self, element):
try:
iter(element)
self.children.extend(element)
except TypeError:
self.children.append(element)
return self
class NamedBaseElement(BaseElement):
def __init__(self, name=None):
super(NamedBaseElement, self).__init__(name=name)
def xmlelement(self):
if self.attributes.get('name') is None:
raise Exception("name attribute must be defined")
return super(NamedBaseElement, self).xmlelement()
class ValuedBaseElement(BaseElement):
def __init__(self, value=None):
super(ValuedBaseElement, self).__init__(value=value)
| {
"content_hash": "c211b10c235c446bb7782007d69227e1",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 70,
"avg_line_length": 29.08955223880597,
"alnum_prop": 0.5915854284248332,
"repo_name": "glibin/caldav",
"id": "fa301b1f3f3cf2f5347bd77d9a6d4d672194fceb",
"size": "1998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caldav/elements/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "99408"
}
],
"symlink_target": ""
} |
import IECore
import Gaffer
import GafferUI
QtCore = GafferUI._qtImport( "QtCore" )
QtGui = GafferUI._qtImport( "QtGui" )
class BoolWidget( GafferUI.Widget ) :
DisplayMode = IECore.Enum.create( "CheckBox", "Switch" )
def __init__( self, text="", checked=False, displayMode=DisplayMode.CheckBox, **kw ) :
GafferUI.Widget.__init__( self, QtGui.QCheckBox( text ), **kw )
self.setState( checked )
self.__stateChangedSignal = GafferUI.WidgetSignal()
self._qtWidget().stateChanged.connect( Gaffer.WeakMethod( self.__stateChanged ) )
if displayMode == self.DisplayMode.Switch :
self._qtWidget().setObjectName( "gafferBoolWidgetSwitch" )
def setText( self, text ) :
self._qtWidget().setText( text )
def getText( self ) :
return str( self._qtWidget().text() )
def setState( self, checked ) :
self._qtWidget().setCheckState( QtCore.Qt.Checked if checked else QtCore.Qt.Unchecked )
def getState( self ) :
return self._qtWidget().checkState() == QtCore.Qt.Checked
def stateChangedSignal( self ) :
return self.__stateChangedSignal
def __stateChanged( self, state ) :
self.__stateChangedSignal( self )
## \todo Backwards compatibility - remove for version 1.0
CheckBox = BoolWidget
| {
"content_hash": "5df48c5af043b1db3526cff012096292",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 89,
"avg_line_length": 24,
"alnum_prop": 0.7050653594771242,
"repo_name": "goddardl/gaffer",
"id": "f7a54daaeca0653dbecd4843986273f5c931c9a5",
"size": "3032",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/GafferUI/BoolWidget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2228"
},
{
"name": "C++",
"bytes": "4178625"
},
{
"name": "GLSL",
"bytes": "6250"
},
{
"name": "Python",
"bytes": "4152621"
},
{
"name": "Shell",
"bytes": "8787"
},
{
"name": "Slash",
"bytes": "36371"
}
],
"symlink_target": ""
} |
from unittest import main, TestCase
from simhash import Simhash, SimhashIndexWithMongo
class TestSimhash(TestCase):
def test_value(self):
self.assertEqual(Simhash(['aaa', 'bbb']).value, 8637903533912358349)
def test_distance(self):
sh = Simhash('How are you? I AM fine. Thanks. And you?')
sh2 = Simhash('How old are you ? :-) i am fine. Thanks. And you?')
self.assertTrue(sh.distance(sh2) > 0)
sh3 = Simhash(sh2)
self.assertEqual(sh2.distance(sh3), 0)
self.assertNotEqual(Simhash('1').distance(Simhash('2')), 0)
def test_chinese(self):
self.maxDiff = None
sh1 = Simhash(u'你好 世界! 呼噜。')
sh2 = Simhash(u'你好,世界 呼噜')
sh4 = Simhash(u'How are you? I Am fine. ablar ablar xyz blar blar blar blar blar blar blar Thanks.')
sh5 = Simhash(u'How are you i am fine.ablar ablar xyz blar blar blar blar blar blar blar than')
sh6 = Simhash(u'How are you i am fine.ablar ablar xyz blar blar blar blar blar blar blar thank')
self.assertEqual(sh1.distance(sh2), 0)
self.assertTrue(sh4.distance(sh6) < 3)
self.assertTrue(sh5.distance(sh6) < 3)
def test_short(self):
shs = [Simhash(s).value for s in ('aa', 'aaa', 'aaaa', 'aaaab', 'aaaaabb', 'aaaaabbb')]
for i, sh1 in enumerate(shs):
for j, sh2 in enumerate(shs):
if i != j:
self.assertNotEqual(sh1, sh2)
class TestSimhashIndexWithMongo(TestCase):
data = {
1: 'How are you? I Am fine. blar blar blar blar blar Thanks.',
2: 'How are you i am fine. blar blar blar blar blar than',
3: 'This is simhash test.',
4: 'How are you i am fine. blar blar blar blar blar thank1',
}
def setUp(self):
objs = [(str(k), v) for k, v in self.data.items()]
self.index = SimhashIndexWithMongo(objs, k=10)
def test_get_near_dup(self):
s1 = Simhash(u'How are you i am fine.ablar ablar xyz blar blar blar blar blar blar blar thank')
dups = self.index.get_near_dups(s1)
self.assertEqual(len(dups), 3)
self.index.delete('1', Simhash(self.data[1]))
dups = self.index.get_near_dups(s1)
self.assertEqual(len(dups), 2)
self.index.delete('1', Simhash(self.data[1]))
dups = self.index.get_near_dups(s1)
self.assertEqual(len(dups), 2)
self.index.add('1', Simhash(self.data[1]))
dups = self.index.get_near_dups(s1)
self.assertEqual(len(dups), 3)
self.index.add('1', Simhash(self.data[1]))
dups = self.index.get_near_dups(s1)
self.assertEqual(len(dups), 3)
if __name__ == '__main__':
main()
| {
"content_hash": "e4fa379bd605c6459caad0ab9356b749",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 108,
"avg_line_length": 34.25316455696203,
"alnum_prop": 0.6016260162601627,
"repo_name": "likaiguo/simhashpy",
"id": "06f900025fdec92b4fd4288e7deca0a73c489866",
"size": "2768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_simhash_with_mongo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "814964"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django import forms
from django.forms import ModelForm
from django.utils.translation import ugettext_lazy as _
from .models import Goal
class GoalCloneForm(ModelForm):
class Meta:
model = Goal
exclude = []
copy_tasks = forms.BooleanField(label=_("Copy the Tasks"), initial=True, required=False)
clear_employee = forms.BooleanField(label=_("When copying unset the task's employee"), initial=True, required=False)
| {
"content_hash": "fae9f828a1eb387c4b51dd0b0b6869a0",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 120,
"avg_line_length": 32.6,
"alnum_prop": 0.7321063394683026,
"repo_name": "glomium/django-bmf",
"id": "939a5196c9c4d00754c94ba3fa0439e0025ae84c",
"size": "537",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "djangobmf/contrib/task/forms.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11420"
},
{
"name": "CoffeeScript",
"bytes": "3197"
},
{
"name": "HTML",
"bytes": "117091"
},
{
"name": "JavaScript",
"bytes": "80435"
},
{
"name": "Python",
"bytes": "774167"
},
{
"name": "Shell",
"bytes": "736"
}
],
"symlink_target": ""
} |
"""
Django settings for janus project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DJANGO_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_NAME = os.path.basename(DJANGO_DIR)
PROJECT_ROOT = os.path.normpath(os.path.join(DJANGO_DIR, "../../.."))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h)fdma&==g1_0u)ret&r&9#6+joalh)7@c3qwlo_k84k+fxdu)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1', ]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'webpack_loader',
'rest_framework',
'drugtest',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'janus.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
'templates',
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'janus.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation' +
'.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation' +
'.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation' +
'.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation' +
'.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Manila'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
('dist', os.path.join(PROJECT_ROOT, 'src', 'assets', 'dist')), # dist
)
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': not DEBUG,
'BUNDLE_DIR_NAME': 'bundles/', # must end with slash
'STATS_FILE': os.path.join(PROJECT_ROOT, 'webpack-stats.json'),
'POLL_INTERVAL': 0.1,
'IGNORE': [r'.+\.hot-update.js', r'.+\.map']
}
}
# if DEBUG:
# print "# %s " % WEBPACK_LOADER
if not DEBUG:
WEBPACK_LOADER['DEFAULT'].update({
'BUNDLE_DIR_NAME': 'dist/',
'STATS_FILE': os.path.join(PROJECT_ROOT, 'webpack-stats-prod.json')
})
# Django REST Framework Defaults and Settings:
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES':
('rest_framework.permissions.IsAuthenticated',),
'PAGE_SIZE': 10,
}
| {
"content_hash": "b868362a3c1fd885a5d4da32f357e98d",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 78,
"avg_line_length": 26.53939393939394,
"alnum_prop": 0.6512902489152774,
"repo_name": "HCCB/janus",
"id": "c633e0fdbec3d3a4a6b296d1834b43bff7fd6228",
"size": "4379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/janus/janus/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "178"
},
{
"name": "HTML",
"bytes": "395"
},
{
"name": "JavaScript",
"bytes": "7070"
},
{
"name": "Python",
"bytes": "59509"
}
],
"symlink_target": ""
} |
import mock
from jsl import fields
from jsl._compat import iteritems, with_metaclass
from jsl.document import Document, DocumentMeta, Options
class OptionsStub(Options):
"""An options container that allows storing extra options."""
def __init__(self, a=None, b=None, c=None, d=None, **kwargs):
super(OptionsStub, self).__init__(**kwargs)
self.a = a
self.b = b
self.c = c
self.d = d
def test_collect_fields_and_options():
with mock.patch.object(DocumentMeta, 'options_container', wraps=OptionsStub):
class ParentOne(Document):
a = fields.StringField()
b = fields.IntField()
c = fields.NumberField()
class Options(object):
a = 1
b = 1
c = 1
class ParentTwo(Document):
b = fields.DictField()
class Options:
b = 2
d = 2
bases = (ParentTwo, ParentOne)
attrs = {
'c': fields.BooleanField(),
'd': fields.BooleanField(),
}
fields_dict = DocumentMeta.collect_fields(bases, attrs)
assert fields_dict == {
'a': ParentOne.a,
'b': ParentTwo.b,
'c': attrs['c'],
'd': attrs['d'],
}
options_dict = DocumentMeta.collect_options(bases, attrs)
for expected_key, expected_value in iteritems({
'a': 1,
'b': 2,
'c': 1,
'd': 2
}):
assert options_dict[expected_key] == expected_value
def test_overriding_options_container():
class ParameterOptions(Options):
def __init__(self, repeated=None, location=None, annotations=None, **kwargs):
super(ParameterOptions, self).__init__(**kwargs)
self.repeated = repeated
self.location = location
self.annotations = annotations
class ParameterMeta(DocumentMeta):
options_container = ParameterOptions
class Parameter(with_metaclass(ParameterMeta, Document)):
class Options(object):
repeated = True
location = 'query'
title = 'Parameter'
assert Parameter._options.repeated
assert Parameter._options.location == 'query'
assert Parameter._options.title == 'Parameter'
| {
"content_hash": "cbef5be858fc8caacee731595eecfe56",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 85,
"avg_line_length": 28.375,
"alnum_prop": 0.5753303964757709,
"repo_name": "aromanovich/jsl",
"id": "881d0b1699ff0891c36c8b7df8fb43c177966b70",
"size": "2286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_documentmeta.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "156220"
},
{
"name": "Shell",
"bytes": "155"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from numpy import asarray
from scipy.linalg import svd
__all__ = ["pca_numpy"]
def pca_numpy(data):
"""Compute the principle components of a set of data points.
Parameters
----------
data : list
A list of `m` observations, measuring `n` variables.
For example, if the data are points in 2D space, the data parameter
should contain `m` nested lists of `2` variables, the `x` and `y`
coordinates.
Returns
-------
tuple
* The ``mean of the data points``.
* The principal directions.
The number of principal directions is equal to the dimensionality of the data.
For example, if the data points are locations in 3D space, three principal components will be returned.
If the data points are locations in 2D space, only two principal components will be returned.
* The *spread* of the data along the principal directions.
Notes
-----
PCA of a dataset finds the directions along which the variance of the data
is largest, i.e. the directions along which the data is most spread out.
Examples
--------
>>>
"""
X = asarray(data)
n, dim = X.shape
assert (
n >= dim
), "The number of observations (n) should be higher than the number of measured variables (dimensions)."
# the average of the observations for each of the variables
# for example, if the data are 2D point coordinates,
# the average is the average of the x-coordinate across all observations
# and the average of the y-coordinate across all observations
mean = (X.sum(axis=0) / n).reshape((-1, dim))
# the spread matrix
# i.e. the variation of each variable compared to the average of the variable
# across all observations
Y = X - mean
# covariance matrix of spread
# note: there is a covariance function in NumPy...
# the shape of the covariance matrix is dim x dim
# for example, if the data are 2D point coordinates, the shape of C is 2 x 2
# the diagonal of the covariance matrix contains the variance of each variable
# the off-diagonal elements of the covariance matrix contain the covariance
# of two independent variables
C = Y.T.dot(Y) / (n - 1)
assert C.shape[0] == dim, "The shape of the covariance matrix is not correct."
# SVD of covariance matrix
u, s, vT = svd(C, full_matrices=False)
# eigenvectors
# ------------
# note: the eigenvectors are normalized
# note: vT is exactly what it says it will be => the transposed eigenvectors
# => take the rows of vT, or the columns of v
# the right-singular vectors of C (the columns of V or the rows of Vt)
# are the eigenvectors of CtC
eigenvectors = vT
# eigenvalues
# -----------
# the nonzero singular values of C are the square roots
# of the nonzero eigenvalues of CtC and CCt
eigenvalues = s
# return
return mean[0], eigenvectors, eigenvalues
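# Example (not part of the original module): a small, illustrative self-check of
# pca_numpy on five noisy 2D points lying roughly along the x-axis.
if __name__ == "__main__":
    points = [[0.0, 0.0], [1.0, 0.1], [2.0, -0.1], [3.0, 0.2], [4.0, -0.2]]
    mean, directions, spread = pca_numpy(points)
    print(mean)        # centroid of the points
    print(directions)  # principal directions, one per row
    print(spread)      # variance of the data along each direction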
| {
"content_hash": "324521b864579f835cdd5048a97ead55",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 113,
"avg_line_length": 34.43333333333333,
"alnum_prop": 0.6592449177153921,
"repo_name": "compas-dev/compas",
"id": "efcf64173e0bcdb0f028411191e2c968a2300f6b",
"size": "3099",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/compas/numerical/pca/pca_numpy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3181804"
}
],
"symlink_target": ""
} |
import os
import sys
from rl_coach.base_parameters import TaskParameters, Frameworks
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import tensorflow as tf
from tensorflow import logging
import pytest
logging.set_verbosity(logging.INFO)
@pytest.mark.unit_test
def test_get_QActionStateValue_predictions():
tf.reset_default_graph()
from rl_coach.presets.CartPole_DQN import graph_manager as cartpole_dqn_graph_manager
assert cartpole_dqn_graph_manager
cartpole_dqn_graph_manager.create_graph(task_parameters=
TaskParameters(framework_type=Frameworks.tensorflow,
experiment_path="./experiments/test"))
cartpole_dqn_graph_manager.improve_steps.num_steps = 1
cartpole_dqn_graph_manager.steps_between_evaluation_periods.num_steps = 5
# graph_manager.improve()
#
# agent = graph_manager.level_managers[0].composite_agents['simple_rl_agent'].agents['simple_rl_agent/agent']
# some_state = agent.memory.sample(1)[0].state
# cartpole_dqn_predictions = agent.get_predictions(states=some_state, prediction_type=QActionStateValue)
# assert cartpole_dqn_predictions.shape == (1, 2)
if __name__ == '__main__':
test_get_QActionStateValue_predictions()
| {
"content_hash": "0c205ddc990edfd6c209cf07d48e3d2a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 113,
"avg_line_length": 40.27272727272727,
"alnum_prop": 0.6952595936794582,
"repo_name": "NervanaSystems/coach",
"id": "77f0a89e12de348b14bf66ff7857ebbe9c34ab10",
"size": "1329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rl_coach/tests/agents/test_agent_external_communication.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "791"
},
{
"name": "CSS",
"bytes": "6493"
},
{
"name": "Dockerfile",
"bytes": "1118"
},
{
"name": "HTML",
"bytes": "161"
},
{
"name": "Jupyter Notebook",
"bytes": "91174"
},
{
"name": "Makefile",
"bytes": "5036"
},
{
"name": "Python",
"bytes": "1926733"
},
{
"name": "Shell",
"bytes": "428"
}
],
"symlink_target": ""
} |
"""
Command line tool.
"""
import sys
from dox.config import init_environment, get_keyfields, get_cfg, write_keyfields, get_keymap, write_keymap, clean_hashes, is_modified, write_hash
from dox.client import ping_library, get_content_keys, get_content_item
from os import walk, getcwd
from dox.uploader import upload_document, find_key, extract_keyfield
import os.path
def init(args):
"""
Initialize the Dox environment.
"""
if not (args.library_key and args.project and args.content_type and args.body_field):
print 'You must specify the library key, project, content type and body field to initialize the environment.'
sys.exit(1)
print 'Dox -----------------------------------------'
print 'Initializing environment with:'
print 'Library Key',args.library_key
print 'Project',args.project
print 'Content Type',args.content_type
print 'Body Field',args.body_field
print 'Key Field',args.key_field
print '---------------------------------------------'
init_environment(args)
print 'Environment initialized.'
print 'Testing environment...'
ping_library()
print 'Connection settings are good.'
def up(args):
"""
Alias for upload.
"""
return upload(args)
def upload(args):
"""
Uploads documents.
"""
print 'Uploading documents...'
keymap = get_keymap()
keyfields = get_keyfields()
for root, dirs, files in walk(getcwd()):
if 'env' in dirs:
dirs.remove('env') # don't walk into environment
for name in files:
if name.endswith('.md'):
path = os.path.join(root,name)
if is_modified(path):
print 'Uploading',name
key = find_key(path,keymap,keyfields)
key, created = upload_document(path,key=key)
write_hash(path)
if created:
print 'Created new content item',key
keymap[path] = key
keyfield = extract_keyfield(path)
print 'assigning key',key,'to keyfields',keyfields,'under keyfield',keyfield
keyfields[keyfield] = key
else:
print name, 'not modified. Skipping.'
write_keyfields(keyfields)
write_keymap(keymap)
def keyfields(args):
"""
Synchronizes the local cache of keyfield / key data.
"""
print 'Synchronizing keyfield cache.'
cfg = get_cfg()
keyfield_name = cfg.get('Connection','key_field')
keyfield_data = {}
keys = get_content_keys()
for key in keys:
content_item = get_content_item(key)
keyfield_data[content_item['data'][keyfield_name]] = key
print 'Mapping',content_item['data'][keyfield_name],'to',key
write_keyfields(keyfield_data)
print 'Keyfield cache synchronized.'
def clean(args):
"""
Cleans out the local hash directory - essentially marking all
the local markdown files as modified.
"""
print 'Cleaning out local file records. All local files eligible for upload.'
clean_hashes()
| {
"content_hash": "5c9bfe6bc50feb146d97bb450fc6ab78",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 145,
"avg_line_length": 33.21875,
"alnum_prop": 0.5920351207275008,
"repo_name": "Axilent/Dox",
"id": "c235f5de5fc95c9e6a5fa37b8be0eb2c9ded2b48",
"size": "3189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dox/commands.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "15144"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/jacket/shared_jacket_s24.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","jacket_s24")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "2b1bc71af9820a6ccb15554b923cff9f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 75,
"avg_line_length": 23.923076923076923,
"alnum_prop": 0.7041800643086816,
"repo_name": "anhstudios/swganh",
"id": "26aebd3782d6e90f5ca135477c56490e2b12d4b4",
"size": "456",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/wearables/jacket/shared_jacket_s24.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
'''
Mouse module, porting from pywinauto project
code.google.com/p/pywinauto
'''
import ctypes
from ctypes import \
c_int, c_uint, c_long, c_ulong, c_void_p, c_wchar, c_char, \
c_ubyte, c_ushort, c_wchar_p, \
POINTER, sizeof, alignment, Union
SendInput = ctypes.windll.user32.SendInput
SetCursorPos = ctypes.windll.user32.SetCursorPos
GetSystemMetrics = ctypes.windll.user32.GetSystemMetrics
MOUSEEVENTF_LEFTUP = 4 # Variable c_int
MOUSEEVENTF_RIGHTUP = 16 # Variable c_int
MOUSEEVENTF_MIDDLEUP = 64 # Variable c_int
MOUSEEVENTF_LEFTDOWN = 2 # Variable c_int
MOUSEEVENTF_RIGHTDOWN = 8 # Variable c_int
MOUSEEVENTF_MIDDLEDOWN = 32 # Variable c_int
MOUSEEVENTF_MOVE = 1 # Variable c_int
MOUSEEVENTF_ABSOLUTE = 32768 # Variable c_int
MOUSEEVENTF_WHEEL = 2048 # Variable c_int
SM_SWAPBUTTON = 23 # Variable c_int
INPUT_MOUSE = 0
INPUT_KEYBOARD = 1
INPUT_HARDWARE = 2
BOOL = c_int
BYTE = c_ubyte
CHAR = c_char
DWORD = c_ulong
HANDLE = c_void_p
HBITMAP = c_long
LONG = c_long
LPARAM = LONG
LPVOID = c_void_p
PVOID = c_void_p
UINT = c_uint
WCHAR = c_wchar
WORD = c_ushort
WPARAM = UINT
COLORREF = DWORD
HBITMAP = LONG
HINSTANCE = LONG
HMENU = LONG
HBRUSH = LONG
HTREEITEM = LONG
HWND = LONG
LPARAM = LONG
LPBYTE = POINTER(BYTE)
LPWSTR = c_long# POINTER(WCHAR)
class Structure(ctypes.Structure):
"Override the Structure class from ctypes to add printing and comparison"
#----------------------------------------------------------------
def __str__(self):
"""Print out the fields of the ctypes Structure
fields in exceptList will not be printed"""
lines = []
for f in self._fields_:
name = f[0]
lines.append("%20s\t%s"% (name, getattr(self, name)))
return "\n".join(lines)
#----------------------------------------------------------------
def __eq__(self, other_struct):
"return true if the two structures have the same coordinates"
if isinstance(other_struct, ctypes.Structure):
try:
# pretend they are two structures - check that they both
# have the same value for all fields
are_equal = True
for field in self._fields_:
name = field[0]
if getattr(self, name) != getattr(other_struct, name):
are_equal = False
break
return are_equal
except AttributeError:
return False
if isinstance(other_struct, (list, tuple)):
# Now try to see if we have been passed in a list or tuple
try:
are_equal = True
for i, field in enumerate(self._fields_):
name = field[0]
if getattr(self, name) != other_struct[i]:
are_equal = False
break
return are_equal
except:
return False
return False
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4283
class MOUSEINPUT(Structure):
_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4283
('dx', LONG),
('dy', LONG),
('mouseData', DWORD),
('dwFlags', DWORD),
('time', DWORD),
('dwExtraInfo', DWORD),
]
assert sizeof(MOUSEINPUT) == 24, sizeof(MOUSEINPUT)
assert alignment(MOUSEINPUT) == 2, alignment(MOUSEINPUT)
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4292
class KEYBDINPUT(Structure):
_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4292
('wVk', WORD),
('wScan', WORD),
('dwFlags', DWORD),
('time', DWORD),
('dwExtraInfo', DWORD),
]
assert sizeof(KEYBDINPUT) == 16, sizeof(KEYBDINPUT)
assert alignment(KEYBDINPUT) == 2, alignment(KEYBDINPUT)
class HARDWAREINPUT(Structure):
_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4300
('uMsg', DWORD),
('wParamL', WORD),
('wParamH', WORD),
]
assert sizeof(HARDWAREINPUT) == 8, sizeof(HARDWAREINPUT)
assert alignment(HARDWAREINPUT) == 2, alignment(HARDWAREINPUT)
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4314
class UNION_INPUT_STRUCTS(Union):
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4314
('mi', MOUSEINPUT),
('ki', KEYBDINPUT),
('hi', HARDWAREINPUT),
]
assert sizeof(UNION_INPUT_STRUCTS) == 24, sizeof(UNION_INPUT_STRUCTS)
assert alignment(UNION_INPUT_STRUCTS) == 2, alignment(UNION_INPUT_STRUCTS)
class INPUT(Structure):
_pack_ = 2
_fields_ = [
# C:/PROGRA~1/MICROS~4/VC98/Include/winuser.h 4310
('type', DWORD),
# Unnamed field renamed to '_'
('_', UNION_INPUT_STRUCTS),
]
assert sizeof(INPUT) == 28, sizeof(INPUT)
assert alignment(INPUT) == 2, alignment(INPUT)
def SendMouseInput(
coords,
button = "left",
double = False,
button_down = True,
button_up = True,
wheel_dist = 0):
"""Peform a click action using SendInput
All the *ClickInput() and *MouseInput() methods use this function.
Thanks to a bug report from Tomas Walch (twalch) on sourceforge and code
seen at http://msdn.microsoft.com/en-us/magazine/cc164126.aspx this
function now always works the same way whether the mouse buttons are
swapped or not.
For example if you send a right click to Notepad.Edit - it will always
bring up a popup menu rather than 'clicking' it.
"""
# Handle if the mouse buttons are swapped
if GetSystemMetrics(SM_SWAPBUTTON):
if button.lower() == 'left':
button = 'right'
else:
button = 'left'
events = []
if button.lower() == 'left':
if button_down:
events.append(MOUSEEVENTF_LEFTDOWN)
if button_up:
events.append(MOUSEEVENTF_LEFTUP)
elif button.lower() == 'right':
if button_down:
events.append(MOUSEEVENTF_RIGHTDOWN)
if button_up:
events.append(MOUSEEVENTF_RIGHTUP)
elif button.lower() == 'middle':
if button_down:
events.append(MOUSEEVENTF_MIDDLEDOWN)
if button_up:
events.append(MOUSEEVENTF_MIDDLEUP)
if button.lower() == 'wheel':
events.append(MOUSEEVENTF_WHEEL)
# if we were asked to double click (and we are doing a full click
# not just up or down.
if double and button_down and button_up:
events *= 2
# set the cursor position
SetCursorPos(coords[0], coords[1])
inp_struct = INPUT()
inp_struct.type = INPUT_MOUSE
for event in events:
inp_struct._.mi.dwFlags = event
if button.lower() == 'wheel':
inp_struct._.mi.mouseData = wheel_dist
else:
inp_struct._.mi.mouseData = 0
SendInput(
1,
ctypes.pointer(inp_struct),
ctypes.sizeof(inp_struct))
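# Illustrative calls (not part of the original module) -- a double left click at
# absolute screen coordinates (100, 200), and one wheel notch scrolled down:
#   SendMouseInput((100, 200), button="left", double=True)
#   SendMouseInput((100, 200), button="wheel", wheel_dist=-120)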
from AXUI.logger import LOGGER
class Mouse(object):
'''class for win32 mouse operations
Attributes:
left_click: left click the UI element, or target coords
left_double_click: left double click the UI element, or target coords
right_click: right click the UI element, or target coords
move: move the mouse from source coords to dest coords (absolute coords)
drag_drop: drag the mouse from source coords and drop it at dest coords (absolute coords)
'''
def __init__(self, UIElement):
self.UIElement = UIElement
def __repr__(self):
docstring = '''
Attributes:
'''
docstring += self.left_click.__doc__+"\n"
docstring += self.left_double_click.__doc__+"\n"
docstring += self.right_click.__doc__+"\n"
docstring += self.move.__doc__+"\n"
docstring += self.drag_drop.__doc__+"\n"
return docstring
def left_click(self, relative_coords = None):
'''left_click: left click the UI element, or target coords
Arguments:
relative_coords: where to click, relative to the UI element; defaults to the element's clickable point
Returns:
'''
if relative_coords is None:
coords = self.UIElement.get_clickable_point()
else:
coords = [0, 0]
coords[0] = relative_coords[0]+self.UIElement.coordinate[0]
coords[1] = relative_coords[1]+self.UIElement.coordinate[1]
LOGGER.debug("Mouse left click at: %s" , repr(coords))
self.UIElement.set_focus()
SendMouseInput(coords)
def left_double_click(self, relative_coords = None):
'''left_double_click: left double click the UI element, or target coords
Arguments:
relative_coords: where to click, relative to the UI element; defaults to the element's clickable point
Returns:
'''
if relative_coords is None:
coords = self.UIElement.get_clickable_point()
else:
coords = [0, 0]
coords[0] = relative_coords[0]+self.UIElement.coordinate[0]
coords[1] = relative_coords[1]+self.UIElement.coordinate[1]
LOGGER.debug("Mouse left double click at: %s" , repr(coords))
self.UIElement.set_focus()
SendMouseInput(coords, double=True)
def right_click(self, relative_coords = None):
'''right_click: right click the UI element, or target coords
Arguments:
relative_coords: where to click, relative to the UI element; defaults to the element's clickable point
Returns:
'''
if relative_coords is None:
coords = self.UIElement.get_clickable_point()
else:
coords = [0, 0]
coords[0] = relative_coords[0]+self.UIElement.coordinate[0]
coords[1] = relative_coords[1]+self.UIElement.coordinate[1]
LOGGER.debug("Mouse right click at: %s" , repr(coords))
self.UIElement.set_focus()
SendMouseInput(coords, button="right")
def move(self, abs_source_coords, abs_dest_coords):
'''move: move the mouse from source_coords to dest_coords
mouse movement is not tied to a UI element,
so absolute coordinates are required
'''
import time
import random
x_range = abs(abs_source_coords[0] - abs_dest_coords[0])
y_range = abs(abs_source_coords[1] - abs_dest_coords[1])
x_sample_size = x_range // 10  # integer division so random.sample gets an int sample size
y_sample_size = y_range // 10
#choose the bigger one
sample_size = x_sample_size > y_sample_size and x_sample_size or y_sample_size
#build population
if abs_source_coords[0] < abs_dest_coords[0]:
x_population = list(range(abs_source_coords[0], abs_dest_coords[0]))
else:
x_population = list(range(abs_dest_coords[0], abs_source_coords[0]))
while len(x_population)<sample_size:
x_population = x_population*2
if abs_source_coords[1] < abs_dest_coords[1]:
y_population = list(range(abs_source_coords[1], abs_dest_coords[1]))
else:
y_population = list(range(abs_dest_coords[1], abs_source_coords[1]))
while len(y_population)<sample_size:
y_population = y_population*2
#get coords
if abs_source_coords[0] < abs_dest_coords[0]:
x_coords = sorted(random.sample(x_population, sample_size))
else:
x_coords = sorted(random.sample(x_population, sample_size), reverse=True)
if abs_source_coords[1] < abs_dest_coords[1]:
y_coords = sorted(random.sample(y_population, sample_size))
else:
y_coords = sorted(random.sample(y_population, sample_size), reverse=True)
#move mouse
LOGGER.debug("Mouse move from: %s to %s" , repr(abs_source_coords), repr(abs_dest_coords))
self.UIElement.set_focus()
for i in range(sample_size):
SendMouseInput([x_coords[i], y_coords[i]], button_down=False, button_up=False)
time.sleep(0.1)
def drag_drop(self, abs_source_coords, abs_dest_coords):
'''drag_drop: drag the mouse from source_coords and drop it at dest_coords
drag and drop is not tied to a UI element,
so absolute coordinates are required
'''
LOGGER.debug("Mouse drag drop from: %s to %s" , repr(abs_source_coords), repr(abs_dest_coords))
self.UIElement.set_focus()
SendMouseInput(abs_source_coords, button_down=True, button_up=False)
self.move(abs_source_coords, abs_dest_coords)
SendMouseInput(abs_dest_coords, button_down=False, button_up=True)
| {
"content_hash": "f9d5321e052173287e3eb72edf7c734a",
"timestamp": "",
"source": "github",
"line_count": 375,
"max_line_length": 103,
"avg_line_length": 33.413333333333334,
"alnum_prop": 0.5913008778930566,
"repo_name": "xcgspring/AXUI",
"id": "485d27004db146dd39fa9fa9e98365ca6404a4c3",
"size": "12531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AXUI/driver/windows_driver/win32/mouse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "995"
},
{
"name": "C++",
"bytes": "7959"
},
{
"name": "Python",
"bytes": "215829"
}
],
"symlink_target": ""
} |
"""
Exceptions and errors for use in twisted.internet modules.
"""
from __future__ import division, absolute_import
import socket
from twisted.python import deprecate
from twisted.python.versions import Version
class BindError(Exception):
"""An error occurred binding to an interface"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class CannotListenError(BindError):
"""
This gets raised by a call to startListening, when the object cannotstart
listening.
@ivar interface: the interface I tried to listen on
@ivar port: the port I tried to listen on
@ivar socketError: the exception I got when I tried to listen
@type socketError: L{socket.error}
"""
def __init__(self, interface, port, socketError):
BindError.__init__(self, interface, port, socketError)
self.interface = interface
self.port = port
self.socketError = socketError
def __str__(self):
iface = self.interface or 'any'
return "Couldn't listen on %s:%s: %s." % (iface, self.port,
self.socketError)
class MulticastJoinError(Exception):
"""
An attempt to join a multicast group failed.
"""
class MessageLengthError(Exception):
"""Message is too long to send"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class DNSLookupError(IOError):
"""DNS lookup failed"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class ConnectInProgressError(Exception):
"""A connect operation was started and isn't done yet."""
# connection errors
class ConnectError(Exception):
"""An error occurred while connecting"""
def __init__(self, osError=None, string=""):
self.osError = osError
Exception.__init__(self, string)
def __str__(self):
s = self.__doc__ or self.__class__.__name__
if self.osError:
s = '%s: %s' % (s, self.osError)
if self.args[0]:
s = '%s: %s' % (s, self.args[0])
s = '%s.' % s
return s
class ConnectBindError(ConnectError):
"""Couldn't bind"""
class UnknownHostError(ConnectError):
"""Hostname couldn't be looked up"""
class NoRouteError(ConnectError):
"""No route to host"""
class ConnectionRefusedError(ConnectError):
"""Connection was refused by other side"""
class TCPTimedOutError(ConnectError):
"""TCP connection timed out"""
class BadFileError(ConnectError):
"""File used for UNIX socket is no good"""
class ServiceNameUnknownError(ConnectError):
"""Service name given as port is unknown"""
class UserError(ConnectError):
"""User aborted connection"""
class TimeoutError(UserError):
"""User timeout caused connection failure"""
class SSLError(ConnectError):
"""An SSL error occurred"""
class VerifyError(Exception):
"""Could not verify something that was supposed to be signed.
"""
class PeerVerifyError(VerifyError):
"""The peer rejected our verify error.
"""
class CertificateError(Exception):
"""
We did not find a certificate where we expected to find one.
"""
try:
import errno
errnoMapping = {
errno.ENETUNREACH: NoRouteError,
errno.ECONNREFUSED: ConnectionRefusedError,
errno.ETIMEDOUT: TCPTimedOutError,
}
if hasattr(errno, "WSAECONNREFUSED"):
errnoMapping[errno.WSAECONNREFUSED] = ConnectionRefusedError
errnoMapping[errno.WSAENETUNREACH] = NoRouteError
except ImportError:
errnoMapping = {}
def getConnectError(e):
"""Given a socket exception, return connection error."""
if isinstance(e, Exception):
args = e.args
else:
args = e
try:
number, string = args
except ValueError:
return ConnectError(string=e)
if hasattr(socket, 'gaierror') and isinstance(e, socket.gaierror):
# Only works in 2.2 and newer. Really that means always; #5978 covers
# this and other weirdnesses in this function.
klass = UnknownHostError
else:
klass = errnoMapping.get(number, ConnectError)
return klass(number, string)
class ConnectionClosed(Exception):
"""
Connection was closed, whether cleanly or non-cleanly.
"""
class ConnectionLost(ConnectionClosed):
"""Connection to the other side was lost in a non-clean fashion"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class ConnectionAborted(ConnectionLost):
"""
Connection was aborted locally, using
L{twisted.internet.interfaces.ITCPTransport.abortConnection}.
@since: 11.1
"""
class ConnectionDone(ConnectionClosed):
"""Connection was closed cleanly"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class FileDescriptorOverrun(ConnectionLost):
"""
A mis-use of L{IUNIXTransport.sendFileDescriptor} caused the connection to
be closed.
Each file descriptor sent using C{sendFileDescriptor} must be associated
with at least one byte sent using L{ITransport.write}. If at any point
fewer bytes have been written than file descriptors have been sent, the
connection is closed with this exception.
"""
class ConnectionFdescWentAway(ConnectionLost):
"""Uh""" #TODO
class AlreadyCalled(ValueError):
"""Tried to cancel an already-called event"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class AlreadyCancelled(ValueError):
"""Tried to cancel an already-cancelled event"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class PotentialZombieWarning(Warning):
"""
Emitted when L{IReactorProcess.spawnProcess} is called in a way which may
result in termination of the created child process not being reported.
Deprecated in Twisted 10.0.
"""
MESSAGE = (
"spawnProcess called, but the SIGCHLD handler is not "
"installed. This probably means you have not yet "
"called reactor.run, or called "
"reactor.run(installSignalHandler=0). You will probably "
"never see this process finish, and it may become a "
"zombie process.")
deprecate.deprecatedModuleAttribute(
Version("Twisted", 10, 0, 0),
"There is no longer any potential for zombie process.",
__name__,
"PotentialZombieWarning")
class ProcessDone(ConnectionDone):
"""A process has ended without apparent errors"""
def __init__(self, status):
Exception.__init__(self, "process finished with exit code 0")
self.exitCode = 0
self.signal = None
self.status = status
class ProcessTerminated(ConnectionLost):
"""A process has ended with a probable error condition"""
def __init__(self, exitCode=None, signal=None, status=None):
self.exitCode = exitCode
self.signal = signal
self.status = status
s = "process ended"
if exitCode is not None: s = s + " with exit code %s" % exitCode
if signal is not None: s = s + " by signal %s" % signal
Exception.__init__(self, s)
class ProcessExitedAlready(Exception):
"""
The process has already exited and the operation requested can no longer
be performed.
"""
class NotConnectingError(RuntimeError):
"""The Connector was not connecting when it was asked to stop connecting"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class NotListeningError(RuntimeError):
"""The Port was not listening when it was asked to stop listening"""
def __str__(self):
s = self.__doc__
if self.args:
s = '%s: %s' % (s, ' '.join(self.args))
s = '%s.' % s
return s
class ReactorNotRunning(RuntimeError):
"""
Error raised when trying to stop a reactor which is not running.
"""
class ReactorNotRestartable(RuntimeError):
"""
Error raised when trying to run a reactor which was stopped.
"""
class ReactorAlreadyRunning(RuntimeError):
"""
Error raised when trying to start the reactor multiple times.
"""
class ReactorAlreadyInstalledError(AssertionError):
"""
Could not install reactor because one is already installed.
"""
class ConnectingCancelledError(Exception):
"""
An C{Exception} that will be raised when an L{IStreamClientEndpoint} is
cancelled before it connects.
@ivar address: The L{IAddress} that is the destination of the
cancelled L{IStreamClientEndpoint}.
"""
def __init__(self, address):
"""
@param address: The L{IAddress} that is the destination of the
L{IStreamClientEndpoint} that was cancelled.
"""
Exception.__init__(self, address)
self.address = address
class UnsupportedAddressFamily(Exception):
"""
An attempt was made to use a socket with an address family (eg I{AF_INET},
I{AF_INET6}, etc) which is not supported by the reactor.
"""
class UnsupportedSocketType(Exception):
"""
An attempt was made to use a socket of a type (eg I{SOCK_STREAM},
I{SOCK_DGRAM}, etc) which is not supported by the reactor.
"""
class AlreadyListened(Exception):
"""
An attempt was made to listen on a file descriptor which can only be
listened on once.
"""
__all__ = [
'BindError', 'CannotListenError', 'MulticastJoinError',
'MessageLengthError', 'DNSLookupError', 'ConnectInProgressError',
'ConnectError', 'ConnectBindError', 'UnknownHostError', 'NoRouteError',
'ConnectionRefusedError', 'TCPTimedOutError', 'BadFileError',
'ServiceNameUnknownError', 'UserError', 'TimeoutError', 'SSLError',
'VerifyError', 'PeerVerifyError', 'CertificateError',
'getConnectError', 'ConnectionClosed', 'ConnectionLost',
'ConnectionDone', 'ConnectionFdescWentAway', 'AlreadyCalled',
'AlreadyCancelled', 'PotentialZombieWarning', 'ProcessDone',
'ProcessTerminated', 'ProcessExitedAlready', 'NotConnectingError',
'NotListeningError', 'ReactorNotRunning', 'ReactorAlreadyRunning',
'ReactorAlreadyInstalledError', 'ConnectingCancelledError',
'UnsupportedAddressFamily', 'UnsupportedSocketType']
| {
"content_hash": "8ef10c1465995c99a265806e57a4745f",
"timestamp": "",
"source": "github",
"line_count": 452,
"max_line_length": 79,
"avg_line_length": 24.29646017699115,
"alnum_prop": 0.6277545073757057,
"repo_name": "biddisco/VTK",
"id": "687988636125cc27daf966f248ab9548b2aaf41d",
"size": "11055",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "ThirdParty/Twisted/twisted/internet/error.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "37444"
},
{
"name": "C",
"bytes": "45542302"
},
{
"name": "C++",
"bytes": "60467840"
},
{
"name": "CSS",
"bytes": "157961"
},
{
"name": "Cuda",
"bytes": "28721"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "IDL",
"bytes": "4406"
},
{
"name": "Java",
"bytes": "184678"
},
{
"name": "JavaScript",
"bytes": "978324"
},
{
"name": "Objective-C",
"bytes": "121232"
},
{
"name": "Objective-C++",
"bytes": "101052"
},
{
"name": "Pascal",
"bytes": "3255"
},
{
"name": "Perl",
"bytes": "177007"
},
{
"name": "Python",
"bytes": "13262355"
},
{
"name": "Shell",
"bytes": "41929"
},
{
"name": "Tcl",
"bytes": "1894036"
}
],
"symlink_target": ""
} |
"""
This module provides code for various distance measures.
Functions:
euclidean Euclidean distance between two points
euclidean_py Pure Python implementation of euclidean.
"""
# XXX cosine distance
import math
try:
from Numeric import *
except ImportError, x:
raise ImportError, "This module requires Numeric (precursor to NumPy)"
from Bio import listfns
def euclidean(x, y):
"""euclidean(x, y) -> euclidean distance between x and y"""
if len(x) != len(y):
raise ValueError, "vectors must be same length"
#return sqrt(sum((x-y)**2))
# Optimization by John Corradi ([email protected])
d = x-y
return sqrt(dot(d, d))
def euclidean_py(x, y):
"""euclidean_py(x, y) -> euclidean distance between x and y"""
# lightly modified from implementation by Thomas Sicheritz-Ponten.
# This works faster than the Numeric implementation on shorter
# vectors.
if len(x) != len(y):
raise ValueError, "vectors must be same length"
sum = 0
for i in range(len(x)):
sum += (x[i]-y[i])**2
return math.sqrt(sum)
# Try and load C implementations of functions. If I can't,
# then just ignore and use the pure python implementations.
try:
from cdistance import *
except ImportError:
pass
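# Example (not part of the original module): both implementations agree on a
# simple 3-4-5 triangle; Numeric arrays are required for the vectorised version.
#   >>> euclidean(array([0.0, 0.0]), array([3.0, 4.0]))
#   5.0
#   >>> euclidean_py([0.0, 0.0], [3.0, 4.0])
#   5.0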
| {
"content_hash": "3c6c92f9c0b7dd1eef91e61feb1e556e",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 74,
"avg_line_length": 28.954545454545453,
"alnum_prop": 0.6766091051805337,
"repo_name": "dbmi-pitt/DIKB-Micropublication",
"id": "c1f62bbcce201e2a6d4a3ed852a20ff0939197bc",
"size": "1274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/mp-scripts/Bio/distance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3682138"
},
{
"name": "R",
"bytes": "4656"
},
{
"name": "Shell",
"bytes": "786"
}
],
"symlink_target": ""
} |
class Queue(object):
""" Implement a queue data structure, ie. FIFO.
Attrs:
head: head of the queue.
tail: tail of the queue.
"""
def __init__(self):
self.head = None
self.tail = None
def enqueue(self, value):
node = {'value': value, 'prev': None}
if self.head == None:
self.head = self.tail = node
else:
self.head['prev'] = node
self.head = node
def dequeue(self):
if self.tail == None:
return None
value = self.tail['value']
self.tail = self.tail['prev']
if self.tail == None:
self.head = None
return value
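# Minimal usage sketch (illustrative, not part of the original module):
# enqueue() links new nodes at the head, dequeue() pops from the tail, so
# values come back in insertion order (FIFO) and an empty queue yields None.
def _example_queue_usage():
    q = Queue()
    q.enqueue('a')
    q.enqueue('b')
    assert q.dequeue() == 'a'
    assert q.dequeue() == 'b'
    assert q.dequeue() is None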
| {
"content_hash": "cd26da5aabe8bde3c0a0aebff13b4ce7",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 51,
"avg_line_length": 23.93103448275862,
"alnum_prop": 0.49855907780979825,
"repo_name": "topliceanu/learn",
"id": "5c7c9f94024336ef5619bbc1045a474b36245645",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/algo/src/queue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2259"
},
{
"name": "C",
"bytes": "50301"
},
{
"name": "CSS",
"bytes": "2376"
},
{
"name": "Clojure",
"bytes": "40105"
},
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "Dockerfile",
"bytes": "557"
},
{
"name": "Elm",
"bytes": "35974"
},
{
"name": "Forth",
"bytes": "128"
},
{
"name": "Go",
"bytes": "277187"
},
{
"name": "HTML",
"bytes": "750204"
},
{
"name": "Haskell",
"bytes": "57709"
},
{
"name": "JavaScript",
"bytes": "3327208"
},
{
"name": "Makefile",
"bytes": "3280"
},
{
"name": "OCaml",
"bytes": "157277"
},
{
"name": "PowerShell",
"bytes": "3022"
},
{
"name": "Procfile",
"bytes": "230"
},
{
"name": "Pug",
"bytes": "846"
},
{
"name": "Python",
"bytes": "1383229"
},
{
"name": "Racket",
"bytes": "7552"
},
{
"name": "Reason",
"bytes": "43"
},
{
"name": "Roff",
"bytes": "116"
},
{
"name": "Ruby",
"bytes": "134845"
},
{
"name": "Rust",
"bytes": "146828"
},
{
"name": "Shell",
"bytes": "9006"
},
{
"name": "Solidity",
"bytes": "1347"
},
{
"name": "TypeScript",
"bytes": "254"
},
{
"name": "Vue",
"bytes": "1504"
}
],
"symlink_target": ""
} |
from selenium import webdriver
from django.test import Client
from myblog.models import Post, Category
import datetime
client = Client()
import unittest
class Setup_Teardown(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
self.verificationErrors = []
def tearDown(self):
self.browser.quit()
self.assertEqual([], self.verificationErrors)
class Visitor_Test(Setup_Teardown):
def test_for_visitor(self):
response = client.post('/login/?visitor=false', {'name': 'fred', 'passwd': 'secret'})
try:
self.assertEqual(response.status_code, 200)
except AssertionError as e:
self.verificationErrors.append('test_for_visitor: '+str(e))
if __name__ == '__main__':
unittest.main(warnings='ignore')
| {
"content_hash": "15487dabf42db866d3993d5f798691f2",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 93,
"avg_line_length": 25.87878787878788,
"alnum_prop": 0.6604215456674473,
"repo_name": "robalford/reConstruct",
"id": "9c1862caf848cf6fdd6124fdd03f7bae709071da",
"size": "854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/_test_temp_login.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7751"
},
{
"name": "CSS",
"bytes": "24416"
},
{
"name": "HTML",
"bytes": "27813"
},
{
"name": "JavaScript",
"bytes": "88031"
},
{
"name": "Makefile",
"bytes": "8093"
},
{
"name": "Python",
"bytes": "162582"
}
],
"symlink_target": ""
} |
"""
`cloud_monitoring_mql.py`
Cloud Monitoring backend implementation with MQL (Monitoring Query Language).
"""
import logging
import pprint
import typing
import warnings
from collections import OrderedDict
from datetime import datetime
from typing import List, Optional, Tuple
from google.api.distribution_pb2 import Distribution
from google.cloud.monitoring_v3 import QueryTimeSeriesRequest
from google.cloud.monitoring_v3.services.query_service import QueryServiceClient
from google.cloud.monitoring_v3.services.query_service.pagers import (
QueryTimeSeriesPager,
)
from google.cloud.monitoring_v3.types.metric import TimeSeries
from slo_generator.constants import NO_DATA
LOGGER = logging.getLogger(__name__)
class CloudMonitoringMqlBackend:
"""Backend for querying metrics from Cloud Monitoring with MQL.
Args:
project_id (str): Cloud Monitoring host project id.
client (google.cloud.monitoring_v3.QueryServiceClient, optional):
Existing Cloud Monitoring Query client. Initialize a new client
if omitted.
"""
def __init__(self, project_id: str, client=None):
self.client = client
if client is None:
self.client = QueryServiceClient()
self.parent = self.client.common_project_path( # type: ignore[union-attr]
project_id
)
def good_bad_ratio(
self,
timestamp: int,
window: int,
slo_config: dict,
) -> Tuple[int, int]:
"""Query two timeseries, one containing 'good' events, one containing
'bad' events.
Args:
timestamp (int): UNIX timestamp.
window (int): Window size (in seconds).
slo_config (dict): SLO configuration.
Returns:
tuple: A tuple (good_event_count, bad_event_count)
"""
measurement: dict = slo_config["spec"]["service_level_indicator"]
filter_good: str = measurement["filter_good"]
filter_bad: Optional[str] = measurement.get("filter_bad")
filter_valid: Optional[str] = measurement.get("filter_valid")
# Query 'good events' timeseries
good_ts: List[TimeSeries] = self.query(timestamp, window, filter_good)
good_event_count: int = CM.count(good_ts)
# Query 'bad events' timeseries
bad_event_count: int
if filter_bad:
bad_ts: List[TimeSeries] = self.query(timestamp, window, filter_bad)
bad_event_count = CM.count(bad_ts)
elif filter_valid:
valid_ts: List[TimeSeries] = self.query(timestamp, window, filter_valid)
bad_event_count = CM.count(valid_ts) - good_event_count
else:
raise Exception("One of `filter_bad` or `filter_valid` is required.")
LOGGER.debug(
f"Good events: {good_event_count} | " f"Bad events: {bad_event_count}"
)
return good_event_count, bad_event_count
# pylint: disable=too-many-locals,disable=unused-argument
def distribution_cut(
self,
timestamp: int,
window: int,
slo_config: dict,
) -> Tuple[int, int]:
"""Query one timeseries of type 'exponential'.
Args:
timestamp (int): UNIX timestamp.
window (int): Window size (in seconds).
slo_config (dict): SLO configuration.
Returns:
tuple: A tuple (good_event_count, bad_event_count).
"""
measurement: dict = slo_config["spec"]["service_level_indicator"]
filter_valid: str = measurement["filter_valid"]
threshold_bucket: int = int(measurement["threshold_bucket"])
good_below_threshold: typing.Optional[bool] = measurement.get(
"good_below_threshold", True
)
# Query 'valid' events
series = self.query(timestamp, window, filter_valid)
if not series:
return NO_DATA, NO_DATA # no timeseries
distribution_value: Distribution = (
series[0].point_data[0].values[0].distribution_value
)
bucket_counts: list = distribution_value.bucket_counts
valid_events_count: int = distribution_value.count
        # Make the exponential distribution explicit as cumulative bucket counts
count_sum: int = 0
distribution = OrderedDict()
for i, bucket_count in enumerate(bucket_counts):
count_sum += bucket_count
distribution[i] = {"count_sum": count_sum}
LOGGER.debug(pprint.pformat(distribution))
lower_events_count: int
upper_events_count: int
if len(distribution) - 1 < threshold_bucket:
            # the maximum measured value falls below the threshold bucket
lower_events_count = valid_events_count
upper_events_count = 0
else:
lower_events_count = distribution[threshold_bucket]["count_sum"]
upper_events_count = valid_events_count - lower_events_count
good_event_count: int
bad_event_count: int
if good_below_threshold:
good_event_count = lower_events_count
bad_event_count = upper_events_count
else:
good_event_count = upper_events_count
bad_event_count = lower_events_count
return good_event_count, bad_event_count
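    @staticmethod
    def _example_distribution_split(bucket_counts, threshold_bucket):
        """Illustrative sketch, not part of the original backend: the
        cumulative-bucket arithmetic used by `distribution_cut` above, reduced
        to plain Python lists. With bucket_counts=[5, 3, 2] and
        threshold_bucket=1, buckets 0..1 hold 8 events ("lower") and the
        remaining 2 are "upper"."""
        count_sum = 0
        cumulative = []
        for bucket_count in bucket_counts:
            count_sum += bucket_count
            cumulative.append(count_sum)
        valid_events_count = cumulative[-1] if cumulative else 0
        if len(cumulative) - 1 < threshold_bucket:
            # the maximum measured value falls below the threshold bucket
            return valid_events_count, 0
        lower_events_count = cumulative[threshold_bucket]
        return lower_events_count, valid_events_count - lower_events_count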
def exponential_distribution_cut(self, *args, **kwargs) -> Tuple[int, int]:
"""Alias for `distribution_cut` method to allow for backwards
compatibility.
"""
warnings.warn(
"exponential_distribution_cut will be deprecated in version 2.0, "
"please use distribution_cut instead",
FutureWarning,
)
return self.distribution_cut(*args, **kwargs)
def query_sli(
self,
timestamp: int, # pylint: disable=unused-argument
window: int,
slo_config: dict,
) -> float:
"""Query SLI value from a given MQL query.
Args:
timestamp (int): UNIX timestamp.
window (int): Window (in seconds).
slo_config (dict): SLO configuration.
Returns:
float: SLI value.
"""
measurement: dict = slo_config["spec"]["service_level_indicator"]
query: str = measurement["query"]
series: List[TimeSeries] = self.query(timestamp, window, query)
sli_value: float = series[0].point_data[0].values[0].double_value
LOGGER.debug(f"SLI value: {sli_value}")
return sli_value
def query(self, timestamp: float, window: int, query: str) -> List[TimeSeries]:
"""Query timeseries from Cloud Monitoring using MQL.
Args:
timestamp (float): Current timestamp.
window (int): Window size (in seconds).
query (str): MQL query.
Returns:
list: List of timeseries objects.
"""
# Enrich query to aggregate and reduce time series over target window.
query_with_time_horizon_and_period: str = (
self.enrich_query_with_time_horizon_and_period(timestamp, window, query)
)
request = QueryTimeSeriesRequest(
{"name": self.parent, "query": query_with_time_horizon_and_period}
)
# fmt: off
timeseries_pager: QueryTimeSeriesPager = (
self.client.query_time_series(request) # type: ignore[union-attr]
)
# fmt: on
timeseries: List[TimeSeries] = list(timeseries_pager)
LOGGER.debug(pprint.pformat(timeseries))
return timeseries
@staticmethod
def enrich_query_with_time_horizon_and_period(
timestamp: float,
window: int,
query: str,
) -> str:
"""Enrich MQL query with time period and horizon.
Args:
timestamp (float): UNIX timestamp.
window (int): Query window (in seconds).
query (str): Base query in YAML config.
Returns:
str: Enriched query.
"""
# Python uses floating point numbers to represent time in seconds since the
# epoch, in UTC, with decimal part representing nanoseconds.
# MQL expects dates formatted like "%Y/%m/%d %H:%M:%S" or "%Y/%m/%d-%H:%M:%S".
# Reference: https://cloud.google.com/monitoring/mql/reference#lexical-elements
end_time_str: str = datetime.fromtimestamp(timestamp).strftime(
"%Y/%m/%d %H:%M:%S"
)
query_with_time_horizon_and_period: str = (
query
+ f"| group_by [] | within {window}s, d'{end_time_str}' | every {window}s"
)
return query_with_time_horizon_and_period
@staticmethod
def count(timeseries: List[TimeSeries]) -> int:
"""Count events in time series assuming it was aligned with ALIGN_SUM
and reduced with REDUCE_SUM (default).
Args:
            timeseries (list): List of :obj:`monitoring_v3.TimeSeries` objects.
Returns:
int: Event count.
"""
try:
return timeseries[0].point_data[0].values[0].int64_value
except (IndexError, AttributeError) as exception:
LOGGER.debug(exception, exc_info=True)
return NO_DATA # no events in timeseries
CM = CloudMonitoringMqlBackend
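# Illustrative sketch (not part of the original module): what the query
# enrichment above produces for a one-hour window. The base MQL fragment is a
# made-up example, not an official metric name.
def _example_enriched_query() -> str:
    base_query = "fetch gce_instance::compute.googleapis.com/instance/cpu/utilization | mean"
    # 1640995200.0 is 2022-01-01 00:00:00 UTC; the helper renders it in local
    # time and appends "| group_by [] | within 3600s, d'...' | every 3600s".
    return CM.enrich_query_with_time_horizon_and_period(1640995200.0, 3600, base_query)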
| {
"content_hash": "445987f061a3b65149aa99e858cafb0c",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 87,
"avg_line_length": 35.98054474708171,
"alnum_prop": 0.6109008327024981,
"repo_name": "google/slo-generator",
"id": "419e20fb8576d9ec5b84e0fcf655da5064f043a7",
"size": "9829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slo_generator/backends/cloud_monitoring_mql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "959"
},
{
"name": "Makefile",
"bytes": "4242"
},
{
"name": "Python",
"bytes": "224730"
}
],
"symlink_target": ""
} |
"""Test making many calls and immediately cancelling most of them."""
import threading
import unittest
from grpc._cython import cygrpc
from grpc.framework.foundation import logging_pool
from tests.unit.framework.common import test_constants
from tests.unit._cython import test_utilities
_EMPTY_FLAGS = 0
_EMPTY_METADATA = ()
_SERVER_SHUTDOWN_TAG = 'server_shutdown'
_REQUEST_CALL_TAG = 'request_call'
_RECEIVE_CLOSE_ON_SERVER_TAG = 'receive_close_on_server'
_RECEIVE_MESSAGE_TAG = 'receive_message'
_SERVER_COMPLETE_CALL_TAG = 'server_complete_call'
_SUCCESS_CALL_FRACTION = 1.0 / 8.0
_SUCCESSFUL_CALLS = int(test_constants.RPC_CONCURRENCY * _SUCCESS_CALL_FRACTION)
_UNSUCCESSFUL_CALLS = test_constants.RPC_CONCURRENCY - _SUCCESSFUL_CALLS
class _State(object):
def __init__(self):
self.condition = threading.Condition()
self.handlers_released = False
self.parked_handlers = 0
self.handled_rpcs = 0
def _is_cancellation_event(event):
return (event.tag is _RECEIVE_CLOSE_ON_SERVER_TAG and
event.batch_operations[0].cancelled())
class _Handler(object):
def __init__(self, state, completion_queue, rpc_event):
self._state = state
self._lock = threading.Lock()
self._completion_queue = completion_queue
self._call = rpc_event.call
def __call__(self):
with self._state.condition:
self._state.parked_handlers += 1
if self._state.parked_handlers == test_constants.THREAD_CONCURRENCY:
self._state.condition.notify_all()
while not self._state.handlers_released:
self._state.condition.wait()
with self._lock:
self._call.start_server_batch(
(cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),),
_RECEIVE_CLOSE_ON_SERVER_TAG)
self._call.start_server_batch(
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
_RECEIVE_MESSAGE_TAG)
first_event = self._completion_queue.poll()
if _is_cancellation_event(first_event):
self._completion_queue.poll()
else:
with self._lock:
operations = (
cygrpc.SendInitialMetadataOperation(_EMPTY_METADATA,
_EMPTY_FLAGS),
cygrpc.SendMessageOperation(b'\x79\x57', _EMPTY_FLAGS),
cygrpc.SendStatusFromServerOperation(
_EMPTY_METADATA, cygrpc.StatusCode.ok, b'test details!',
_EMPTY_FLAGS),
)
self._call.start_server_batch(operations,
_SERVER_COMPLETE_CALL_TAG)
self._completion_queue.poll()
self._completion_queue.poll()
def _serve(state, server, server_completion_queue, thread_pool):
for _ in range(test_constants.RPC_CONCURRENCY):
call_completion_queue = cygrpc.CompletionQueue()
server.request_call(call_completion_queue, server_completion_queue,
_REQUEST_CALL_TAG)
rpc_event = server_completion_queue.poll()
thread_pool.submit(_Handler(state, call_completion_queue, rpc_event))
with state.condition:
state.handled_rpcs += 1
if test_constants.RPC_CONCURRENCY <= state.handled_rpcs:
state.condition.notify_all()
server_completion_queue.poll()
class _QueueDriver(object):
def __init__(self, condition, completion_queue, due):
self._condition = condition
self._completion_queue = completion_queue
self._due = due
self._events = []
self._returned = False
def start(self):
def in_thread():
while True:
event = self._completion_queue.poll()
with self._condition:
self._events.append(event)
self._due.remove(event.tag)
self._condition.notify_all()
if not self._due:
self._returned = True
return
thread = threading.Thread(target=in_thread)
thread.start()
def events(self, at_least):
with self._condition:
while len(self._events) < at_least:
self._condition.wait()
return tuple(self._events)
class CancelManyCallsTest(unittest.TestCase):
def testCancelManyCalls(self):
server_thread_pool = logging_pool.pool(
test_constants.THREAD_CONCURRENCY)
server_completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server([
(
b'grpc.so_reuseport',
0,
),
])
server.register_completion_queue(server_completion_queue)
port = server.add_http2_port(b'[::]:0')
server.start()
channel = cygrpc.Channel('localhost:{}'.format(port).encode(), None,
None)
state = _State()
server_thread_args = (
state,
server,
server_completion_queue,
server_thread_pool,
)
server_thread = threading.Thread(target=_serve, args=server_thread_args)
server_thread.start()
client_condition = threading.Condition()
client_due = set()
with client_condition:
client_calls = []
for index in range(test_constants.RPC_CONCURRENCY):
tag = 'client_complete_call_{0:04d}_tag'.format(index)
client_call = channel.integrated_call(
_EMPTY_FLAGS, b'/twinkies', None, None, _EMPTY_METADATA,
None, ((
(
cygrpc.SendInitialMetadataOperation(
_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.SendMessageOperation(b'\x45\x56',
_EMPTY_FLAGS),
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
cygrpc.ReceiveInitialMetadataOperation(
_EMPTY_FLAGS),
cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
tag,
),))
client_due.add(tag)
client_calls.append(client_call)
client_events_future = test_utilities.SimpleFuture(lambda: tuple(
channel.next_call_event() for _ in range(_SUCCESSFUL_CALLS)))
with state.condition:
while True:
if state.parked_handlers < test_constants.THREAD_CONCURRENCY:
state.condition.wait()
elif state.handled_rpcs < test_constants.RPC_CONCURRENCY:
state.condition.wait()
else:
state.handlers_released = True
state.condition.notify_all()
break
client_events_future.result()
with client_condition:
for client_call in client_calls:
client_call.cancel(cygrpc.StatusCode.cancelled, 'Cancelled!')
for _ in range(_UNSUCCESSFUL_CALLS):
channel.next_call_event()
channel.close(cygrpc.StatusCode.unknown, 'Cancelled on channel close!')
with state.condition:
server.shutdown(server_completion_queue, _SERVER_SHUTDOWN_TAG)
if __name__ == '__main__':
unittest.main(verbosity=2)
| {
"content_hash": "ac7bf1ab5ccf103de701c8d7e58f8ee9",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 80,
"avg_line_length": 36.642857142857146,
"alnum_prop": 0.5578947368421052,
"repo_name": "jboeuf/grpc",
"id": "b279f3d07c53aacd8e501647f85c73c822db5f89",
"size": "8272",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "34391"
},
{
"name": "C",
"bytes": "2506485"
},
{
"name": "C#",
"bytes": "2056447"
},
{
"name": "C++",
"bytes": "31737951"
},
{
"name": "CMake",
"bytes": "678007"
},
{
"name": "CSS",
"bytes": "1519"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "Dockerfile",
"bytes": "157798"
},
{
"name": "Go",
"bytes": "34791"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "6907"
},
{
"name": "JavaScript",
"bytes": "61459"
},
{
"name": "M4",
"bytes": "50995"
},
{
"name": "Makefile",
"bytes": "1003022"
},
{
"name": "Mako",
"bytes": "5629"
},
{
"name": "Objective-C",
"bytes": "597466"
},
{
"name": "Objective-C++",
"bytes": "77713"
},
{
"name": "PHP",
"bytes": "474525"
},
{
"name": "PowerShell",
"bytes": "621"
},
{
"name": "Python",
"bytes": "2949502"
},
{
"name": "Ruby",
"bytes": "1029888"
},
{
"name": "Shell",
"bytes": "472145"
},
{
"name": "Swift",
"bytes": "3516"
},
{
"name": "TSQL",
"bytes": "4901"
},
{
"name": "XSLT",
"bytes": "9673"
}
],
"symlink_target": ""
} |
"""
Template answer for REST Workshop
"""
##########################################################################
## Imports
##########################################################################
import json
import pprint
import requests
##########################################################################
## Module Variables/Constants
##########################################################################
DOJ_RELEASES_URL = 'http://www.justice.gov/api/v1/press_releases.json?pagesize=5'
#########################################################################
# Functions
#########################################################################
def fetch_press_releases(URL):
"""
    Performs a GET on the DOJ web service and returns the array found in the
    'results' attribute of the JSON response.
"""
# use requests library to execute a GET request and store the results
response = requests.get(URL)
# use requests library to decode as json and store the results
data = response.json()
# return the 'results' array of press releases
return data['results']
def main():
"""
Main execution function to perform required actions
"""
# fetch array of press releases
press_releases = fetch_press_releases(DOJ_RELEASES_URL)
# iterate through press releases
for release in press_releases:
# save content to a new file
# we'll use the unique press release identifier to make the filename
fname = release['uuid']+'.json'
content = json.dumps(release)
with open(fname, 'w') as f:
f.write(content)
##########################################################################
## Execution
##########################################################################
if __name__ == '__main__':
main()
| {
"content_hash": "4945bfcfefc577d8dad798cc5843bbe6",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 81,
"avg_line_length": 29.677419354838708,
"alnum_prop": 0.44782608695652176,
"repo_name": "rebeccabilbro/cipactli",
"id": "20fc1d1aaab582c3501e624060ba10483e1fb24b",
"size": "1878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restingest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4166"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import os
import re
import tempfile
def read_version():
source_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cedexis/radar/__init__.py')
with open(source_path) as fp:
source = fp.read()
major_match = re.search('__sampler_major_version__\s*=\s*(\d+)', source)
minor_match = re.search('__sampler_minor_version__\s*=\s*(\d+)', source)
micro_match = re.search('__sampler_micro_version__\s*=\s*(\d+)', source)
suffix_match = re.search('__version_suffix__\s*=\s*[\'"]([-\w\d]+)[\'"]', source)
suffix = ''
if not suffix_match is None:
suffix = suffix_match.group(1)
return '{}.{}.{}{}'.format(
major_match.group(1),
minor_match.group(1),
micro_match.group(1),
suffix,
)
def read_file(file_path):
"""
Read a file relative to the directory containing this file
"""
root_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(root_dir, file_path)
with open(file_path) as fp:
content = fp.read()
try:
return content.decode('utf-8').strip()
except AttributeError:
return content.strip()
long_description = '\n\n'.join([
read_file('README.rst'),
read_file('CHANGES.rst'),
])
#temp_file_handle, temp_file_path = tempfile.mkstemp(suffix='.rst')
#with open(temp_file_path, 'w') as fp:
# fp.write(long_description)
#print('PyPI documentation written to: {}'.format(temp_file_path))
# See http://docs.python.org/3.3/distutils/apiref.html#module-distutils.core
# for help with setup keyword arguments
setup_kwargs = {
# http://www.python.org/dev/peps/pep-0423/#use-a-single-name
'name': 'cedexis.radar',
'version': read_version(),
'description': 'Cedexis Radar client library',
'long_description': long_description,
'classifiers': [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: IronPython',
'Topic :: Internet',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
'keywords': [
'cedexis',
'radar',
'internet',
'dns',
'cdn',
'cloud',
'load balancing',
'availability',
'rtt',
'mobile',
],
'url': 'https://github.com/cedexis/cedexis.radar',
'author': 'Jacob Wan',
'author_email': '[email protected]',
'license': 'MIT',
'packages': find_packages(),
'namespace_packages': [
'cedexis',
],
'tests_require': [
'nose'
],
'test_suite': 'nose.collector',
'entry_points': {
'console_scripts': [
'cedexis-radar-cli=cedexis.radar.cli:main',
],
},
'zip_safe': True,
}
setup(**setup_kwargs)
| {
"content_hash": "c777cf84c2e710a9336182d40fabed42",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 103,
"avg_line_length": 31.653846153846153,
"alnum_prop": 0.580498177399757,
"repo_name": "cedexis/cedexis.radar",
"id": "ca4afb91e61259caa5951ae9eaf32f53b9633fac",
"size": "3293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "194"
},
{
"name": "Python",
"bytes": "46451"
}
],
"symlink_target": ""
} |
import attr
import sqlite3
import logging
@attr.s
class ContactsProcessor:
"""
The processor of contacts data.
:param sakia.data.repositories.ContactsRepo _contacts_repo: the repository of the contacts
"""
_contacts_repo = attr.ib()
_logger = attr.ib(default=attr.Factory(lambda: logging.getLogger('sakia')))
@classmethod
def instanciate(cls, app):
"""
        Instantiate a contacts processor
:param sakia.app.Application app: the app
"""
return cls(app.db.contacts_repo)
def contacts(self):
return self._contacts_repo.get_all()
def contact(self, contact_id):
return self._contacts_repo.get_one(contact_id=contact_id)
def commit(self, contact):
try:
self._contacts_repo.insert(contact)
except sqlite3.IntegrityError:
self._contacts_repo.update(contact)
def delete(self, contact):
self._contacts_repo.drop(contact)
def get_one(self, **search):
return self._contacts_repo.get_one(**search)
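# Illustrative usage sketch (not part of the original module): commit() acts as
# an upsert, inserting first and falling back to update() on IntegrityError.
# `app` is assumed to be an initialized sakia Application and `contact` an
# existing contact entity.
def _example_contacts_upsert(app, contact):
    processor = ContactsProcessor.instanciate(app)
    processor.commit(contact)  # insert, or update if the row already exists
    return processor.contacts()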
| {
"content_hash": "3f1d8c77959e2004a6b17cb4f828a839",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 94,
"avg_line_length": 26.4,
"alnum_prop": 0.6448863636363636,
"repo_name": "ucoin-io/cutecoin",
"id": "71b484cdc7b3728de8df19d3bcb9d013c9a74573",
"size": "1056",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sakia/data/processors/contacts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2475"
},
{
"name": "JavaScript",
"bytes": "1594"
},
{
"name": "PowerShell",
"bytes": "3111"
},
{
"name": "Python",
"bytes": "718811"
},
{
"name": "Shell",
"bytes": "3983"
}
],
"symlink_target": ""
} |
from . import api
class OpenWebcastLectures():
# OpenWebcastLectures.OpenWebcasts
def open_webcasts(self, acadYear, semester, titleOnly=False, mediaChannelId=None, auth=True):
params = {'AcadYear': acadYear, 'Semester': semester, 'TitleOnly': titleOnly, 'MediaChannelID': mediaChannelId}
return api.call('OpenWebcasts', params, auth)
# OpenWebcastLectures.OpenWebcast_AddLog_JSON
def open_webcast_add_log(self, mediaChannelId, mediaChannelItemId, auth=True):
params = {'MediaChannelID': mediaChannelId, 'MediaChannelItemID': mediaChannelItemId}
return api.call('OpenWebcast_AddLog_JSON', params, auth, 'post')
| {
"content_hash": "a515dccecdd10cee5fc4f9b624b2f5c2",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 119,
"avg_line_length": 55,
"alnum_prop": 0.7333333333333333,
"repo_name": "karenang/ivle-bot",
"id": "9b5385d089301f4fdb33e39dda355b1429db65d3",
"size": "660",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyivle/helpers/open_webcast_lectures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60827"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages, findall
PACKAGE_NAME = 'yawf'
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
REQUIREMENTS_FILEPATH = os.path.join(CURRENT_DIR, 'requirements', 'default.txt')
README_FILEPATH = os.path.join(CURRENT_DIR, 'README.rst')
VERSION_FILEPATH = os.path.join(CURRENT_DIR, PACKAGE_NAME, 'version.py')
def get_version():
# populate namespace with __version__
execfile(VERSION_FILEPATH)
return locals()['__version__']
def get_requirements():
with open(REQUIREMENTS_FILEPATH) as fp:
return fp.read().splitlines()
def get_data_files():
data_files = filter(
lambda name: not name.endswith('.py') and not name.endswith('.pyc'),
findall(PACKAGE_NAME))
return [x.split(os.sep, 1)[-1] for x in data_files]
def get_long_description():
return open(README_FILEPATH).read()
setup(
name = PACKAGE_NAME,
version = get_version(),
packages = find_packages(CURRENT_DIR, exclude=('yawf_sample', 'yawf_sample.*')),
package_data = {'': get_data_files()},
# Metadata
author = 'Nikolay Zakharov',
author_email = '[email protected]',
url = 'https://github.com/freevoid/yawf',
description = 'Yet Another Workflow Framework',
long_description = get_long_description(),
keywords = 'workflow state transition fsm django',
install_requires = get_requirements(),
extras_require = {
'reversion': ['django-reversion>=1.5'],
},
license = 'MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Framework :: Django',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
| {
"content_hash": "8d27a97b54f1bd8e698eca0f68a6cecb",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 84,
"avg_line_length": 29.816666666666666,
"alnum_prop": 0.6400223588596982,
"repo_name": "freevoid/yawf",
"id": "466475b06f66207950afbf238cb5a42b103ef818",
"size": "1789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "193290"
}
],
"symlink_target": ""
} |
import time
from netforce.model import Model, fields, get_model
from netforce import database
class Notification(Model):
_name = "hr.notification"
_fields = {
"subject": fields.Char("Title", required=True),
"description": fields.Text("Description"),
'birthday_ntf': fields.Boolean("Birthday Notify"),
}
def birthday_notify(self, context={}):
db = database.get_connection()
cr_time = time.strftime("%Y-%m-%d %H:%M:%S")
cr_yyyy = cr_time[0:4]
cr_mm = cr_time[5:7]
cr_dd = cr_time[8:10]
today = "%s%s" % (cr_dd, cr_mm)
print(cr_time, " checking birthday")
subject = "Happy Birth Day"
body = subject
ntf = get_model("hr.notification").browse(1)
if ntf:
subject = ntf.subject
body = ntf.description
count = 0
for emp in get_model("hr.employee").search_browse([['work_status', '=', 'working']]):
if emp.birth_date:
mm = emp.birth_date[5:7]
dd = emp.birth_date[8:10]
emp_date = "%s%s" % (dd, mm)
if emp_date == today:
user_id = emp.user_id.id
sql = "select id from message where related_id='hr.employee,%s' and extract(year from create_time) = %s"
res = db.query(sql, emp.id, cr_yyyy)
if not res:
if emp.email:
self.trigger([emp.id], "birthday_notify")
print("happby birthday %s %s" % (emp.first_name, emp.last_name))
if user_id:
vals = {
'subject': subject,
'to_id': user_id,
'body': body,
"related_id": "hr.employee,%s" % emp.id,
}
msg_id = get_model("message").create(vals)
print("created message #", msg_id)
count += 1
print("SEND TOTOAL: #", count)
Notification.register()
| {
"content_hash": "be7547915e50aaa54d9a07fb0b097033",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 124,
"avg_line_length": 39.56363636363636,
"alnum_prop": 0.45955882352941174,
"repo_name": "anastue/netforce",
"id": "9d66f4d368207ea74e3a30f3fedcfed62b1c78b8",
"size": "3281",
"binary": false,
"copies": "4",
"ref": "refs/heads/stable-3.1",
"path": "netforce_hr/netforce_hr/models/hr_notification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "73"
},
{
"name": "CSS",
"bytes": "407336"
},
{
"name": "Groff",
"bytes": "15858"
},
{
"name": "HTML",
"bytes": "477928"
},
{
"name": "Java",
"bytes": "11870"
},
{
"name": "JavaScript",
"bytes": "3711952"
},
{
"name": "Makefile",
"bytes": "353"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3455528"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
} |
"""
WSGI config for maestor project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "maestor.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "d2fbfbe3d21d6b514c8ce684af1494dc",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.785714285714285,
"alnum_prop": 0.7737789203084833,
"repo_name": "amschaal/maestor",
"id": "60a746962d4c3b2fb6ff201e8e7419ae75ebb03b",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maestor/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22920"
},
{
"name": "JavaScript",
"bytes": "4085"
},
{
"name": "Python",
"bytes": "47992"
}
],
"symlink_target": ""
} |
"""
Bog standard Sheepdog example script.
Defines a simple function, a short list of arguments to run it with,
then submits it to a GridEngine cluster and waits for the results.
"""
import sheepdog
def f(a, b):
return a + b
args = [(1, 1), (1, 2), (2, 2)]
print("Running f(a,b) for arguments:")
print(args)
config = {
"host": "fear",
}
results = sheepdog.map(f, args, config)
print("\nReceived results:")
print(results)
| {
"content_hash": "550d9647bfa9eade0c19c2b7f9bd767c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 68,
"avg_line_length": 17.44,
"alnum_prop": 0.6651376146788991,
"repo_name": "adamgreig/sheepdog",
"id": "72de561334c75ea8fc7d84bffd4a63d83c8d449e",
"size": "436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/simple_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60100"
}
],
"symlink_target": ""
} |
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.payer == request.user
class AnonymousCanCreate(permissions.BasePermission):
def has_permission(self, request, view):
return view.action == 'create'
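# Illustrative usage sketch (not part of the original module): permissions are
# attached to a DRF view through `permission_classes`. The view below is a
# hypothetical stand-in; queryset and serializer_class are omitted on purpose.
from rest_framework import generics

class ExampleOwnerOnlyDetailView(generics.RetrieveUpdateAPIView):
    permission_classes = [IsOwnerOrReadOnly]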
| {
"content_hash": "0757a6e2867778187f480ec859b1bae4",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 56,
"avg_line_length": 25.875,
"alnum_prop": 0.714975845410628,
"repo_name": "linkleonard/braintree-tutorial",
"id": "15ac7c48ae3894e5dcdbfb89db8dd84e8bfb3aaa",
"size": "414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myapp/mysite/permissions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22913"
},
{
"name": "HTML",
"bytes": "2756"
},
{
"name": "Python",
"bytes": "9865"
}
],
"symlink_target": ""
} |
"""Unit tests."""
import mock
import pytest
from google.rpc import status_pb2
from google.cloud import automl_v1beta1
from google.cloud.automl_v1beta1.proto import annotation_spec_pb2
from google.cloud.automl_v1beta1.proto import column_spec_pb2
from google.cloud.automl_v1beta1.proto import dataset_pb2
from google.cloud.automl_v1beta1.proto import io_pb2
from google.cloud.automl_v1beta1.proto import model_evaluation_pb2
from google.cloud.automl_v1beta1.proto import model_pb2
from google.cloud.automl_v1beta1.proto import service_pb2
from google.cloud.automl_v1beta1.proto import table_spec_pb2
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestAutoMlClient(object):
def test_create_dataset(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
description = "description-1724546052"
example_count = 1517063674
etag = "etag3123477"
expected_response = {
"name": name,
"display_name": display_name,
"description": description,
"example_count": example_count,
"etag": etag,
}
expected_response = dataset_pb2.Dataset(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
dataset = {}
response = client.create_dataset(parent, dataset)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.CreateDatasetRequest(
parent=parent, dataset=dataset
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_dataset_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup request
parent = client.location_path("[PROJECT]", "[LOCATION]")
dataset = {}
with pytest.raises(CustomException):
client.create_dataset(parent, dataset)
def test_update_dataset(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
description = "description-1724546052"
example_count = 1517063674
etag = "etag3123477"
expected_response = {
"name": name,
"display_name": display_name,
"description": description,
"example_count": example_count,
"etag": etag,
}
expected_response = dataset_pb2.Dataset(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
dataset = {}
response = client.update_dataset(dataset)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.UpdateDatasetRequest(dataset=dataset)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_dataset_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup request
dataset = {}
with pytest.raises(CustomException):
client.update_dataset(dataset)
def test_get_dataset(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
description = "description-1724546052"
example_count = 1517063674
etag = "etag3123477"
expected_response = {
"name": name_2,
"display_name": display_name,
"description": description,
"example_count": example_count,
"etag": etag,
}
expected_response = dataset_pb2.Dataset(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
response = client.get_dataset(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.GetDatasetRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_dataset_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
with pytest.raises(CustomException):
client.get_dataset(name)
def test_list_datasets(self):
# Setup Expected Response
next_page_token = ""
datasets_element = {}
datasets = [datasets_element]
expected_response = {"next_page_token": next_page_token, "datasets": datasets}
expected_response = service_pb2.ListDatasetsResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
paged_list_response = client.list_datasets(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.datasets[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_pb2.ListDatasetsRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_datasets_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup request
parent = client.location_path("[PROJECT]", "[LOCATION]")
paged_list_response = client.list_datasets(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_delete_dataset(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_delete_dataset", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
response = client.delete_dataset(name)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = service_pb2.DeleteDatasetRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_dataset_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_delete_dataset_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
response = client.delete_dataset(name)
exception = response.exception()
assert exception.errors[0] == error
def test_import_data(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_import_data", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
input_config = {}
response = client.import_data(name, input_config)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = service_pb2.ImportDataRequest(
name=name, input_config=input_config
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_import_data_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_import_data_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
input_config = {}
response = client.import_data(name, input_config)
exception = response.exception()
assert exception.errors[0] == error
def test_export_data(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_export_data", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
output_config = {}
response = client.export_data(name, output_config)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = service_pb2.ExportDataRequest(
name=name, output_config=output_config
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_export_data_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_export_data_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
output_config = {}
response = client.export_data(name, output_config)
exception = response.exception()
assert exception.errors[0] == error
def test_create_model(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
dataset_id = "datasetId-2115646910"
expected_response = {
"name": name,
"display_name": display_name,
"dataset_id": dataset_id,
}
expected_response = model_pb2.Model(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_create_model", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
model = {}
response = client.create_model(parent, model)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = service_pb2.CreateModelRequest(parent=parent, model=model)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_model_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_create_model_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
model = {}
response = client.create_model(parent, model)
exception = response.exception()
assert exception.errors[0] == error
def test_get_model(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
dataset_id = "datasetId-2115646910"
expected_response = {
"name": name_2,
"display_name": display_name,
"dataset_id": dataset_id,
}
expected_response = model_pb2.Model(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
response = client.get_model(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.GetModelRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_model_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup request
name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
with pytest.raises(CustomException):
client.get_model(name)
def test_list_models(self):
# Setup Expected Response
next_page_token = ""
model_element = {}
model = [model_element]
expected_response = {"next_page_token": next_page_token, "model": model}
expected_response = service_pb2.ListModelsResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
paged_list_response = client.list_models(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.model[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_pb2.ListModelsRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_models_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup request
parent = client.location_path("[PROJECT]", "[LOCATION]")
paged_list_response = client.list_models(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_delete_model(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_delete_model", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
response = client.delete_model(name)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = service_pb2.DeleteModelRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_model_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_delete_model_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
response = client.delete_model(name)
exception = response.exception()
assert exception.errors[0] == error
def test_deploy_model(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_deploy_model", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
response = client.deploy_model(name)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = service_pb2.DeployModelRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_deploy_model_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_deploy_model_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
response = client.deploy_model(name)
exception = response.exception()
assert exception.errors[0] == error
def test_undeploy_model(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_undeploy_model", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
response = client.undeploy_model(name)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = service_pb2.UndeployModelRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_undeploy_model_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_undeploy_model_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
response = client.undeploy_model(name)
exception = response.exception()
assert exception.errors[0] == error
def test_get_model_evaluation(self):
# Setup Expected Response
name_2 = "name2-1052831874"
annotation_spec_id = "annotationSpecId60690191"
display_name = "displayName1615086568"
evaluated_example_count = 277565350
expected_response = {
"name": name_2,
"annotation_spec_id": annotation_spec_id,
"display_name": display_name,
"evaluated_example_count": evaluated_example_count,
}
expected_response = model_evaluation_pb2.ModelEvaluation(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.model_evaluation_path(
"[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]"
)
response = client.get_model_evaluation(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.GetModelEvaluationRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_model_evaluation_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup request
name = client.model_evaluation_path(
"[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]"
)
with pytest.raises(CustomException):
client.get_model_evaluation(name)
def test_export_model(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_export_model", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
output_config = {}
response = client.export_model(name, output_config)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = service_pb2.ExportModelRequest(
name=name, output_config=output_config
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_export_model_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_export_model_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
output_config = {}
response = client.export_model(name, output_config)
exception = response.exception()
assert exception.errors[0] == error
def test_export_evaluated_examples(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_export_evaluated_examples", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
output_config = {}
response = client.export_evaluated_examples(name, output_config)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = service_pb2.ExportEvaluatedExamplesRequest(
name=name, output_config=output_config
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_export_evaluated_examples_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_export_evaluated_examples_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
output_config = {}
response = client.export_evaluated_examples(name, output_config)
exception = response.exception()
assert exception.errors[0] == error
def test_list_model_evaluations(self):
# Setup Expected Response
next_page_token = ""
model_evaluation_element = {}
model_evaluation = [model_evaluation_element]
expected_response = {
"next_page_token": next_page_token,
"model_evaluation": model_evaluation,
}
expected_response = service_pb2.ListModelEvaluationsResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
paged_list_response = client.list_model_evaluations(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.model_evaluation[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_pb2.ListModelEvaluationsRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_model_evaluations_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup request
parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
paged_list_response = client.list_model_evaluations(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_annotation_spec(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
example_count = 1517063674
expected_response = {
"name": name_2,
"display_name": display_name,
"example_count": example_count,
}
expected_response = annotation_spec_pb2.AnnotationSpec(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.annotation_spec_path(
"[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]"
)
response = client.get_annotation_spec(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.GetAnnotationSpecRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_annotation_spec_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup request
name = client.annotation_spec_path(
"[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]"
)
with pytest.raises(CustomException):
client.get_annotation_spec(name)
def test_get_table_spec(self):
# Setup Expected Response
name_2 = "name2-1052831874"
time_column_spec_id = "timeColumnSpecId1558734824"
row_count = 1340416618
valid_row_count = 406068761
column_count = 122671386
etag = "etag3123477"
expected_response = {
"name": name_2,
"time_column_spec_id": time_column_spec_id,
"row_count": row_count,
"valid_row_count": valid_row_count,
"column_count": column_count,
"etag": etag,
}
expected_response = table_spec_pb2.TableSpec(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.table_spec_path(
"[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]"
)
response = client.get_table_spec(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.GetTableSpecRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_table_spec_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup request
name = client.table_spec_path(
"[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]"
)
with pytest.raises(CustomException):
client.get_table_spec(name)
def test_list_table_specs(self):
# Setup Expected Response
next_page_token = ""
table_specs_element = {}
table_specs = [table_specs_element]
expected_response = {
"next_page_token": next_page_token,
"table_specs": table_specs,
}
expected_response = service_pb2.ListTableSpecsResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
parent = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
paged_list_response = client.list_table_specs(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.table_specs[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_pb2.ListTableSpecsRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_table_specs_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup request
parent = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
paged_list_response = client.list_table_specs(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_update_table_spec(self):
# Setup Expected Response
name = "name3373707"
time_column_spec_id = "timeColumnSpecId1558734824"
row_count = 1340416618
valid_row_count = 406068761
column_count = 122671386
etag = "etag3123477"
expected_response = {
"name": name,
"time_column_spec_id": time_column_spec_id,
"row_count": row_count,
"valid_row_count": valid_row_count,
"column_count": column_count,
"etag": etag,
}
expected_response = table_spec_pb2.TableSpec(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
table_spec = {}
response = client.update_table_spec(table_spec)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.UpdateTableSpecRequest(table_spec=table_spec)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_table_spec_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup request
table_spec = {}
with pytest.raises(CustomException):
client.update_table_spec(table_spec)
def test_get_column_spec(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
etag = "etag3123477"
expected_response = {"name": name_2, "display_name": display_name, "etag": etag}
expected_response = column_spec_pb2.ColumnSpec(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
name = client.column_spec_path(
"[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]", "[COLUMN_SPEC]"
)
response = client.get_column_spec(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.GetColumnSpecRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_column_spec_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup request
name = client.column_spec_path(
"[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]", "[COLUMN_SPEC]"
)
with pytest.raises(CustomException):
client.get_column_spec(name)
def test_list_column_specs(self):
# Setup Expected Response
next_page_token = ""
column_specs_element = {}
column_specs = [column_specs_element]
expected_response = {
"next_page_token": next_page_token,
"column_specs": column_specs,
}
expected_response = service_pb2.ListColumnSpecsResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
parent = client.table_spec_path(
"[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]"
)
paged_list_response = client.list_column_specs(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.column_specs[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_pb2.ListColumnSpecsRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_column_specs_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup request
parent = client.table_spec_path(
"[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]"
)
paged_list_response = client.list_column_specs(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_update_column_spec(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
etag = "etag3123477"
expected_response = {"name": name, "display_name": display_name, "etag": etag}
expected_response = column_spec_pb2.ColumnSpec(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup Request
column_spec = {}
response = client.update_column_spec(column_spec)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.UpdateColumnSpecRequest(column_spec=column_spec)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_column_spec_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = automl_v1beta1.AutoMlClient()
# Setup request
column_spec = {}
with pytest.raises(CustomException):
client.update_column_spec(column_spec)
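# All of the tests above share one stubbing pattern: a ChannelStub preloaded with
# the expected protobuf responses replaces the real gRPC channel by patching
# google.api_core.grpc_helpers.create_channel. A minimal sketch of that pattern,
# using only names that already appear in the tests above:
#
#     channel = ChannelStub(responses=[expected_response])
#     patch = mock.patch("google.api_core.grpc_helpers.create_channel")
#     with patch as create_channel:
#         create_channel.return_value = channel
#         client = automl_v1beta1.AutoMlClient()
#         # call the method under test, then compare channel.requests[0][1]
#         # against the expected request proto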
| {
"content_hash": "9bb771f6ae298c2f12683e715610b0cc",
"timestamp": "",
"source": "github",
"line_count": 1239,
"max_line_length": 88,
"avg_line_length": 37.17756255044391,
"alnum_prop": 0.625621431517704,
"repo_name": "tseaver/google-cloud-python",
"id": "e3a5ce12f7cf70c378a7cbba447c785112add97f",
"size": "46665",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "automl/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "30519057"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
"""
MOSEK interface to solve QP problems
"""
from __future__ import print_function
from builtins import range
import numpy as np
import scipy.sparse as spa
from mathprogbasepy.quadprog.results import QuadprogResults
from mathprogbasepy.quadprog.solvers.solver import Solver
import mathprogbasepy.quadprog.problem as qp
import mosek
class MOSEK(Solver):
"""
An interface for the Mosek QP solver.
"""
# Map of Mosek status to mathprogbasepy status.
STATUS_MAP = {mosek.solsta.optimal: qp.OPTIMAL,
mosek.solsta.integer_optimal: qp.OPTIMAL,
mosek.solsta.prim_infeas_cer: qp.PRIMAL_INFEASIBLE,
mosek.solsta.dual_infeas_cer: qp.DUAL_INFEASIBLE,
mosek.solsta.near_optimal: qp.OPTIMAL_INACCURATE,
mosek.solsta.near_prim_infeas_cer: qp.PRIMAL_INFEASIBLE_INACCURATE,
mosek.solsta.near_dual_infeas_cer: qp.DUAL_INFEASIBLE_INACCURATE,
mosek.solsta.unknown: qp.SOLVER_ERROR}
def solve(self, p):
# Get problem dimensions
n = p.P.shape[0]
m = p.A.shape[0]
'''
Load problem
'''
# Create environment
env = mosek.Env()
# Create optimization task
task = env.Task()
if self.options['verbose']:
# Define a stream printer to grab output from MOSEK
def streamprinter(text):
import sys
sys.stdout.write(text)
sys.stdout.flush()
env.set_Stream(mosek.streamtype.log, streamprinter)
task.set_Stream(mosek.streamtype.log, streamprinter)
# Load problem into task object
# Append 'm' empty constraints.
# The constraints will initially have no bounds.
task.appendcons(m)
# Append 'n' variables.
# The variables will initially be fixed at zero (x=0).
task.appendvars(n)
# Add linear cost by iterating over all variables
for j in range(n):
task.putcj(j, p.q[j])
task.putvarbound(j, mosek.boundkey.fr, -np.inf, np.inf)
# Constrain integer variables if present
if p.i_idx is not None:
int_types = [mosek.variabletype.type_int] * len(p.i_idx)
int_idx = p.i_idx.tolist()
task.putvartypelist(int_idx, int_types)
for i in range(len(p.i_idx)):
if p.i_l is None and p.i_u is not None:
task.putvarbound(p.i_idx[i],
mosek.boundkey.up, 0, p.i_u[i])
elif p.i_l is not None and p.i_u is None:
task.putvarbound(p.i_idx[i],
mosek.boundkey.lo, p.i_l[i], 0)
elif p.i_l is not None and p.i_u is not None:
task.putvarbound(p.i_idx[i],
mosek.boundkey.ra, p.i_l[i], p.i_u[i])
# Add constraints
if p.A is not None:
row_A, col_A, el_A = spa.find(p.A)
task.putaijlist(row_A, col_A, el_A)
for j in range(m):
# Get bounds and keys
u_temp = p.u[j] if p.u[j] < 1e20 else np.inf
l_temp = p.l[j] if p.l[j] > -1e20 else -np.inf
                # Distinguish the 5 bound-key cases
if (np.abs(l_temp - u_temp) < 1e-08):
bound_key = mosek.boundkey.fx
elif l_temp == -np.inf and u_temp == np.inf:
bound_key = mosek.boundkey.fr
elif l_temp != -np.inf and u_temp == np.inf:
bound_key = mosek.boundkey.lo
elif l_temp != -np.inf and u_temp != np.inf:
bound_key = mosek.boundkey.ra
elif l_temp == -np.inf and u_temp != np.inf:
bound_key = mosek.boundkey.up
# Add bound
task.putconbound(j, bound_key, l_temp, u_temp)
# Add quadratic cost
if p.P.count_nonzero(): # If there are any nonzero elms in P
P = spa.tril(p.P, format='coo')
task.putqobj(P.row, P.col, P.data)
# Set problem minimization
task.putobjsense(mosek.objsense.minimize)
'''
Set parameters
'''
for param, value in self.options.items():
if param == 'verbose':
if value is False:
                    self._handle_str_param(task, 'MSK_IPAR_LOG', 0)
else:
if isinstance(param, str):
self._handle_str_param(task, param.strip(), value)
else:
self._handle_enum_param(task, param, value)
'''
Solve problem
'''
try:
# Optimization and check termination code
trmcode = task.optimize()
        except Exception:
if self.options['verbose']:
print("Error in MOSEK solution\n")
return QuadprogResults(qp.SOLVER_ERROR, None, None, None,
np.inf, None)
if self.options['verbose']:
task.solutionsummary(mosek.streamtype.msg)
'''
Extract results
'''
# Get solution type and status
soltype, solsta = self.choose_solution(task)
# Map status using statusmap
status = self.STATUS_MAP.get(solsta, qp.SOLVER_ERROR)
# Get statistics
cputime = task.getdouinf(mosek.dinfitem.optimizer_time) + \
task.getdouinf(mosek.dinfitem.presolve_time)
total_iter = task.getintinf(mosek.iinfitem.intpnt_iter)
if status in qp.SOLUTION_PRESENT:
# get primal variables values
sol = np.zeros(task.getnumvar())
task.getxx(soltype, sol)
# get obj value
objval = task.getprimalobj(soltype)
# get dual
if p.i_idx is None:
dual = np.zeros(task.getnumcon())
task.gety(soltype, dual)
# it appears signs are inverted
dual = -dual
else:
dual = None
return QuadprogResults(status, objval, sol, dual,
cputime, total_iter)
else:
return QuadprogResults(status, None, None, None,
cputime, None)
def choose_solution(self, task):
"""Chooses between the basic, interior point solution or integer solution
Parameters
N.B. From CVXPY
----------
task : mosek.Task
The solver status interface.
Returns
-------
soltype
The preferred solution (mosek.soltype.*)
solsta
The status of the preferred solution (mosek.solsta.*)
"""
import mosek
def rank(status):
# Rank solutions
# optimal > near_optimal > anything else > None
if status == mosek.solsta.optimal:
return 3
elif status == mosek.solsta.near_optimal:
return 2
elif status is not None:
return 1
else:
return 0
solsta_bas, solsta_itr = None, None
# Integer solution
if task.solutiondef(mosek.soltype.itg):
solsta_itg = task.getsolsta(mosek.soltype.itg)
return mosek.soltype.itg, solsta_itg
# Continuous solution
if task.solutiondef(mosek.soltype.bas):
solsta_bas = task.getsolsta(mosek.soltype.bas)
if task.solutiondef(mosek.soltype.itr):
solsta_itr = task.getsolsta(mosek.soltype.itr)
# As long as interior solution is not worse, take it
# (for backward compatibility)
if rank(solsta_itr) >= rank(solsta_bas):
return mosek.soltype.itr, solsta_itr
else:
return mosek.soltype.bas, solsta_bas
@staticmethod
def _handle_str_param(task, param, value):
if param.startswith("MSK_DPAR_"):
task.putnadouparam(param, value)
elif param.startswith("MSK_IPAR_"):
task.putnaintparam(param, value)
elif param.startswith("MSK_SPAR_"):
task.putnastrparam(param, value)
else:
raise ValueError("Invalid MOSEK parameter '%s'." % param)
@staticmethod
def _handle_enum_param(task, param, value):
if isinstance(param, mosek.dparam):
task.putdouparam(param, value)
elif isinstance(param, mosek.iparam):
task.putintparam(param, value)
elif isinstance(param, mosek.sparam):
task.putstrparam(param, value)
else:
raise ValueError("Invalid MOSEK parameter '%s'." % param)
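# A minimal usage sketch for the wrapper above. Only the attributes read by
# solve() (P, q, A, l, u, i_idx) are taken from this file; the problem container
# and the Solver base-class constructor are assumptions and live elsewhere in
# mathprogbasepy.
#
#     import numpy as np
#     import scipy.sparse as spa
#     from mathprogbasepy import QuadprogProblem   # assumed import path
#
#     P = spa.csc_matrix(np.diag([2.0, 2.0]))      # minimize 1/2 x'Px + q'x
#     q = np.array([1.0, 1.0])
#     A = spa.csc_matrix([[1.0, 1.0]])             # subject to l <= Ax <= u
#     l = np.array([1.0])
#     u = np.array([1.0])
#
#     prob = QuadprogProblem(P, q, A, l, u)        # assumed constructor
#     res = MOSEK(verbose=False).solve(prob)       # options handling assumed from Solver
#     # res wraps (status, objval, sol, dual, cputime, total_iter) as built above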
| {
"content_hash": "05e3fdfa59ed4ffb1a433ca0b403ae22",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 85,
"avg_line_length": 34.724409448818896,
"alnum_prop": 0.535374149659864,
"repo_name": "bstellato/mathprogbasepy",
"id": "1278ccd238653d0e65b8ba7bdc6f747ce3321186",
"size": "8820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mathprogbasepy/quadprog/solvers/mosek_qpif.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "41067"
}
],
"symlink_target": ""
} |
import sys
from os import path as osp
this = osp.splitext(osp.basename(__file__))[0]
BASE_DIR = osp.dirname(__file__)
from django.conf import settings
SETTINGS = dict(
DATABASES={'default': {'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db'}},
DEBUG=True,
TEMPLATE_DEBUG=True,
ROOT_URLCONF=this,
INSTALLED_APPS=(
'django.contrib.auth', 'django.contrib.contenttypes',
'django.contrib.sessions', 'annoying', 'annoying.tests'
),
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
osp.join(BASE_DIR, 'annoying', 'tests', 'templates')
]
}
]
)
# Check the Django version: require at least 1.6 and adapt the template settings for versions before 1.7.
import django
from distutils.version import LooseVersion
django_version = django.get_version()
if LooseVersion(django_version) < LooseVersion('1.6'):
raise ValueError("Django-annoying requires Django 1.6 or later.")
if LooseVersion(django_version) < LooseVersion('1.7'):
del SETTINGS['TEMPLATES']
SETTINGS['TEMPLATE_DIRS'] = [
osp.join(BASE_DIR, 'annoying', 'tests', 'templates')
]
if not settings.configured:
settings.configure(**SETTINGS)
try:
from django.conf.urls import patterns
except ImportError:
    # patterns() was removed in newer Django versions; emulate the old API.
patterns = lambda *x: list(x[1:])
urlpatterns = patterns('', )
if __name__ == '__main__':
try:
# Override Apps module to work with us
from django.apps.registry import Apps
get_containing_app_config_orig = Apps.get_containing_app_config
def get_containing_app_config(Apps_object, *args, **kwargs):
Apps_object.apps_ready = True
return get_containing_app_config_orig(Apps_object, *args, **kwargs)
Apps.get_containing_app_config = get_containing_app_config
except ImportError:
# override get_app to work with us for Django 1.6.
from django.db import models
get_app_orig = models.get_app
def get_app(app_label, *a, **kw):
if app_label == this:
return sys.modules[__name__]
return get_app_orig(app_label, *a, **kw)
models.get_app = get_app
from django.core import management
management.execute_from_command_line(["test.py", "test", "annoying.tests"])
| {
"content_hash": "4dd90cd9e0a7bf10cf13487cbc3b576b",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 30.883116883116884,
"alnum_prop": 0.6269974768713205,
"repo_name": "YPCrumble/django-annoying",
"id": "70969f184def76444c05e4df08b9f6890712f6e7",
"size": "2400",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "32296"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import patterns, url
from .views import IndexView
from .views import UploadView
from .views import LaunchHeatView
from .views import DetailView
urlpatterns = patterns('',
url(r'^$', IndexView.as_view(), name='index'),
url(r'^launch/$', LaunchHeatView.as_view(), name='launch'),
url(r'^upload/$', UploadView.as_view(), name='upload'),
url(r'^stack/(?P<stack_id>[^/]+)/$',
DetailView.as_view(), name='detail'),
)
| {
"content_hash": "7b6dbe3c8168f4dce5959f50c02585e1",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 63,
"avg_line_length": 34.42857142857143,
"alnum_prop": 0.6473029045643154,
"repo_name": "hanney/heat-horizon",
"id": "8455b179199023ae6af6e57ed997b623e912920a",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thermal/stacks/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "41"
},
{
"name": "Python",
"bytes": "41808"
}
],
"symlink_target": ""
} |
'''
:codeauthor: :email:`Bo Maryniuk <[email protected]>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
Mock,
MagicMock,
call,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salt.exceptions import CommandExecutionError
import os
from salt.ext.six.moves import configparser
import StringIO
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
class ZyppCallMock(object):
def __init__(self, return_value=None):
self.__return_value = return_value
def __getattr__(self, item):
return self
def __call__(self, *args, **kwargs):
return MagicMock(return_value=self.__return_value)()
def get_test_data(filename):
'''
Return static test data
'''
return open(os.path.join(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zypp'), filename)).read()
# Import Salt Libs
from salt.modules import zypper
# Globals
zypper.__salt__ = dict()
zypper.__context__ = dict()
zypper.rpm = None
@skipIf(NO_MOCK, NO_MOCK_REASON)
class ZypperTestCase(TestCase):
'''
Test cases for salt.modules.zypper
'''
def setUp(self):
self.new_repo_config = dict(
name='mock-repo-name',
url='http://repo.url/some/path'
)
side_effect = [
Mock(**{'sections.return_value': []}),
Mock(**{'sections.return_value': [self.new_repo_config['name']]})
]
self.zypper_patcher_config = {
'_get_configured_repos': Mock(side_effect=side_effect),
'__zypper__': Mock(),
'get_repo': Mock()
}
def test_list_upgrades(self):
'''
List package upgrades
:return:
'''
ref_out = {
'stdout': get_test_data('zypper-updates.xml'),
'stderr': None,
'retcode': 0
}
with patch.dict(zypper.__salt__, {'cmd.run_all': MagicMock(return_value=ref_out)}):
upgrades = zypper.list_upgrades(refresh=False)
self.assertEqual(len(upgrades), 3)
for pkg, version in {'SUSEConnect': '0.2.33-7.1',
'bind-utils': '9.9.6P1-35.1',
'bind-libs': '9.9.6P1-35.1'}.items():
self.assertIn(pkg, upgrades)
self.assertEqual(upgrades[pkg], version)
def test_zypper_caller(self):
'''
Test Zypper caller.
:return:
'''
class RunSniffer(object):
def __init__(self, stdout=None, stderr=None, retcode=None):
self.calls = list()
self._stdout = stdout or ''
self._stderr = stderr or ''
self._retcode = retcode or 0
def __call__(self, *args, **kwargs):
self.calls.append({'args': args, 'kwargs': kwargs})
return {'stdout': self._stdout,
'stderr': self._stderr,
'retcode': self._retcode}
stdout_xml_snippet = '<?xml version="1.0"?><test foo="bar"/>'
sniffer = RunSniffer(stdout=stdout_xml_snippet)
with patch.dict('salt.modules.zypper.__salt__', {'cmd.run_all': sniffer}):
self.assertEqual(zypper.__zypper__.call('foo'), stdout_xml_snippet)
self.assertEqual(len(sniffer.calls), 1)
zypper.__zypper__.call('bar')
self.assertEqual(len(sniffer.calls), 2)
self.assertEqual(sniffer.calls[0]['args'][0], ['zypper', '--non-interactive', '--no-refresh', 'foo'])
self.assertEqual(sniffer.calls[1]['args'][0], ['zypper', '--non-interactive', '--no-refresh', 'bar'])
dom = zypper.__zypper__.xml.call('xml-test')
self.assertEqual(sniffer.calls[2]['args'][0], ['zypper', '--non-interactive', '--xmlout',
'--no-refresh', 'xml-test'])
self.assertEqual(dom.getElementsByTagName('test')[0].getAttribute('foo'), 'bar')
zypper.__zypper__.refreshable.call('refresh-test')
self.assertEqual(sniffer.calls[3]['args'][0], ['zypper', '--non-interactive', 'refresh-test'])
zypper.__zypper__.nolock.call('no-locking-test')
self.assertEqual(sniffer.calls[4].get('kwargs', {}).get('env', {}).get('ZYPP_READONLY_HACK'), "1")
self.assertEqual(sniffer.calls[4].get('kwargs', {}).get('env', {}).get('SALT_RUNNING'), "1")
zypper.__zypper__.call('locking-test')
self.assertEqual(sniffer.calls[5].get('kwargs', {}).get('env', {}).get('ZYPP_READONLY_HACK'), None)
self.assertEqual(sniffer.calls[5].get('kwargs', {}).get('env', {}).get('SALT_RUNNING'), "1")
# Test exceptions
stdout_xml_snippet = '<?xml version="1.0"?><stream><message type="error">Booya!</message></stream>'
sniffer = RunSniffer(stdout=stdout_xml_snippet, retcode=1)
with patch.dict('salt.modules.zypper.__salt__', {'cmd.run_all': sniffer}):
with self.assertRaisesRegexp(CommandExecutionError, '^Zypper command failure: Booya!$'):
zypper.__zypper__.xml.call('crashme')
with self.assertRaisesRegexp(CommandExecutionError, "^Zypper command failure: Check Zypper's logs.$"):
zypper.__zypper__.call('crashme again')
zypper.__zypper__.noraise.call('stay quiet')
self.assertEqual(zypper.__zypper__.error_msg, "Check Zypper's logs.")
def test_list_upgrades_error_handling(self):
'''
        Test error handling when listing package upgrades.
:return:
'''
# Test handled errors
ref_out = {
'stdout': '''<?xml version='1.0'?>
<stream>
<message type="info">Refreshing service 'container-suseconnect'.</message>
<message type="error">Some handled zypper internal error</message>
<message type="error">Another zypper internal error</message>
</stream>
''',
'stderr': '',
'retcode': 1,
}
with patch.dict('salt.modules.zypper.__salt__', {'cmd.run_all': MagicMock(return_value=ref_out)}):
with self.assertRaisesRegexp(CommandExecutionError,
"^Zypper command failure: Some handled zypper internal error\nAnother zypper internal error$"):
zypper.list_upgrades(refresh=False)
# Test unhandled error
ref_out = {
'retcode': 1,
'stdout': '',
'stderr': ''
}
with patch.dict('salt.modules.zypper.__salt__', {'cmd.run_all': MagicMock(return_value=ref_out)}):
with self.assertRaisesRegexp(CommandExecutionError, "^Zypper command failure: Check Zypper's logs.$"):
zypper.list_upgrades(refresh=False)
def test_list_products(self):
'''
List products test.
'''
for filename, test_data in {
'zypper-products-sle12sp1.xml': {
'name': ['SLES', 'SLES', 'SUSE-Manager-Proxy',
'SUSE-Manager-Server', 'sle-manager-tools-beta',
'sle-manager-tools-beta-broken-eol', 'sle-manager-tools-beta-no-eol'],
'vendor': 'SUSE LLC <https://www.suse.com/>',
'release': ['0', '0', '0', '0', '0', '0', '0'],
'productline': [None, None, None, None, None, None, 'sles'],
'eol_t': [None, 0, 1509408000, 1522454400, 1522454400, 1730332800, 1730332800],
'isbase': [False, False, False, False, False, False, True],
'installed': [False, False, False, False, False, False, True],
'registerrelease': [None, None, None, None, None, None, '123'],
},
'zypper-products-sle11sp3.xml': {
'name': ['SUSE-Manager-Server', 'SUSE-Manager-Server', 'SUSE-Manager-Server-Broken-EOL',
'SUSE_SLES', 'SUSE_SLES', 'SUSE_SLES', 'SUSE_SLES-SP4-migration'],
'vendor': 'SUSE LINUX Products GmbH, Nuernberg, Germany',
'release': ['1.138', '1.2', '1.2', '1.2', '1.201', '1.201', '1.4'],
'productline': [None, None, None, None, None, 'manager', 'manager'],
'eol_t': [None, 0, 0, 0, 0, 0, 0],
'isbase': [False, False, False, False, False, True, True],
'installed': [False, False, False, False, False, True, True],
'registerrelease': [None, None, None, None, None, None, "42"],
}}.items():
ref_out = {
'retcode': 0,
'stdout': get_test_data(filename)
}
with patch.dict(zypper.__salt__, {'cmd.run_all': MagicMock(return_value=ref_out)}):
products = zypper.list_products()
self.assertEqual(len(products), 7)
self.assertIn(test_data['vendor'], [product['vendor'] for product in products])
for kwd in ['name', 'isbase', 'installed', 'release', 'productline', 'eol_t', 'registerrelease']:
self.assertEqual(test_data[kwd], sorted([prod.get(kwd) for prod in products]))
def test_refresh_db(self):
'''
        Test if the DB refresh is handled correctly
'''
ref_out = [
"Repository 'openSUSE-Leap-42.1-LATEST' is up to date.",
"Repository 'openSUSE-Leap-42.1-Update' is up to date.",
"Retrieving repository 'openSUSE-Leap-42.1-Update-Non-Oss' metadata",
"Forcing building of repository cache",
"Building repository 'openSUSE-Leap-42.1-Update-Non-Oss' cache ..........[done]",
"Building repository 'salt-dev' cache",
"All repositories have been refreshed."
]
run_out = {
'stderr': '', 'stdout': '\n'.join(ref_out), 'retcode': 0
}
with patch.dict(zypper.__salt__, {'cmd.run_all': MagicMock(return_value=run_out)}):
result = zypper.refresh_db()
self.assertEqual(result.get("openSUSE-Leap-42.1-LATEST"), False)
self.assertEqual(result.get("openSUSE-Leap-42.1-Update"), False)
self.assertEqual(result.get("openSUSE-Leap-42.1-Update-Non-Oss"), True)
def test_info_installed(self):
'''
        Test the returned information for the named package(s) installed on the system.
:return:
'''
run_out = {
'virgo-dummy':
{'build_date': '2015-07-09T10:55:19Z',
'vendor': 'openSUSE Build Service',
'description': 'This is the Virgo dummy package used for testing SUSE Manager',
'license': 'GPL-2.0', 'build_host': 'sheep05', 'url': 'http://www.suse.com',
'build_date_time_t': 1436432119, 'relocations': '(not relocatable)',
'source_rpm': 'virgo-dummy-1.0-1.1.src.rpm', 'install_date': '2016-02-23T16:31:57Z',
'install_date_time_t': 1456241517, 'summary': 'Virgo dummy package', 'version': '1.0',
'signature': 'DSA/SHA1, Thu Jul 9 08:55:33 2015, Key ID 27fa41bd8a7c64f9',
'release': '1.1', 'group': 'Applications/System', 'arch': 'noarch', 'size': '17992'},
'libopenssl1_0_0':
{'build_date': '2015-11-04T23:20:34Z', 'vendor': 'SUSE LLC <https://www.suse.com/>',
'description': 'The OpenSSL Project is a collaborative effort.',
'license': 'OpenSSL', 'build_host': 'sheep11', 'url': 'https://www.openssl.org/',
'build_date_time_t': 1446675634, 'relocations': '(not relocatable)',
'source_rpm': 'openssl-1.0.1i-34.1.src.rpm', 'install_date': '2016-02-23T16:31:35Z',
'install_date_time_t': 1456241495, 'summary': 'Secure Sockets and Transport Layer Security',
'version': '1.0.1i', 'signature': 'RSA/SHA256, Wed Nov 4 22:21:34 2015, Key ID 70af9e8139db7c82',
'release': '34.1', 'group': 'Productivity/Networking/Security', 'packager': 'https://www.suse.com/',
'arch': 'x86_64', 'size': '2576912'},
}
with patch.dict(zypper.__salt__, {'lowpkg.info': MagicMock(return_value=run_out)}):
installed = zypper.info_installed()
# Test overall products length
self.assertEqual(len(installed), 2)
# Test translated fields
for pkg_name, pkg_info in installed.items():
self.assertEqual(installed[pkg_name].get('source'), run_out[pkg_name]['source_rpm'])
# Test keys transition from the lowpkg.info
for pn_key, pn_val in run_out['virgo-dummy'].items():
if pn_key == 'source_rpm':
continue
self.assertEqual(installed['virgo-dummy'][pn_key], pn_val)
def test_info_available(self):
'''
        Test returning the information of the named package available for the system.
:return:
'''
test_pkgs = ['vim', 'emacs', 'python']
with patch('salt.modules.zypper.__zypper__', ZyppCallMock(return_value=get_test_data('zypper-available.txt'))):
available = zypper.info_available(*test_pkgs, refresh=False)
self.assertEqual(len(available), 3)
for pkg_name, pkg_info in available.items():
self.assertIn(pkg_name, test_pkgs)
self.assertEqual(available['emacs']['status'], 'up-to-date')
self.assertTrue(available['emacs']['installed'])
self.assertEqual(available['emacs']['support level'], 'Level 3')
self.assertEqual(available['emacs']['vendor'], 'SUSE LLC <https://www.suse.com/>')
self.assertEqual(available['emacs']['summary'], 'GNU Emacs Base Package')
self.assertEqual(available['vim']['status'], 'not installed')
self.assertFalse(available['vim']['installed'])
self.assertEqual(available['vim']['support level'], 'Level 3')
self.assertEqual(available['vim']['vendor'], 'SUSE LLC <https://www.suse.com/>')
self.assertEqual(available['vim']['summary'], 'Vi IMproved')
@patch('salt.modules.zypper.refresh_db', MagicMock(return_value=True))
def test_latest_version(self):
'''
Test the latest version of the named package available for upgrade or installation.
:return:
'''
with patch('salt.modules.zypper.__zypper__', ZyppCallMock(return_value=get_test_data('zypper-available.txt'))):
self.assertEqual(zypper.latest_version('vim'), '7.4.326-2.62')
@patch('salt.modules.zypper.refresh_db', MagicMock(return_value=True))
def test_upgrade_available(self):
'''
Test whether or not an upgrade is available for a given package.
:return:
'''
ref_out = get_test_data('zypper-available.txt')
with patch('salt.modules.zypper.__zypper__', ZyppCallMock(return_value=get_test_data('zypper-available.txt'))):
for pkg_name in ['emacs', 'python']:
self.assertFalse(zypper.upgrade_available(pkg_name))
self.assertTrue(zypper.upgrade_available('vim'))
def test_list_pkgs(self):
'''
Test packages listing.
:return:
'''
def _add_data(data, key, value):
data[key] = value
rpm_out = [
'protobuf-java_|-2.6.1_|-3.1.develHead_|-',
'yast2-ftp-server_|-3.1.8_|-8.1_|-',
'jose4j_|-0.4.4_|-2.1.develHead_|-',
'apache-commons-cli_|-1.2_|-1.233_|-',
'jakarta-commons-discovery_|-0.4_|-129.686_|-',
'susemanager-build-keys-web_|-12.0_|-5.1.develHead_|-',
]
with patch.dict(zypper.__salt__, {'cmd.run': MagicMock(return_value=os.linesep.join(rpm_out))}):
with patch.dict(zypper.__salt__, {'pkg_resource.add_pkg': _add_data}):
with patch.dict(zypper.__salt__, {'pkg_resource.sort_pkglist': MagicMock()}):
with patch.dict(zypper.__salt__, {'pkg_resource.stringify': MagicMock()}):
pkgs = zypper.list_pkgs()
for pkg_name, pkg_version in {
'jakarta-commons-discovery': '0.4-129.686',
'yast2-ftp-server': '3.1.8-8.1',
'protobuf-java': '2.6.1-3.1.develHead',
'susemanager-build-keys-web': '12.0-5.1.develHead',
'apache-commons-cli': '1.2-1.233',
'jose4j': '0.4.4-2.1.develHead'}.items():
self.assertTrue(pkgs.get(pkg_name))
self.assertEqual(pkgs[pkg_name], pkg_version)
def test_download(self):
'''
Test package download
:return:
'''
download_out = {
'stdout': get_test_data('zypper-download.xml'),
'stderr': None,
'retcode': 0
}
test_out = {
'nmap': {
'repository-alias': u'SLE-12-x86_64-Pool',
'repository-name': u'SLE-12-x86_64-Pool'
}
}
with patch.dict(zypper.__salt__, {'cmd.run_all': MagicMock(return_value=download_out)}):
with patch.dict(zypper.__salt__, {'lowpkg.checksum': MagicMock(return_value=True)}):
self.assertEqual(zypper.download("nmap"), test_out)
test_out['_error'] = "The following package(s) failed to download: foo"
self.assertEqual(zypper.download("nmap", "foo"), test_out)
def test_remove_purge(self):
'''
Test package removal
:return:
'''
class ListPackages(object):
def __init__(self):
self._packages = ['vim', 'pico']
self._pkgs = {
'vim': '0.18.0',
'emacs': '24.0.1',
'pico': '0.1.1',
}
def __call__(self):
pkgs = self._pkgs.copy()
for target in self._packages:
if self._pkgs.get(target):
del self._pkgs[target]
return pkgs
parsed_targets = [{'vim': None, 'pico': None}, None]
cmd_out = {
'retcode': 0,
'stdout': '',
'stderr': ''
}
with patch.dict(zypper.__salt__, {'cmd.run_all': MagicMock(return_value=cmd_out)}):
with patch.dict(zypper.__salt__, {'pkg_resource.parse_targets': MagicMock(return_value=parsed_targets)}):
with patch.dict(zypper.__salt__, {'pkg_resource.stringify': MagicMock()}):
with patch('salt.modules.zypper.list_pkgs', ListPackages()):
diff = zypper.remove(name='vim,pico')
for pkg_name in ['vim', 'pico']:
self.assertTrue(diff.get(pkg_name))
self.assertTrue(diff[pkg_name]['old'])
self.assertFalse(diff[pkg_name]['new'])
def test_repo_value_info(self):
'''
Tests if repo info is properly parsed.
:return:
'''
repos_cfg = configparser.ConfigParser()
for cfg in ['zypper-repo-1.cfg', 'zypper-repo-2.cfg']:
repos_cfg.readfp(StringIO.StringIO(get_test_data(cfg)))
for alias in repos_cfg.sections():
r_info = zypper._get_repo_info(alias, repos_cfg=repos_cfg)
self.assertEqual(type(r_info['type']), type(None))
self.assertEqual(type(r_info['enabled']), bool)
self.assertEqual(type(r_info['autorefresh']), bool)
self.assertEqual(type(r_info['baseurl']), str)
self.assertEqual(r_info['type'], None)
self.assertEqual(r_info['enabled'], alias == 'SLE12-SP1-x86_64-Update')
self.assertEqual(r_info['autorefresh'], alias == 'SLE12-SP1-x86_64-Update')
def test_repo_add_nomod_noref(self):
'''
Test mod_repo adds the new repo and nothing else
:return:
'''
zypper_patcher = patch.multiple(
'salt.modules.zypper', **self.zypper_patcher_config)
url = self.new_repo_config['url']
name = self.new_repo_config['name']
with zypper_patcher:
zypper.mod_repo(name, **{'url': url})
self.assertEqual(
zypper.__zypper__.xml.call.call_args_list,
[call('ar', url, name)]
)
zypper.__zypper__.refreshable.xml.call.assert_not_called()
def test_repo_noadd_nomod_noref(self):
'''
Test mod_repo detects the repo already exists,
no modification was requested and no refresh requested either
:return:
'''
url = self.new_repo_config['url']
name = self.new_repo_config['name']
self.zypper_patcher_config['_get_configured_repos'] = Mock(
**{'return_value.sections.return_value': [name]}
)
zypper_patcher = patch.multiple(
'salt.modules.zypper', **self.zypper_patcher_config)
with zypper_patcher:
with self.assertRaisesRegexp(
Exception,
'Specified arguments did not result in modification of repo'
):
zypper.mod_repo(name, **{'url': url})
with self.assertRaisesRegexp(
Exception,
'Specified arguments did not result in modification of repo'
):
zypper.mod_repo(name, **{'url': url, 'gpgautoimport': 'a'})
zypper.__zypper__.xml.call.assert_not_called()
zypper.__zypper__.refreshable.xml.call.assert_not_called()
def test_repo_add_mod_noref(self):
'''
Test mod_repo adds the new repo and call modify to update autorefresh
:return:
'''
zypper_patcher = patch.multiple(
'salt.modules.zypper', **self.zypper_patcher_config)
url = self.new_repo_config['url']
name = self.new_repo_config['name']
with zypper_patcher:
zypper.mod_repo(name, **{'url': url, 'refresh': True})
self.assertEqual(
zypper.__zypper__.xml.call.call_args_list,
[call('ar', url, name)]
)
zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
'mr', '--refresh', name
)
def test_repo_noadd_mod_noref(self):
'''
Test mod_repo detects the repository exists,
calls modify to update 'autorefresh' but does not call refresh
:return:
'''
url = self.new_repo_config['url']
name = self.new_repo_config['name']
self.zypper_patcher_config['_get_configured_repos'] = Mock(
**{'return_value.sections.return_value': [name]})
zypper_patcher = patch.multiple(
'salt.modules.zypper', **self.zypper_patcher_config)
with zypper_patcher:
zypper.mod_repo(name, **{'url': url, 'refresh': True})
zypper.__zypper__.xml.call.assert_not_called()
zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
'mr', '--refresh', name
)
def test_repo_add_nomod_ref(self):
'''
Test mod_repo adds the new repo and refreshes the repo with
`zypper --gpg-auto-import-keys refresh <repo-name>`
:return:
'''
zypper_patcher = patch.multiple(
'salt.modules.zypper', **self.zypper_patcher_config)
url = self.new_repo_config['url']
name = self.new_repo_config['name']
with zypper_patcher:
zypper.mod_repo(name, **{'url': url, 'gpgautoimport': True})
self.assertEqual(
zypper.__zypper__.xml.call.call_args_list,
[
call('ar', url, name),
call('--gpg-auto-import-keys', 'refresh', name)
]
)
zypper.__zypper__.refreshable.xml.call.assert_not_called()
def test_repo_noadd_nomod_ref(self):
'''
Test mod_repo detects the repo already exists,
has nothing to modify and refreshes the repo with
`zypper --gpg-auto-import-keys refresh <repo-name>`
:return:
'''
url = self.new_repo_config['url']
name = self.new_repo_config['name']
self.zypper_patcher_config['_get_configured_repos'] = Mock(
**{'return_value.sections.return_value': [name]}
)
zypper_patcher = patch.multiple(
'salt.modules.zypper', **self.zypper_patcher_config)
with zypper_patcher:
zypper.mod_repo(name, **{'url': url, 'gpgautoimport': True})
self.assertEqual(
zypper.__zypper__.xml.call.call_args_list,
[call('--gpg-auto-import-keys', 'refresh', name)]
)
zypper.__zypper__.refreshable.xml.call.assert_not_called()
def test_repo_add_mod_ref(self):
'''
Test mod_repo adds the new repo,
calls modify to update 'autorefresh' and refreshes the repo with
`zypper --gpg-auto-import-keys refresh <repo-name>`
:return:
'''
zypper_patcher = patch.multiple(
'salt.modules.zypper', **self.zypper_patcher_config)
url = self.new_repo_config['url']
name = self.new_repo_config['name']
with zypper_patcher:
zypper.mod_repo(
name,
**{'url': url, 'refresh': True, 'gpgautoimport': True}
)
self.assertEqual(
zypper.__zypper__.xml.call.call_args_list,
[
call('ar', url, name),
call('--gpg-auto-import-keys', 'refresh', name)
]
)
zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
'--gpg-auto-import-keys', 'mr', '--refresh', name
)
def test_repo_noadd_mod_ref(self):
'''
Test mod_repo detects the repo already exists,
calls modify to update 'autorefresh' and refreshes the repo with
`zypper --gpg-auto-import-keys refresh <repo-name>`
:return:
'''
url = self.new_repo_config['url']
name = self.new_repo_config['name']
self.zypper_patcher_config['_get_configured_repos'] = Mock(
**{'return_value.sections.return_value': [name]}
)
zypper_patcher = patch.multiple(
'salt.modules.zypper', **self.zypper_patcher_config)
with zypper_patcher:
zypper.mod_repo(
name,
**{'url': url, 'refresh': True, 'gpgautoimport': True}
)
self.assertEqual(
zypper.__zypper__.xml.call.call_args_list,
[call('--gpg-auto-import-keys', 'refresh', name)]
)
zypper.__zypper__.refreshable.xml.call.assert_called_once_with(
'--gpg-auto-import-keys', 'mr', '--refresh', name
)
if __name__ == '__main__':
from integration import run_tests
run_tests(ZypperTestCase, needs_daemon=False)
| {
"content_hash": "c90c8d9ce39372b06cc1fea9d04e287b",
"timestamp": "",
"source": "github",
"line_count": 656,
"max_line_length": 119,
"avg_line_length": 41.83536585365854,
"alnum_prop": 0.540008745080892,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "308bf9429a5ad0c6a75b4b6ded1e5459a9efdb89",
"size": "27468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt-2016.3.2/tests/unit/modules/zypper_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
} |
import unittest as ut
from ctypes import windll, POINTER, byref, HRESULT
from comtypes import IUnknown, STDMETHOD, GUID
# XXX leaks references!
def method_count(interface):
return sum([len(base.__dict__.get("_methods_", ()))
for base in interface.__mro__])
class BasicTest(ut.TestCase):
def test_IUnknown(self):
from comtypes import IUnknown
self.failUnlessEqual(method_count(IUnknown), 3)
def test_release(self):
POINTER(IUnknown)()
def test_refcounts(self):
p = POINTER(IUnknown)()
windll.oleaut32.CreateTypeLib2(1, u"blabla", byref(p))
# initial refcount is 2
for i in range(2, 10):
self.failUnlessEqual(p.AddRef(), i)
for i in range(8, 0, -1):
self.failUnlessEqual(p.Release(), i)
def test_qi(self):
p = POINTER(IUnknown)()
windll.oleaut32.CreateTypeLib2(1, u"blabla", byref(p))
self.failUnlessEqual(p.AddRef(), 2)
self.failUnlessEqual(p.Release(), 1)
other = p.QueryInterface(IUnknown)
self.failUnlessEqual(other.AddRef(), 3)
self.failUnlessEqual(p.AddRef(), 4)
self.failUnlessEqual(p.Release(), 3)
self.failUnlessEqual(other.Release(), 2)
del p # calls p.Release()
self.failUnlessEqual(other.AddRef(), 2)
self.failUnlessEqual(other.Release(), 1)
def test_derived(self):
# XXX leaks 50 refs
self.failUnlessEqual(method_count(IUnknown), 3)
class IMyInterface(IUnknown):
pass
self.failUnlessEqual(method_count(IMyInterface), 3)
# assigning _methods_ does not work until we have an _iid_!
self.assertRaises(AttributeError,
setattr, IMyInterface, "_methods_", [])
IMyInterface._iid_ = GUID.create_new()
IMyInterface._methods_ = []
self.failUnlessEqual(method_count(IMyInterface), 3)
IMyInterface._methods_ = [
STDMETHOD(HRESULT, "Blah", [])]
self.failUnlessEqual(method_count(IMyInterface), 4)
    def test_hierarchy(self):
class IMyInterface(IUnknown):
pass
self.failUnless(issubclass(IMyInterface, IUnknown))
self.failUnless(issubclass(POINTER(IMyInterface), POINTER(IUnknown)))
def test_mro(self):
mro = POINTER(IUnknown).__mro__
self.failUnlessEqual(mro[0], POINTER(IUnknown))
self.failUnlessEqual(mro[1], IUnknown)
# the IUnknown class has the actual methods:
self.failUnless(IUnknown.__dict__.get("QueryInterface"))
# but we can call it on the pointer instance
POINTER(IUnknown).QueryInterface
def test_make_methods(self):
# XXX leaks 53 refs
class IBase(IUnknown):
_iid_ = GUID.create_new()
class IDerived(IBase):
_iid_ = GUID.create_new()
        # We cannot assign _methods_ to IDerived before IBase has its _methods_:
self.assertRaises(TypeError, lambda: setattr(IDerived, "_methods_", []))
# Make sure that setting _methods_ failed completely.
self.assertRaises(KeyError, lambda: IDerived.__dict__["_methods_"])
IBase._methods_ = []
# Now it works:
IDerived._methods_ = []
def test_identity(self):
        # COM identity rules
# these should be identical
a = POINTER(IUnknown)()
b = POINTER(IUnknown)()
self.failUnlessEqual(a, b)
self.failUnlessEqual(hash(a), hash(b))
from comtypes.typeinfo import CreateTypeLib
# we do not save the lib, so no file will be created.
# these should NOT be identical
a = CreateTypeLib(u"blahblah")
b = CreateTypeLib(u"spam")
self.failIfEqual(a, b)
self.failIfEqual(hash(a), hash(b))
a = a.QueryInterface(IUnknown)
b = b.QueryInterface(IUnknown)
self.failIfEqual(a, b)
self.failIfEqual(hash(a), hash(b))
# These must be identical
c = a.QueryInterface(IUnknown)
self.failUnlessEqual(a, c)
self.failUnlessEqual(hash(a), hash(c))
def main():
ut.main()
if __name__ == "__main__":
main()
| {
"content_hash": "0b198699f94acc90205337a47b0a4c1e",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 81,
"avg_line_length": 32.50375939849624,
"alnum_prop": 0.5896368262780477,
"repo_name": "ezarko/cfn-init",
"id": "6aa8a44b4731e76f3defb0302af34005eb1a1e2f",
"size": "4336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "comtypes/test/test_basic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "539671"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(name='genesisclient',
version='0.0.8',
description='Genesis (DeStatis et. al.) client for Python',
author='Marian Steinbach',
author_email='[email protected]',
url='https://github.com/marians/genesisclient',
license="MIT",
packages=['genesisclient'],
install_requires=[
'lxml',
'suds-py3'],
entry_points={
'console_scripts': [
'genesiscl = genesisclient:main'
]
}
)
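# Hedged usage note: installing this package (e.g. `pip install .`) makes
# setuptools generate a `genesiscl` console script that dispatches to
# genesisclient:main, so the client can be driven from the shell; the available
# flags depend on genesisclient.main and are not shown in this file.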
| {
"content_hash": "d8806c97e25543519cdcb9b10db71855",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 26.63157894736842,
"alnum_prop": 0.5810276679841897,
"repo_name": "marians/genesisclient",
"id": "daf1881060768355cb5bbbec5ceb56a13629ff29",
"size": "525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25183"
}
],
"symlink_target": ""
} |
import argparse
import json
import logging
import os
import pickle
import platform
import re
import socket
import subprocess
import sys
import tempfile
import shlex
from shutil import which, move
default_compiler = "gcc"
cstub = "{headers}\nint main(int argc, char **argv){{\n {fragment} return 0;\n}}"
log_disclaimer = "This file was generated by Soletta's dependency resolver script to help debug configuration issues."
class DepContext:
def __init__(self, config_log):
self.kconfig = {}
self.makefile_vars = {}
self.log_file = config_log
self.logger = None
def add_kconfig(self, k, t, v):
self.kconfig[k] = {"value": v, "type": t}
self.debug("Adding kconfig entry: %s %s %s", k, t, v)
def get_kconfig(self):
return self.kconfig
def add_makefile_var(self, k, v, attrib, overwrite):
curr = self.makefile_vars.get(k)
curr_val = v
if curr and curr["attrib"] == attrib:
if overwrite:
curr_val = v
else:
curr_val = "%s %s" % (curr["value"], v)
self.makefile_vars[k] = {"value": curr_val, "attrib": attrib}
self.debug("Adding Makefile.gen entry: %s %s %s", k, attrib, curr_val)
def add_append_makefile_var(self, k, v, overwrite=False):
self.add_makefile_var(k, v, "+=", overwrite)
def add_cond_makefile_var(self, k, v, overwrite=False):
self.add_makefile_var(k, v, "?=", overwrite)
def get_makefile_vars(self):
return self.makefile_vars
def find_makefile_var(self, v):
var = self.makefile_vars.get(v)
if var:
return var["value"]
return ""
def __getstate__(self):
d = self.__dict__.copy()
if 'logger' in d.keys():
d['logger'] = d['logger'].name
return d
def __setstate__(self, d):
if 'logger' in d.keys():
d['logger'] = logging.getLogger(d['logger'])
self.__dict__.update(d)
def logger_init(self):
if self.logger:
return
if os.path.isfile(self.log_file):
move(self.log_file, "%s.old" % self.log_file)
self.logger = logging.getLogger("dependency-resolver")
self.logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(self.log_file)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter("%(message)s")
fh.setFormatter(formatter)
ch.setFormatter(formatter)
self.logger.addHandler(fh)
self.logger.addHandler(ch)
def debug(self, msg, *args, **kargs):
self.logger_init()
self.logger.debug(msg, *args, **kargs)
def info(self, msg, *args, **kargs):
self.logger_init()
self.logger.info(msg, *args, **kargs)
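# A short sketch of how a DepContext accumulates results (method names from the
# class above; the example values are hypothetical, and the code that writes the
# Kconfig/Makefile.gen output lives later in this script and is not shown here):
#
#     ctx = DepContext(".config-log")
#     ctx.add_kconfig("HAVE_UDEV", "bool", "y")                          # Kconfig symbol
#     ctx.add_cond_makefile_var("UDEV_CFLAGS", "-I/usr/include")         # UDEV_CFLAGS ?= ...
#     ctx.add_append_makefile_var("NOT_FOUND", "executable: valgrind")   # NOT_FOUND += ...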
def run_command(cmd, context):
try:
context.debug("Command: %s", cmd)
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
shell=True, universal_newlines=True)
context.debug("Command output(stdout):\n%s", output if output else "None")
return output.replace("\n", "").strip(), True
except subprocess.CalledProcessError as e:
context.debug("exit code: %s", e.returncode)
context.debug("Command output(stderr):\n%s", e.output)
return e.output, False
def handle_pkgconfig_check(args, conf, context):
dep = conf["dependency"].upper()
pkg = conf["pkgname"]
atleast_ver = conf.get("atleast-version")
max_ver = conf.get("max-version")
exact_ver = conf.get("exact-version")
ver_match = True
if exact_ver:
cmd = "%s --exact-version=%s %s" % (args.pkg_config, exact_ver, pkg)
result, status = run_command(cmd, context)
if not status:
ver_match = False
elif atleast_ver:
cmd = "%s --atleast-version=%s %s" % (args.pkg_config, atleast_ver, pkg)
result, status = run_command(cmd, context)
if not status:
ver_match = False
elif max_ver:
cmd = "%s --max-version=%s %s" % (args.pkg_config, max_ver, pkg)
result, status = run_command(cmd, context)
if not status:
ver_match = False
cflags_stat = None
ldflags_stat = None
if ver_match:
cflags_cmd = "%s --cflags %s" % (args.pkg_config, pkg)
ldflags_cmd = "%s --libs %s" % (args.pkg_config, pkg)
cflags, cflags_stat = run_command(cflags_cmd, context)
ldflags, ldflags_stat = run_command(ldflags_cmd, context)
if cflags_stat:
context.add_cond_makefile_var("%s_CFLAGS" % dep, cflags)
if ldflags_stat:
context.add_cond_makefile_var("%s_LDFLAGS" % dep, ldflags)
if cflags_stat or ldflags_stat:
context.add_cond_makefile_var("%s_REQUIRES_PRIVATE" % dep, pkg)
success = (cflags_stat or ldflags_stat) and ver_match
have_var = "y" if success else "n"
context.add_kconfig("HAVE_%s" % dep, "bool", have_var)
return success
def compile_test(source, compiler, cflags, ldflags):
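    """Try to compile and link the given C source with the given flags; returns True
    on success. Note: relies on the module-level 'context' for debug logging."""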
f = tempfile.NamedTemporaryFile(suffix=".c",delete=False)
f.write(bytes(source, 'UTF-8'))
f.close()
output = "%s-bin" % f.name
cmd = "{compiler} {cflags} {src} -o {out} {ldflags}".format(compiler=compiler,
cflags=cflags, ldflags=ldflags or "", src=f.name, out=output)
context.debug("Compiling source code:\n%s", source)
out, status = run_command(cmd, context)
if os.path.exists(output):
os.unlink(output)
os.unlink(f.name)
return status
def set_makefile_compflags(flags, prefix, suffix):
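    """Store a dependency's flags either by appending to the variable named in
    'append_to' or as a conditional <prefix>_<suffix> Makefile variable.
    Note: relies on the module-level 'context'."""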
append_to = flags.get("append_to")
flag_value = flags.get("value")
if not flag_value:
return
if append_to:
context.add_append_makefile_var(append_to, flag_value)
else:
context.add_cond_makefile_var("%s_%s" % (prefix, suffix),
flag_value)
def handle_ccode_check(args, conf, context):
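    """Check a dependency by compiling a C snippet built from the configured defines,
    headers and fragment; record HAVE_<DEP> and any configured cflags/ldflags."""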
dep = conf["dependency"].upper()
source = ""
cflags = conf.get("cflags", {})
ldflags = conf.get("ldflags", {})
defines = conf.get("defines", [])
headers = conf.get("headers", [])
for define in defines:
source += "#define %s\n" % define
for header in headers:
source += "#include %s\n" % header
common_cflags = context.find_makefile_var(args.common_cflags_var)
common_ldflags = context.find_makefile_var(args.common_ldflags_var)
test_cflags = (cflags.get("value", ""), args.cflags, common_cflags)
test_ldflags = (ldflags.get("value", ""), args.ldflags, common_ldflags)
fragment = conf.get("fragment") or ""
source = cstub.format(headers=source, fragment=fragment)
success = compile_test(source, args.compiler, (" ").join(test_cflags),
(" ").join(test_ldflags))
if success:
context.add_kconfig("HAVE_%s" % dep, "bool", "y")
if cflags:
set_makefile_compflags(cflags, dep, "CFLAGS")
if ldflags:
set_makefile_compflags(ldflags, dep, "LDFLAGS")
else:
context.add_kconfig("HAVE_%s" % dep, "bool", "n")
return success
def handle_exec_check(args, conf, context):
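    """Check for an executable dependency (and optionally its version), recording
    its path, its directory as <DEP>_DIR and HAVE_<DEP> in the generated output."""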
dep = conf.get("dependency")
dep_sym = dep.upper()
exe = conf.get("exec")
atleast_ver = conf.get("atleast-version")
max_ver = conf.get("max-version")
exact_ver = conf.get("exact-version")
cmd = conf.get("version-command")
ver_match = True
version = None
if not exe:
context.info("Could not parse dependency: %s, no exec was specified.", dep)
exit(1)
path = which(exe)
required = conf.get("required")
if exact_ver or max_ver or atleast_ver:
if not cmd:
context.info("Could not parse dependency: %s, version requested "
"but no version-command to fetch it was specified.", dep)
exit(1)
result, status = run_command(cmd, context)
if not status:
ver_match = False
elif (exact_ver and result != exact_ver) or \
(max_ver and result > max_ver) or \
(atleast_ver and result < atleast_ver):
version = result
ver_match = False
if required:
if not path:
req_label = context.find_makefile_var("NOT_FOUND")
req_label += "executable: %s" % exe
context.add_append_makefile_var("NOT_FOUND", req_label)
elif not ver_match:
req_label = context.find_makefile_var("NOT_FOUND")
req_label += "%s version: %s" % (exe, version)
context.add_append_makefile_var("NOT_FOUND", req_label)
context.add_cond_makefile_var(dep_sym, path)
success = bool(path) and ver_match
if success:
dir_path = os.path.dirname(os.path.realpath("%s" % path))
context.add_cond_makefile_var("%s_DIR" % dep_sym, dir_path)
context.add_kconfig("HAVE_%s" % dep_sym, "bool", "y" if success else "n")
return success
def handle_python_check(args, conf, context):
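    """Check that a Python module can be imported by running a small test script
    with the current interpreter; record HAVE_PYTHON_<DEP>."""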
dep = conf.get("dependency")
required = conf.get("required", False)
pkgname = conf.get("pkgname")
if not pkgname:
context.info("Could not parse dependency: %s, no pkgname specified.", dep)
exit(1)
source = "import %s" % pkgname
f = tempfile.NamedTemporaryFile(suffix=".py",delete=False)
f.write(bytes(source, 'UTF-8'))
f.close()
cmd = "%s %s" % (sys.executable, f.name)
context.debug("Testing python code:\n%s", source)
output, status = run_command(cmd, context)
success = bool(status)
if required and not success:
req_label = context.find_makefile_var("NOT_FOUND")
req_label += "python%s module: %s\\n" % (sys.version_info[0], pkgname)
context.add_append_makefile_var("NOT_FOUND", req_label, True)
context.add_cond_makefile_var("HAVE_PYTHON_%s" % dep.upper(), "y" if success else "n")
os.unlink(f.name)
return success
def test_file_path(path, files):
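    """Return True if every file in 'files' exists under 'path'.
    Note: relies on the module-level 'context' for debug logging."""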
files_len = len(files)
for curr in files:
exists = os.path.exists(os.path.join(path, curr))
context.debug("Lookingup file: %r at %r: %s" % (curr, path, exists))
if exists:
files_len = files_len - 1
return files_len == 0
def handle_filesystem_check(args, conf, context):
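    """Check a filesystem dependency: look for the configured files under the paths
    named by the given variables and record HAVE_<DEP> and <DEP>_PATH."""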
dep = conf.get("dependency")
files = conf.get("files", [])
path = conf.get("path", {})
variables = dict(os.environ)
variables['TOP_SRCDIR'] = os.getcwd()
for k,v in context.makefile_vars.items():
variables[k] = v["value"]
dest = path
vars_expand(dest, variables, len(dest))
found_path = None
for k,v in path.items():
curr_path = variables.get(k)
if not curr_path:
context.debug("Variable $%s is not set", v)
continue
r = test_file_path(curr_path, files)
if r:
found_path = os.path.abspath(curr_path)
break
if found_path:
context.add_kconfig("HAVE_%s" % dep.upper(), "bool", "y")
context.add_cond_makefile_var("%s_PATH" % dep.upper(), found_path)
return True
else:
context.add_kconfig("HAVE_%s" % dep.upper(), "bool", "n")
return False
def handle_flags_check(args, conf, context, cflags, ldflags):
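    """Probe which of the given compiler/linker flags are accepted (by compiling an
    empty program with -Werror) and append the supported ones to 'append_to'."""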
append_to = conf.get("append_to")
source = cstub.format(headers="", fragment="(void)argc; (void)argv;")
if not cflags and not ldflags:
context.info("Neither cflags nor ldflags provided to flags_check.")
exit(1)
if not cflags:
cflags = []
if not ldflags:
ldflags = []
def flags_compile(local_cflags, local_ldflags):
"""Helper to compile given empty source using the args.compiler
and given local CFLAGS and LDFLAGS, prepending -Werror.
"""
return compile_test(source, args.compiler,
" ".join(["-Werror"] + local_cflags),
" ".join(local_ldflags))
if flags_compile(cflags, ldflags):
context.add_append_makefile_var(append_to,
" ".join(cflags + ldflags))
return True
supported_cflags = []
for ci in cflags:
        # must accumulate the tested ones so we handle dependent flags like -Wformat*
if flags_compile(supported_cflags + [ci], []):
supported_cflags.append(ci)
supported_ldflags = []
for cl in ldflags:
if flags_compile(supported_ldflags + [cl], []):
supported_ldflags.append(cl)
if supported_cflags or supported_ldflags:
context.add_append_makefile_var(append_to,
" ".join(supported_cflags + supported_ldflags))
return True
return False
def handle_cflags_check(args, conf, context):
return handle_flags_check(args, conf, context, conf.get("cflags"), None)
def handle_ldflags_check(args, conf, context):
return handle_flags_check(args, conf, context, None, conf.get("ldflags"))
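# Dispatch table mapping the "type" field of a dependency entry to its handler.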
type_handlers = {
"pkg-config": handle_pkgconfig_check,
"ccode": handle_ccode_check,
"exec": handle_exec_check,
"python": handle_python_check,
"cflags": handle_cflags_check,
"ldflags": handle_ldflags_check,
"filesystem": handle_filesystem_check,
}
def format_makefile_var(items):
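    """Serialize (name, {value, attrib}) items into Makefile assignment lines."""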
output = ""
for k,v in sorted(items):
if not v or not v["value"]: continue
output += "%s %s %s\n" % (k, v["attrib"], v["value"].replace('#', '\\#'))
return output
def makefile_gen(args, context):
output = format_makefile_var(context.get_makefile_vars().items())
f = open(args.makefile_output, "w+")
f.write(output)
f.close()
def kconfig_gen(args, context):
output = ""
for k,v in sorted(context.get_kconfig().items()):
output += "config {config}\n{indent}{ktype}\n{indent}default {enabled}\n". \
format(config=k, indent=" ", ktype=v["type"], enabled=v["value"])
f = open(args.kconfig_output, "w+")
f.write(output)
f.close()
def is_verbose():
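    """Return True if the V environment variable requests verbose output."""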
flag = os.environ.get("V")
if not flag:
return False
try:
flag = int(flag) != 0
except ValueError:
flag = flag.lower() in ("true", "yes", "on", "enabled")
finally:
return flag
def run(args, dep_checks, context):
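    """Run every dependency check in 'dep_checks' through its type handler,
    logging the result of each check."""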
verbose = is_verbose()
for dep in dep_checks:
context.debug("Testing dependency: %s, type: %s", dep["dependency"], dep["type"])
handler = type_handlers.get(dep["type"])
if not handler:
context.info("Parsing %s.", args.dep_config.name)
context.info("Invalid type: %s at: %s", dep["type"], dep)
exit(1)
result = handler(args, dep, context)
context.debug("##########################################################")
if verbose:
s = "Checking for %s%s... %s" % (dep["dependency"],
" (optional)" if not dep.get("required") else "",
"found." if result else "not found.")
context.info(s)
def vars_expand(origin, dest, maxrec):
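    """Recursively expand {var} references in 'origin' values using 'dest',
    storing the expanded values in 'dest' (at most 'maxrec' passes)."""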
remaining = {}
if not maxrec:
return
for k,v in origin.items():
if not isinstance(v, str):
dest[k] = v
continue
try:
dest[k] = re.sub("//*", "/", (v.format(**dest)))
except KeyError:
remaining[k] = v
if remaining:
vars_expand(remaining, dest, maxrec - 1)
def cache_persist(args, context):
cache = open(args.cache, "wb")
pickle.dump(context, cache, pickle.HIGHEST_PROTOCOL)
cache.close()
def log_environment(context):
context.debug("%s\n", log_disclaimer)
context.debug("## -------- ##")
context.debug("## Platform ##")
context.debug("## -------- ##\n\n")
uname = os.uname()
context.debug("hostname = %s", socket.gethostname())
context.debug("uname -m = %s", uname.machine)
context.debug("uname -r = %s", uname.release)
context.debug("uname -s = %s", uname.sysname)
context.debug("uname -v = %s\n", uname.version)
processor = platform.processor()
context.debug("uname -p = %s\n", processor if processor else "unknown")
context.debug("## ----------- ##")
context.debug("## Environment ##")
context.debug("## ----------- ##\n\n")
for k,v in sorted(os.environ.items()):
context.debug("%s = %s", k, v)
context.debug("\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--compiler", help="The gcc compiler[for headers based tests]",
type=str, default=default_compiler)
parser.add_argument("--cflags", help="Additional cflags[for headers based tests]",
type=str, default="")
parser.add_argument("--ldflags", help="Additional/environment ldflags",
type=str, default="")
parser.add_argument("--pkg-config", help="What to use for pkg-config",
type=str, default="pkg-config")
parser.add_argument("--kconfig-output", help="The kconfig fragment output file",
type=str, default="Kconfig.gen")
parser.add_argument("--makefile-output", help="The makefile fragment output file",
type=str, default="Makefile.gen")
parser.add_argument("--dep-config", help="The dependencies config file",
type=argparse.FileType("r"), default="data/jsons/dependencies.json")
parser.add_argument("--common-cflags-var", help=("The makefile variable to "
"group common cflags"),
type=str, default="COMMON_CFLAGS")
parser.add_argument("--common-ldflags-var", help=("The makefile variable to "
"group common ldflags"),
type=str, default="COMMON_LDFLAGS")
parser.add_argument("--cache", help="The configuration cache.", type=str,
default=".config-cache")
parser.add_argument("--makefile-gen", help="Should generate Makefile.gen?",
action="store_true")
parser.add_argument("--kconfig-gen", help="Should generate Kconfig.gen?",
action="store_true")
parser.add_argument("--config-log", help="Config log file", type=str, default="config.log")
args = parser.parse_args()
if args.compiler != default_compiler and not which(shlex.split(args.compiler)[0]):
print("ERROR: Invalid --compiler argument, no such file: %s" % args.compiler)
exit(1)
context = None
conf = json.loads(args.dep_config.read())
if os.path.isfile(args.cache):
cache = open(args.cache, "rb")
context = pickle.load(cache)
cache.close()
else:
dep_checks = conf.get("dependencies")
pre_checks = conf.get("pre-dependencies")
context = DepContext(args.config_log)
log_environment(context)
context.debug("## ---------- ##")
context.debug("## Core tests ##")
context.debug("## ---------- ##\n\n")
run(args, pre_checks, context)
context.debug("## ---------------- ##")
context.debug("## Dependency tests ##")
context.debug("## ---------------- ##\n\n")
run(args, dep_checks, context)
cache_persist(args, context)
if args.makefile_gen:
makefile_gen(args, context)
cache_persist(args, context)
if args.kconfig_gen:
kconfig_gen(args, context)
| {
"content_hash": "ef89d500b51c63b5116cf18b443e2a5e",
"timestamp": "",
"source": "github",
"line_count": 587,
"max_line_length": 122,
"avg_line_length": 33.57410562180579,
"alnum_prop": 0.5784960422163589,
"repo_name": "zolkis/soletta",
"id": "7e7299086bb86451d51178a0d60882fcee41dfca",
"size": "20385",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "data/scripts/dependency-resolver.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "3344"
},
{
"name": "C",
"bytes": "4649538"
},
{
"name": "C++",
"bytes": "104247"
},
{
"name": "JavaScript",
"bytes": "47811"
},
{
"name": "Makefile",
"bytes": "53636"
},
{
"name": "NSIS",
"bytes": "1385"
},
{
"name": "Python",
"bytes": "214601"
},
{
"name": "Shell",
"bytes": "7942"
},
{
"name": "Smarty",
"bytes": "1145"
},
{
"name": "VimL",
"bytes": "748"
}
],
"symlink_target": ""
} |
import pygame
import time
import thread  # Python 2 threading module, needed by startListener()
import fmuglobals
if fmuglobals.RUN_ON_RASPBERRY_PI:
import RPi.GPIO as GPIO
"""
AnalogButtons
"""
class AnalogButtons:
def __init__(self):
if fmuglobals.RUN_ON_RASPBERRY_PI:
GPIO.setmode(GPIO.BCM)
#import rotary_encoder
#self.encoder = rotary_encoder.RotaryEncoder(4,15)
#from rotary_class import RotaryEncoder
#self.encoder = RotaryEncoder(4,15,callback=self.encoder_event)
self.buttons = [
{ 'pin': 22, 'key': pygame.K_UP, 'callback': self.on_up_click},
{ 'pin': 23, 'key': pygame.K_RIGHT, 'callback': self.on_right_click},
{ 'pin': 17, 'key': pygame.K_DOWN, 'callback': self.on_down_click},
{ 'pin': 14, 'key': pygame.K_LEFT, 'callback': self.on_left_click},
{ 'pin': 27, 'key': pygame.K_RETURN, 'callback': self.on_return_click},
]
if fmuglobals.RUN_ON_RASPBERRY_PI:
for btn in self.buttons:
GPIO.setup( btn['pin'], GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(btn['pin'], GPIO.FALLING, callback=btn['callback'], bouncetime=20)
#self.startListener()
def encoder_event(self,event):
if event == self.encoder.CLOCKWISE:
#print "Clockwise"
pygame.event.post(pygame.event.Event(pygame.KEYDOWN, key=pygame.K_DOWN, unicode=None, mod=None))
elif event == self.encoder.ANTICLOCKWISE:
#print "Anticlockwise"
pygame.event.post(pygame.event.Event(pygame.KEYDOWN, key=pygame.K_UP, unicode=None, mod=None))
#elif event == self.encoder.BUTTONDOWN:
# print "Button down"
#elif event == self.encoder.BUTTONUP:
# print "Button up"
return
#def on_click(self, btn):
# print 'GPIO ' + str(btn) + ' clicked'
# pygame.event.post(pygame.event.Event(pygame.KEYDOWN, key=btn['key'], unicode=None, mod=None))
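    # GPIO falling-edge callbacks: while the button is held low, keep posting
    # the matching pygame KEYDOWN event so navigation auto-repeats.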
def on_left_click(self, btn):
while GPIO.input(btn) == GPIO.LOW:
pygame.event.post(pygame.event.Event(pygame.KEYDOWN, key=pygame.K_LEFT, unicode=None, mod=None))
time.sleep(0.18)
def on_up_click(self, btn):
while GPIO.input(btn) == GPIO.LOW:
pygame.event.post(pygame.event.Event(pygame.KEYDOWN, key=pygame.K_UP, unicode=None, mod=None))
time.sleep(0.18)
def on_down_click(self, btn):
while GPIO.input(btn) == GPIO.LOW:
pygame.event.post(pygame.event.Event(pygame.KEYDOWN, key=pygame.K_DOWN, unicode=None, mod=None))
time.sleep(0.18)
def on_right_click(self, btn):
while GPIO.input(btn) == GPIO.LOW:
pygame.event.post(pygame.event.Event(pygame.KEYDOWN, key=pygame.K_RIGHT, unicode=None, mod=None))
time.sleep(0.18)
def on_return_click(self, btn):
while GPIO.input(btn) == GPIO.LOW:
pygame.event.post(pygame.event.Event(pygame.KEYDOWN, key=pygame.K_RETURN, unicode=None, mod=None))
time.sleep(0.18)
def startListener(self):
if fmuglobals.RUN_ON_RASPBERRY_PI:
try:
thread.start_new_thread( self.check_buttons, ())
except:
print "Error: AnalogButtons unable to start thread"
else:
try:
thread.start_new_thread( self.check_buttons_desktop, ())
except:
print "Error: AnalogButtons unable to start thread"
def check_buttons(self):
while 1:
#delta = self.encoder.get_delta()
#if delta != 0:
# if delta > 0:
# pygame.event.post(pygame.event.Event(pygame.KEYDOWN, key=pygame.K_DOWN, unicode=None, mod=None))
# else:
# pygame.event.post(pygame.event.Event(pygame.KEYDOWN, key=pygame.K_UP, unicode=None, mod=None))
#else:
for btn in self.buttons:
if GPIO.input(btn['pin']) == False:
#print 'GPIO ' + str(btn['pin']) + ' pressed'
pygame.event.post(pygame.event.Event(pygame.KEYDOWN, key=btn['key'], unicode=None, mod=None))
time.sleep(.3)
break
#time.sleep(.01)
def check_buttons_desktop(self):
while 1:
for btn in self.buttons:
pass
time.sleep(.08)
| {
"content_hash": "9d4dd609ff6bb70f4f8e2f1e4069bbbe",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 102,
"avg_line_length": 32.495726495726494,
"alnum_prop": 0.6685954760652288,
"repo_name": "jwayneroth/mpd-touch",
"id": "c45c964ec6acd497d6e5583987cbb28a1d04c9a7",
"size": "3820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/gpio-buttons.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "870833"
},
{
"name": "Shell",
"bytes": "4599"
}
],
"symlink_target": ""
} |
from selenium.webdriver.remote.remote_connection import RemoteConnection
class ChromiumRemoteConnection(RemoteConnection):
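    """Remote connection that registers the Chromium-specific command endpoints
    (launch app, network conditions, CDP execution and Cast control)."""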
def __init__(self, remote_server_addr, keep_alive=True):
RemoteConnection.__init__(self, remote_server_addr, keep_alive)
self._commands["launchApp"] = ('POST', '/session/$sessionId/chromium/launch_app')
self._commands["setNetworkConditions"] = ('POST', '/session/$sessionId/chromium/network_conditions')
self._commands["getNetworkConditions"] = ('GET', '/session/$sessionId/chromium/network_conditions')
self._commands['executeCdpCommand'] = ('POST', '/session/$sessionId/goog/cdp/execute')
self._commands['getSinks'] = ('GET', '/session/$sessionId/goog/cast/get_sinks')
self._commands['getIssueMessage'] = ('GET', '/session/$sessionId/goog/cast/get_issue_message')
self._commands['setSinkToUse'] = ('POST', '/session/$sessionId/goog/cast/set_sink_to_use')
self._commands['startTabMirroring'] = ('POST', '/session/$sessionId/goog/cast/start_tab_mirroring')
self._commands['stopCasting'] = ('POST', '/session/$sessionId/goog/cast/stop_casting')
| {
"content_hash": "2e7937078ed42c1e07a71a0bcf10c6d0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 108,
"avg_line_length": 72.125,
"alnum_prop": 0.6906412478336221,
"repo_name": "oddui/selenium",
"id": "71770b02c9c89ea1427b814f50e389ec5b6c8e04",
"size": "1942",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "py/selenium/webdriver/chromium/remote_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "825"
},
{
"name": "Batchfile",
"bytes": "347"
},
{
"name": "C",
"bytes": "47126"
},
{
"name": "C#",
"bytes": "3507419"
},
{
"name": "C++",
"bytes": "2219970"
},
{
"name": "CSS",
"bytes": "11660"
},
{
"name": "HTML",
"bytes": "1625237"
},
{
"name": "Java",
"bytes": "5362660"
},
{
"name": "JavaScript",
"bytes": "3575635"
},
{
"name": "Makefile",
"bytes": "4655"
},
{
"name": "Python",
"bytes": "1108508"
},
{
"name": "Ragel",
"bytes": "3086"
},
{
"name": "Ruby",
"bytes": "809667"
},
{
"name": "Shell",
"bytes": "15964"
},
{
"name": "XSLT",
"bytes": "1047"
}
],
"symlink_target": ""
} |
"""Fichier contenant la fonction nb_familiers."""
from fractions import Fraction
from primaires.scripting.fonction import Fonction
class ClasseFonction(Fonction):
"""Retourne le nombre de familiers."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.nb_familiers, "Personnage")
@staticmethod
def nb_familiers(maitre):
"""Retourne le nombre de familiers dont le maître est précisé.
Paramètres à préciser :
* maitre : le maître (un personnage)
"""
familiers = importeur.familier.familiers_de(maitre)
return Fraction(len(familiers))
| {
"content_hash": "464169c69375c0f2e39d7ea86d81f316",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 70,
"avg_line_length": 25,
"alnum_prop": 0.6752,
"repo_name": "stormi/tsunami",
"id": "9ebb83d896078e6a78da5d4663f8199e3c6fb907",
"size": "2194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/secondaires/familier/fonctions/nb_familiers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
import cgi
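# Minimal Python 2 CGI script: read the "comments" form field and echo it back as HTML.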
form = cgi.FieldStorage()
val1=form.getvalue("comments")
print "Content-type:text/html"
print
print "The form input is:<br/>"
print val1
| {
"content_hash": "09c513109d771004457d7e22eca7e27e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 31,
"avg_line_length": 21.142857142857142,
"alnum_prop": 0.75,
"repo_name": "zturchan/CMPUT410-Lab3",
"id": "a64b1169e34d4ff7da19e080f72f4ba8b8a1ead9",
"size": "170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/cgi/test_form.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5712"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import sys
import os
import json
def report(samples, pid):
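    """Plot memory (uss) and connection-count samples over time, marking events with
    vertical lines, and save the figure to .reports/<pid>.png; returns the path."""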
plt.figure(figsize=(25, 10))
x = [s['t'] for s in samples if s['type'] == 'proc']
lines = [s for s in samples if s['type'] == 'event']
# minuss = min(s['uss'] for s in samples if s['type'] == 'proc')
ussplot = plt.subplot(211)
ussplot.set_title('uss')
ussplot.plot(
x, [s['uss'] for s in samples if s['type'] == 'proc'], '.')
for l in lines:
# ussplot.text(l['t'], minuss, l['event'], horizontalalignment='right',
# rotation=-90, rotation_mode='anchor')
ussplot.axvline(l['t'])
connplot = plt.subplot(212)
connplot.set_title('conn')
connplot.plot(
x, [s['conn'] for s in samples if s['type'] == 'proc'], '.')
os.makedirs('.reports', exist_ok=True)
path = '.reports/{}.png'.format(pid)
plt.savefig(path)
return path
def load(filepath):
samples = []
with open(filepath) as fp:
for line in fp:
line = line.strip()
samples.append(json.loads(line))
return samples
def order(samples):
return sorted(samples, key=lambda x: x['t'])
def normalize_time(samples):
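    """Return the samples with timestamps shifted so the first sample is at t = 0."""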
if not samples:
return []
base_time = samples[0]['t']
return [{**s, 't': s['t'] - base_time} for s in samples]
def main():
samples = load(sys.argv[1])
pid, _ = os.path.splitext(os.path.basename(sys.argv[1]))
samples = order(samples)
samples = normalize_time(samples)
report(samples, pid)
if __name__ == '__main__':
main()
| {
"content_hash": "f29ce02c62d96d6d2d8a1c56ca0f245a",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 79,
"avg_line_length": 23.220588235294116,
"alnum_prop": 0.5756808106396454,
"repo_name": "squeaky-pl/japronto",
"id": "2906f16e66fb693c8ce8567ed1e7987a648d00db",
"size": "1579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/report.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "149858"
},
{
"name": "Dockerfile",
"bytes": "97"
},
{
"name": "Go",
"bytes": "668"
},
{
"name": "JavaScript",
"bytes": "387"
},
{
"name": "Lua",
"bytes": "915"
},
{
"name": "Python",
"bytes": "143655"
},
{
"name": "Shell",
"bytes": "2420"
}
],
"symlink_target": ""
} |
"""
Test Manager - Fetch test results.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 101460 $"
# Standard python imports.
import unittest;
# Validation Kit imports.
from common import constants;
from testmanager import config;
from testmanager.core.base import ModelDataBase, ModelLogicBase, ModelDataBaseTestCase, TMExceptionBase, TMTooManyRows;
from testmanager.core.testgroup import TestGroupData
from testmanager.core.build import BuildDataEx
from testmanager.core.testbox import TestBoxData
from testmanager.core.testcase import TestCaseData
from testmanager.core.schedgroup import SchedGroupData
from testmanager.core.systemlog import SystemLogData, SystemLogLogic;
class TestResultData(ModelDataBase):
"""
Test case execution result data
"""
## @name TestStatus_T
# @{
ksTestStatus_Running = 'running';
ksTestStatus_Success = 'success';
ksTestStatus_Skipped = 'skipped';
ksTestStatus_BadTestBox = 'bad-testbox';
ksTestStatus_Aborted = 'aborted';
ksTestStatus_Failure = 'failure';
ksTestStatus_TimedOut = 'timed-out';
ksTestStatus_Rebooted = 'rebooted';
## @}
## List of relatively harmless (to testgroup/case) statuses.
kasHarmlessTestStatuses = [ ksTestStatus_Skipped, ksTestStatus_BadTestBox, ksTestStatus_Aborted, ];
## List of bad statuses.
kasBadTestStatuses = [ ksTestStatus_Failure, ksTestStatus_TimedOut, ksTestStatus_Rebooted, ];
ksIdAttr = 'idTestResult';
ksParam_idTestResult = 'TestResultData_idTestResult';
ksParam_idTestResultParent = 'TestResultData_idTestResultParent';
ksParam_idTestSet = 'TestResultData_idTestSet';
ksParam_tsCreated = 'TestResultData_tsCreated';
ksParam_tsElapsed = 'TestResultData_tsElapsed';
ksParam_idStrName = 'TestResultData_idStrName';
ksParam_cErrors = 'TestResultData_cErrors';
ksParam_enmStatus = 'TestResultData_enmStatus';
ksParam_iNestingDepth = 'TestResultData_iNestingDepth';
kasValidValues_enmStatus = [
ksTestStatus_Running,
ksTestStatus_Success,
ksTestStatus_Skipped,
ksTestStatus_BadTestBox,
ksTestStatus_Aborted,
ksTestStatus_Failure,
ksTestStatus_TimedOut,
ksTestStatus_Rebooted
];
def __init__(self):
ModelDataBase.__init__(self)
self.idTestResult = None
self.idTestResultParent = None
self.idTestSet = None
self.tsCreated = None
self.tsElapsed = None
self.idStrName = None
self.cErrors = 0;
self.enmStatus = None
self.iNestingDepth = None
def initFromDbRow(self, aoRow):
"""
Reinitialize from a SELECT * FROM TestResults.
Return self. Raises exception if no row.
"""
if aoRow is None:
raise TMExceptionBase('Test result record not found.')
self.idTestResult = aoRow[0]
self.idTestResultParent = aoRow[1]
self.idTestSet = aoRow[2]
self.tsCreated = aoRow[3]
self.tsElapsed = aoRow[4]
self.idStrName = aoRow[5]
self.cErrors = aoRow[6]
self.enmStatus = aoRow[7]
self.iNestingDepth = aoRow[8]
return self;
def isFailure(self):
""" Check if it's a real failure. """
return self.enmStatus in self.kasBadTestStatuses;
class TestResultDataEx(TestResultData):
"""
Extended test result data class.
This is intended for use as a node in a result tree. This is not intended
for serialization to parameters or vice versa. Use TestResultLogic to
construct the tree.
"""
def __init__(self):
TestResultData.__init__(self)
self.sName = None; # idStrName resolved.
self.oParent = None; # idTestResultParent within the tree.
self.aoChildren = []; # TestResultDataEx;
self.aoValues = []; # TestResultValue;
self.aoMsgs = []; # TestResultMsg;
self.aoFiles = []; # TestResultFile;
def initFromDbRow(self, aoRow):
"""
Initialize from a query like this:
SELECT TestResults.*, TestResultStrTab.sValue
FROM TestResults, TestResultStrTab
WHERE TestResultStrTab.idStr = TestResults.idStrName
Note! The caller is expected to fetch children, values, failure
details, and files.
"""
self.sName = None;
self.oParent = None;
self.aoChildren = [];
self.aoValues = [];
self.aoMsgs = [];
self.aoFiles = [];
TestResultData.initFromDbRow(self, aoRow);
self.sName = aoRow[9];
return self;
class TestResultValueData(ModelDataBase):
"""
Test result value data.
"""
ksIdAttr = 'idTestResultValue';
ksParam_idTestResultValue = 'TestResultValue_idTestResultValue';
ksParam_idTestResult = 'TestResultValue_idTestResult';
ksParam_idTestSet = 'TestResultValue_idTestSet';
ksParam_tsCreated = 'TestResultValue_tsCreated';
ksParam_idStrName = 'TestResultValue_idStrName';
ksParam_lValue = 'TestResultValue_lValue';
ksParam_iUnit = 'TestResultValue_iUnit';
def __init__(self):
ModelDataBase.__init__(self)
self.idTestResultValue = None;
self.idTestResult = None;
self.idTestSet = None;
self.tsCreated = None;
self.idStrName = None;
self.lValue = None;
self.iUnit = 0;
def initFromDbRow(self, aoRow):
"""
Reinitialize from a SELECT * FROM TestResultValues.
Return self. Raises exception if no row.
"""
if aoRow is None:
raise TMExceptionBase('Test result value record not found.')
self.idTestResultValue = aoRow[0];
self.idTestResult = aoRow[1];
self.idTestSet = aoRow[2];
self.tsCreated = aoRow[3];
self.idStrName = aoRow[4];
self.lValue = aoRow[5];
self.iUnit = aoRow[6];
return self;
class TestResultValueDataEx(TestResultValueData):
"""
Extends TestResultValue by resolving the value name and unit string.
"""
def __init__(self):
TestResultValueData.__init__(self)
self.sName = None;
self.sUnit = '';
def initFromDbRow(self, aoRow):
"""
Reinitialize from a query like this:
SELECT TestResultValues.*, TestResultStrTab.sValue
FROM TestResultValues, TestResultStrTab
WHERE TestResultStrTab.idStr = TestResultValues.idStrName
Return self. Raises exception if no row.
"""
TestResultValueData.initFromDbRow(self, aoRow);
self.sName = aoRow[7];
if self.iUnit < len(constants.valueunit.g_asNames):
self.sUnit = constants.valueunit.g_asNames[self.iUnit];
else:
self.sUnit = '<%d>' % (self.iUnit,);
return self;
class TestResultMsgData(ModelDataBase):
"""
Test result message data.
"""
ksIdAttr = 'idTestResultMsg';
ksParam_idTestResultMsg = 'TestResultValue_idTestResultMsg';
ksParam_idTestResult = 'TestResultValue_idTestResult';
ksParam_tsCreated = 'TestResultValue_tsCreated';
ksParam_idStrMsg = 'TestResultValue_idStrMsg';
ksParam_enmLevel = 'TestResultValue_enmLevel';
def __init__(self):
ModelDataBase.__init__(self)
self.idTestResultMsg = None;
self.idTestResult = None;
self.tsCreated = None;
self.idStrMsg = None;
self.enmLevel = None;
def initFromDbRow(self, aoRow):
"""
Reinitialize from a SELECT * FROM TestResultMsgs.
Return self. Raises exception if no row.
"""
if aoRow is None:
            raise TMExceptionBase('Test result message record not found.')
self.idTestResultMsg = aoRow[0];
self.idTestResult = aoRow[1];
self.tsCreated = aoRow[2];
self.idStrMsg = aoRow[3];
self.enmLevel = aoRow[4];
return self;
class TestResultMsgDataEx(TestResultMsgData):
"""
Extends TestResultMsg by resolving the message string.
"""
def __init__(self):
TestResultMsgData.__init__(self)
self.sMsg = None;
def initFromDbRow(self, aoRow):
"""
Reinitialize from a query like this:
            SELECT TestResultMsgs.*, TestResultStrTab.sValue
            FROM TestResultMsgs, TestResultStrTab
            WHERE TestResultStrTab.idStr = TestResultMsgs.idStrMsg
Return self. Raises exception if no row.
"""
TestResultMsgData.initFromDbRow(self, aoRow);
self.sMsg = aoRow[5];
return self;
class TestResultFileData(ModelDataBase):
"""
    Test result file data.
"""
ksIdAttr = 'idTestResultFile';
ksParam_idTestResultFile = 'TestResultFile_idTestResultFile';
ksParam_idTestResult = 'TestResultFile_idTestResult';
ksParam_tsCreated = 'TestResultFile_tsCreated';
ksParam_idStrFile = 'TestResultFile_idStrFile';
ksParam_idStrDescription = 'TestResultFile_idStrDescription';
ksParam_idStrKind = 'TestResultFile_idStrKind';
ksParam_idStrMime = 'TestResultFile_idStrMime';
def __init__(self):
ModelDataBase.__init__(self)
self.idTestResultFile = None;
self.idTestResult = None;
self.tsCreated = None;
self.idStrFile = None;
self.idStrDescription = None;
self.idStrKind = None;
self.idStrMime = None;
def initFromDbRow(self, aoRow):
"""
Reinitialize from a SELECT * FROM TestResultFiles.
Return self. Raises exception if no row.
"""
if aoRow is None:
raise TMExceptionBase('Test result file record not found.')
self.idTestResultFile = aoRow[0];
self.idTestResult = aoRow[1];
self.tsCreated = aoRow[2];
self.idStrFile = aoRow[3];
self.idStrDescription = aoRow[4];
self.idStrKind = aoRow[5];
self.idStrMime = aoRow[6];
return self;
class TestResultFileDataEx(TestResultFileData):
"""
Extends TestResultFile by resolving the strings.
"""
def __init__(self):
TestResultFileData.__init__(self)
self.sFile = None;
self.sDescription = None;
self.sKind = None;
self.sMime = None;
def initFromDbRow(self, aoRow):
"""
Reinitialize from a query like this:
SELECT TestResultFiles.*,
StrTabFile.sValue AS sFile,
StrTabDesc.sValue AS sDescription
StrTabKind.sValue AS sKind,
StrTabMime.sValue AS sMime,
FROM ...
Return self. Raises exception if no row.
"""
TestResultFileData.initFromDbRow(self, aoRow);
self.sFile = aoRow[7];
self.sDescription = aoRow[8];
self.sKind = aoRow[9];
self.sMime = aoRow[10];
return self;
def initFakeMainLog(self, oTestSet):
"""
Reinitializes to represent the main.log object (not in DB).
Returns self.
"""
self.idTestResultFile = 0;
self.idTestResult = oTestSet.idTestResult;
self.tsCreated = oTestSet.tsCreated;
self.idStrFile = None;
self.idStrDescription = None;
self.idStrKind = None;
self.idStrMime = None;
self.sFile = 'main.log';
self.sDescription = '';
self.sKind = 'log/main';
self.sMime = 'text/plain';
return self;
def isProbablyUtf8Encoded(self):
"""
Checks if the file is likely to be UTF-8 encoded.
"""
if self.sMime in [ 'text/plain', 'text/html' ]:
return True;
return False;
def getMimeWithEncoding(self):
"""
Gets the MIME type with encoding if likely to be UTF-8.
"""
if self.isProbablyUtf8Encoded():
return '%s; charset=utf-8' % (self.sMime,);
return self.sMime;
class TestResultListingData(ModelDataBase): # pylint: disable=R0902
"""
Test case result data representation for table listing
"""
def __init__(self):
"""Initialize"""
ModelDataBase.__init__(self)
self.idTestSet = None
self.idBuildCategory = None;
self.sProduct = None
self.sRepository = None;
self.sBranch = None
self.sType = None
self.idBuild = None;
self.sVersion = None;
self.iRevision = None
self.sOs = None;
self.sOsVersion = None;
self.sArch = None;
self.sCpuVendor = None;
self.sCpuName = None;
self.cCpus = None;
self.fCpuHwVirt = None;
self.fCpuNestedPaging = None;
self.fCpu64BitGuest = None;
self.idTestBox = None
self.sTestBoxName = None
self.tsCreated = None
self.tsElapsed = None
self.enmStatus = None
self.cErrors = None;
self.idTestCase = None
self.sTestCaseName = None
self.sBaseCmd = None
self.sArgs = None
self.idBuildTestSuite = None;
self.iRevisionTestSuite = None;
def initFromDbRow(self, aoRow):
"""
Reinitialize from a database query.
Return self. Raises exception if no row.
"""
if aoRow is None:
raise TMExceptionBase('Test result record not found.')
self.idTestSet = aoRow[0];
self.idBuildCategory = aoRow[1];
self.sProduct = aoRow[2];
self.sRepository = aoRow[3];
self.sBranch = aoRow[4];
self.sType = aoRow[5];
self.idBuild = aoRow[6];
self.sVersion = aoRow[7];
self.iRevision = aoRow[8];
self.sOs = aoRow[9];
self.sOsVersion = aoRow[10];
self.sArch = aoRow[11];
self.sCpuVendor = aoRow[12];
self.sCpuName = aoRow[13];
self.cCpus = aoRow[14];
self.fCpuHwVirt = aoRow[15];
self.fCpuNestedPaging = aoRow[16];
self.fCpu64BitGuest = aoRow[17];
self.idTestBox = aoRow[18];
self.sTestBoxName = aoRow[19];
self.tsCreated = aoRow[20];
self.tsElapsed = aoRow[21];
self.enmStatus = aoRow[22];
self.cErrors = aoRow[23];
self.idTestCase = aoRow[24];
self.sTestCaseName = aoRow[25];
self.sBaseCmd = aoRow[26];
self.sArgs = aoRow[27];
self.idBuildTestSuite = aoRow[28];
self.iRevisionTestSuite = aoRow[29];
return self
class TestResultHangingOffence(TMExceptionBase):
"""Hanging offence committed by test case."""
pass;
class TestResultLogic(ModelLogicBase): # pylint: disable=R0903
"""
Results grouped by scheduling group.
"""
#
# Result grinding for displaying in the WUI.
#
ksResultsGroupingTypeNone = 'ResultsGroupingTypeNone';
ksResultsGroupingTypeTestGroup = 'ResultsGroupingTypeTestGroup';
ksResultsGroupingTypeBuildRev = 'ResultsGroupingTypeBuild';
ksResultsGroupingTypeTestBox = 'ResultsGroupingTypeTestBox';
ksResultsGroupingTypeTestCase = 'ResultsGroupingTypeTestCase';
ksResultsGroupingTypeSchedGroup = 'ResultsGroupingTypeSchedGroup';
#kdResultGroupingMapOld = {
# ksResultsGroupingTypeNone: ('TestSets', None, None),
# ksResultsGroupingTypeTestGroup: ('TestSets', 'TestSets.idTestGroup', None),
# ksResultsGroupingTypeTestBox: ('TestSets', 'TestSets.idTestBox', None),
# ksResultsGroupingTypeTestCase: ('TestSets', 'TestSets.idTestCase', None),
# ksResultsGroupingTypeBuildRev: ('TestSets, Builds', 'Builds.iRevision',
# ' AND Builds.idBuild = TestSets.idBuild'
# ' AND Builds.tsExpire > TestSets.tsCreated'
# ' AND Builds.tsEffective <= TestSets.tsCreated' ),
# ksResultsGroupingTypeSchedGroup: ('TestSets, TestBoxes', 'TestBoxes.idSchedGroup',
# ' AND TestSets.idGenTestBox = TestBoxes.idGenTestBox'),
#};
## @name Result sorting options.
## @{
ksResultsSortByRunningAndStart = 'ResultsSortByRunningAndStart'; ##< Default
ksResultsSortByBuildRevision = 'ResultsSortByBuildRevision';
ksResultsSortByTestBoxName = 'ResultsSortByTestBoxName';
ksResultsSortByTestBoxOs = 'ResultsSortByTestBoxOs';
ksResultsSortByTestBoxOsVersion = 'ResultsSortByTestBoxOsVersion';
ksResultsSortByTestBoxOsArch = 'ResultsSortByTestBoxOsArch';
ksResultsSortByTestBoxArch = 'ResultsSortByTestBoxArch';
ksResultsSortByTestBoxCpuVendor = 'ResultsSortByTestBoxCpuVendor';
ksResultsSortByTestBoxCpuName = 'ResultsSortByTestBoxCpuName';
ksResultsSortByTestBoxCpuRev = 'ResultsSortByTestBoxCpuRev';
ksResultsSortByTestBoxCpuFeatures = 'ResultsSortByTestBoxCpuFeatures';
ksResultsSortByTestCaseName = 'ResultsSortByTestCaseName';
kasResultsSortBy = {
ksResultsSortByRunningAndStart,
ksResultsSortByBuildRevision,
ksResultsSortByTestBoxName,
ksResultsSortByTestBoxOs,
ksResultsSortByTestBoxOsVersion,
ksResultsSortByTestBoxOsArch,
ksResultsSortByTestBoxArch,
ksResultsSortByTestBoxCpuVendor,
ksResultsSortByTestBoxCpuName,
ksResultsSortByTestBoxCpuRev,
ksResultsSortByTestBoxCpuFeatures,
ksResultsSortByTestCaseName,
};
## Used by the WUI for generating the drop down.
kaasResultsSortByTitles = (
( ksResultsSortByRunningAndStart, 'Running & Start TS' ),
( ksResultsSortByBuildRevision, 'Build Revision' ),
( ksResultsSortByTestBoxName, 'TestBox Name' ),
( ksResultsSortByTestBoxOs, 'O/S' ),
( ksResultsSortByTestBoxOsVersion, 'O/S Version' ),
( ksResultsSortByTestBoxOsArch, 'O/S & Architecture' ),
( ksResultsSortByTestBoxArch, 'Architecture' ),
( ksResultsSortByTestBoxCpuVendor, 'CPU Vendor' ),
( ksResultsSortByTestBoxCpuName, 'CPU Vendor & Name' ),
( ksResultsSortByTestBoxCpuRev, 'CPU Vendor & Revision' ),
( ksResultsSortByTestBoxCpuFeatures, 'CPU Features' ),
( ksResultsSortByTestCaseName, 'Test Case Name' ),
);
## @}
## Default sort by map.
kdResultSortByMap = {
ksResultsSortByRunningAndStart: ('', None, None, ''),
ksResultsSortByBuildRevision: (
# Sorting tables.
', Builds',
# Sorting table join(s).
' AND TestSets.idBuild = Builds.idBuild'
' AND Builds.tsExpire >= TestSets.tsCreated'
' AND Builds.tsEffective <= TestSets.tsCreated',
# Start of ORDER BY statement.
' Builds.iRevision DESC',
# Extra columns to fetch for the above ORDER BY to work in a SELECT DISTINCT statement.
'' ),
ksResultsSortByTestBoxName: (
', TestBoxes',
' AND TestSets.idGenTestBox = TestBoxes.idGenTestBox',
' TestBoxes.sName DESC',
'' ),
ksResultsSortByTestBoxOsArch: (
', TestBoxes',
' AND TestSets.idGenTestBox = TestBoxes.idGenTestBox',
' TestBoxes.sOs, TestBoxes.sCpuArch',
'' ),
ksResultsSortByTestBoxOs: (
', TestBoxes',
' AND TestSets.idGenTestBox = TestBoxes.idGenTestBox',
' TestBoxes.sOs',
'' ),
ksResultsSortByTestBoxOsVersion: (
', TestBoxes',
' AND TestSets.idGenTestBox = TestBoxes.idGenTestBox',
' TestBoxes.sOs, TestBoxes.sOsVersion DESC',
'' ),
ksResultsSortByTestBoxArch: (
', TestBoxes',
' AND TestSets.idGenTestBox = TestBoxes.idGenTestBox',
' TestBoxes.sCpuArch',
'' ),
ksResultsSortByTestBoxCpuVendor: (
', TestBoxes',
' AND TestSets.idGenTestBox = TestBoxes.idGenTestBox',
' TestBoxes.sCpuVendor',
'' ),
ksResultsSortByTestBoxCpuName: (
', TestBoxes',
' AND TestSets.idGenTestBox = TestBoxes.idGenTestBox',
' TestBoxes.sCpuVendor, TestBoxes.sCpuName',
'' ),
ksResultsSortByTestBoxCpuRev: (
', TestBoxes',
' AND TestSets.idGenTestBox = TestBoxes.idGenTestBox',
' TestBoxes.sCpuVendor, TestBoxes.lCpuRevision DESC',
', TestBoxes.lCpuRevision' ),
ksResultsSortByTestBoxCpuFeatures: (
', TestBoxes',
' AND TestSets.idGenTestBox = TestBoxes.idGenTestBox',
' TestBoxes.fCpuHwVirt DESC, TestBoxes.fCpuNestedPaging DESC, TestBoxes.fCpu64BitGuest DESC, TestBoxes.cCpus DESC',
', TestBoxes.cCpus' ),
ksResultsSortByTestCaseName: (
', TestCases',
' AND TestSets.idGenTestCase = TestCases.idGenTestCase',
' TestCases.sName',
'' ),
};
kdResultGroupingMap = {
ksResultsGroupingTypeNone: (
# Grouping tables; # Grouping field; # Grouping where addition. # Sort by overrides.
'TestSets', None, None, {}
),
ksResultsGroupingTypeTestGroup: ('TestSets', 'TestSets.idTestGroup', None, {}),
ksResultsGroupingTypeTestBox: ('TestSets', 'TestSets.idTestBox', None, {}),
ksResultsGroupingTypeTestCase: ('TestSets', 'TestSets.idTestCase', None, {}),
ksResultsGroupingTypeBuildRev: (
'TestSets, Builds',
'Builds.iRevision',
' AND Builds.idBuild = TestSets.idBuild'
' AND Builds.tsExpire > TestSets.tsCreated'
' AND Builds.tsEffective <= TestSets.tsCreated',
{ ksResultsSortByBuildRevision: ( '', None, ' Builds.iRevision DESC' ), }
),
ksResultsGroupingTypeSchedGroup: (
'TestSets, TestBoxes',
'TestBoxes.idSchedGroup',
' AND TestSets.idGenTestBox = TestBoxes.idGenTestBox',
{ ksResultsSortByTestBoxName: ( '', None, ' TestBoxes.sName DESC', '' ),
ksResultsSortByTestBoxOsArch: ( '', None, ' TestBoxes.sOs, TestBoxes.sCpuArch', '' ),
ksResultsSortByTestBoxOs: ( '', None, ' TestBoxes.sOs', '' ),
ksResultsSortByTestBoxOsVersion: ( '', None, ' TestBoxes.sOs, TestBoxes.sOsVersion DESC', '' ),
ksResultsSortByTestBoxArch: ( '', None, ' TestBoxes.sCpuArch', '' ),
ksResultsSortByTestBoxCpuVendor: ( '', None, ' TestBoxes.sCpuVendor', '' ),
ksResultsSortByTestBoxCpuName: ( '', None, ' TestBoxes.sCpuVendor, TestBoxes.sCpuName', '' ),
ksResultsSortByTestBoxCpuRev: (
'', None, ' TestBoxes.sCpuVendor, TestBoxes.lCpuRevision DESC', ', TestBoxes.lCpuRevision' ),
ksResultsSortByTestBoxCpuFeatures: (
' TestBoxes.fCpuHwVirt DESC, TestBoxes.fCpuNestedPaging DESC, TestBoxes.fCpu64BitGuest DESC, '
+ 'TestBoxes.cCpus DESC',
', TestBoxes.cCpus' ), }
),
};
def _getTimePeriodQueryPart(self, tsNow, sInterval):
"""
Get part of SQL query responsible for SELECT data within
specified period of time.
"""
assert sInterval is not None; # too many rows.
cMonthsMourningPeriod = 2; # Stop reminding everyone about testboxes after 2 months. (May also speed up the query.)
if tsNow is None:
sRet = '(TestSets.tsDone IS NULL OR TestSets.tsDone >= (CURRENT_TIMESTAMP - \'%s\'::interval))\n' \
' AND TestSets.tsCreated >= (CURRENT_TIMESTAMP - \'%s\'::interval - \'%u months\'::interval)\n' \
% (sInterval, sInterval, cMonthsMourningPeriod);
else:
sTsNow = '\'%s\'::TIMESTAMP' % (tsNow,); # It's actually a string already. duh.
sRet = 'TestSets.tsCreated <= %s\n' \
' AND TestSets.tsCreated >= (%s - \'%s\'::interval - \'%u months\'::interval)\n' \
' AND (TestSets.tsDone IS NULL OR TestSets.tsDone >= (%s - \'%s\'::interval))\n' \
% ( sTsNow,
sTsNow, sInterval, cMonthsMourningPeriod,
sTsNow, sInterval );
return sRet
def fetchResultsForListing(self, iStart, cMaxRows, tsNow, sInterval, enmResultSortBy,
enmResultsGroupingType, iResultsGroupingValue, fOnlyFailures):
"""
Fetches TestResults table content.
If @param enmResultsGroupingType and @param iResultsGroupingValue
        are not None, then the resulting (returned) list contains only records
        that match the specified @param enmResultsGroupingType.
If @param enmResultsGroupingType is None, then
@param iResultsGroupingValue is ignored.
Returns an array (list) of TestResultData items, empty list if none.
Raises exception on error.
"""
#
# Get SQL query parameters
#
if enmResultsGroupingType is None or enmResultsGroupingType not in self.kdResultGroupingMap:
raise TMExceptionBase('Unknown grouping type');
if enmResultSortBy is None or enmResultSortBy not in self.kasResultsSortBy:
raise TMExceptionBase('Unknown sorting');
sGroupingTables, sGroupingField, sGroupingCondition, dSortingOverrides = self.kdResultGroupingMap[enmResultsGroupingType];
if enmResultSortBy in dSortingOverrides:
sSortingTables, sSortingWhere, sSortingOrderBy, sSortingColumns = dSortingOverrides[enmResultSortBy];
else:
sSortingTables, sSortingWhere, sSortingOrderBy, sSortingColumns = self.kdResultSortByMap[enmResultSortBy];
#
# Construct the query.
#
sQuery = 'SELECT DISTINCT TestSets.idTestSet,\n' \
' BuildCategories.idBuildCategory,\n' \
' BuildCategories.sProduct,\n' \
' BuildCategories.sRepository,\n' \
' BuildCategories.sBranch,\n' \
' BuildCategories.sType,\n' \
' Builds.idBuild,\n' \
' Builds.sVersion,\n' \
' Builds.iRevision,\n' \
' TestBoxes.sOs,\n' \
' TestBoxes.sOsVersion,\n' \
' TestBoxes.sCpuArch,\n' \
' TestBoxes.sCpuVendor,\n' \
' TestBoxes.sCpuName,\n' \
' TestBoxes.cCpus,\n' \
' TestBoxes.fCpuHwVirt,\n' \
' TestBoxes.fCpuNestedPaging,\n' \
' TestBoxes.fCpu64BitGuest,\n' \
' TestBoxes.idTestBox,\n' \
' TestBoxes.sName,\n' \
' TestResults.tsCreated,\n' \
' COALESCE(TestResults.tsElapsed, CURRENT_TIMESTAMP - TestResults.tsCreated),\n' \
' TestSets.enmStatus,\n' \
' TestResults.cErrors,\n' \
' TestCases.idTestCase,\n' \
' TestCases.sName,\n' \
' TestCases.sBaseCmd,\n' \
' TestCaseArgs.sArgs,\n' \
' TestSuiteBits.idBuild AS idBuildTestSuite,\n' \
' TestSuiteBits.iRevision AS iRevisionTestSuite,\n' \
' (TestSets.tsDone IS NULL) SortRunningFirst' + sSortingColumns + '\n' \
'FROM BuildCategories,\n' \
' Builds,\n' \
' TestBoxes,\n' \
' TestResults,\n' \
' TestCases,\n' \
' TestCaseArgs,\n' \
' ( SELECT TestSets.idTestSet AS idTestSet,\n' \
' TestSets.tsDone AS tsDone,\n' \
' TestSets.tsCreated AS tsCreated,\n' \
' TestSets.enmStatus AS enmStatus,\n' \
' TestSets.idBuild AS idBuild,\n' \
' TestSets.idBuildTestSuite AS idBuildTestSuite,\n' \
' TestSets.idGenTestBox AS idGenTestBox,\n' \
' TestSets.idGenTestCase AS idGenTestCase,\n' \
' TestSets.idGenTestCaseArgs AS idGenTestCaseArgs\n' \
' FROM ' + sGroupingTables + sSortingTables + '\n' \
' WHERE ' + self._getTimePeriodQueryPart(tsNow, sInterval);
if fOnlyFailures:
sQuery += ' AND TestSets.enmStatus != \'success\'::TestStatus_T' \
' AND TestSets.enmStatus != \'running\'::TestStatus_T';
if sGroupingField is not None:
sQuery += ' AND %s = %d\n' % (sGroupingField, iResultsGroupingValue,);
if sGroupingCondition is not None:
sQuery += sGroupingCondition.replace(' AND ', ' AND ');
if sSortingWhere is not None:
sQuery += sSortingWhere.replace(' AND ', ' AND ');
sQuery += ' ORDER BY ';
if sSortingOrderBy is not None:
sQuery += sSortingOrderBy + ',\n ';
sQuery += '(TestSets.tsDone IS NULL) DESC, TestSets.idTestSet DESC\n' \
' LIMIT %s OFFSET %s\n' % (cMaxRows, iStart,);
sQuery += ' ) AS TestSets\n' \
' LEFT OUTER JOIN Builds AS TestSuiteBits\n' \
' ON TestSets.idBuildTestSuite = TestSuiteBits.idBuild\n' \
'WHERE TestSets.idTestSet = TestResults.idTestSet\n' \
' AND TestResults.idTestResultParent is NULL\n' \
' AND TestSets.idBuild = Builds.idBuild\n' \
' AND Builds.tsExpire > TestSets.tsCreated\n' \
' AND Builds.tsEffective <= TestSets.tsCreated\n' \
' AND Builds.idBuildCategory = BuildCategories.idBuildCategory\n' \
' AND TestSets.idGenTestBox = TestBoxes.idGenTestBox\n' \
' AND TestSets.idGenTestCase = TestCases.idGenTestCase\n' \
' AND TestSets.idGenTestCaseArgs = TestCaseArgs.idGenTestCaseArgs\n' \
'ORDER BY ';
if sSortingOrderBy is not None:
sQuery += sSortingOrderBy + ',\n ';
sQuery += '(TestSets.tsDone IS NULL) DESC, TestSets.idTestSet DESC\n';
#
# Execute the query and return the wrapped results.
#
self._oDb.execute(sQuery);
aoRows = [];
for aoRow in self._oDb.fetchAll():
aoRows.append(TestResultListingData().initFromDbRow(aoRow))
return aoRows
def getEntriesCount(self, tsNow, sInterval, enmResultsGroupingType, iResultsGroupingValue, fOnlyFailures):
"""
Get number of table records.
If @param enmResultsGroupingType and @param iResultsGroupingValue
        are not None, then we count only those records
        that match the specified @param enmResultsGroupingType.
If @param enmResultsGroupingType is None, then
@param iResultsGroupingValue is ignored.
"""
#
# Get SQL query parameters
#
if enmResultsGroupingType is None:
raise TMExceptionBase('Unknown grouping type')
if enmResultsGroupingType not in self.kdResultGroupingMap:
raise TMExceptionBase('Unknown grouping type')
sGroupingTables, sGroupingField, sGroupingCondition, _ = self.kdResultGroupingMap[enmResultsGroupingType];
#
# Construct the query.
#
sQuery = 'SELECT COUNT(idTestSet)\n' \
'FROM ' + sGroupingTables + '\n' \
'WHERE ' + self._getTimePeriodQueryPart(tsNow, sInterval);
if fOnlyFailures:
sQuery += ' AND TestSets.enmStatus != \'success\'::TestStatus_T' \
' AND TestSets.enmStatus != \'running\'::TestStatus_T';
if sGroupingField is not None:
sQuery += ' AND %s = %d\n' % (sGroupingField, iResultsGroupingValue,);
if sGroupingCondition is not None:
sQuery += sGroupingCondition.replace(' AND ', ' AND ');
#
# Execute the query and return the result.
#
self._oDb.execute(sQuery)
return self._oDb.fetchOne()[0]
def getTestGroups(self, tsNow, sPeriod):
"""
        Get a list of unique TestGroupData objects found in all test results.
"""
self._oDb.execute('SELECT DISTINCT TestGroups.*\n'
'FROM TestGroups, TestSets\n'
'WHERE TestSets.idTestGroup = TestGroups.idTestGroup\n'
' AND TestGroups.tsExpire > TestSets.tsCreated\n'
' AND TestGroups.tsEffective <= TestSets.tsCreated'
' AND ' + self._getTimePeriodQueryPart(tsNow, sPeriod))
aaoRows = self._oDb.fetchAll()
aoRet = []
for aoRow in aaoRows:
## @todo Need to take time into consideration. Will go belly up if we delete a testgroup.
aoRet.append(TestGroupData().initFromDbRow(aoRow))
return aoRet
def getBuilds(self, tsNow, sPeriod):
"""
        Get a list of unique BuildDataEx objects found in all test results.
"""
self._oDb.execute('SELECT DISTINCT Builds.*, BuildCategories.*\n'
'FROM Builds, BuildCategories, TestSets\n'
'WHERE TestSets.idBuild = Builds.idBuild\n'
' AND Builds.idBuildCategory = BuildCategories.idBuildCategory\n'
' AND Builds.tsExpire > TestSets.tsCreated\n'
' AND Builds.tsEffective <= TestSets.tsCreated'
' AND ' + self._getTimePeriodQueryPart(tsNow, sPeriod))
aaoRows = self._oDb.fetchAll()
aoRet = []
for aoRow in aaoRows:
aoRet.append(BuildDataEx().initFromDbRow(aoRow))
return aoRet
def getTestBoxes(self, tsNow, sPeriod):
"""
        Get a list of unique TestBoxData objects found in all test results.
"""
## @todo do all in one query.
self._oDb.execute('SELECT DISTINCT TestBoxes.idTestBox, TestBoxes.idGenTestBox\n'
'FROM TestBoxes, TestSets\n'
'WHERE TestSets.idGenTestBox = TestBoxes.idGenTestBox\n'
' AND ' + self._getTimePeriodQueryPart(tsNow, sPeriod) +
'ORDER BY TestBoxes.idTestBox, TestBoxes.idGenTestBox DESC' );
idPrevTestBox = -1;
asIdGenTestBoxes = [];
for aoRow in self._oDb.fetchAll():
if aoRow[0] != idPrevTestBox:
idPrevTestBox = aoRow[0];
asIdGenTestBoxes.append(str(aoRow[1]));
aoRet = []
if len(asIdGenTestBoxes) > 0:
self._oDb.execute('SELECT *\n'
'FROM TestBoxes\n'
'WHERE idGenTestBox IN (' + ','.join(asIdGenTestBoxes) + ')\n'
'ORDER BY sName');
for aoRow in self._oDb.fetchAll():
aoRet.append(TestBoxData().initFromDbRow(aoRow));
return aoRet
def getTestCases(self, tsNow, sPeriod):
"""
        Get a list of unique TestCaseData objects which appear in the
        specified test result period.
"""
self._oDb.execute('SELECT DISTINCT TestCases.idTestCase, TestCases.idGenTestCase, TestSets.tsConfig\n'
'FROM TestCases, TestSets\n'
'WHERE TestSets.idTestCase = TestCases.idTestCase\n'
' AND TestCases.tsExpire > TestSets.tsCreated\n'
' AND TestCases.tsEffective <= TestSets.tsCreated\n'
' AND ' + self._getTimePeriodQueryPart(tsNow, sPeriod) +
'ORDER BY TestCases.idTestCase, TestCases.idGenTestCase DESC\n');
aaoRows = self._oDb.fetchAll()
aoRet = []
idPrevTestCase = -1;
for aoRow in aaoRows:
## @todo reduce subqueries
if aoRow[0] != idPrevTestCase:
idPrevTestCase = aoRow[0];
aoRet.append(TestCaseData().initFromDbWithGenId(self._oDb, aoRow[1], aoRow[2]))
return aoRet
def getSchedGroups(self, tsNow, sPeriod):
"""
        Get a list of unique SchedGroupData objects found in all test results.
"""
self._oDb.execute('SELECT DISTINCT TestBoxes.idSchedGroup\n'
'FROM TestBoxes, TestSets\n'
'WHERE TestSets.idGenTestBox = TestBoxes.idGenTestBox\n'
' AND TestBoxes.tsExpire > TestSets.tsCreated\n'
' AND TestBoxes.tsEffective <= TestSets.tsCreated'
' AND ' + self._getTimePeriodQueryPart(tsNow, sPeriod))
aiRows = self._oDb.fetchAll()
aoRet = []
for iRow in aiRows:
## @todo reduce subqueries
aoRet.append(SchedGroupData().initFromDbWithId(self._oDb, iRow))
return aoRet
def getById(self, idTestResult):
"""
        Get a test result record by its id.
"""
self._oDb.execute('SELECT *\n'
'FROM TestResults\n'
'WHERE idTestResult = %s\n',
(idTestResult,))
aRows = self._oDb.fetchAll()
if len(aRows) not in (0, 1):
raise TMExceptionBase('Found more than one test result with the same credentials. Database structure is corrupted.')
try:
return TestResultData().initFromDbRow(aRows[0])
except IndexError:
return None
#
# Details view and interface.
#
def fetchResultTree(self, idTestSet, cMaxDepth = None):
"""
Fetches the result tree for the given test set.
Returns a tree of TestResultDataEx nodes.
Raises exception on invalid input and database issues.
"""
# Depth first, i.e. just like the XML added them.
## @todo this still isn't performing extremely well, consider optimizations.
sQuery = self._oDb.formatBindArgs(
'SELECT TestResults.*,\n'
' TestResultStrTab.sValue,\n'
' EXISTS ( SELECT idTestResultValue\n'
' FROM TestResultValues\n'
' WHERE TestResultValues.idTestResult = TestResults.idTestResult ) AS fHasValues,\n'
' EXISTS ( SELECT idTestResultMsg\n'
' FROM TestResultMsgs\n'
' WHERE TestResultMsgs.idTestResult = TestResults.idTestResult ) AS fHasMsgs,\n'
' EXISTS ( SELECT idTestResultFile\n'
' FROM TestResultFiles\n'
' WHERE TestResultFiles.idTestResult = TestResults.idTestResult ) AS fHasFiles\n'
'FROM TestResults, TestResultStrTab\n'
'WHERE TestResults.idTestSet = %s\n'
' AND TestResults.idStrName = TestResultStrTab.idStr\n'
, ( idTestSet, ));
if cMaxDepth is not None:
sQuery += self._oDb.formatBindArgs(' AND TestResults.iNestingDepth <= %s\n', (cMaxDepth,));
sQuery += 'ORDER BY idTestResult ASC\n'
self._oDb.execute(sQuery);
cRows = self._oDb.getRowCount();
if cRows > 65536:
raise TMTooManyRows('Too many rows returned for idTestSet=%d: %d' % (idTestSet, cRows,));
aaoRows = self._oDb.fetchAll();
if len(aaoRows) == 0:
raise TMExceptionBase('No test results for idTestSet=%d.' % (idTestSet,));
# Set up the root node first.
aoRow = aaoRows[0];
oRoot = TestResultDataEx().initFromDbRow(aoRow);
if oRoot.idTestResultParent is not None:
raise self._oDb.integrityException('The root TestResult (#%s) has a parent (#%s)!'
% (oRoot.idTestResult, oRoot.idTestResultParent));
self._fetchResultTreeNodeExtras(oRoot, aoRow[-3], aoRow[-2], aoRow[-1]);
        # The children (if any).
dLookup = { oRoot.idTestResult: oRoot };
oParent = oRoot;
for iRow in range(1, len(aaoRows)):
aoRow = aaoRows[iRow];
oCur = TestResultDataEx().initFromDbRow(aoRow);
self._fetchResultTreeNodeExtras(oCur, aoRow[-3], aoRow[-2], aoRow[-1]);
# Figure out and vet the parent.
if oParent.idTestResult != oCur.idTestResultParent:
oParent = dLookup.get(oCur.idTestResultParent, None);
if oParent is None:
raise self._oDb.integrityException('TestResult #%d is orphaned from its parent #%s.'
% (oCur.idTestResult, oCur.idTestResultParent,));
if oParent.iNestingDepth + 1 != oCur.iNestingDepth:
raise self._oDb.integrityException('TestResult #%d has incorrect nesting depth (%d instead of %d)'
% (oCur.idTestResult, oCur.iNestingDepth, oParent.iNestingDepth + 1,));
# Link it up.
oCur.oParent = oParent;
oParent.aoChildren.append(oCur);
dLookup[oCur.idTestResult] = oCur;
return (oRoot, dLookup);
def _fetchResultTreeNodeExtras(self, oCurNode, fHasValues, fHasMsgs, fHasFiles):
"""
fetchResultTree worker that fetches values, message and files for the
specified node.
"""
assert(oCurNode.aoValues == []);
assert(oCurNode.aoMsgs == []);
assert(oCurNode.aoFiles == []);
if fHasValues:
self._oDb.execute('SELECT TestResultValues.*,\n'
' TestResultStrTab.sValue\n'
'FROM TestResultValues, TestResultStrTab\n'
'WHERE TestResultValues.idTestResult = %s\n'
' AND TestResultValues.idStrName = TestResultStrTab.idStr\n'
'ORDER BY idTestResultValue ASC\n'
, ( oCurNode.idTestResult, ));
for aoRow in self._oDb.fetchAll():
oCurNode.aoValues.append(TestResultValueDataEx().initFromDbRow(aoRow));
if fHasMsgs:
self._oDb.execute('SELECT TestResultMsgs.*,\n'
' TestResultStrTab.sValue\n'
'FROM TestResultMsgs, TestResultStrTab\n'
'WHERE TestResultMsgs.idTestResult = %s\n'
' AND TestResultMsgs.idStrMsg = TestResultStrTab.idStr\n'
'ORDER BY idTestResultMsg ASC\n'
, ( oCurNode.idTestResult, ));
for aoRow in self._oDb.fetchAll():
oCurNode.aoMsgs.append(TestResultMsgDataEx().initFromDbRow(aoRow));
if fHasFiles:
self._oDb.execute('SELECT TestResultFiles.*,\n'
' StrTabFile.sValue AS sFile,\n'
' StrTabDesc.sValue AS sDescription,\n'
' StrTabKind.sValue AS sKind,\n'
' StrTabMime.sValue AS sMime\n'
'FROM TestResultFiles,\n'
' TestResultStrTab AS StrTabFile,\n'
' TestResultStrTab AS StrTabDesc,\n'
' TestResultStrTab AS StrTabKind,\n'
' TestResultStrTab AS StrTabMime\n'
'WHERE TestResultFiles.idTestResult = %s\n'
' AND TestResultFiles.idStrFile = StrTabFile.idStr\n'
' AND TestResultFiles.idStrDescription = StrTabDesc.idStr\n'
' AND TestResultFiles.idStrKind = StrTabKind.idStr\n'
' AND TestResultFiles.idStrMime = StrTabMime.idStr\n'
'ORDER BY idTestResultFile ASC\n'
, ( oCurNode.idTestResult, ));
for aoRow in self._oDb.fetchAll():
oCurNode.aoFiles.append(TestResultFileDataEx().initFromDbRow(aoRow));
return True;
#
# TestBoxController interface(s).
#
def _inhumeTestResults(self, aoStack, idTestSet, sError):
"""
The test produced too much output; kill and bury it.
Note! We leave the test set open, only the test result records are
completed. Thus, _getResultStack will return an empty stack and
cause XML processing to fail immediately, while we can still
record when it actually completed in the test set the normal way.
"""
self._oDb.dprint('** _inhumeTestResults: idTestSet=%d\n%s' % (idTestSet, self._stringifyStack(aoStack),));
#
# First add a message.
#
self._newFailureDetails(aoStack[0].idTestResult, sError, None);
#
# Then complete all open test results.
#
for oTestResult in aoStack:
oTestResult.cErrors += 1;
self._completeTestResults(oTestResult, None, TestResultData.ksTestStatus_Failure, oTestResult.cErrors);
# A bit of paranoia.
self._oDb.execute('UPDATE TestResults\n'
'SET cErrors = cErrors + 1,\n'
' enmStatus = \'failure\'::TestStatus_T,\n'
' tsElapsed = CURRENT_TIMESTAMP - tsCreated\n'
'WHERE idTestSet = %s\n'
' AND enmStatus = \'running\'::TestStatus_T\n'
, ( idTestSet, ));
self._oDb.commit();
return None;
def strTabString(self, sString, fCommit = False):
"""
Gets the string table id for the given string, adding it if new.
Note! A copy of this code is also in TestSetLogic.
"""
## @todo move this and make a stored procedure for it.
self._oDb.execute('SELECT idStr\n'
'FROM TestResultStrTab\n'
'WHERE sValue = %s'
, (sString,));
if self._oDb.getRowCount() == 0:
self._oDb.execute('INSERT INTO TestResultStrTab (sValue)\n'
'VALUES (%s)\n'
'RETURNING idStr\n'
, (sString,));
if fCommit:
self._oDb.commit();
return self._oDb.fetchOne()[0];
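# Note: strTabString is a plain SELECT-then-INSERT sequence, so two concurrent
# writers adding the same new string can both reach the INSERT; whether that is
# harmless depends on the constraints on TestResultStrTab (an assumption about the
# schema, not verified here), which the @todo above about a stored procedure would
# also address.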
@staticmethod
def _stringifyStack(aoStack):
"""Returns a string rep of the stack."""
sRet = '';
for i in range(len(aoStack)):
sRet += 'aoStack[%d]=%s\n' % (i, aoStack[i]);
return sRet;
def _getResultStack(self, idTestSet):
"""
Gets the current stack of result sets.
"""
self._oDb.execute('SELECT *\n'
'FROM TestResults\n'
'WHERE idTestSet = %s\n'
' AND enmStatus = \'running\'::TestStatus_T\n'
'ORDER BY idTestResult DESC'
, ( idTestSet, ));
aoStack = [];
for aoRow in self._oDb.fetchAll():
aoStack.append(TestResultData().initFromDbRow(aoRow));
for i in range(len(aoStack)):
assert aoStack[i].iNestingDepth == len(aoStack) - i - 1, self._stringifyStack(aoStack);
return aoStack;
def _newTestResult(self, idTestResultParent, idTestSet, iNestingDepth, tsCreated, sName, dCounts, fCommit = False):
"""
Creates a new test result.
Returns the TestResultData object for the new record.
May raise exception on database error.
"""
assert idTestResultParent is not None;
assert idTestResultParent > 1;
#
# This isn't necessarily very efficient, but it's necessary to prevent
# a wild test or testbox from filling up the database.
#
sCountName = 'cTestResults';
if sCountName not in dCounts:
self._oDb.execute('SELECT COUNT(idTestResult)\n'
'FROM TestResults\n'
'WHERE idTestSet = %s\n'
, ( idTestSet,));
dCounts[sCountName] = self._oDb.fetchOne()[0];
dCounts[sCountName] += 1;
if dCounts[sCountName] > config.g_kcMaxTestResultsPerTS:
raise TestResultHangingOffence('Too many sub-tests in total!');
sCountName = 'cTestResultsIn%d' % (idTestResultParent,);
if sCountName not in dCounts:
self._oDb.execute('SELECT COUNT(idTestResult)\n'
'FROM TestResults\n'
'WHERE idTestResultParent = %s\n'
, ( idTestResultParent,));
dCounts[sCountName] = self._oDb.fetchOne()[0];
dCounts[sCountName] += 1;
if dCounts[sCountName] > config.g_kcMaxTestResultsPerTR:
raise TestResultHangingOffence('Too many immediate sub-tests!');
# This is also a hanging offence.
if iNestingDepth > config.g_kcMaxTestResultDepth:
raise TestResultHangingOffence('Too deep sub-test nesting!');
# Ditto.
if len(sName) > config.g_kcchMaxTestResultName:
raise TestResultHangingOffence('Test name is too long: %d chars - "%s"' % (len(sName), sName));
#
# Within bounds, do the job.
#
idStrName = self.strTabString(sName, fCommit);
self._oDb.execute('INSERT INTO TestResults (\n'
' idTestResultParent,\n'
' idTestSet,\n'
' tsCreated,\n'
' idStrName,\n'
' iNestingDepth )\n'
'VALUES (%s, %s, TIMESTAMP WITH TIME ZONE %s, %s, %s)\n'
'RETURNING *\n'
, ( idTestResultParent, idTestSet, tsCreated, idStrName, iNestingDepth) );
oData = TestResultData().initFromDbRow(self._oDb.fetchOne());
self._oDb.maybeCommit(fCommit);
return oData;
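# dCounts sketch (illustrative): the first sub-test created under a given parent in
# an XML batch costs two COUNT() queries; later ones only bump the cached numbers.
# E.g. after three sub-tests of test result #42 in an initially empty test set,
# dCounts would hold something like { 'cTestResults': 3, 'cTestResultsIn42': 3 }
# (the id 42 is a made-up example).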
def _newTestValue(self, idTestResult, idTestSet, sName, lValue, sUnit, dCounts, tsCreated = None, fCommit = False):
"""
Creates a test value.
May raise exception on database error.
"""
#
# Bounds checking.
#
sCountName = 'cTestValues';
if sCountName not in dCounts:
self._oDb.execute('SELECT COUNT(idTestResultValue)\n'
'FROM TestResultValues, TestResults\n'
'WHERE TestResultValues.idTestResult = TestResults.idTestResult\n'
' AND TestResults.idTestSet = %s\n'
, ( idTestSet,));
dCounts[sCountName] = self._oDb.fetchOne()[0];
dCounts[sCountName] += 1;
if dCounts[sCountName] > config.g_kcMaxTestValuesPerTS:
raise TestResultHangingOffence('Too many values in total!');
sCountName = 'cTestValuesIn%d' % (idTestResult,);
if sCountName not in dCounts:
self._oDb.execute('SELECT COUNT(idTestResultValue)\n'
'FROM TestResultValues\n'
'WHERE idTestResult = %s\n'
, ( idTestResult,));
dCounts[sCountName] = self._oDb.fetchOne()[0];
dCounts[sCountName] += 1;
if dCounts[sCountName] > config.g_kcMaxTestValuesPerTR:
raise TestResultHangingOffence('Too many immediate values for one test result!');
if len(sName) > config.g_kcchMaxTestValueName:
raise TestResultHangingOffence('Value name is too long: %d chars - "%s"' % (len(sName), sName));
#
# Do the job.
#
iUnit = constants.valueunit.g_kdNameToConst.get(sUnit, constants.valueunit.NONE);
idStrName = self.strTabString(sName, fCommit);
if tsCreated is None:
self._oDb.execute('INSERT INTO TestResultValues (\n'
' idTestResult,\n'
' idTestSet,\n'
' idStrName,\n'
' lValue,\n'
' iUnit)\n'
'VALUES ( %s, %s, %s, %s, %s )\n'
, ( idTestResult, idTestSet, idStrName, lValue, iUnit,) );
else:
self._oDb.execute('INSERT INTO TestResultValues (\n'
' idTestResult,\n'
' idTestSet,\n'
' tsCreated,\n'
' idStrName,\n'
' lValue,\n'
' iUnit)\n'
'VALUES ( %s, %s, TIMESTAMP WITH TIME ZONE %s, %s, %s, %s )\n'
, ( idTestResult, idTestSet, tsCreated, idStrName, lValue, iUnit,) );
self._oDb.maybeCommit(fCommit);
return True;
def _newFailureDetails(self, idTestResult, sText, dCounts, tsCreated = None, fCommit = False):
"""
Creates a record detailing cause of failure.
May raise exception on database error.
"""
#
# Overflow protection.
#
if dCounts is not None:
sCountName = 'cTestMsgsIn%d' % (idTestResult,);
if sCountName not in dCounts:
self._oDb.execute('SELECT COUNT(idTestResultMsg)\n'
'FROM TestResultMsgs\n'
'WHERE idTestResult = %s\n'
, ( idTestResult,));
dCounts[sCountName] = self._oDb.fetchOne()[0];
dCounts[sCountName] += 1;
if dCounts[sCountName] > config.g_kcMaxTestMsgsPerTR:
raise TestResultHangingOffence('Too many messages for one test result!');
if len(sText) > config.g_kcchMaxTestMsg:
raise TestResultHangingOffence('Failure details message is too long: %d chars - "%s"' % (len(sText), sText));
#
# Do the job.
#
idStrMsg = self.strTabString(sText, fCommit);
if tsCreated is None:
self._oDb.execute('INSERT INTO TestResultMsgs (\n'
' idTestResult,\n'
' idStrMsg,\n'
' enmLevel)\n'
'VALUES ( %s, %s, %s)\n'
, ( idTestResult, idStrMsg, 'failure',) );
else:
self._oDb.execute('INSERT INTO TestResultMsgs (\n'
' idTestResult,\n'
' tsCreated,\n'
' idStrMsg,\n'
' enmLevel)\n'
'VALUES ( %s, TIMESTAMP WITH TIME ZONE %s, %s, %s)\n'
, ( idTestResult, tsCreated, idStrMsg, 'failure',) );
self._oDb.maybeCommit(fCommit);
return True;
def _completeTestResults(self, oTestResult, tsDone, enmStatus, cErrors = 0, fCommit = False):
"""
Completes a test result. Updates the oTestResult object.
May raise exception on database error.
"""
self._oDb.dprint('** _completeTestResults: cErrors=%s tsDone=%s enmStatus=%s oTestResults=\n%s'
% (cErrors, tsDone, enmStatus, oTestResult,));
#
# Sanity check: No open sub tests (aoStack should make sure about this!).
#
self._oDb.execute('SELECT COUNT(idTestResult)\n'
'FROM TestResults\n'
'WHERE idTestResultParent = %s\n'
' AND enmStatus = %s\n'
, ( oTestResult.idTestResult, TestResultData.ksTestStatus_Running,));
cOpenSubTest = self._oDb.fetchOne()[0];
assert cOpenSubTest == 0, 'cOpenSubTest=%d - %s' % (cOpenSubTest, oTestResult,);
assert oTestResult.enmStatus == TestResultData.ksTestStatus_Running;
#
# Make sure the reporter isn't lying about successes or error counts.
#
self._oDb.execute('SELECT COALESCE(SUM(cErrors), 0)\n'
'FROM TestResults\n'
'WHERE idTestResultParent = %s\n'
, ( oTestResult.idTestResult, ));
cMinErrors = self._oDb.fetchOne()[0] + oTestResult.cErrors;
if cErrors < cMinErrors:
cErrors = cMinErrors;
if cErrors > 0 and enmStatus == TestResultData.ksTestStatus_Success:
enmStatus = TestResultData.ksTestStatus_Failure
#
# Do the update.
#
if tsDone is None:
self._oDb.execute('UPDATE TestResults\n'
'SET cErrors = %s,\n'
' enmStatus = %s,\n'
' tsElapsed = CURRENT_TIMESTAMP - tsCreated\n'
'WHERE idTestResult = %s\n'
'RETURNING tsElapsed'
, ( cErrors, enmStatus, oTestResult.idTestResult,) );
else:
self._oDb.execute('UPDATE TestResults\n'
'SET cErrors = %s,\n'
' enmStatus = %s,\n'
' tsElapsed = TIMESTAMP WITH TIME ZONE %s - tsCreated\n'
'WHERE idTestResult = %s\n'
'RETURNING tsElapsed'
, ( cErrors, enmStatus, tsDone, oTestResult.idTestResult,) );
oTestResult.tsElapsed = self._oDb.fetchOne()[0];
oTestResult.enmStatus = enmStatus;
oTestResult.cErrors = cErrors;
self._oDb.maybeCommit(fCommit);
return None;
def _doPopHint(self, aoStack, cStackEntries, dCounts):
""" Executes a PopHint. """
assert cStackEntries >= 0;
while len(aoStack) > cStackEntries:
if aoStack[0].enmStatus == TestResultData.ksTestStatus_Running:
self._newFailureDetails(aoStack[0].idTestResult, 'XML error: Missing </Test>', dCounts);
self._completeTestResults(aoStack[0], tsDone = None, cErrors = 1,
enmStatus = TestResultData.ksTestStatus_Failure, fCommit = True);
aoStack.pop(0);
return True;
@staticmethod
def _validateElement(sName, dAttribs, fClosed):
"""
Validates an element and its attributes.
"""
#
# Validate attributes by name.
#
# Validate integer attributes.
for sAttr in [ 'errors', 'testdepth' ]:
if sAttr in dAttribs:
try:
_ = int(dAttribs[sAttr]);
except:
return 'Element %s has an invalid %s attribute value: %s.' % (sName, sAttr, dAttribs[sAttr],);
# Validate long attributes.
for sAttr in [ 'value', ]:
if sAttr in dAttribs:
try:
_ = long(dAttribs[sAttr]);
except:
return 'Element %s has an invalid %s attribute value: %s.' % (sName, sAttr, dAttribs[sAttr],);
# Validate string attributes.
for sAttr in [ 'name', 'unit', 'text' ]:
if sAttr in dAttribs and len(dAttribs[sAttr]) == 0:
return 'Element %s has an empty %s attribute value.' % (sName, sAttr,);
# Validate the timestamp attribute.
if 'timestamp' in dAttribs:
(dAttribs['timestamp'], sError) = ModelDataBase.validateTs(dAttribs['timestamp'], fAllowNull = False);
if sError is not None:
return 'Element %s has an invalid timestamp ("%s"): %s' % (sName, dAttribs['timestamp'], sError,);
#
# Check that attributes that are required are present.
# We ignore extra attributes.
#
dElementAttribs = \
{
'Test': [ 'timestamp', 'name', ],
'Value': [ 'timestamp', 'name', 'unit', 'value', ],
'FailureDetails': [ 'timestamp', 'text', ],
'Passed': [ 'timestamp', ],
'Skipped': [ 'timestamp', ],
'Failed': [ 'timestamp', 'errors', ],
'TimedOut': [ 'timestamp', 'errors', ],
'End': [ 'timestamp', ],
'PushHint': [ 'testdepth', ],
'PopHint': [ 'testdepth', ],
};
if sName not in dElementAttribs:
return 'Unknown element "%s".' % (sName,);
for sAttr in dElementAttribs[sName]:
if sAttr not in dAttribs:
return 'Element %s requires attribute "%s".' % (sName, sAttr);
#
# Only the Test element can (and must) remain open.
#
if sName == 'Test' and fClosed:
return '<Test/> is not allowed.';
if sName != 'Test' and not fClosed:
return 'All elements except <Test> must be closed.';
return None;
@staticmethod
def _parseElement(sElement):
"""
Parses an element.
"""
#
# Element level bits.
#
sName = sElement.split()[0];
sElement = sElement[len(sName):];
fClosed = sElement[-1] == '/';
if fClosed:
sElement = sElement[:-1];
#
# Attributes.
#
sError = None;
dAttribs = {};
sElement = sElement.strip();
while len(sElement) > 0:
# Extract attribute name.
off = sElement.find('=');
if off < 0 or not sElement[:off].isalnum():
sError = 'Attributes shall have alphanumerical names and have values.';
break;
sAttr = sElement[:off];
# Extract attribute value.
if off + 2 >= len(sElement) or sElement[off + 1] != '"':
sError = 'Attribute (%s) value is missing or not in double quotes.' % (sAttr,);
break;
off += 2;
offEndQuote = sElement.find('"', off);
if offEndQuote < 0:
sError = 'Attribute (%s) value is missing end quotation mark.' % (sAttr,);
break;
sValue = sElement[off:offEndQuote];
# Check for duplicates.
if sAttr in dAttribs:
sError = 'Attribute "%s" appears more than once.' % (sAttr,);
break;
# Unescape the value.
sValue = sValue.replace('&lt;',   '<');
sValue = sValue.replace('&gt;',   '>');
sValue = sValue.replace('&apos;', '\'');
sValue = sValue.replace('&quot;', '"');
sValue = sValue.replace('&#xA;',  '\n');
sValue = sValue.replace('&#xD;',  '\r');
sValue = sValue.replace('&amp;',  '&'); # last
# Done.
dAttribs[sAttr] = sValue;
# advance
sElement = sElement[offEndQuote + 1:];
sElement = sElement.lstrip();
#
# Validate the element before we return.
#
if sError is None:
sError = TestResultLogic._validateElement(sName, dAttribs, fClosed);
return (sName, dAttribs, sError)
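# Example (illustrative; the attribute values and the timestamp format are
# assumptions): for an element body such as
#   Value timestamp="2015-06-01T12:00:00Z" name="mem" unit="MB" value="512"/
# this returns ('Value', {'timestamp': <validated value>, 'name': 'mem',
# 'unit': 'MB', 'value': '512'}, None); on malformed input the third member of
# the tuple carries the error string instead.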
def _handleElement(self, sName, dAttribs, idTestSet, aoStack, aaiHints, dCounts):
"""
Worker for processXmlStream that handles one element.
Returns None on success, error string on bad XML or similar.
Raises exception on hanging offence and on database error.
"""
if sName == 'Test':
iNestingDepth = aoStack[0].iNestingDepth + 1 if len(aoStack) > 0 else 0;
aoStack.insert(0, self._newTestResult(idTestResultParent = aoStack[0].idTestResult, idTestSet = idTestSet,
tsCreated = dAttribs['timestamp'], sName = dAttribs['name'],
iNestingDepth = iNestingDepth, dCounts = dCounts, fCommit = True) );
elif sName == 'Value':
self._newTestValue(idTestResult = aoStack[0].idTestResult, idTestSet = idTestSet, tsCreated = dAttribs['timestamp'],
sName = dAttribs['name'], sUnit = dAttribs['unit'], lValue = long(dAttribs['value']),
dCounts = dCounts, fCommit = True);
elif sName == 'FailureDetails':
self._newFailureDetails(idTestResult = aoStack[0].idTestResult, tsCreated = dAttribs['timestamp'],
sText = dAttribs['text'], dCounts = dCounts, fCommit = True);
elif sName == 'Passed':
self._completeTestResults(aoStack[0], tsDone = dAttribs['timestamp'],
enmStatus = TestResultData.ksTestStatus_Success, fCommit = True);
elif sName == 'Skipped':
self._completeTestResults(aoStack[0], tsDone = dAttribs['timestamp'],
enmStatus = TestResultData.ksTestStatus_Skipped, fCommit = True);
elif sName == 'Failed':
self._completeTestResults(aoStack[0], tsDone = dAttribs['timestamp'], cErrors = int(dAttribs['errors']),
enmStatus = TestResultData.ksTestStatus_Failure, fCommit = True);
elif sName == 'TimedOut':
self._completeTestResults(aoStack[0], tsDone = dAttribs['timestamp'], cErrors = int(dAttribs['errors']),
enmStatus = TestResultData.ksTestStatus_TimedOut, fCommit = True);
elif sName == 'End':
self._completeTestResults(aoStack[0], tsDone = dAttribs['timestamp'],
cErrors = int(dAttribs.get('errors', '1')),
enmStatus = TestResultData.ksTestStatus_Success, fCommit = True);
elif sName == 'PushHint':
if len(aaiHints) > 1:
return 'PushHint cannot be nested.'
aaiHints.insert(0, [len(aoStack), int(dAttribs['testdepth'])]);
elif sName == 'PopHint':
if len(aaiHints) < 1:
return 'No hint to pop.'
iDesiredTestDepth = int(dAttribs['testdepth']);
cStackEntries, iTestDepth = aaiHints.pop(0);
self._doPopHint(aoStack, cStackEntries, dCounts); # Fake the necessary '<End/></Test>' tags.
if iDesiredTestDepth != iTestDepth:
return 'PopHint tag has different testdepth: %d, on stack %d.' % (iDesiredTestDepth, iTestDepth);
else:
return 'Unexpected element "%s".' % (sName,);
return None;
def processXmlStream(self, sXml, idTestSet):
"""
Processes the "XML" stream section given in sXml.
The sXml isn't a complete XML document; even if we saved up all the sXml
for a given test set, it might not form a complete and well-formed XML
document, since the test may be aborted, abend or simply be buggy. We
therefore do our own parsing and treat the XML tags as commands more
than anything else.
Returns (sError, fUnforgivable), where sError is None on success.
May raise database exception.
"""
aoStack = self._getResultStack(idTestSet); # [0] == top; [-1] == bottom.
if len(aoStack) == 0:
return ('No open results', True);
self._oDb.dprint('** processXmlStream len(aoStack)=%s' % (len(aoStack),));
#self._oDb.dprint('processXmlStream: %s' % (self._stringifyStack(aoStack),));
#self._oDb.dprint('processXmlStream: sXml=%s' % (sXml,));
dCounts = {};
aaiHints = [];
sError = None;
fExpectCloseTest = False;
sXml = sXml.strip();
while len(sXml) > 0:
if sXml.startswith('</Test>'): # Only closing tag.
offNext = len('</Test>');
if len(aoStack) <= 1:
sError = 'Trying to close the top test results.'
break;
# ASSUMES that we've just seen an <End/>, <Passed/>, <Failed/>,
# <TimedOut/> or <Skipped/> tag earlier in this call!
if aoStack[0].enmStatus == TestResultData.ksTestStatus_Running or not fExpectCloseTest:
sError = 'Missing <End/>, <Passed/>, <Failed/>, <TimedOut/> or <Skipped/> tag.';
break;
aoStack.pop(0);
fExpectCloseTest = False;
elif fExpectCloseTest:
sError = 'Expected </Test>.'
break;
elif sXml.startswith('<?xml '): # Ignore (included files).
offNext = sXml.find('?>');
if offNext < 0:
sError = 'Unterminated <?xml ?> element.';
break;
offNext += 2;
elif sXml[0] == '<':
# Parse and check the tag.
if not sXml[1].isalpha():
sError = 'Malformed element.';
break;
offNext = sXml.find('>')
if offNext < 0:
sError = 'Unterminated element.';
break;
(sName, dAttribs, sError) = self._parseElement(sXml[1:offNext]);
offNext += 1;
if sError is not None:
break;
# Handle it.
try:
sError = self._handleElement(sName, dAttribs, idTestSet, aoStack, aaiHints, dCounts);
except TestResultHangingOffence as oXcpt:
self._inhumeTestResults(aoStack, idTestSet, str(oXcpt));
return (str(oXcpt), True);
fExpectCloseTest = sName in [ 'End', 'Passed', 'Failed', 'TimedOut', 'Skipped', ];
else:
sError = 'Unexpected content.';
break;
# Advance.
sXml = sXml[offNext:];
sXml = sXml.lstrip();
#
# Post processing checks.
#
if sError is None and fExpectCloseTest:
sError = 'Expected </Test> before the end of the XML section.'
elif sError is None and len(aaiHints) > 0:
sError = 'Expected <PopHint/> before the end of the XML section.'
if len(aaiHints) > 0:
self._doPopHint(aoStack, aaiHints[-1][0], dCounts);
#
# Log the error.
#
if sError is not None:
SystemLogLogic(self._oDb).addEntry(SystemLogData.ksEvent_XmlResultMalformed,
'idTestSet=%s idTestResult=%s XML="%s" %s'
% ( idTestSet,
aoStack[0].idTestResult if len(aoStack) > 0 else -1,
sXml[:30 if len(sXml) >= 30 else len(sXml)],
sError, ),
cHoursRepeat = 6, fCommit = True);
return (sError, False);
#
# Unit testing.
#
# pylint: disable=C0111
class TestResultDataTestCase(ModelDataBaseTestCase):
def setUp(self):
self.aoSamples = [TestResultData(),];
class TestResultValueDataTestCase(ModelDataBaseTestCase):
def setUp(self):
self.aoSamples = [TestResultValueData(),];
if __name__ == '__main__':
unittest.main();
# not reached.
| {
"content_hash": "ef7b148c61ab830256ef5b7ffd4ee9f7",
"timestamp": "",
"source": "github",
"line_count": 1786,
"max_line_length": 130,
"avg_line_length": 42.66013437849944,
"alnum_prop": 0.5349975718916933,
"repo_name": "egraba/vbox_openbsd",
"id": "9a4a35d95ca3020d07815584f99a67f8eabf9a60",
"size": "76309",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "VirtualBox-5.0.0/src/VBox/ValidationKit/testmanager/core/testresults.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "88714"
},
{
"name": "Assembly",
"bytes": "4303680"
},
{
"name": "AutoIt",
"bytes": "2187"
},
{
"name": "Batchfile",
"bytes": "95534"
},
{
"name": "C",
"bytes": "192632221"
},
{
"name": "C#",
"bytes": "64255"
},
{
"name": "C++",
"bytes": "83842667"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "6041"
},
{
"name": "CSS",
"bytes": "26756"
},
{
"name": "D",
"bytes": "41844"
},
{
"name": "DIGITAL Command Language",
"bytes": "56579"
},
{
"name": "DTrace",
"bytes": "1466646"
},
{
"name": "GAP",
"bytes": "350327"
},
{
"name": "Groff",
"bytes": "298540"
},
{
"name": "HTML",
"bytes": "467691"
},
{
"name": "IDL",
"bytes": "106734"
},
{
"name": "Java",
"bytes": "261605"
},
{
"name": "JavaScript",
"bytes": "80927"
},
{
"name": "Lex",
"bytes": "25122"
},
{
"name": "Logos",
"bytes": "4941"
},
{
"name": "Makefile",
"bytes": "426902"
},
{
"name": "Module Management System",
"bytes": "2707"
},
{
"name": "NSIS",
"bytes": "177212"
},
{
"name": "Objective-C",
"bytes": "5619792"
},
{
"name": "Objective-C++",
"bytes": "81554"
},
{
"name": "PHP",
"bytes": "58585"
},
{
"name": "Pascal",
"bytes": "69941"
},
{
"name": "Perl",
"bytes": "240063"
},
{
"name": "PowerShell",
"bytes": "10664"
},
{
"name": "Python",
"bytes": "9094160"
},
{
"name": "QMake",
"bytes": "3055"
},
{
"name": "R",
"bytes": "21094"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Shell",
"bytes": "1460572"
},
{
"name": "SourcePawn",
"bytes": "4139"
},
{
"name": "TypeScript",
"bytes": "142342"
},
{
"name": "Visual Basic",
"bytes": "7161"
},
{
"name": "XSLT",
"bytes": "1034475"
},
{
"name": "Yacc",
"bytes": "22312"
}
],
"symlink_target": ""
} |
from . import __version__
from os.path import join
from requests_toolbelt import user_agent
from urlparse import urljoin
import logging
import logging.handlers
import requests
class Client(object):
LOG_INFO = logging.INFO
LOG_DEBUG = logging.DEBUG
VERBS = ('delete', 'get', 'post', 'put')
def __init__(self, token, host='https://api2.panopta.com', version='2', log_level=LOG_INFO, log_path="."):
self.session = requests.Session()
self.session.auth = PanoptaAuth(token)
self.session.headers.update({'Accept': 'application/json', 'User-Agent': user_agent(__package__, __version__)})
self.base_url = urljoin(host, 'v' + version)
logger = logging.getLogger()
log_handler = logging.handlers.TimedRotatingFileHandler(join(log_path, __package__ + '.log'),
when='d',
interval=1,
backupCount=14)
log_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(log_handler)
logger.setLevel(log_level)
logger.getChild(__package__).info(self.__class__.__name__ + ' initialized')
def url(self, *path_parts):
return '/'.join([self.base_url] + [part.strip('/') for part in path_parts])
def __getattr__(self, name):
if name in self.VERBS:
def wrapper(url, *args, **kwargs):
return getattr(self.session, name)(url, *args, **kwargs)
return wrapper
else:
raise AttributeError(
'{} does not support the "{}" HTTP verb'.format(self.__class__.__name__, name.upper())
)
class PanoptaAuth(requests.auth.AuthBase):
def __init__(self, token):
self.token = token
def __call__(self, request):
request.headers.update({'Authorization': 'ApiKey %s' % self.token})
return request
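# Usage sketch (illustrative; the token value, endpoint path and response handling
# are assumptions, not part of this module):
#   client = Client('my-api-token')
#   response = client.get(client.url('server'), params={'limit': 10})
#   data = response.json()
# Any verb in Client.VERBS ('delete', 'get', 'post', 'put') is dispatched to the
# underlying requests.Session the same way; other attributes raise AttributeError.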
| {
"content_hash": "5938c604b480f8f197c5a59517df98b2",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 119,
"avg_line_length": 38.58490566037736,
"alnum_prop": 0.5574572127139364,
"repo_name": "Panopta/python-panopta-api-client",
"id": "5f661cd3d40fb84136ba801db9dedc7ed93e5586",
"size": "2045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "panopta_api/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4041"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Droppy'
copyright = u'2013, Ian McCracken'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Droppydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Droppy.tex', u'Droppy Documentation',
u'Ian McCracken', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'droppy', u'Droppy Documentation',
[u'Ian McCracken'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Droppy', u'Droppy Documentation',
u'Ian McCracken', 'Droppy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "9e52e3cc214d5957936396443bc7b705",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 80,
"avg_line_length": 32.18723404255319,
"alnum_prop": 0.7020095187731359,
"repo_name": "iancmcc/droppy",
"id": "c4517bd0a49e6c30c61e1b38081061510319b046",
"size": "7981",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "55853"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'BlogUser'
db.delete_table('blogs_bloguser')
def backwards(self, orm):
# Adding model 'BlogUser'
db.create_table('blogs_bloguser', (
('is_admin', self.gf('django.db.models.fields.BooleanField')(default=False)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
))
db.send_create_signal('blogs', ['BlogUser'])
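# With South, applying this migration is typically `./manage.py migrate blogs`, and
# rolling it back means migrating to the previous migration in the series (the one
# numbered 0106); both commands are illustrative, not part of this file.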
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'blogs.blog': {
'Meta': {'object_name': 'Blog'},
'analytics_account': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'bloguser': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bloguser'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'custom_domain': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'draft_notice': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'exclusion_end': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'exclusion_start': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'facebook_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
'fb_page_access_token': ('django.db.models.fields.CharField', [], {'max_length': '260', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'has_artists': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'header_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bootblog': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_online': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'main_color': ('django.db.models.fields.CharField', [], {'default': "'#C4BDB2'", 'max_length': '10', 'blank': 'True'}),
'moderator_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'pinterest_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '30'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Template']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True', 'blank': 'True'}),
'twitter_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
'twitter_oauth_token': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'twitter_oauth_token_secret': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'blogs.category': {
'Meta': {'object_name': 'Category'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_category'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'cat_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_caret': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_close': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_email': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_fb': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_left': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_pint': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_right': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_tw': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'default': "'#000000'", 'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'parent_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_category'", 'null': 'True', 'to': "orm['blogs.Category']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'Comment_author'", 'null': 'True', 'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'comment_status': ('django.db.models.fields.CharField', [], {'default': "'pe'", 'max_length': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'notify_me': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Post']", 'null': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
},
'blogs.info_email': {
'Meta': {'object_name': 'Info_email'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'We'", 'max_length': '2', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'subject': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'subscribers': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '2', 'null': 'True'})
},
'blogs.language': {
'Meta': {'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'language_name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'blogs.menu': {
'Meta': {'object_name': 'Menu'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_menu'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_main': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'blogs.menuitem': {
'Meta': {'object_name': 'MenuItem'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
'external_link': ('django.db.models.fields.URLField', [], {'max_length': '140', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Menu']", 'null': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Page']", 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
},
'blogs.page': {
'Meta': {'object_name': 'Page'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_page'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.post': {
'Meta': {'object_name': 'Post'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_0': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_01': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_4': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_5': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_6': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_video': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discarded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_ready': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'layout_type': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_0': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_04': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_1': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_10': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_11': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_12': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_13': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_14': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_15': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_16': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_17': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_18': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_19': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_2': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_20': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_21': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_22': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_23': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_24': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_25': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_26': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_27': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_28': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_29': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_3': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_30': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_31': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_32': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_33': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_4': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_5': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_6': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_7': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_8': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_9': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'publish_on_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'soundcloud_id': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'soundcloud_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '2', 'null': 'True'}),
'tag': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Tag']", 'null': 'True', 'blank': 'True'}),
'temp_tag_field': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'translated_content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'translated_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'video': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'video_ogg': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'vimeo_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'vimeo_thumb_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'blogs.rss': {
'Meta': {'object_name': 'Rss'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_rss'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'feed_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'blogs.subscription': {
'Meta': {'object_name': 'Subscription'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_new': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'blogs.tag': {
'Meta': {'object_name': 'Tag'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_tag'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.template': {
'Meta': {'object_name': 'Template'},
'archives': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'base': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'category': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'single': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'})
},
'blogs.translation': {
'Meta': {'object_name': 'Translation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'origin_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_origin_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'translated_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_translated_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['blogs'] | {
"content_hash": "b6f7e6bbd1f7397507c8ca12354bd665",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 195,
"avg_line_length": 91.16776315789474,
"alnum_prop": 0.5382283961753563,
"repo_name": "carquois/blobon",
"id": "3fb490018b66427183871c25196299e1a8a52b80",
"size": "27739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blobon/blogs/migrations/0107_auto__del_bloguser.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""Tests for the replace script and ReplaceRobot class."""
#
# (C) Pywikibot team, 2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import os
from pywikibot import fixes
from scripts import replace
from tests import _data_dir
from tests.aspects import unittest, TestCase
# Load only the custom fixes
fixes.fixes.clear()
fixes._load_file(os.path.join(_data_dir, 'fixes.py'))
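# The fixture file loaded from _data_dir is expected to define the custom fixes
# referenced by the tests below ('has-msg', 'has-msg-tw', 'no-msg' and
# 'has-msg-multiple'); its contents are not reproduced here.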
class TestReplacementsMain(TestCase):
"""Test various calls of main()."""
SUMMARY_CONFIRMATION = (
'Press Enter to use this automatic message, or enter a '
'description of the\nchanges your bot will make:')
family = 'test'
code = 'test'
def setUp(self):
"""Replace the original bot class with a fake one."""
class FakeReplaceBot(object):
"""A fake bot class for the minimal support."""
            changed_pages = -42  # a deliberately odd value, to show that this fake class was used
def __init__(inner_self, generator, replacements, exceptions={},
always=False, allowoverlap=False, recursive=False,
addedCat=None, sleep=None, summary='', site=None,
**kwargs):
inner_self.replacements = replacements
inner_self.site = site
self.bots.append(inner_self)
def run(inner_self):
"""Nothing to do here."""
pass
super(TestReplacementsMain, self).setUp()
self._original_bot = replace.ReplaceRobot
self._original_input = replace.pywikibot.input
self.bots = []
self.inputs = []
replace.ReplaceRobot = FakeReplaceBot
replace.pywikibot.input = self._fake_input
def tearDown(self):
"""Bring back the old bot class."""
replace.ReplaceRobot = self._original_bot
replace.pywikibot.input = self._original_input
super(TestReplacementsMain, self).tearDown()
def _fake_input(self, message):
"""Cache the message and return static text "TESTRUN"."""
self.inputs.append(message)
return 'TESTRUN'
def _run(self, *args):
"""Run the L{replace.main} with the given args and summary and page."""
# -page to not have an empty generator
# -lang and -family as it will use Site() otherwise
return replace.main(*(args + ('-lang:test', '-family:test',
'-page:TEST')))
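    # A rough sketch of the command line these tests simulate (hypothetical
    # invocation; the exact entry point depends on the local pywikibot setup):
    #
    #     python pwb.py replace 1 2 -fix:has-msg -lang:test -family:test -page:TEST
    #
    # i.e. old/new text pairs as positional arguments plus optional -fix names,
    # which main() turns into replacement objects for a ReplaceRobot instance.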
def test_invalid_replacements(self):
"""Test invalid command line replacement configurations."""
# old and new need to be together
self.assertFalse(self._run('foo', '-replacementfile:/dev/null', 'bar'))
# only old provided
self.assertFalse(self._run('foo'))
# In the end no bots should've been created
self.assertFalse(self.bots)
def _test_replacement(self, replacement, clazz=replace.Replacement,
offset=0):
"""Test a replacement from the command line."""
self.assertIsInstance(replacement, clazz)
self.assertEqual(replacement.old, str(offset * 2 + 1))
self.assertEqual(replacement.new, str(offset * 2 + 2))
def _test_fix_replacement(self, replacement, length=1, offset=0):
"""Test a replacement from a fix."""
assert length > offset
self._test_replacement(replacement, replace.ReplacementListEntry,
offset)
self.assertIsInstance(replacement.fix_set, replace.ReplacementList)
self.assertIsInstance(replacement.fix_set, list)
self.assertIn(replacement, replacement.fix_set)
self.assertIs(replacement, replacement.fix_set[offset])
self.assertEqual(len(replacement.fix_set), length)
def _get_bot(self, only_confirmation, *args):
"""Run with arguments, assert and return one bot."""
self.assertIsNone(self._run(*args))
self.assertEqual(len(self.bots), 1)
bot = self.bots[0]
if only_confirmation is not None:
self.assertIn(self.SUMMARY_CONFIRMATION, self.inputs)
if only_confirmation is True:
self.assertEqual(len(self.inputs), 1)
else:
self.assertNotIn(self.SUMMARY_CONFIRMATION, self.inputs)
self.assertEqual(bot.site, self.site)
return bot
def test_only_cmd(self):
"""Test command line replacements only."""
bot = self._get_bot(True, '1', '2')
self.assertEqual(len(bot.replacements), 1)
self._test_replacement(bot.replacements[0])
def test_only_fix_global_message(self):
"""Test fixes replacements only."""
bot = self._get_bot(True, '-fix:has-msg')
self.assertEqual(len(bot.replacements), 1)
self._test_fix_replacement(bot.replacements[0])
def test_only_fix_global_message_tw(self):
"""Test fixes replacements only."""
bot = self._get_bot(True, '-fix:has-msg-tw')
self.assertEqual(len(bot.replacements), 1)
self._test_fix_replacement(bot.replacements[0])
def test_only_fix_no_message(self):
"""Test fixes replacements only."""
bot = self._get_bot(True, '-fix:no-msg')
self.assertEqual(len(bot.replacements), 1)
self._test_fix_replacement(bot.replacements[0])
def test_only_fix_multiple(self):
"""Test fixes replacements only."""
bot = self._get_bot(True, '-fix:has-msg-multiple')
for offset, replacement in enumerate(bot.replacements):
self._test_fix_replacement(replacement, 3, offset)
self.assertEqual(len(bot.replacements), 3)
def test_cmd_and_fix(self):
"""Test command line and fix replacements together."""
bot = self._get_bot(True, '1', '2', '-fix:has-msg')
self.assertEqual(len(bot.replacements), 2)
self._test_replacement(bot.replacements[0])
self._test_fix_replacement(bot.replacements[1])
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
| {
"content_hash": "17dcb45a9f3d23556b22bd89ab050c01",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 79,
"avg_line_length": 36.55688622754491,
"alnum_prop": 0.6152334152334152,
"repo_name": "valhallasw/pywikibot-core",
"id": "610b81155199c5f70d1e7b1bbd0fa9b8ea26222f",
"size": "6130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/replacebot_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "3763739"
}
],
"symlink_target": ""
} |
"""
This module contains classes for XML Schema elements, complex types and model groups.
"""
from __future__ import unicode_literals
from decimal import Decimal
from elementpath import XPath2Parser, ElementPathSyntaxError, XPathContext
from elementpath.xpath_helpers import boolean_value
from elementpath.datatypes import AbstractDateTime, Duration
from ..exceptions import XMLSchemaAttributeError
from ..qnames import XSD_GROUP, XSD_SEQUENCE, XSD_ALL, XSD_CHOICE, XSD_ATTRIBUTE_GROUP, \
XSD_COMPLEX_TYPE, XSD_SIMPLE_TYPE, XSD_ALTERNATIVE, XSD_ELEMENT, XSD_ANY_TYPE, XSD_UNIQUE, \
XSD_KEY, XSD_KEYREF, XSI_NIL, XSI_TYPE, XSD_ID
from ..helpers import get_qname, get_xml_bool_attribute, get_xsd_derivation_attribute, \
get_xsd_form_attribute, ParticleCounter
from ..etree import etree_element
from ..converters import ElementData, raw_xml_encode, XMLSchemaConverter
from ..xpath import ElementPathMixin
from .exceptions import XMLSchemaValidationError
from .xsdbase import XsdComponent, XsdType, ValidationMixin, ParticleMixin
from .identities import XsdUnique, XsdKey, XsdKeyref
from .wildcards import XsdAnyElement
XSD_MODEL_GROUP_TAGS = {XSD_GROUP, XSD_SEQUENCE, XSD_ALL, XSD_CHOICE}
XSD_ATTRIBUTE_GROUP_ELEMENT = etree_element(XSD_ATTRIBUTE_GROUP)
class XsdElement(XsdComponent, ValidationMixin, ParticleMixin, ElementPathMixin):
"""
Class for XSD 1.0 'element' declarations.
<element
abstract = boolean : false
block = (#all | List of (extension | restriction | substitution))
default = string
final = (#all | List of (extension | restriction))
fixed = string
form = (qualified | unqualified)
id = ID
maxOccurs = (nonNegativeInteger | unbounded) : 1
minOccurs = nonNegativeInteger : 1
name = NCName
nillable = boolean : false
ref = QName
substitutionGroup = QName
type = QName
{any attributes with non-schema namespace . . .}>
Content: (annotation?, ((simpleType | complexType)?, (unique | key | keyref)*))
</element>
"""
_admitted_tags = {XSD_ELEMENT}
qualified = False
_ref = None
_abstract = False
_block = None
_final = None
_substitution_group = None
def __init__(self, elem, schema, parent, name=None):
super(XsdElement, self).__init__(elem, schema, parent, name)
self.names = (self.qualified_name,) if self.qualified else (self.qualified_name, self.local_name)
if not hasattr(self, 'type'):
raise XMLSchemaAttributeError("undefined 'type' attribute for %r." % self)
if not hasattr(self, 'qualified'):
raise XMLSchemaAttributeError("undefined 'qualified' attribute for %r." % self)
def __repr__(self):
if self.ref is None:
return '%s(name=%r, occurs=%r)' % (self.__class__.__name__, self.prefixed_name, self.occurs)
else:
return '%s(ref=%r, occurs=%r)' % (self.__class__.__name__, self.prefixed_name, self.occurs)
def __setattr__(self, name, value):
if name == "type":
assert value is None or isinstance(value, XsdType), "Wrong value %r for attribute 'type'." % value
if hasattr(value, 'attributes'):
self.attributes = value.attributes
else:
self.attributes = self.schema.BUILDERS.attribute_group_class(
XSD_ATTRIBUTE_GROUP_ELEMENT, self.schema, self
)
super(XsdElement, self).__setattr__(name, value)
def __iter__(self):
if not self.type.has_simple_content():
for e in self.type.content_type.iter_subelements():
yield e
def _parse(self):
XsdComponent._parse(self)
self._parse_attributes()
index = self._parse_type()
self._parse_identity_constraints(index)
if self.parent is None:
self._parse_substitution_group()
def _parse_attributes(self):
elem = self.elem
attrib = elem.attrib
self._parse_particle(elem)
try:
self.qualified = (self.form or self.schema.element_form_default) == 'qualified'
except ValueError as err:
self.parse_error(err)
name = elem.get('name')
if name is not None:
if self.parent is None or self.qualified:
self.name = get_qname(self.target_namespace, attrib['name'])
else:
self.name = attrib['name']
elif self.parent is None:
self.parse_error("missing 'name' in a global element declaration")
self.name = elem.get('ref', 'nameless_%s' % str(id(self)))
elif 'ref' not in attrib:
self.parse_error("missing both 'name' and 'ref' attributes")
self.name = elem.get('nameless_%s' % str(id(self)))
else:
try:
element_name = self.schema.resolve_qname(attrib['ref'])
except ValueError as err:
self.parse_error(err)
self.type = self.maps.types[XSD_ANY_TYPE]
self.name = elem.get('nameless_%s' % str(id(self)))
else:
if not element_name:
self.parse_error("empty 'ref' attribute")
self.type = self.maps.types[XSD_ANY_TYPE]
self.name = elem.get('nameless_%s' % str(id(self)))
else:
try:
xsd_element = self.maps.lookup_element(element_name)
except KeyError:
self.parse_error('unknown element %r' % element_name)
self.name = element_name
self.type = self.maps.types[XSD_ANY_TYPE]
else:
self._ref = xsd_element
self.name = xsd_element.name
self.type = xsd_element.type
self.qualified = xsd_element.qualified
for attr_name in ('name', 'type', 'nillable', 'default', 'fixed', 'form',
'block', 'abstract', 'final', 'substitutionGroup'):
if attr_name in attrib:
self.parse_error("attribute %r is not allowed when element reference is used." % attr_name)
return
if 'default' in attrib and 'fixed' in attrib:
self.parse_error("'default' and 'fixed' attributes are mutually exclusive.")
if 'abstract' in elem.attrib:
try:
self._abstract = get_xml_bool_attribute(elem, 'abstract')
except ValueError as err:
self.parse_error(err, elem)
else:
if self.parent is not None:
self.parse_error("local scope elements cannot have abstract attribute")
if 'block' in elem.attrib:
try:
self._block = get_xsd_derivation_attribute(
elem, 'block', ('extension', 'restriction', 'substitution')
)
except ValueError as err:
self.parse_error(err, elem)
if self.parent is None:
self._parse_properties('nillable')
if 'final' in elem.attrib:
try:
self._final = get_xsd_derivation_attribute(elem, 'final', ('extension', 'restriction'))
except ValueError as err:
self.parse_error(err, elem)
for attr_name in ('ref', 'form', 'minOccurs', 'maxOccurs'):
if attr_name in attrib:
self.parse_error("attribute %r not allowed in a global element declaration" % attr_name)
else:
self._parse_properties('form', 'nillable')
for attr_name in ('final', 'substitutionGroup'):
if attr_name in attrib:
self.parse_error("attribute %r not allowed in a local element declaration" % attr_name)
def _parse_type(self):
attrib = self.elem.attrib
if self.ref:
if self._parse_component(self.elem, required=False, strict=False) is not None:
self.parse_error("element reference declaration can't has children.")
elif 'type' in attrib:
try:
self.type = self.maps.lookup_type(self.schema.resolve_qname(attrib['type']))
except KeyError:
self.parse_error('unknown type %r' % attrib['type'])
self.type = self.maps.types[XSD_ANY_TYPE]
except ValueError as err:
self.parse_error(err)
self.type = self.maps.types[XSD_ANY_TYPE]
finally:
child = self._parse_component(self.elem, required=False, strict=False)
if child is not None and child.tag in (XSD_COMPLEX_TYPE, XSD_SIMPLE_TYPE):
msg = "the attribute 'type' and the <%s> local declaration are mutually exclusive"
self.parse_error(msg % child.tag.split('}')[-1])
else:
child = self._parse_component(self.elem, required=False, strict=False)
if child is not None:
if child.tag == XSD_COMPLEX_TYPE:
self.type = self.schema.BUILDERS.complex_type_class(child, self.schema, self)
elif child.tag == XSD_SIMPLE_TYPE:
self.type = self.schema.BUILDERS.simple_type_factory(child, self.schema, self)
else:
self.type = self.maps.lookup_type(XSD_ANY_TYPE)
return 0
# Check value constraints
if 'default' in attrib and not self.type.is_valid(attrib['default']):
msg = "'default' value %r is not compatible with the type of the element"
self.parse_error(msg % attrib['default'])
elif 'fixed' in attrib and not self.type.is_valid(attrib['fixed']):
msg = "'fixed' value %r is not compatible with the type of the element"
self.parse_error(msg % attrib['fixed'])
return 1
else:
self.type = self.maps.lookup_type(XSD_ANY_TYPE)
return 0
# Check value constraints
if 'default' in attrib:
if not self.type.is_valid(attrib['default']):
msg = "'default' value {!r} is not compatible with the type {!r}"
self.parse_error(msg.format(attrib['default'], self.type))
elif self.schema.XSD_VERSION == '1.0' and (
self.type.name == XSD_ID or self.type.is_derived(self.schema.meta_schema.types['ID'])):
self.parse_error("'xs:ID' or a type derived from 'xs:ID' cannot has a 'default'")
elif 'fixed' in attrib:
if not self.type.is_valid(attrib['fixed']):
msg = "'fixed' value {!r} is not compatible with the type {!r}"
self.parse_error(msg.format(attrib['fixed'], self.type))
elif self.schema.XSD_VERSION == '1.0' and (
self.type.name == XSD_ID or self.type.is_derived(self.schema.meta_schema.types['ID'])):
self.parse_error("'xs:ID' or a type derived from 'xs:ID' cannot has a 'default'")
return 0
def _parse_identity_constraints(self, index=0):
self.constraints = {}
for child in self._iterparse_components(self.elem, start=index):
if child.tag == XSD_UNIQUE:
constraint = XsdUnique(child, self.schema, self)
elif child.tag == XSD_KEY:
constraint = XsdKey(child, self.schema, self)
elif child.tag == XSD_KEYREF:
constraint = XsdKeyref(child, self.schema, self)
else:
continue # Error already caught by validation against the meta-schema
try:
if child != self.maps.constraints[constraint.name]:
self.parse_error("duplicated identity constraint %r:" % constraint.name, child)
except KeyError:
self.maps.constraints[constraint.name] = constraint
finally:
self.constraints[constraint.name] = constraint
def _parse_substitution_group(self):
substitution_group = self.elem.get('substitutionGroup')
if substitution_group is None:
return
try:
substitution_group_qname = self.schema.resolve_qname(substitution_group)
except ValueError as err:
self.parse_error(err)
return
else:
if substitution_group_qname[0] != '{':
substitution_group_qname = get_qname(self.target_namespace, substitution_group_qname)
try:
head_element = self.maps.lookup_element(substitution_group_qname)
except KeyError:
self.parse_error("unknown substitutionGroup %r" % substitution_group)
return
else:
if isinstance(head_element, tuple):
self.parse_error("circularity found for substitutionGroup %r" % substitution_group)
return
elif 'substitution' in head_element.block:
return
final = head_element.final
if self.type == head_element.type or self.type.name == XSD_ANY_TYPE:
pass
elif not self.type.is_derived(head_element.type):
msg = "%r type is not of the same or a derivation of the head element %r type."
self.parse_error(msg % (self, head_element))
elif final == '#all' or 'extension' in final and 'restriction' in final:
msg = "head element %r can't be substituted by an element that has a derivation of its type"
self.parse_error(msg % head_element)
elif 'extension' in final and self.type.is_derived(head_element.type, 'extension'):
msg = "head element %r can't be substituted by an element that has an extension of its type"
self.parse_error(msg % head_element)
elif 'restriction' in final and self.type.is_derived(head_element.type, 'restriction'):
msg = "head element %r can't be substituted by an element that has a restriction of its type"
self.parse_error(msg % head_element)
if self.type.name == XSD_ANY_TYPE and 'type' not in self.elem.attrib:
self.type = self.maps.elements[substitution_group_qname].type
try:
self.maps.substitution_groups[substitution_group_qname].add(self)
except KeyError:
self.maps.substitution_groups[substitution_group_qname] = {self}
finally:
self._substitution_group = substitution_group_qname
@property
def built(self):
return self.type.parent is None or self.type.built
@property
def validation_attempted(self):
if self.built:
return 'full'
else:
return self.type.validation_attempted
# XSD declaration attributes
@property
def ref(self):
return self.elem.get('ref')
# Global element's exclusive properties
@property
def abstract(self):
return self._abstract if self._ref is None else self._ref.abstract
@property
def final(self):
return self._final or self.schema.final_default if self._ref is None else self._ref.final
@property
def block(self):
return self._block or self.schema.block_default if self._ref is None else self._ref.block
@property
def substitution_group(self):
return self._substitution_group if self._ref is None else self._ref.substitution_group
@property
def default(self):
return self.elem.get('default') if self._ref is None else self._ref.default
@property
def fixed(self):
return self.elem.get('fixed') if self._ref is None else self._ref.fixed
@property
def form(self):
return get_xsd_form_attribute(self.elem, 'form') if self._ref is None else self._ref.form
@property
def nillable(self):
if self._ref is not None:
return self._ref.nillable
return get_xml_bool_attribute(self.elem, 'nillable', default=False)
def get_attribute(self, name):
if name[0] != '{':
return self.type.attributes[get_qname(self.type.target_namespace, name)]
return self.type.attributes[name]
def get_type(self, elem):
return self.type
def get_path(self, ancestor=None, reverse=False):
"""
Returns the XPath expression of the element. The path is relative to the schema instance
in which the element is contained or is relative to a specific ancestor passed as argument.
In the latter case returns `None` if the argument is not an ancestor.
:param ancestor: optional XSD component of the same schema, that may be an ancestor of the element.
:param reverse: if set to `True` returns the reverse path, from the element to ancestor.
"""
path = []
xsd_component = self
while xsd_component is not None:
if xsd_component is ancestor:
return '/'.join(reversed(path)) or '.'
elif hasattr(xsd_component, 'tag'):
path.append('..' if reverse else xsd_component.name)
xsd_component = xsd_component.parent
else:
if ancestor is None:
return '/'.join(reversed(path)) or '.'
def iter_components(self, xsd_classes=None):
if xsd_classes is None:
yield self
for obj in self.constraints.values():
yield obj
else:
if isinstance(self, xsd_classes):
yield self
for obj in self.constraints.values():
if isinstance(obj, xsd_classes):
yield obj
if self.ref is None and self.type.parent is not None:
for obj in self.type.iter_components(xsd_classes):
yield obj
def iter_substitutes(self):
for xsd_element in self.maps.substitution_groups.get(self.name, ()):
yield xsd_element
for e in xsd_element.iter_substitutes():
yield e
def iter_decode(self, elem, validation='lax', **kwargs):
"""
Creates an iterator for decoding an Element instance.
:param elem: the Element that has to be decoded.
        :param validation: the validation mode, can be 'lax', 'strict' or 'skip'.
:param kwargs: keyword arguments for the decoding process.
        :return: yields a decoded object, possibly preceded by a sequence of \
validation or decoding errors.
"""
converter = kwargs.get('converter')
if not isinstance(converter, XMLSchemaConverter):
converter = kwargs['converter'] = self.schema.get_converter(converter, **kwargs)
level = kwargs.pop('level', 0)
use_defaults = kwargs.get('use_defaults', False)
value = content = attributes = None
# Get the instance type: xsi:type or the schema's declaration
if XSI_TYPE not in elem.attrib:
xsd_type = self.get_type(elem)
else:
xsi_type = elem.attrib[XSI_TYPE]
try:
xsd_type = self.maps.lookup_type(converter.unmap_qname(xsi_type))
except KeyError:
yield self.validation_error(validation, "unknown type %r" % xsi_type, elem, **kwargs)
xsd_type = self.get_type(elem)
# Decode attributes
attribute_group = getattr(xsd_type, 'attributes', self.attributes)
for result in attribute_group.iter_decode(elem.attrib, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, elem, **kwargs)
else:
attributes = result
# Checks the xsi:nil attribute of the instance
if validation != 'skip' and XSI_NIL in elem.attrib:
if not self.nillable:
yield self.validation_error(validation, "element is not nillable.", elem, **kwargs)
try:
if get_xml_bool_attribute(elem, XSI_NIL):
if elem.text is not None:
reason = "xsi:nil='true' but the element is not empty."
yield self.validation_error(validation, reason, elem, **kwargs)
else:
element_data = ElementData(elem.tag, None, None, attributes)
yield converter.element_decode(element_data, self, level)
return
except TypeError:
reason = "xsi:nil attribute must has a boolean value."
yield self.validation_error(validation, reason, elem, **kwargs)
if not xsd_type.has_simple_content():
for result in xsd_type.content_type.iter_decode(elem, validation, level=level + 1, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, elem, **kwargs)
else:
content = result
else:
if len(elem) and validation != 'skip':
reason = "a simple content element can't has child elements."
yield self.validation_error(validation, reason, elem, **kwargs)
text = elem.text
if self.fixed is not None:
if text is None:
text = self.fixed
elif text != self.fixed:
reason = "must has the fixed value %r." % self.fixed
yield self.validation_error(validation, reason, elem, **kwargs)
elif not text and use_defaults and self.default is not None:
text = self.default
if not xsd_type.is_simple():
xsd_type = xsd_type.content_type
if text is None:
for result in xsd_type.iter_decode('', validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, elem, **kwargs)
if kwargs.get('filler') is not None:
value = kwargs.get('filler')(self)
else:
for result in xsd_type.iter_decode(text, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, elem, **kwargs)
elif result is None and kwargs.get('filler') is not None:
value = kwargs.get('filler')(self)
else:
value = result
if isinstance(value, Decimal):
try:
value = kwargs['decimal_type'](value)
except (KeyError, TypeError):
pass
elif isinstance(value, (AbstractDateTime, Duration)):
try:
if kwargs['datetime_types'] is not True:
value = elem.text
except KeyError:
value = elem.text
element_data = ElementData(elem.tag, value, content, attributes)
yield converter.element_decode(element_data, self, level)
if content is not None:
del content
if validation != 'skip':
for constraint in self.constraints.values():
if isinstance(constraint, XsdKeyref) and '_no_deep' in kwargs: # TODO: Complete lazy validation
continue
for error in constraint(elem):
yield self.validation_error(validation, error, elem, **kwargs)
def iter_encode(self, obj, validation='lax', **kwargs):
"""
Creates an iterator for encoding data to an Element.
:param obj: the data that has to be encoded.
:param validation: the validation mode: can be 'lax', 'strict' or 'skip'.
:param kwargs: keyword arguments for the encoding process.
        :return: yields an Element, possibly preceded by a sequence of \
validation or encoding errors.
"""
converter = kwargs.get('converter')
if not isinstance(converter, XMLSchemaConverter):
converter = self.schema.get_converter(converter, **kwargs)
level = kwargs.pop('level', 0)
element_data = converter.element_encode(obj, self, level)
errors = []
tag = element_data.tag
text = None
children = element_data.content
attributes = ()
if element_data.attributes and XSI_TYPE in element_data.attributes:
xsi_type = element_data.attributes[XSI_TYPE]
try:
xsd_type = self.maps.lookup_type(converter.unmap_qname(xsi_type))
except KeyError:
errors.append("unknown type %r" % xsi_type)
xsd_type = self.get_type(element_data)
else:
xsd_type = self.get_type(element_data)
attribute_group = getattr(xsd_type, 'attributes', self.attributes)
for result in attribute_group.iter_encode(element_data.attributes, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
errors.append(result)
else:
attributes = result
if validation != 'skip' and XSI_NIL in element_data.attributes:
if not self.nillable:
errors.append("element is not nillable.")
xsi_nil = element_data.attributes[XSI_NIL]
if xsi_nil.strip() not in ('0', '1', 'true', 'false'):
errors.append("xsi:nil attribute must has a boolean value.")
if element_data.text is not None:
errors.append("xsi:nil='true' but the element is not empty.")
else:
elem = converter.etree_element(element_data.tag, attrib=attributes, level=level)
for e in errors:
yield self.validation_error(validation, e, elem, **kwargs)
yield elem
return
if xsd_type.is_simple():
if element_data.content:
errors.append("a simpleType element can't has child elements.")
if element_data.text is None:
pass
else:
for result in xsd_type.iter_encode(element_data.text, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
errors.append(result)
else:
text = result
elif xsd_type.has_simple_content():
if element_data.text is not None:
for result in xsd_type.content_type.iter_encode(element_data.text, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
errors.append(result)
else:
text = result
else:
for result in xsd_type.content_type.iter_encode(
element_data, validation, level=level + 1, **kwargs):
if isinstance(result, XMLSchemaValidationError):
errors.append(result)
elif result:
text, children = result
elem = converter.etree_element(tag, text, children, attributes, level)
if validation != 'skip' and errors:
for e in errors:
yield self.validation_error(validation, e, elem, **kwargs)
yield elem
del element_data
def is_restriction(self, other, check_occurs=True):
if isinstance(other, XsdAnyElement):
if self.min_occurs == self.max_occurs == 0:
return True
if check_occurs and not self.has_occurs_restriction(other):
return False
return other.is_matching(self.name, self.default_namespace)
elif isinstance(other, XsdElement):
if self.name != other.name:
substitution_group = self.substitution_group
if other.name == self.substitution_group and other.min_occurs != other.max_occurs \
and self.max_occurs != 0 and not other.abstract:
                # The base is the head element, it is not abstract and it has
                # non-deterministic occurs: this is less restrictive than the W3C test
                # group (elemZ026), which is marked as invalid despite being based on
                # an abstract declaration.
return False
elif self.substitution_group is None:
return False
elif not any(e.name == self.name for e in self.maps.substitution_groups[substitution_group]):
return False
if check_occurs and not self.has_occurs_restriction(other):
return False
elif self.type is not other.type and self.type.elem is not other.type.elem and \
not self.type.is_derived(other.type, 'restriction') and not other.type.abstract:
return False
elif self.fixed != other.fixed and self.type.normalize(self.fixed) != other.type.normalize(other.fixed):
return False
elif other.nillable is False and self.nillable:
return False
elif any(value not in self.block for value in other.block.split()):
return False
elif not all(k in other.constraints for k in self.constraints):
return False
else:
return True
elif other.model == 'choice':
if other.is_empty() and self.max_occurs != 0:
return False
check_group_items_occurs = self.schema.XSD_VERSION == '1.0'
counter = ParticleCounter()
for e in other.iter_model():
if not isinstance(e, (XsdElement, XsdAnyElement)):
return False
elif not self.is_restriction(e, check_group_items_occurs):
continue
counter += e
counter *= other
if self.has_occurs_restriction(counter):
return True
counter.reset()
return False
else:
match_restriction = False
for e in other.iter_model():
if match_restriction:
if not e.is_emptiable():
return False
elif self.is_restriction(e):
match_restriction = True
elif not e.is_emptiable():
return False
return True
def overlap(self, other):
if isinstance(other, XsdElement):
if self.name == other.name:
return True
elif other.substitution_group == self.name or other.name == self.substitution_group:
return True
elif isinstance(other, XsdAnyElement):
if other.is_matching(self.name, self.default_namespace):
return True
for e in self.maps.substitution_groups.get(self.name, ()):
if other.is_matching(e.name, self.default_namespace):
return True
return False
class Xsd11Element(XsdElement):
"""
Class for XSD 1.1 'element' declarations.
<element
abstract = boolean : false
block = (#all | List of (extension | restriction | substitution))
default = string
final = (#all | List of (extension | restriction))
fixed = string
form = (qualified | unqualified)
id = ID
maxOccurs = (nonNegativeInteger | unbounded) : 1
minOccurs = nonNegativeInteger : 1
name = NCName
nillable = boolean : false
ref = QName
substitutionGroup = List of QName
targetNamespace = anyURI
type = QName
{any attributes with non-schema namespace . . .}>
Content: (annotation?, ((simpleType | complexType)?, alternative*, (unique | key | keyref)*))
</element>
"""
def _parse(self):
XsdComponent._parse(self)
self._parse_attributes()
index = self._parse_type()
index = self._parse_alternatives(index)
self._parse_identity_constraints(index)
if self.parent is None:
self._parse_substitution_group()
self._parse_target_namespace()
def _parse_alternatives(self, index=0):
if self._ref is not None:
self.alternatives = self._ref.alternatives
else:
self.alternatives = []
for child in self._iterparse_components(self.elem, start=index):
if child.tag == XSD_ALTERNATIVE:
self.alternatives.append(XsdAlternative(child, self.schema, self))
index += 1
else:
break
return index
@property
def target_namespace(self):
try:
return self.elem.attrib['targetNamespace']
except KeyError:
return self.schema.target_namespace
def get_type(self, elem):
if not self.alternatives:
return self.type
if isinstance(elem, ElementData):
if elem.attributes:
attrib = {k: raw_xml_encode(v) for k, v in elem.attributes.items()}
elem = etree_element(elem.tag, attrib=attrib)
else:
elem = etree_element(elem.tag)
for alt in self.alternatives:
if alt.type is not None and boolean_value(list(alt.token.select(context=XPathContext(root=elem)))):
return alt.type
return self.type
def overlap(self, other):
if isinstance(other, XsdElement):
if self.name == other.name:
return True
elif other.substitution_group == self.name or other.name == self.substitution_group:
return True
return False
class XsdAlternative(XsdComponent):
"""
<alternative
id = ID
test = an XPath expression
type = QName
xpathDefaultNamespace = (anyURI | (##defaultNamespace | ##targetNamespace | ##local))
{any attributes with non-schema namespace . . .}>
Content: (annotation?, (simpleType | complexType)?)
</alternative>
"""
_admitted_tags = {XSD_ALTERNATIVE}
type = None
def __repr__(self):
return '%s(type=%r, test=%r)' % (self.__class__.__name__, self.elem.get('type'), self.elem.get('test'))
def _parse(self):
XsdComponent._parse(self)
attrib = self.elem.attrib
try:
self.path = attrib['test']
except KeyError as err:
self.path = 'true()'
self.parse_error(err)
if 'xpathDefaultNamespace' in attrib:
self.xpath_default_namespace = self._parse_xpath_default_namespace(self.elem)
else:
self.xpath_default_namespace = self.schema.xpath_default_namespace
parser = XPath2Parser(self.namespaces, strict=False, default_namespace=self.xpath_default_namespace)
try:
self.token = parser.parse(self.path)
except ElementPathSyntaxError as err:
self.parse_error(err)
self.token = parser.parse('true()')
self.path = 'true()'
try:
type_qname = self.schema.resolve_qname(attrib['type'])
except KeyError:
self.parse_error("missing 'type' attribute")
except ValueError as err:
self.parse_error(err)
else:
try:
self.type = self.maps.lookup_type(type_qname)
except KeyError:
self.parse_error("unknown type %r" % attrib['type'])
else:
if not self.type.is_derived(self.parent.type):
self.parse_error("type %r ir not derived from %r" % (attrib['type'], self.parent.type))
@property
def built(self):
raise NotImplementedError
| {
"content_hash": "e1f54ffc4f316232929b9ef3615fe321",
"timestamp": "",
"source": "github",
"line_count": 849,
"max_line_length": 116,
"avg_line_length": 42.588928150765604,
"alnum_prop": 0.573925548979479,
"repo_name": "brunato/xmlschema",
"id": "ad38660fc4774a619f80e5cf9ba15ed230cf42a0",
"size": "36514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xmlschema/validators/elements.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "814028"
}
],
"symlink_target": ""
} |
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from gias3.mapclientpluginutilities.viewers.mayaviscenewidget import MayaviSceneWidget
class Ui_Dialog(object):
def setupUi(self, Dialog):
if not Dialog.objectName():
Dialog.setObjectName(u"Dialog")
Dialog.resize(1177, 726)
sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
Dialog.setSizePolicy(sizePolicy)
self.horizontalLayout_2 = QHBoxLayout(Dialog)
self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
self.widgetMain = QWidget(Dialog)
self.widgetMain.setObjectName(u"widgetMain")
self.widgetMain.setEnabled(True)
sizePolicy.setHeightForWidth(self.widgetMain.sizePolicy().hasHeightForWidth())
self.widgetMain.setSizePolicy(sizePolicy)
self.widgetMain.setMaximumSize(QSize(16777215, 16777215))
self.gridLayout = QGridLayout(self.widgetMain)
self.gridLayout.setObjectName(u"gridLayout")
self.MayaviScene = MayaviSceneWidget(self.widgetMain)
self.MayaviScene.setObjectName(u"MayaviScene")
sizePolicy1 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
sizePolicy1.setHorizontalStretch(1)
sizePolicy1.setVerticalStretch(1)
sizePolicy1.setHeightForWidth(self.MayaviScene.sizePolicy().hasHeightForWidth())
self.MayaviScene.setSizePolicy(sizePolicy1)
self.gridLayout.addWidget(self.MayaviScene, 0, 1, 1, 1)
self.widget = QWidget(self.widgetMain)
self.widget.setObjectName(u"widget")
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setMinimumSize(QSize(450, 0))
self.widget.setMaximumSize(QSize(600, 16777215))
self.verticalLayout_3 = QVBoxLayout(self.widget)
self.verticalLayout_3.setObjectName(u"verticalLayout_3")
self.tableWidget = QTableWidget(self.widget)
if (self.tableWidget.columnCount() < 1):
self.tableWidget.setColumnCount(1)
__qtablewidgetitem = QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, __qtablewidgetitem)
self.tableWidget.setObjectName(u"tableWidget")
sizePolicy2 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
sizePolicy2.setHorizontalStretch(0)
sizePolicy2.setVerticalStretch(0)
sizePolicy2.setHeightForWidth(self.tableWidget.sizePolicy().hasHeightForWidth())
self.tableWidget.setSizePolicy(sizePolicy2)
self.tableWidget.setMinimumSize(QSize(0, 0))
self.tableWidget.setMaximumSize(QSize(16777215, 150))
self.tableWidget.setSortingEnabled(False)
self.tableWidget.horizontalHeader().setVisible(True)
self.tableWidget.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidget.horizontalHeader().setMinimumSectionSize(300)
self.tableWidget.horizontalHeader().setDefaultSectionSize(300)
self.verticalLayout_3.addWidget(self.tableWidget)
self.toolBox = QToolBox(self.widget)
self.toolBox.setObjectName(u"toolBox")
self.toolBox.setMinimumSize(QSize(0, 0))
self.page_2 = QWidget()
self.page_2.setObjectName(u"page_2")
self.page_2.setGeometry(QRect(0, 0, 428, 379))
self.formLayout_3 = QFormLayout(self.page_2)
self.formLayout_3.setObjectName(u"formLayout_3")
self.formLayout_3.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow)
self.tableWidgetLandmarks = QTableWidget(self.page_2)
if (self.tableWidgetLandmarks.columnCount() < 2):
self.tableWidgetLandmarks.setColumnCount(2)
__qtablewidgetitem1 = QTableWidgetItem()
self.tableWidgetLandmarks.setHorizontalHeaderItem(0, __qtablewidgetitem1)
__qtablewidgetitem2 = QTableWidgetItem()
self.tableWidgetLandmarks.setHorizontalHeaderItem(1, __qtablewidgetitem2)
self.tableWidgetLandmarks.setObjectName(u"tableWidgetLandmarks")
self.tableWidgetLandmarks.setMinimumSize(QSize(0, 200))
self.tableWidgetLandmarks.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidgetLandmarks.horizontalHeader().setMinimumSectionSize(100)
self.tableWidgetLandmarks.horizontalHeader().setDefaultSectionSize(150)
self.formLayout_3.setWidget(0, QFormLayout.SpanningRole, self.tableWidgetLandmarks)
self.label_23 = QLabel(self.page_2)
self.label_23.setObjectName(u"label_23")
self.formLayout_3.setWidget(7, QFormLayout.LabelRole, self.label_23)
self.doubleSpinBox_markerRadius = QDoubleSpinBox(self.page_2)
self.doubleSpinBox_markerRadius.setObjectName(u"doubleSpinBox_markerRadius")
self.formLayout_3.setWidget(7, QFormLayout.FieldRole, self.doubleSpinBox_markerRadius)
self.label_24 = QLabel(self.page_2)
self.label_24.setObjectName(u"label_24")
self.formLayout_3.setWidget(8, QFormLayout.LabelRole, self.label_24)
self.doubleSpinBox_skinPad = QDoubleSpinBox(self.page_2)
self.doubleSpinBox_skinPad.setObjectName(u"doubleSpinBox_skinPad")
self.formLayout_3.setWidget(8, QFormLayout.FieldRole, self.doubleSpinBox_skinPad)
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.pushButton_addLandmark = QPushButton(self.page_2)
self.pushButton_addLandmark.setObjectName(u"pushButton_addLandmark")
self.horizontalLayout.addWidget(self.pushButton_addLandmark)
self.pushButton_removeLandmark = QPushButton(self.page_2)
self.pushButton_removeLandmark.setObjectName(u"pushButton_removeLandmark")
self.horizontalLayout.addWidget(self.pushButton_removeLandmark)
self.formLayout_3.setLayout(1, QFormLayout.FieldRole, self.horizontalLayout)
self.toolBox.addItem(self.page_2, u"Landmarks")
self.page = QWidget()
self.page.setObjectName(u"page")
self.page.setGeometry(QRect(0, 0, 428, 379))
self.verticalLayout_5 = QVBoxLayout(self.page)
self.verticalLayout_5.setObjectName(u"verticalLayout_5")
self.formLayout_5 = QFormLayout()
self.formLayout_5.setObjectName(u"formLayout_5")
self.formLayout_5.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_5.setHorizontalSpacing(2)
self.label_14 = QLabel(self.page)
self.label_14.setObjectName(u"label_14")
self.formLayout_5.setWidget(1, QFormLayout.LabelRole, self.label_14)
self.label_18 = QLabel(self.page)
self.label_18.setObjectName(u"label_18")
self.formLayout_5.setWidget(2, QFormLayout.LabelRole, self.label_18)
self.label_19 = QLabel(self.page)
self.label_19.setObjectName(u"label_19")
self.formLayout_5.setWidget(3, QFormLayout.LabelRole, self.label_19)
self.horizontalLayout_11 = QHBoxLayout()
self.horizontalLayout_11.setObjectName(u"horizontalLayout_11")
self.doubleSpinBox_ptx = QDoubleSpinBox(self.page)
self.doubleSpinBox_ptx.setObjectName(u"doubleSpinBox_ptx")
self.doubleSpinBox_ptx.setMinimum(-10000.000000000000000)
self.doubleSpinBox_ptx.setMaximum(10000.000000000000000)
self.horizontalLayout_11.addWidget(self.doubleSpinBox_ptx)
self.doubleSpinBox_pty = QDoubleSpinBox(self.page)
self.doubleSpinBox_pty.setObjectName(u"doubleSpinBox_pty")
self.doubleSpinBox_pty.setMinimum(-10000.000000000000000)
self.doubleSpinBox_pty.setMaximum(10000.000000000000000)
self.horizontalLayout_11.addWidget(self.doubleSpinBox_pty)
self.doubleSpinBox_ptz = QDoubleSpinBox(self.page)
self.doubleSpinBox_ptz.setObjectName(u"doubleSpinBox_ptz")
self.doubleSpinBox_ptz.setMinimum(-10000.000000000000000)
self.doubleSpinBox_ptz.setMaximum(10000.000000000000000)
self.horizontalLayout_11.addWidget(self.doubleSpinBox_ptz)
self.formLayout_5.setLayout(3, QFormLayout.FieldRole, self.horizontalLayout_11)
self.label_20 = QLabel(self.page)
self.label_20.setObjectName(u"label_20")
self.formLayout_5.setWidget(4, QFormLayout.LabelRole, self.label_20)
self.horizontalLayout_12 = QHBoxLayout()
self.horizontalLayout_12.setObjectName(u"horizontalLayout_12")
self.doubleSpinBox_prx = QDoubleSpinBox(self.page)
self.doubleSpinBox_prx.setObjectName(u"doubleSpinBox_prx")
self.doubleSpinBox_prx.setMinimum(-360.000000000000000)
self.doubleSpinBox_prx.setMaximum(360.000000000000000)
self.horizontalLayout_12.addWidget(self.doubleSpinBox_prx)
self.doubleSpinBox_pry = QDoubleSpinBox(self.page)
self.doubleSpinBox_pry.setObjectName(u"doubleSpinBox_pry")
self.doubleSpinBox_pry.setMinimum(-360.000000000000000)
self.doubleSpinBox_pry.setMaximum(360.000000000000000)
self.horizontalLayout_12.addWidget(self.doubleSpinBox_pry)
self.doubleSpinBox_prz = QDoubleSpinBox(self.page)
self.doubleSpinBox_prz.setObjectName(u"doubleSpinBox_prz")
self.doubleSpinBox_prz.setMinimum(-360.000000000000000)
self.doubleSpinBox_prz.setMaximum(360.000000000000000)
self.horizontalLayout_12.addWidget(self.doubleSpinBox_prz)
self.formLayout_5.setLayout(4, QFormLayout.FieldRole, self.horizontalLayout_12)
self.label_21 = QLabel(self.page)
self.label_21.setObjectName(u"label_21")
self.formLayout_5.setWidget(5, QFormLayout.LabelRole, self.label_21)
self.horizontalLayout_13 = QHBoxLayout()
self.horizontalLayout_13.setObjectName(u"horizontalLayout_13")
self.doubleSpinBox_hiplx = QDoubleSpinBox(self.page)
self.doubleSpinBox_hiplx.setObjectName(u"doubleSpinBox_hiplx")
self.doubleSpinBox_hiplx.setMinimum(-360.000000000000000)
self.doubleSpinBox_hiplx.setMaximum(360.000000000000000)
self.horizontalLayout_13.addWidget(self.doubleSpinBox_hiplx)
self.doubleSpinBox_hiply = QDoubleSpinBox(self.page)
self.doubleSpinBox_hiply.setObjectName(u"doubleSpinBox_hiply")
self.doubleSpinBox_hiply.setMinimum(-360.000000000000000)
self.doubleSpinBox_hiply.setMaximum(360.000000000000000)
self.horizontalLayout_13.addWidget(self.doubleSpinBox_hiply)
self.doubleSpinBox_hiplz = QDoubleSpinBox(self.page)
self.doubleSpinBox_hiplz.setObjectName(u"doubleSpinBox_hiplz")
self.doubleSpinBox_hiplz.setMinimum(-360.000000000000000)
self.doubleSpinBox_hiplz.setMaximum(360.000000000000000)
self.horizontalLayout_13.addWidget(self.doubleSpinBox_hiplz)
self.formLayout_5.setLayout(5, QFormLayout.FieldRole, self.horizontalLayout_13)
self.label_22 = QLabel(self.page)
self.label_22.setObjectName(u"label_22")
self.formLayout_5.setWidget(7, QFormLayout.LabelRole, self.label_22)
self.horizontalLayout_14 = QHBoxLayout()
self.horizontalLayout_14.setObjectName(u"horizontalLayout_14")
self.doubleSpinBox_kneelx = QDoubleSpinBox(self.page)
self.doubleSpinBox_kneelx.setObjectName(u"doubleSpinBox_kneelx")
self.doubleSpinBox_kneelx.setMinimum(-180.000000000000000)
self.doubleSpinBox_kneelx.setMaximum(180.000000000000000)
self.horizontalLayout_14.addWidget(self.doubleSpinBox_kneelx)
self.doubleSpinBox_kneely = QDoubleSpinBox(self.page)
self.doubleSpinBox_kneely.setObjectName(u"doubleSpinBox_kneely")
self.doubleSpinBox_kneely.setMinimum(-180.000000000000000)
self.doubleSpinBox_kneely.setMaximum(180.000000000000000)
self.horizontalLayout_14.addWidget(self.doubleSpinBox_kneely)
self.doubleSpinBox_kneelz = QDoubleSpinBox(self.page)
self.doubleSpinBox_kneelz.setObjectName(u"doubleSpinBox_kneelz")
self.doubleSpinBox_kneelz.setMinimum(-180.000000000000000)
self.doubleSpinBox_kneelz.setMaximum(180.000000000000000)
self.horizontalLayout_14.addWidget(self.doubleSpinBox_kneelz)
self.formLayout_5.setLayout(7, QFormLayout.FieldRole, self.horizontalLayout_14)
self.doubleSpinBox_scaling = QDoubleSpinBox(self.page)
self.doubleSpinBox_scaling.setObjectName(u"doubleSpinBox_scaling")
self.doubleSpinBox_scaling.setEnabled(False)
self.doubleSpinBox_scaling.setMinimum(-5.000000000000000)
self.doubleSpinBox_scaling.setMaximum(5.000000000000000)
self.doubleSpinBox_scaling.setSingleStep(0.100000000000000)
self.formLayout_5.setWidget(2, QFormLayout.FieldRole, self.doubleSpinBox_scaling)
self.gridLayout_3 = QGridLayout()
self.gridLayout_3.setObjectName(u"gridLayout_3")
self.formLayout_5.setLayout(1, QFormLayout.FieldRole, self.gridLayout_3)
self.label_5 = QLabel(self.page)
self.label_5.setObjectName(u"label_5")
self.formLayout_5.setWidget(6, QFormLayout.LabelRole, self.label_5)
self.label_7 = QLabel(self.page)
self.label_7.setObjectName(u"label_7")
self.formLayout_5.setWidget(8, QFormLayout.LabelRole, self.label_7)
self.horizontalLayout_3 = QHBoxLayout()
self.horizontalLayout_3.setObjectName(u"horizontalLayout_3")
self.doubleSpinBox_hiprx = QDoubleSpinBox(self.page)
self.doubleSpinBox_hiprx.setObjectName(u"doubleSpinBox_hiprx")
self.doubleSpinBox_hiprx.setMinimum(-360.000000000000000)
self.doubleSpinBox_hiprx.setMaximum(360.000000000000000)
self.horizontalLayout_3.addWidget(self.doubleSpinBox_hiprx)
self.doubleSpinBox_hipry = QDoubleSpinBox(self.page)
self.doubleSpinBox_hipry.setObjectName(u"doubleSpinBox_hipry")
self.doubleSpinBox_hipry.setMinimum(-360.000000000000000)
self.doubleSpinBox_hipry.setMaximum(360.000000000000000)
self.horizontalLayout_3.addWidget(self.doubleSpinBox_hipry)
self.doubleSpinBox_hiprz = QDoubleSpinBox(self.page)
self.doubleSpinBox_hiprz.setObjectName(u"doubleSpinBox_hiprz")
self.doubleSpinBox_hiprz.setMinimum(-360.000000000000000)
self.doubleSpinBox_hiprz.setMaximum(360.000000000000000)
self.horizontalLayout_3.addWidget(self.doubleSpinBox_hiprz)
self.formLayout_5.setLayout(6, QFormLayout.FieldRole, self.horizontalLayout_3)
self.horizontalLayout_4 = QHBoxLayout()
self.horizontalLayout_4.setObjectName(u"horizontalLayout_4")
self.doubleSpinBox_kneerx = QDoubleSpinBox(self.page)
self.doubleSpinBox_kneerx.setObjectName(u"doubleSpinBox_kneerx")
self.doubleSpinBox_kneerx.setMinimum(-180.000000000000000)
self.doubleSpinBox_kneerx.setMaximum(180.000000000000000)
self.horizontalLayout_4.addWidget(self.doubleSpinBox_kneerx)
self.doubleSpinBox_kneery = QDoubleSpinBox(self.page)
self.doubleSpinBox_kneery.setObjectName(u"doubleSpinBox_kneery")
self.doubleSpinBox_kneery.setMinimum(-180.000000000000000)
self.doubleSpinBox_kneery.setMaximum(180.000000000000000)
self.horizontalLayout_4.addWidget(self.doubleSpinBox_kneery)
self.doubleSpinBox_kneerz = QDoubleSpinBox(self.page)
self.doubleSpinBox_kneerz.setObjectName(u"doubleSpinBox_kneerz")
self.doubleSpinBox_kneerz.setMinimum(-180.000000000000000)
self.doubleSpinBox_kneerz.setMaximum(180.000000000000000)
self.horizontalLayout_4.addWidget(self.doubleSpinBox_kneerz)
self.formLayout_5.setLayout(8, QFormLayout.FieldRole, self.horizontalLayout_4)
self.verticalLayout_5.addLayout(self.formLayout_5)
self.horizontalLayout_15 = QHBoxLayout()
self.horizontalLayout_15.setObjectName(u"horizontalLayout_15")
self.pushButton_manual_reset = QPushButton(self.page)
self.pushButton_manual_reset.setObjectName(u"pushButton_manual_reset")
self.horizontalLayout_15.addWidget(self.pushButton_manual_reset)
self.pushButton_manual_accept = QPushButton(self.page)
self.pushButton_manual_accept.setObjectName(u"pushButton_manual_accept")
self.horizontalLayout_15.addWidget(self.pushButton_manual_accept)
self.verticalLayout_5.addLayout(self.horizontalLayout_15)
self.toolBox.addItem(self.page, u"Manual Registration")
self.page_reg = QWidget()
self.page_reg.setObjectName(u"page_reg")
self.page_reg.setGeometry(QRect(0, 0, 280, 328))
self.verticalLayout_2 = QVBoxLayout(self.page_reg)
self.verticalLayout_2.setObjectName(u"verticalLayout_2")
self.formLayout_4 = QFormLayout()
self.formLayout_4.setObjectName(u"formLayout_4")
self.label_13 = QLabel(self.page_reg)
self.label_13.setObjectName(u"label_13")
self.formLayout_4.setWidget(0, QFormLayout.LabelRole, self.label_13)
self.label_11 = QLabel(self.page_reg)
self.label_11.setObjectName(u"label_11")
self.formLayout_4.setWidget(1, QFormLayout.LabelRole, self.label_11)
self.spinBox_pcsToFit = QSpinBox(self.page_reg)
self.spinBox_pcsToFit.setObjectName(u"spinBox_pcsToFit")
self.spinBox_pcsToFit.setMinimum(1)
self.spinBox_pcsToFit.setValue(4)
self.formLayout_4.setWidget(1, QFormLayout.FieldRole, self.spinBox_pcsToFit)
self.label_12 = QLabel(self.page_reg)
self.label_12.setObjectName(u"label_12")
self.formLayout_4.setWidget(2, QFormLayout.LabelRole, self.label_12)
self.spinBox_mWeight = QDoubleSpinBox(self.page_reg)
self.spinBox_mWeight.setObjectName(u"spinBox_mWeight")
self.spinBox_mWeight.setSingleStep(0.100000000000000)
self.formLayout_4.setWidget(2, QFormLayout.FieldRole, self.spinBox_mWeight)
self.label_6 = QLabel(self.page_reg)
self.label_6.setObjectName(u"label_6")
self.formLayout_4.setWidget(3, QFormLayout.LabelRole, self.label_6)
self.checkBox_kneecorr = QCheckBox(self.page_reg)
self.checkBox_kneecorr.setObjectName(u"checkBox_kneecorr")
self.formLayout_4.setWidget(3, QFormLayout.FieldRole, self.checkBox_kneecorr)
self.label_10 = QLabel(self.page_reg)
self.label_10.setObjectName(u"label_10")
self.formLayout_4.setWidget(4, QFormLayout.LabelRole, self.label_10)
self.checkBox_kneedof = QCheckBox(self.page_reg)
self.checkBox_kneedof.setObjectName(u"checkBox_kneedof")
self.formLayout_4.setWidget(4, QFormLayout.FieldRole, self.checkBox_kneedof)
self.comboBox_regmode = QComboBox(self.page_reg)
self.comboBox_regmode.setObjectName(u"comboBox_regmode")
self.formLayout_4.setWidget(0, QFormLayout.FieldRole, self.comboBox_regmode)
self.verticalLayout_2.addLayout(self.formLayout_4)
self.gridLayout_2 = QGridLayout()
self.gridLayout_2.setObjectName(u"gridLayout_2")
self.pushButton_auto_reg = QPushButton(self.page_reg)
self.pushButton_auto_reg.setObjectName(u"pushButton_auto_reg")
self.gridLayout_2.addWidget(self.pushButton_auto_reg, 0, 0, 1, 1)
self.pushButton_auto_abort = QPushButton(self.page_reg)
self.pushButton_auto_abort.setObjectName(u"pushButton_auto_abort")
self.gridLayout_2.addWidget(self.pushButton_auto_abort, 1, 0, 1, 1)
self.pushButton_auto_reset = QPushButton(self.page_reg)
self.pushButton_auto_reset.setObjectName(u"pushButton_auto_reset")
self.gridLayout_2.addWidget(self.pushButton_auto_reset, 0, 1, 1, 1)
self.pushButton_auto_accept = QPushButton(self.page_reg)
self.pushButton_auto_accept.setObjectName(u"pushButton_auto_accept")
self.gridLayout_2.addWidget(self.pushButton_auto_accept, 1, 1, 1, 1)
self.verticalLayout_2.addLayout(self.gridLayout_2)
self.errorGroup = QGroupBox(self.page_reg)
self.errorGroup.setObjectName(u"errorGroup")
self.formLayout_2 = QFormLayout(self.errorGroup)
self.formLayout_2.setObjectName(u"formLayout_2")
self.formLayout_2.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow)
self.landmarkErrorLabel = QLabel(self.errorGroup)
self.landmarkErrorLabel.setObjectName(u"landmarkErrorLabel")
self.formLayout_2.setWidget(0, QFormLayout.LabelRole, self.landmarkErrorLabel)
self.lineEdit_landmarkError = QLineEdit(self.errorGroup)
self.lineEdit_landmarkError.setObjectName(u"lineEdit_landmarkError")
self.lineEdit_landmarkError.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
self.lineEdit_landmarkError.setReadOnly(True)
self.formLayout_2.setWidget(0, QFormLayout.FieldRole, self.lineEdit_landmarkError)
self.mDistLabel = QLabel(self.errorGroup)
self.mDistLabel.setObjectName(u"mDistLabel")
self.formLayout_2.setWidget(1, QFormLayout.LabelRole, self.mDistLabel)
self.lineEdit_mDist = QLineEdit(self.errorGroup)
self.lineEdit_mDist.setObjectName(u"lineEdit_mDist")
self.lineEdit_mDist.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
self.lineEdit_mDist.setReadOnly(True)
self.formLayout_2.setWidget(1, QFormLayout.FieldRole, self.lineEdit_mDist)
self.verticalLayout_2.addWidget(self.errorGroup)
self.toolBox.addItem(self.page_reg, u"Auto Registration")
self.Screenshot = QWidget()
self.Screenshot.setObjectName(u"Screenshot")
self.Screenshot.setGeometry(QRect(0, 0, 191, 143))
self.formLayout = QFormLayout(self.Screenshot)
self.formLayout.setObjectName(u"formLayout")
self.pixelsXLabel = QLabel(self.Screenshot)
self.pixelsXLabel.setObjectName(u"pixelsXLabel")
sizePolicy3 = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Preferred)
sizePolicy3.setHorizontalStretch(0)
sizePolicy3.setVerticalStretch(0)
sizePolicy3.setHeightForWidth(self.pixelsXLabel.sizePolicy().hasHeightForWidth())
self.pixelsXLabel.setSizePolicy(sizePolicy3)
self.formLayout.setWidget(0, QFormLayout.LabelRole, self.pixelsXLabel)
self.screenshotPixelXLineEdit = QLineEdit(self.Screenshot)
self.screenshotPixelXLineEdit.setObjectName(u"screenshotPixelXLineEdit")
sizePolicy4 = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
sizePolicy4.setHorizontalStretch(0)
sizePolicy4.setVerticalStretch(0)
sizePolicy4.setHeightForWidth(self.screenshotPixelXLineEdit.sizePolicy().hasHeightForWidth())
self.screenshotPixelXLineEdit.setSizePolicy(sizePolicy4)
self.formLayout.setWidget(0, QFormLayout.FieldRole, self.screenshotPixelXLineEdit)
self.pixelsYLabel = QLabel(self.Screenshot)
self.pixelsYLabel.setObjectName(u"pixelsYLabel")
sizePolicy3.setHeightForWidth(self.pixelsYLabel.sizePolicy().hasHeightForWidth())
self.pixelsYLabel.setSizePolicy(sizePolicy3)
self.formLayout.setWidget(1, QFormLayout.LabelRole, self.pixelsYLabel)
self.screenshotPixelYLineEdit = QLineEdit(self.Screenshot)
self.screenshotPixelYLineEdit.setObjectName(u"screenshotPixelYLineEdit")
sizePolicy4.setHeightForWidth(self.screenshotPixelYLineEdit.sizePolicy().hasHeightForWidth())
self.screenshotPixelYLineEdit.setSizePolicy(sizePolicy4)
self.formLayout.setWidget(1, QFormLayout.FieldRole, self.screenshotPixelYLineEdit)
self.screenshotFilenameLabel = QLabel(self.Screenshot)
self.screenshotFilenameLabel.setObjectName(u"screenshotFilenameLabel")
self.formLayout.setWidget(2, QFormLayout.LabelRole, self.screenshotFilenameLabel)
self.screenshotFilenameLineEdit = QLineEdit(self.Screenshot)
self.screenshotFilenameLineEdit.setObjectName(u"screenshotFilenameLineEdit")
sizePolicy4.setHeightForWidth(self.screenshotFilenameLineEdit.sizePolicy().hasHeightForWidth())
self.screenshotFilenameLineEdit.setSizePolicy(sizePolicy4)
self.formLayout.setWidget(2, QFormLayout.FieldRole, self.screenshotFilenameLineEdit)
self.screenshotSaveButton = QPushButton(self.Screenshot)
self.screenshotSaveButton.setObjectName(u"screenshotSaveButton")
sizePolicy5 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
sizePolicy5.setHorizontalStretch(0)
sizePolicy5.setVerticalStretch(0)
sizePolicy5.setHeightForWidth(self.screenshotSaveButton.sizePolicy().hasHeightForWidth())
self.screenshotSaveButton.setSizePolicy(sizePolicy5)
self.formLayout.setWidget(3, QFormLayout.FieldRole, self.screenshotSaveButton)
self.toolBox.addItem(self.Screenshot, u"Screenshots")
self.verticalLayout_3.addWidget(self.toolBox)
self.gridLayout.addWidget(self.widget, 0, 0, 1, 1)
self.horizontalLayout_2.addWidget(self.widgetMain)
QWidget.setTabOrder(self.tableWidget, self.tableWidgetLandmarks)
QWidget.setTabOrder(self.tableWidgetLandmarks, self.pushButton_addLandmark)
QWidget.setTabOrder(self.pushButton_addLandmark, self.pushButton_removeLandmark)
QWidget.setTabOrder(self.pushButton_removeLandmark, self.doubleSpinBox_markerRadius)
QWidget.setTabOrder(self.doubleSpinBox_markerRadius, self.doubleSpinBox_skinPad)
QWidget.setTabOrder(self.doubleSpinBox_skinPad, self.doubleSpinBox_scaling)
QWidget.setTabOrder(self.doubleSpinBox_scaling, self.doubleSpinBox_ptx)
QWidget.setTabOrder(self.doubleSpinBox_ptx, self.doubleSpinBox_pty)
QWidget.setTabOrder(self.doubleSpinBox_pty, self.doubleSpinBox_ptz)
QWidget.setTabOrder(self.doubleSpinBox_ptz, self.doubleSpinBox_prx)
QWidget.setTabOrder(self.doubleSpinBox_prx, self.doubleSpinBox_pry)
QWidget.setTabOrder(self.doubleSpinBox_pry, self.doubleSpinBox_prz)
QWidget.setTabOrder(self.doubleSpinBox_prz, self.doubleSpinBox_hiplx)
QWidget.setTabOrder(self.doubleSpinBox_hiplx, self.doubleSpinBox_hiply)
QWidget.setTabOrder(self.doubleSpinBox_hiply, self.doubleSpinBox_hiplz)
QWidget.setTabOrder(self.doubleSpinBox_hiplz, self.doubleSpinBox_hiprx)
QWidget.setTabOrder(self.doubleSpinBox_hiprx, self.doubleSpinBox_hipry)
QWidget.setTabOrder(self.doubleSpinBox_hipry, self.doubleSpinBox_hiprz)
QWidget.setTabOrder(self.doubleSpinBox_hiprz, self.doubleSpinBox_kneelx)
QWidget.setTabOrder(self.doubleSpinBox_kneelx, self.doubleSpinBox_kneely)
QWidget.setTabOrder(self.doubleSpinBox_kneely, self.doubleSpinBox_kneelz)
QWidget.setTabOrder(self.doubleSpinBox_kneelz, self.doubleSpinBox_kneerx)
QWidget.setTabOrder(self.doubleSpinBox_kneerx, self.doubleSpinBox_kneery)
QWidget.setTabOrder(self.doubleSpinBox_kneery, self.doubleSpinBox_kneerz)
QWidget.setTabOrder(self.doubleSpinBox_kneerz, self.pushButton_manual_reset)
QWidget.setTabOrder(self.pushButton_manual_reset, self.pushButton_manual_accept)
QWidget.setTabOrder(self.pushButton_manual_accept, self.comboBox_regmode)
QWidget.setTabOrder(self.comboBox_regmode, self.spinBox_pcsToFit)
QWidget.setTabOrder(self.spinBox_pcsToFit, self.spinBox_mWeight)
QWidget.setTabOrder(self.spinBox_mWeight, self.checkBox_kneecorr)
QWidget.setTabOrder(self.checkBox_kneecorr, self.checkBox_kneedof)
QWidget.setTabOrder(self.checkBox_kneedof, self.pushButton_auto_reg)
QWidget.setTabOrder(self.pushButton_auto_reg, self.pushButton_auto_reset)
QWidget.setTabOrder(self.pushButton_auto_reset, self.pushButton_auto_abort)
QWidget.setTabOrder(self.pushButton_auto_abort, self.pushButton_auto_accept)
QWidget.setTabOrder(self.pushButton_auto_accept, self.lineEdit_landmarkError)
QWidget.setTabOrder(self.lineEdit_landmarkError, self.lineEdit_mDist)
QWidget.setTabOrder(self.lineEdit_mDist, self.screenshotPixelXLineEdit)
QWidget.setTabOrder(self.screenshotPixelXLineEdit, self.screenshotPixelYLineEdit)
QWidget.setTabOrder(self.screenshotPixelYLineEdit, self.screenshotFilenameLineEdit)
QWidget.setTabOrder(self.screenshotFilenameLineEdit, self.screenshotSaveButton)
self.retranslateUi(Dialog)
self.toolBox.setCurrentIndex(1)
QMetaObject.connectSlotsByName(Dialog)
# setupUi
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QCoreApplication.translate("Dialog", u"2-Sided Lower Limb Registration", None))
___qtablewidgetitem = self.tableWidget.horizontalHeaderItem(0)
___qtablewidgetitem.setText(QCoreApplication.translate("Dialog", u"Visible", None));
___qtablewidgetitem1 = self.tableWidgetLandmarks.horizontalHeaderItem(0)
___qtablewidgetitem1.setText(QCoreApplication.translate("Dialog", u"Model Landmarks", None));
___qtablewidgetitem2 = self.tableWidgetLandmarks.horizontalHeaderItem(1)
___qtablewidgetitem2.setText(QCoreApplication.translate("Dialog", u"MoCap Landmarks", None));
self.label_23.setText(QCoreApplication.translate("Dialog", u"Marker Radius", None))
self.label_24.setText(QCoreApplication.translate("Dialog", u"Skin Padding", None))
self.pushButton_addLandmark.setText(QCoreApplication.translate("Dialog", u"Add Landmark", None))
self.pushButton_removeLandmark.setText(QCoreApplication.translate("Dialog", u"Remove Landmarks", None))
self.toolBox.setItemText(self.toolBox.indexOf(self.page_2), QCoreApplication.translate("Dialog", u"Landmarks", None))
self.label_14.setText(QCoreApplication.translate("Dialog", u"Shape Modes:", None))
self.label_18.setText(QCoreApplication.translate("Dialog", u"Scaling:", None))
self.label_19.setText(QCoreApplication.translate("Dialog", u"Pelvis Trans.", None))
self.label_20.setText(QCoreApplication.translate("Dialog", u"Pelvis Rot.", None))
self.label_21.setText(QCoreApplication.translate("Dialog", u"Hip Rot. Left", None))
self.label_22.setText(QCoreApplication.translate("Dialog", u"Knee Rot. Left", None))
self.label_5.setText(QCoreApplication.translate("Dialog", u"Hip Rot. Right", None))
self.label_7.setText(QCoreApplication.translate("Dialog", u"Knee Rot. Right", None))
self.pushButton_manual_reset.setText(QCoreApplication.translate("Dialog", u"Reset", None))
self.pushButton_manual_accept.setText(QCoreApplication.translate("Dialog", u"Accept", None))
self.toolBox.setItemText(self.toolBox.indexOf(self.page), QCoreApplication.translate("Dialog", u"Manual Registration", None))
self.label_13.setText(QCoreApplication.translate("Dialog", u"Registration Mode:", None))
self.label_11.setText(QCoreApplication.translate("Dialog", u"PCs to Fit:", None))
self.label_12.setText(QCoreApplication.translate("Dialog", u"Mahalanobis Weight:", None))
self.label_6.setText(QCoreApplication.translate("Dialog", u"Correct Knee Abd.:", None))
self.checkBox_kneecorr.setText("")
self.label_10.setText(QCoreApplication.translate("Dialog", u"Fit Knee Abd.:", None))
self.checkBox_kneedof.setText("")
self.pushButton_auto_reg.setText(QCoreApplication.translate("Dialog", u"Register", None))
self.pushButton_auto_abort.setText(QCoreApplication.translate("Dialog", u"Abort", None))
self.pushButton_auto_reset.setText(QCoreApplication.translate("Dialog", u"Reset", None))
self.pushButton_auto_accept.setText(QCoreApplication.translate("Dialog", u"Accept", None))
self.errorGroup.setTitle(QCoreApplication.translate("Dialog", u"Registration Results", None))
self.landmarkErrorLabel.setText(QCoreApplication.translate("Dialog", u"Landmark Error (mm RMS):", None))
#if QT_CONFIG(whatsthis)
self.mDistLabel.setWhatsThis(QCoreApplication.translate("Dialog", u"Percentage of landmarks that have converged to their texture match.", None))
#endif // QT_CONFIG(whatsthis)
self.mDistLabel.setText(QCoreApplication.translate("Dialog", u"Mahalanobis Distance:", None))
self.toolBox.setItemText(self.toolBox.indexOf(self.page_reg), QCoreApplication.translate("Dialog", u"Auto Registration", None))
self.pixelsXLabel.setText(QCoreApplication.translate("Dialog", u"Pixels X:", None))
self.screenshotPixelXLineEdit.setText(QCoreApplication.translate("Dialog", u"800", None))
self.pixelsYLabel.setText(QCoreApplication.translate("Dialog", u"Pixels Y:", None))
self.screenshotPixelYLineEdit.setText(QCoreApplication.translate("Dialog", u"600", None))
self.screenshotFilenameLabel.setText(QCoreApplication.translate("Dialog", u"Filename:", None))
self.screenshotFilenameLineEdit.setText(QCoreApplication.translate("Dialog", u"screenshot.png", None))
self.screenshotSaveButton.setText(QCoreApplication.translate("Dialog", u"Save Screenshot", None))
self.toolBox.setItemText(self.toolBox.indexOf(self.Screenshot), QCoreApplication.translate("Dialog", u"Screenshots", None))
# retranslateUi
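# Typical use of this generated UI class (a sketch; the enclosing class name
# Ui_Dialog is assumed here, following the usual pyside6-uic convention):
#     dialog = QDialog()
#     ui = Ui_Dialog()
#     ui.setupUi(dialog)
#     dialog.show()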
| {
"content_hash": "75afdae26662fc6ec89016e10d981e02",
"timestamp": "",
"source": "github",
"line_count": 658,
"max_line_length": 152,
"avg_line_length": 50.787234042553195,
"alnum_prop": 0.7352025854330002,
"repo_name": "mapclient-plugins/fieldworklowerlimb2sidegenerationstep",
"id": "68bac7638cef908bf4ada9fe817a6f6732647d9d",
"size": "33820",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mapclientplugins/fieldworklowerlimb2sidegenerationstep/ui_lowerlimbgenerationdialog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "123344"
}
],
"symlink_target": ""
} |
import asyncio
from typing import Sequence, Optional, TYPE_CHECKING
import aiorpcx
from .util import bh2u, VerifiedTxInfo, NetworkJobOnDefaultServer
from .bitcoin import Hash, hash_decode, hash_encode
from .transaction import Transaction
from .blockchain import hash_header
from .interface import GracefulDisconnect
from . import constants
if TYPE_CHECKING:
from .network import Network
from .address_synchronizer import AddressSynchronizer
class MerkleVerificationFailure(Exception): pass
class MissingBlockHeader(MerkleVerificationFailure): pass
class MerkleRootMismatch(MerkleVerificationFailure): pass
class InnerNodeOfSpvProofIsValidTx(MerkleVerificationFailure): pass
class SPV(NetworkJobOnDefaultServer):
""" Simple Payment Verification """
def __init__(self, network: 'Network', wallet: 'AddressSynchronizer'):
self.wallet = wallet
NetworkJobOnDefaultServer.__init__(self, network)
def _reset(self):
super()._reset()
self.merkle_roots = {} # txid -> merkle root (once it has been verified)
self.requested_merkle = set() # txid set of pending requests
async def _start_tasks(self):
async with self.group as group:
await group.spawn(self.main)
def diagnostic_name(self):
return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
async def main(self):
self.blockchain = self.network.blockchain()
while True:
await self._maybe_undo_verifications()
await self._request_proofs()
await asyncio.sleep(0.1)
async def _request_proofs(self):
local_height = self.blockchain.height()
unverified = self.wallet.get_unverified_txs()
for tx_hash, tx_height in unverified.items():
# do not request merkle branch if we already requested it
if tx_hash in self.requested_merkle or tx_hash in self.merkle_roots:
continue
# or before headers are available
if tx_height <= 0 or tx_height > local_height:
continue
# if it's in the checkpoint region, we still might not have the header
header = self.blockchain.read_header(tx_height)
if header is None:
if tx_height < constants.net.max_checkpoint():
await self.group.spawn(self.network.request_chunk(tx_height, None, can_return_early=True))
continue
# request now
self.print_error('requested merkle', tx_hash)
self.requested_merkle.add(tx_hash)
await self.group.spawn(self._request_and_verify_single_proof, tx_hash, tx_height)
async def _request_and_verify_single_proof(self, tx_hash, tx_height):
try:
merkle = await self.network.get_merkle_for_transaction(tx_hash, tx_height)
except aiorpcx.jsonrpc.RPCError as e:
self.print_error('tx {} not at height {}'.format(tx_hash, tx_height))
self.wallet.remove_unverified_tx(tx_hash, tx_height)
try: self.requested_merkle.remove(tx_hash)
except KeyError: pass
return
        # Verify that the server-provided merkle branch for the transaction
        # hashes up to the merkle root of its block
if tx_height != merkle.get('block_height'):
self.print_error('requested tx_height {} differs from received tx_height {} for txid {}'
.format(tx_height, merkle.get('block_height'), tx_hash))
tx_height = merkle.get('block_height')
pos = merkle.get('pos')
merkle_branch = merkle.get('merkle')
# we need to wait if header sync/reorg is still ongoing, hence lock:
async with self.network.bhi_lock:
header = self.network.blockchain().read_header(tx_height)
try:
verify_tx_is_in_block(tx_hash, merkle_branch, pos, header, tx_height)
except MerkleVerificationFailure as e:
self.print_error(str(e))
raise GracefulDisconnect(e)
# we passed all the tests
self.merkle_roots[tx_hash] = header.get('merkle_root')
try: self.requested_merkle.remove(tx_hash)
except KeyError: pass
self.print_error("verified %s" % tx_hash)
header_hash = hash_header(header)
vtx_info = VerifiedTxInfo(tx_height, header.get('timestamp'), pos, header_hash)
self.wallet.add_verified_tx(tx_hash, vtx_info)
if self.is_up_to_date() and self.wallet.is_up_to_date():
self.wallet.save_verified_tx(write=True)
@classmethod
def hash_merkle_root(cls, merkle_branch: Sequence[str], tx_hash: str, leaf_pos_in_tree: int):
"""Return calculated merkle root."""
try:
h = hash_decode(tx_hash)
merkle_branch_bytes = [hash_decode(item) for item in merkle_branch]
int(leaf_pos_in_tree) # raise if invalid
except Exception as e:
raise MerkleVerificationFailure(e)
for i, item in enumerate(merkle_branch_bytes):
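            # Bit i of the leaf position tells us on which side our running
            # hash sits at depth i: if the bit is set we are the right child,
            # so the sibling hash is prepended; otherwise it is appended.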
h = Hash(item + h) if ((leaf_pos_in_tree >> i) & 1) else Hash(h + item)
cls._raise_if_valid_tx(bh2u(h))
return hash_encode(h)
@classmethod
def _raise_if_valid_tx(cls, raw_tx: str):
# If an inner node of the merkle proof is also a valid tx, chances are, this is an attack.
# https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-June/016105.html
# https://lists.linuxfoundation.org/pipermail/bitcoin-dev/attachments/20180609/9f4f5b1f/attachment-0001.pdf
# https://bitcoin.stackexchange.com/questions/76121/how-is-the-leaf-node-weakness-in-merkle-trees-exploitable/76122#76122
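        # Background from the links above: an attacker can craft a 64-byte
        # transaction whose serialization doubles as an inner node of the
        # merkle tree. To stay conservative, reject the proof as soon as any
        # value computed along the branch deserializes as a transaction.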
tx = Transaction(raw_tx)
try:
tx.deserialize()
except:
pass
else:
raise InnerNodeOfSpvProofIsValidTx()
async def _maybe_undo_verifications(self):
def undo_verifications():
height = self.blockchain.get_max_forkpoint()
self.print_error("undoing verifications back to height {}".format(height))
tx_hashes = self.wallet.undo_verifications(self.blockchain, height)
for tx_hash in tx_hashes:
self.print_error("redoing", tx_hash)
self.remove_spv_proof_for_tx(tx_hash)
if self.network.blockchain() != self.blockchain:
self.blockchain = self.network.blockchain()
undo_verifications()
def remove_spv_proof_for_tx(self, tx_hash):
self.merkle_roots.pop(tx_hash, None)
try:
self.requested_merkle.remove(tx_hash)
except KeyError:
pass
def is_up_to_date(self):
return not self.requested_merkle
def verify_tx_is_in_block(tx_hash: str, merkle_branch: Sequence[str],
leaf_pos_in_tree: int, block_header: Optional[dict],
block_height: int) -> None:
"""Raise MerkleVerificationFailure if verification fails."""
if not block_header:
raise MissingBlockHeader("merkle verification failed for {} (missing header {})"
.format(tx_hash, block_height))
calc_merkle_root = SPV.hash_merkle_root(merkle_branch, tx_hash, leaf_pos_in_tree)
if block_header.get('merkle_root') != calc_merkle_root:
raise MerkleRootMismatch("merkle verification failed for {} ({} != {})".format(
tx_hash, block_header.get('merkle_root'), calc_merkle_root))
| {
"content_hash": "d7fdf45229b8920f5a40764fc41dec3d",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 129,
"avg_line_length": 44.005847953216374,
"alnum_prop": 0.6336212624584717,
"repo_name": "cryptapus/electrum",
"id": "714bd8f9a204e4a19bc74aea9823a45de5845049",
"size": "8664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum/verifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1169"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "1574"
},
{
"name": "Makefile",
"bytes": "874"
},
{
"name": "NSIS",
"bytes": "7316"
},
{
"name": "Python",
"bytes": "2195369"
},
{
"name": "Shell",
"bytes": "20819"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, get_object_or_404
from django.template import RequestContext
from pbl.models import Treatment
def view_treatment(request, slug):
treatment = get_object_or_404(Treatment, slug=slug)
return render(request, 'pbl/treatment.html', {'treatment': treatment,
'treatments': Treatment.objects.all()})
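# A URLconf entry along these lines (hypothetical; not part of this file) would
# route requests to the views above:
#     url(r'^treatment/(?P<slug>[-\w]+)/$', views.view_treatment, name='treatment')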
def home(request):
return render(request, 'pbl/index.html', {'treatments': Treatment.objects.all()}) | {
"content_hash": "24d743932e34bc6b4bf33434864c9e29",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 83,
"avg_line_length": 40,
"alnum_prop": 0.759090909090909,
"repo_name": "hamole/pbl8",
"id": "bfdf2071e57281f654d333324085b7cfb06aa4a0",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pbl8_project/pbl/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "32871"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
} |
__author__ = 'cmantas'
from sys import stderr
from os.path import exists
from os import mkdir, remove, makedirs
from time import time, sleep
import ntpath
import thread
from lib.scp_utils import *
from lib.persistance_module import env_vars, home
from lib.tiramola_logging import get_logger
#choose the IaaS provider
infrastructure = env_vars['iaas']
if infrastructure == 'okeanos':
from lib import connector_okeanos as iaas
if infrastructure == 'openstack':
from lib import connector_eucalyptus as iaas
LOGS_DIR = home+"files/VM_logs"
ATTEMPT_INTERVAL = 2
class VM (object):
class Address:
"""
Helper class that represents an IP address
"""
def __init__(self, version, ip, in_type):
self.version = version
self.ip = ip
self.type = in_type
def __str__(self):
rv = "%s IPv%d: %s" % (self.type, self.version, self.ip)
return rv
def __init__(self, name, flavor_id, image_id, create=False, wait=False, IPv4=False, log_path=LOGS_DIR):
"""
VM class constructor
"""
#set attributes
self.created = False
self.name = name
self.flavor_id = flavor_id
self.log_path = log_path
self.image_id = image_id
self.public_addresses = []
self.addresses = []
self.id = -1
self.IPv4 = IPv4
if not exists(LOGS_DIR):
makedirs(LOGS_DIR)
self.logfile = "%s/%s.log" % (LOGS_DIR, self.name)
self.log = get_logger('[%s]' % self.name, 'INFO', logfile=self.logfile)
if create:
self.create(wait)
def load_addresses(self):
"""
loads the IP interfaces from the IaaS
:return:
"""
addr_list = iaas.get_addreses(self.id)
for a in addr_list:
addr = self.Address(a['version'], a['ip'], a['type'])
self.addresses.append(addr)
def from_dict(self, in_dict):
"""
        creates a VM from a dictionary containing 'name' and 'id' records
"""
self.name = in_dict['name']
self.id = in_dict['id']
def create(self, wait=False):
if wait:
self.log.info("creating (synchronously)")
self.create_sync()
self.wait_ready()
else:
self.log.info("creating (async)")
thread.start_new_thread(self.create_sync, ())
def create_sync(self):
"""
Creates this VM in the underlying IaaS provider
"""
#start the timer
timer = Timer()
timer.start()
try:
self.id = iaas.create_vm(self.name, self.flavor_id, self.image_id, self.IPv4, self.log)
except Exception as e:
self.log.error(e)
exit(-1)
new_status = iaas.get_vm_status(self.id)
delta = timer.stop()
if new_status == 'ERROR':
self.log.error("IaaS creation FAILED")
return
self.log.info('IaaS status is now %s (took %d sec)' % (new_status, delta ) )
self.created = True
self.load_addresses()
def shutdown(self):
"""
Issues the 'shutdown' command to the IaaS provider
"""
self.log.info('Shutting down (id: %d)' % self.id)
return iaas.shutdown_vm(self.id)
def startup(self):
"""
boots up an existing VM instance in okeanos
        :return: True if the VM exists, False if not
"""
        if not self.created:
            return False
self.log.info('starting up (id: %d)' % self.id)
return iaas.startup_vm(self.id)
def destroy(self):
"""Issues the 'destory' command to the IaaS provider """
self.log.info("Destroying ")
iaas.destroy_vm(self.id)
#delete the logfile
try:
remove(self.logfile)
except: pass
def __str__(self):
text = ''
text += '========== VM '+self.name+" ===========\n"
text += "ID: "+str(self.id)+'\n'
text += "Addresses (%s):" % len(self.addresses)
for a in self.addresses:
text += " [" + str(a) + "],"
text += "\nCloud Status: %s\n" % self.get_cloud_status()
return text
@staticmethod
def vm_from_dict(in_dict):
"""
creates a VM instance from a synnefo "server" dict
:param in_dict: "server" or "server details" dictionary from synnefo
:return: a VM instance for an existing vm
"""
vm_id, name, flavor_id, image_id = in_dict['id'], in_dict['name'].strip(), in_dict['flavor_id'], in_dict['image_id']
rv = VM(name, flavor_id, image_id)
rv.created = True
rv.id = vm_id
rv.load_addresses()
return rv
@staticmethod
def from_id(vm_id):
""" creates a VM instance from the VM id """
vm_dict = iaas.get_vm_details(vm_id)
return VM.vm_from_dict(vm_dict)
def get_cloud_status(self):
return iaas.get_vm_status(self.id)
def run_command(self, command, user='root', indent=0, prefix="\t$: ", silent=False):
"""
        runs a command on this VM if it actually exists
:param command:
:param user:
:return:
"""
if not self.created:
stderr.write('this VM does not exist (yet),'
' so you cannot run commands on it')
return "ERROR"
self.log.debug("running SSH command:\n\n%s\n\n" % reindent(command, 5))
rv = run_ssh_command(self.get_public_addr(), user, command, indent, prefix, logger=self.log)
if rv is not None:
self.log.debug("command returned:\n\n %s\n\n" % rv)
return rv
def put_files(self, files, user='root', remote_path='.', recursive=False):
"""
Puts a file or a list of files to this VM
"""
put_file_scp(self.get_public_addr(), user, files, remote_path, recursive)
def run_files(self, files):
"""
puts a file in the VM and then runs it
:param files:
:return:
"""
self.put_files(files)
filename = ''
remote_path = ''
if not isinstance(files, (list, tuple)):
head, tail = ntpath.split(files)
filename = tail or ntpath.basename(head)
remote_path = "~/scripts/" + filename
else:
for f in files:
head, tail = ntpath.split(f)
short_fname = (tail or ntpath.basename(head))
filename += short_fname + ' '
remote_path += "~/scripts/"+short_fname+"; "
#generate the command that runs the desired scripts
command = 'chmod +x %s; ' \
'mkdir -p scripts;' \
'mv %s ~/scripts/ 2>/dev/null;' \
'%s'\
% (filename, filename, remote_path)
return self.run_command(command)
def wait_ready(self):
"""
Waits until it is able to run SSH commands on the VM or a timeout is reached
"""
success = False
attempts = 0
if not self.created:
self.log.debug("Not active yet. Sleeping")
while not self.created: sleep(3)
self.log.debug("Waiting for SSH daemon (%s)" % self.get_public_addr())
#time to stop trying
end_time = datetime.now()+timedelta(seconds=env_vars['ssh_giveup_timeout'])
self.log.debug("end time:"+str(end_time))
timer = Timer()
timer.start()
#self.log.info(("VM: Trying ssh, attempt "),
while not success:
#if(attempts%5 == 0): self.log.info( ("%d" % attempts),
attempts += 1
self.log.debug("ssh attempt:"+str(attempts))
if test_ssh(self.get_public_addr(), 'root', logger=self.log):
success = True
else:
if datetime.now() > end_time:
break
sleep(ATTEMPT_INTERVAL)
delta = timer.stop()
if success:
self.log.debug("now ready (took %d sec)" % delta)
else:
self.log.error(" FAILED to be SSH-able (after %d sec)" % delta)
return success
def get_public_addr(self):
""" Returns a publicly accessible IP address !!! for now, only checks for IPv6+fixed !!!"""
rv = None
if len(self.addresses) == 0:
self.load_addresses()
for i in self.addresses:
if i.type == "fixed" and i.version == 6:
rv = i.ip
#if there is a floating IP available, return this one
for i in self.addresses:
if i.type == "floating" and i.version == 4:
rv = i.ip
return rv
def get_private_addr(self):
if len(self.addresses) == 0:
self.load_addresses()
#find fixed ip
for i in self.addresses:
if i.version == 4 and i.type == "fixed":
return i.ip
def get_all_vms(check_active=False):
"""
Creates VM instances for all the VMs of the user available in the IaaS
"""
log = get_logger("VM [static]", 'INFO')
log.debug("getting all VMs")
vms = []
vm_ids = iaas.get_all_vm_ids()
for vm_id in vm_ids:
vm = VM.vm_from_dict(iaas.get_vm_details(vm_id))
if check_active and vm.get_cloud_status() != "ACTIVE":
continue
else:
vms.append(vm)
return vms
if not exists(LOGS_DIR):
mkdir(LOGS_DIR)
class Timer():
"""
    Helper class that gives the ability to measure time between events
"""
def __init__(self):
self.started = False
self.start_time = 0
def start(self):
assert self.started is False, " Timer already started"
self.started = True
self.start_time = int(round(time() * 1000))
def stop(self):
end_time = int(round(time() * 1000))
assert self.started is True, " Timer had not been started"
start_time = self.start_time
self.start_time = 0
self.started = False
return float(end_time - start_time)/1000
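    # Typical usage (sketch): t = Timer.get_timer(); ...; elapsed_sec = t.stop()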
@staticmethod
def get_timer():
timer = Timer()
timer.start()
return timer | {
"content_hash": "bed469fc4bca0eea59c2289f269f1819",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 124,
"avg_line_length": 32.125,
"alnum_prop": 0.540272373540856,
"repo_name": "cmantas/tiramola_v3",
"id": "432b2740f80250cf5d6301bc3316693167c97b44",
"size": "10280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VM.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "14788"
},
{
"name": "Makefile",
"bytes": "934"
},
{
"name": "PHP",
"bytes": "1923"
},
{
"name": "Python",
"bytes": "237908"
},
{
"name": "Shell",
"bytes": "26636"
}
],
"symlink_target": ""
} |
import simple_web_server
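# This assumes the running simple_web_server loop polls its module-level
# `close` flag and shuts itself down once the flag becomes True.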
simple_web_server.close = True
print("Server Stopped.")
| {
"content_hash": "79e6070d0eb78281f783f24753745f39",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 30,
"avg_line_length": 21.5,
"alnum_prop": 0.7325581395348837,
"repo_name": "astinaam/Server",
"id": "a691d6d79b6668528b2b576496c50380f5df50b0",
"size": "86",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server_Final/bin/stop_server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "334"
},
{
"name": "Batchfile",
"bytes": "168"
},
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "218"
},
{
"name": "Python",
"bytes": "22055"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
} |
"""
sphinx.ext.autosummary
~~~~~~~~~~~~~~~~~~~~~~
Sphinx extension that adds an autosummary:: directive, which can be
used to generate function/method/attribute/etc. summary lists, similar
to those output eg. by Epydoc and other API doc generation tools.
An :autolink: role is also provided.
autosummary directive
---------------------
The autosummary directive has the form::
.. autosummary::
:nosignatures:
:toctree: generated/
module.function_1
module.function_2
...
and it generates an output table (containing signatures, optionally)
======================== =============================================
module.function_1(args) Summary line from the docstring of function_1
module.function_2(args) Summary line from the docstring
...
======================== =============================================
If the :toctree: option is specified, files matching the function names
are inserted to the toctree with the given prefix:
generated/module.function_1
generated/module.function_2
...
Note: The file names contain the module:: or currentmodule:: prefixes.
.. seealso:: autosummary_generate.py
autolink role
-------------
The autolink role behaves like ``:obj:`` when the referenced name can be
resolved to a Python object; otherwise it falls back to simple emphasis.
This can be used as the default role to make links 'smart'.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
import inspect
import posixpath
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from docutils import nodes
from sphinx import addnodes
from sphinx.util.compat import Directive
# -- autosummary_toc node ------------------------------------------------
class autosummary_toc(nodes.comment):
pass
def process_autosummary_toc(app, doctree):
"""Insert items described in autosummary:: to the TOC tree, but do
not generate the toctree:: list.
"""
env = app.builder.env
crawled = dict()
def crawl_toc(node, depth=1):
crawled[node] = True
for j, subnode in enumerate(node):
try:
if (isinstance(subnode, autosummary_toc)
and isinstance(subnode[0], addnodes.toctree)):
env.note_toctree(env.docname, subnode[0])
continue
except IndexError:
continue
if not isinstance(subnode, nodes.section):
continue
if subnode not in crawled:
crawl_toc(subnode, depth + 1)
crawl_toc(doctree)
def autosummary_toc_visit_html(self, node):
"""Hide autosummary toctree list in HTML output."""
raise nodes.SkipNode
def autosummary_noop(self, node):
pass
# -- autosummary_table node ----------------------------------------------
class autosummary_table(nodes.comment):
pass
def autosummary_table_visit_html(self, node):
"""Make the first column of the table non-breaking."""
try:
tbody = node[0][0][-1]
for row in tbody:
col1_entry = row[0]
par = col1_entry[0]
for j, subnode in enumerate(list(par)):
if isinstance(subnode, nodes.Text):
new_text = str(subnode.astext())
new_text = new_text.replace(" ", "\u00a0")
par[j] = nodes.Text(new_text)
except IndexError:
pass
# -- autodoc integration -------------------------------------------------
class FakeDirective:
env = dict()
genopt = dict()
def get_documenter(obj, parent):
"""Get an autodoc.Documenter class suitable for documenting the given
object.
    *obj* is the Python object to be documented, and *parent* is another
    Python object (e.g. a module or a class) to which *obj* belongs.
"""
from sphinx.ext.autodoc import AutoDirective, DataDocumenter, \
ModuleDocumenter
if inspect.ismodule(obj):
# ModuleDocumenter.can_document_member always returns False
return ModuleDocumenter
# Construct a fake documenter for *parent*
if parent is not None:
parent_doc_cls = get_documenter(parent, None)
else:
parent_doc_cls = ModuleDocumenter
if hasattr(parent, '__name__'):
parent_doc = parent_doc_cls(FakeDirective(), parent.__name__)
else:
parent_doc = parent_doc_cls(FakeDirective(), "")
    # Get the correct documenter class for *obj*
classes = [cls for cls in list(AutoDirective._registry.values())
if cls.can_document_member(obj, '', False, parent_doc)]
if classes:
classes.sort(key=lambda cls: cls.priority)
return classes[-1]
else:
return DataDocumenter
# -- .. autosummary:: ----------------------------------------------------
class Autosummary(Directive):
"""
Pretty table containing short signatures and summaries of functions etc.
autosummary can also optionally generate a hidden toctree:: node.
"""
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
has_content = True
option_spec = {
'toctree': directives.unchanged,
'nosignatures': directives.flag,
'template': directives.unchanged,
'hidden': directives.flag,
}
def warn(self, msg):
self.warnings.append(self.state.document.reporter.warning(
msg, line=self.lineno))
def run(self):
self.env = env = self.state.document.settings.env
self.genopt = dict()
self.warnings = list()
names = [x.strip().split()[0] for x in self.content
if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
items = self.get_items(names)
if 'hidden' in self.options:
nodes = list()
else:
nodes = self.get_table(items)
if 'toctree' in self.options:
suffix = env.config.source_suffix
dirname = posixpath.dirname(env.docname)
tree_prefix = self.options['toctree'].strip()
docnames = list()
for name, sig, summary, real_name in items:
docname = posixpath.join(tree_prefix, real_name)
if docname.endswith(suffix):
docname = docname[:-len(suffix)]
docname = posixpath.normpath(posixpath.join(dirname, docname))
if docname not in env.found_docs:
self.warn('toctree references unknown document %r'
% docname)
docnames.append(docname)
tocnode = addnodes.toctree()
tocnode['includefiles'] = docnames
tocnode['entries'] = [(None, docname) for docname in docnames]
tocnode['maxdepth'] = -1
tocnode['glob'] = None
tocnode = autosummary_toc('', '', tocnode)
            if 'hidden' not in self.options:
nodes.append(tocnode)
return self.warnings + nodes
def get_items(self, names):
"""Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
env = self.state.document.settings.env
prefixes = get_import_prefixes_from_env(env)
items = list()
max_item_chars = 50
for name in names:
display_name = name
if name.startswith('~'):
name = name[1:]
display_name = name.split('.')[-1]
try:
real_name, obj, parent = import_by_name(
name, prefixes=prefixes)
except ImportError:
self.warn('failed to import %s' % name)
items.append((name, '', '', name))
continue
# NB. using real_name here is important, since Documenters
# handle module prefixes slightly differently
documenter = get_documenter(obj, parent)(self, real_name)
if not documenter.parse_name():
self.warn('failed to parse name %s' % real_name)
items.append((display_name, '', '', real_name))
continue
if not documenter.import_object():
self.warn('failed to import object %s' % real_name)
items.append((display_name, '', '', real_name))
continue
# -- Grab the signature
sig = documenter.format_signature()
if not sig:
sig = ''
else:
max_chars = max(10, max_item_chars - len(display_name))
sig = mangle_signature(sig, max_chars=max_chars)
sig = sig.replace('*', r'\*')
# -- Grab the summary
doc = list(documenter.process_doc(documenter.get_doc()))
while doc and not doc[0].strip():
doc.pop(0)
m = re.search(r"^([A-Z][^A-Z]*?\.\s)", " ".join(doc).strip())
if m:
summary = m.group(1).strip()
elif doc:
summary = doc[0].strip()
# Clean attribute fake doc
__doc = type(obj).__doc__
if isinstance(__doc, str):
if __doc.startswith(summary):
summary = "Type: " + type(obj).__name__
else:
summary = ''
items.append((display_name, sig, summary, real_name))
return items
def get_table(self, items):
"""Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
"""
table_spec = addnodes.tabular_col_spec()
table_spec['spec'] = 'll'
table = autosummary_table('')
real_table = nodes.table('', classes=['longtable'])
table.append(real_table)
group = nodes.tgroup('', cols=2)
real_table.append(group)
group.append(nodes.colspec('', colwidth=10))
group.append(nodes.colspec('', colwidth=90))
body = nodes.tbody('')
group.append(body)
def append_row(*column_texts):
row = nodes.row('')
for text in column_texts:
node = nodes.paragraph('')
vl = ViewList()
vl.append(text, '<autosummary>')
self.state.nested_parse(vl, 0, node)
try:
if isinstance(node[0], nodes.paragraph):
node = node[0]
except IndexError:
pass
row.append(nodes.entry('', node))
body.append(row)
for name, sig, summary, real_name in items:
qualifier = 'obj'
if 'nosignatures' not in self.options:
col1 = ':%s:`%s <%s>`\ %s' % (qualifier, name, real_name, sig)
else:
col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name)
col2 = summary
append_row(col1, col2)
return [table_spec, table]
def mangle_signature(sig, max_chars=30):
"""Reformat a function signature to a more compact form."""
s = re.sub(r"^\((.*)\)$", r"\1", sig).strip()
# Strip strings (which can contain things that confuse the code below)
s = re.sub(r"\\\\", "", s)
s = re.sub(r"\\'", "", s)
s = re.sub(r"'[^']*'", "", s)
# Parse the signature to arguments + options
args = list()
opts = list()
opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=")
while s:
m = opt_re.search(s)
if not m:
# The rest are arguments
args = s.split(', ')
break
opts.insert(0, m.group(2))
s = m.group(1)[:-2]
# Produce a more compact signature
sig = limited_join(", ", args, max_chars=max_chars - 2)
if opts:
if not sig:
sig = "[%s]" % limited_join(", ", opts, max_chars=max_chars - 4)
elif len(sig) < max_chars - 4 - 2 - 3:
sig += "[, %s]" % limited_join(", ", opts,
max_chars=max_chars - len(
sig) - 4 - 2)
return "(%s)" % sig
def limited_join(sep, items, max_chars=30, overflow_marker="..."):
"""Join a number of strings to one, limiting the length to *max_chars*.
If the string overflows this limit, replace the last fitting item by
*overflow_marker*.
Returns: joined_string
"""
full_str = sep.join(items)
if len(full_str) < max_chars:
return full_str
n_chars = 0
n_items = 0
for j, item in enumerate(items):
n_chars += len(item) + len(sep)
if n_chars < max_chars - len(overflow_marker):
n_items += 1
else:
break
return sep.join(list(items[:n_items]) + [overflow_marker])
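# For example, limited_join(", ", ["alpha", "beta", "gamma"], max_chars=12)
# collapses to "alpha, ..." under the rule above (values are illustrative).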
# -- Importing items -----------------------------------------------------
def get_import_prefixes_from_env(env):
"""
Obtain current Python import prefixes (for `import_by_name`)
from ``document.env``
"""
prefixes = [None]
currmodule = env.temp_data.get('py:module')
if currmodule:
prefixes.insert(0, currmodule)
currclass = env.temp_data.get('py:class')
if currclass:
if currmodule:
prefixes.insert(0, currmodule + "." + currclass)
else:
prefixes.insert(0, currclass)
return prefixes
def import_by_name(name, prefixes=[None]):
"""Import a Python object that has the given *name*, under one of the
*prefixes*. The first name that succeeds is used.
"""
tried = list()
for prefix in prefixes:
try:
if prefix:
prefixed_name = '.'.join([prefix, name])
else:
prefixed_name = name
obj, parent = _import_by_name(prefixed_name)
return prefixed_name, obj, parent
except ImportError:
tried.append(prefixed_name)
raise ImportError('no module named %s' % ' or '.join(tried))
def _import_by_name(name):
"""Import a Python object given its full name."""
try:
name_parts = name.split('.')
# try first interpret `name` as MODNAME.OBJ
modname = '.'.join(name_parts[:-1])
if modname:
try:
__import__(modname)
mod = sys.modules[modname]
return getattr(mod, name_parts[-1]), mod
except (ImportError, IndexError, AttributeError):
pass
# ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ...
last_j = 0
modname = None
for j in reversed(list(range(1, len(name_parts) + 1))):
last_j = j
modname = '.'.join(name_parts[:j])
try:
__import__(modname)
except: # ImportError:
continue
if modname in sys.modules:
break
if last_j < len(name_parts):
parent = None
obj = sys.modules[modname]
for obj_name in name_parts[last_j:]:
parent = obj
obj = getattr(obj, obj_name)
return obj, parent
else:
return sys.modules[modname], None
except (ValueError, ImportError, AttributeError, KeyError) as e:
raise ImportError(*e.args)
# -- :autolink: (smart default role) -------------------------------------
def autolink_role(typ, rawtext, etext, lineno, inliner,
options=dict(), content=list()):
"""Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
otherwise expands to '*text*'.
"""
env = inliner.document.settings.env
r = env.get_domain('py').role('obj')(
'obj', rawtext, etext, lineno, inliner, options, content)
pnode = r[0][0]
prefixes = get_import_prefixes_from_env(env)
try:
name, obj, parent = import_by_name(pnode['reftarget'], prefixes)
except ImportError:
content = pnode[0]
r[0][0] = nodes.emphasis(rawtext, content[0].astext(),
classes=content['classes'])
return r
def process_generate_options(app):
genfiles = app.config.autosummary_generate
ext = app.config.source_suffix
if genfiles and not hasattr(genfiles, '__len__'):
env = app.builder.env
genfiles = [x + ext for x in env.found_docs
if os.path.isfile(env.doc2path(x))]
if not genfiles:
return
from .generate import generate_autosummary_docs
genfiles = [genfile + (not genfile.endswith(ext) and ext or '')
for genfile in genfiles]
generate_autosummary_docs(genfiles, builder=app.builder,
warn=app.warn, info=app.info, suffix=ext,
base_path=app.srcdir)
def setup(app):
# I need autodoc
app.setup_extension('sphinx.ext.autodoc')
app.add_node(autosummary_toc,
html=(autosummary_toc_visit_html, autosummary_noop),
latex=(autosummary_noop, autosummary_noop),
text=(autosummary_noop, autosummary_noop),
man=(autosummary_noop, autosummary_noop),
texinfo=(autosummary_noop, autosummary_noop))
app.add_node(autosummary_table,
html=(autosummary_table_visit_html, autosummary_noop),
latex=(autosummary_noop, autosummary_noop),
text=(autosummary_noop, autosummary_noop),
man=(autosummary_noop, autosummary_noop),
texinfo=(autosummary_noop, autosummary_noop))
app.add_directive('autosummary', Autosummary)
app.add_role('autolink', autolink_role)
app.connect('doctree-read', process_autosummary_toc)
app.connect('builder-inited', process_generate_options)
app.add_config_value('autosummary_generate', list(), True)
| {
"content_hash": "e89f16d7cebb6d624da72aca8e449e24",
"timestamp": "",
"source": "github",
"line_count": 566,
"max_line_length": 79,
"avg_line_length": 32.22791519434629,
"alnum_prop": 0.54377501233485,
"repo_name": "bitcraft/pyglet",
"id": "cfd5961b9b7bd0bede5701d4bdbf406914c4c98a",
"size": "18265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/ext_/autosummary/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1828"
},
{
"name": "HTML",
"bytes": "1652"
},
{
"name": "JavaScript",
"bytes": "6745"
},
{
"name": "PHP",
"bytes": "2192"
},
{
"name": "Python",
"bytes": "6201398"
},
{
"name": "Shell",
"bytes": "251"
}
],
"symlink_target": ""
} |
"""Commands that can be used to operate on explorations.
All functions here should be agnostic of how ExplorationModel objects are
stored in the database. In particular, the various query methods should
delegate to the Exploration model class. This will enable the exploration
storage model to be changed without affecting this module and others above it.
"""
__author__ = 'Sean Lip'
import copy
import datetime
import logging
import os
import StringIO
import zipfile
from core.domain import event_services
from core.domain import exp_domain
from core.domain import fs_domain
from core.domain import rights_manager
from core.platform import models
import feconf
memcache_services = models.Registry.import_memcache_services()
search_services = models.Registry.import_search_services()
taskqueue_services = models.Registry.import_taskqueue_services()
(exp_models,) = models.Registry.import_models([models.NAMES.exploration])
import utils
# This takes additional 'title' and 'category' parameters.
CMD_CREATE_NEW = 'create_new'
# Name for the exploration search index
SEARCH_INDEX_EXPLORATIONS = 'explorations'
def _migrate_states_schema(versioned_exploration_states):
"""Holds the responsibility of performing a step-by-step, sequential update
of an exploration states structure based on the schema version of the input
exploration dictionary. This is very similar to the YAML conversion process
found in exp_domain.py and, in fact, many of the conversion functions for
states are also used in the YAML conversion pipeline. If the current
exploration states schema version changes
(feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION), a new conversion
function must be added and some code appended to this function to account
for that new version.
Args:
versioned_exploration_states: A dict with two keys:
- states_schema_version: the states schema version for the
exploration.
- states: the dict of states comprising the exploration. The keys in
this dict are state names.
"""
exploration_states_schema_version = versioned_exploration_states[
'states_schema_version']
if (exploration_states_schema_version is None
or exploration_states_schema_version < 1):
exploration_states_schema_version = 0
if not (0 <= exploration_states_schema_version
<= feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION):
raise Exception(
'Sorry, we can only process v1-v%d and unversioned exploration '
'state schemas at present.' %
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
# Check for conversion to v1.
if exploration_states_schema_version == 0:
exp_domain.Exploration.update_states_v0_to_v1_from_model(
versioned_exploration_states)
exploration_states_schema_version = 1
# Check for conversion to v2.
if exploration_states_schema_version == 1:
exp_domain.Exploration.update_states_v1_to_v2_from_model(
versioned_exploration_states)
exploration_states_schema_version = 2
# Check for conversion to v3.
if exploration_states_schema_version == 2:
exp_domain.Exploration.update_states_v2_to_v3_from_model(
versioned_exploration_states)
exploration_states_schema_version = 3
# Check for conversion to v4.
if exploration_states_schema_version == 3:
exp_domain.Exploration.update_states_v3_to_v4_from_model(
versioned_exploration_states)
exploration_states_schema_version = 4
# Check for conversion to v5.
if exploration_states_schema_version == 4:
exp_domain.Exploration.update_states_v4_to_v5_from_model(
versioned_exploration_states)
exploration_states_schema_version = 5
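    # A future bump of feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION (e.g. a
    # hypothetical v6) would be handled by appending one more block of the same
    # shape here, as described in the docstring above.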
# Repository GET methods.
def _get_exploration_memcache_key(exploration_id, version=None):
"""Returns a memcache key for an exploration."""
if version:
return 'exploration-version:%s:%s' % (exploration_id, version)
else:
return 'exploration:%s' % exploration_id
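# For instance, _get_exploration_memcache_key('exp123', version=4) returns
# 'exploration-version:exp123:4', and without a version it returns
# 'exploration:exp123' (the id here is illustrative).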
def get_exploration_from_model(exploration_model, run_conversion=True):
"""Returns an Exploration domain object given an exploration model loaded
from the datastore.
If run_conversion is True, then the exploration's states schema version
will be checked against the current states schema version. If they do not
match, the exploration will be automatically updated to the latest states
schema version.
IMPORTANT NOTE TO DEVELOPERS: In general, run_conversion should never be
False. This option is only used for testing that the states schema version
migration works correctly, and it should never be changed otherwise.
"""
# Ensure the original exploration model does not get altered.
versioned_exploration_states = {
'states_schema_version': exploration_model.states_schema_version,
'states': copy.deepcopy(exploration_model.states)
}
# If the exploration uses the latest states schema version, no conversion
# is necessary.
if (run_conversion and exploration_model.states_schema_version !=
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION):
_migrate_states_schema(versioned_exploration_states)
return exp_domain.Exploration(
exploration_model.id, exploration_model.title,
exploration_model.category, exploration_model.objective,
exploration_model.language_code, exploration_model.tags,
exploration_model.blurb, exploration_model.author_notes,
exploration_model.default_skin, exploration_model.skin_customizations,
versioned_exploration_states['states_schema_version'],
exploration_model.init_state_name,
versioned_exploration_states['states'],
exploration_model.param_specs, exploration_model.param_changes,
exploration_model.version, exploration_model.created_on,
exploration_model.last_updated)
def get_exploration_summary_from_model(exp_summary_model):
return exp_domain.ExplorationSummary(
exp_summary_model.id, exp_summary_model.title,
exp_summary_model.category, exp_summary_model.objective,
exp_summary_model.language_code, exp_summary_model.tags,
exp_summary_model.ratings, exp_summary_model.status,
exp_summary_model.community_owned, exp_summary_model.owner_ids,
exp_summary_model.editor_ids, exp_summary_model.viewer_ids,
exp_summary_model.version,
exp_summary_model.exploration_model_created_on,
exp_summary_model.exploration_model_last_updated
)
def get_exploration_by_id(exploration_id, strict=True, version=None):
"""Returns a domain object representing an exploration."""
exploration_memcache_key = _get_exploration_memcache_key(
exploration_id, version=version)
memcached_exploration = memcache_services.get_multi(
[exploration_memcache_key]).get(exploration_memcache_key)
if memcached_exploration is not None:
return memcached_exploration
else:
exploration_model = exp_models.ExplorationModel.get(
exploration_id, strict=strict, version=version)
if exploration_model:
exploration = get_exploration_from_model(exploration_model)
memcache_services.set_multi({
exploration_memcache_key: exploration})
return exploration
else:
return None
def get_exploration_summary_by_id(exploration_id):
"""Returns a domain object representing an exploration summary."""
# TODO(msl): Maybe use memcache similarly to get_exploration_by_id.
exp_summary_model = exp_models.ExpSummaryModel.get(
exploration_id)
if exp_summary_model:
exp_summary = get_exploration_summary_from_model(exp_summary_model)
return exp_summary
else:
return None
def get_multiple_explorations_by_id(exp_ids, strict=True):
"""Returns a dict of domain objects representing explorations with the
given ids as keys. If an exp_id is not present it is not included in the
return dict.
"""
exp_ids = set(exp_ids)
result = {}
uncached = []
memcache_keys = [_get_exploration_memcache_key(i) for i in exp_ids]
cache_result = memcache_services.get_multi(memcache_keys)
for exp_obj in cache_result.itervalues():
result[exp_obj.id] = exp_obj
for _id in exp_ids:
if _id not in result:
uncached.append(_id)
db_exp_models = exp_models.ExplorationModel.get_multi(uncached)
db_results_dict = {}
not_found = []
for i, eid in enumerate(uncached):
model = db_exp_models[i]
if model:
exploration = get_exploration_from_model(model)
db_results_dict[eid] = exploration
else:
logging.info('Tried to fetch exploration with id %s, but no such '
'exploration exists in the datastore' % eid)
not_found.append(eid)
if strict and not_found:
raise ValueError(
'Couldn\'t find explorations with the following ids:\n%s'
% '\n'.join(not_found))
cache_update = {
eid: db_results_dict[eid] for eid in db_results_dict.iterkeys()
if db_results_dict[eid] is not None
}
if cache_update:
memcache_services.set_multi(cache_update)
result.update(db_results_dict)
return result
def get_new_exploration_id():
"""Returns a new exploration id."""
return exp_models.ExplorationModel.get_new_id('')
def is_exp_summary_editable(exp_summary, user_id=None):
"""Checks if a given user may edit an exploration by checking
the given domain object."""
return user_id is not None and (
user_id in exp_summary.editor_ids
or user_id in exp_summary.owner_ids
or exp_summary.community_owned)
# Query methods.
def get_exploration_titles_and_categories(exp_ids):
"""Returns exploration titles and categories for the given ids.
The result is a dict with exploration ids as keys. The corresponding values
are dicts with the keys 'title' and 'category'.
Any invalid exp_ids will not be included in the return dict. No error will
be raised.
"""
explorations = [
(get_exploration_from_model(e) if e else None)
for e in exp_models.ExplorationModel.get_multi(exp_ids)]
result = {}
for ind, exploration in enumerate(explorations):
if exploration is None:
            logging.error(
                'Could not find exploration corresponding to id %s'
                % exp_ids[ind])
else:
result[exploration.id] = {
'title': exploration.title,
'category': exploration.category,
}
return result
def _get_exploration_summary_dicts_from_models(exp_summary_models):
"""Given an iterable of ExpSummaryModel instances, create a dict containing
corresponding exploration summary domain objects, keyed by id."""
exploration_summaries = [
get_exploration_summary_from_model(exp_summary_model)
for exp_summary_model in exp_summary_models]
result = {}
for exp_summary in exploration_summaries:
result[exp_summary.id] = exp_summary
return result
def get_exploration_summaries_matching_ids(exp_ids):
"""Given a list of exploration ids, return a list with the corresponding
summary domain objects (or None if the corresponding summary does not
exist).
"""
return [
(get_exploration_summary_from_model(model) if model else None)
for model in exp_models.ExpSummaryModel.get_multi(exp_ids)]
def get_exploration_summaries_matching_query(query_string, cursor=None):
"""Returns a list with all exploration summary domain objects matching the
given search query string, as well as a search cursor for future fetches.
This method returns exactly feconf.GALLERY_PAGE_SIZE results if there are
at least that many, otherwise it returns all remaining results. (If this
behaviour does not occur, an error will be logged.) The method also returns
a search cursor.
"""
MAX_ITERATIONS = 10
summary_models = []
search_cursor = cursor
for i in range(MAX_ITERATIONS):
remaining_to_fetch = feconf.GALLERY_PAGE_SIZE - len(summary_models)
exp_ids, search_cursor = search_explorations(
query_string, remaining_to_fetch, cursor=search_cursor)
invalid_exp_ids = []
for ind, model in enumerate(
exp_models.ExpSummaryModel.get_multi(exp_ids)):
if model is not None:
summary_models.append(model)
else:
invalid_exp_ids.append(exp_ids[ind])
if len(summary_models) == feconf.GALLERY_PAGE_SIZE or (
search_cursor is None):
break
else:
logging.error(
'Search index contains stale exploration ids: %s' %
', '.join(invalid_exp_ids))
if (len(summary_models) < feconf.GALLERY_PAGE_SIZE
and search_cursor is not None):
logging.error(
'Could not fulfill search request for query string %s; at least '
'%s retries were needed.' % (query_string, MAX_ITERATIONS))
return ([
get_exploration_summary_from_model(summary_model)
for summary_model in summary_models
], search_cursor)
def get_non_private_exploration_summaries():
"""Returns a dict with all non-private exploration summary domain objects,
keyed by their id."""
return _get_exploration_summary_dicts_from_models(
exp_models.ExpSummaryModel.get_non_private())
def get_all_exploration_summaries():
"""Returns a dict with all exploration summary domain objects,
keyed by their id."""
return _get_exploration_summary_dicts_from_models(
exp_models.ExpSummaryModel.get_all())
def get_private_at_least_viewable_exploration_summaries(user_id):
"""Returns a dict with all exploration summary domain objects that are
at least viewable by given user. The dict is keyed by exploration id."""
return _get_exploration_summary_dicts_from_models(
exp_models.ExpSummaryModel.get_private_at_least_viewable(
user_id=user_id))
def get_at_least_editable_exploration_summaries(user_id):
"""Returns a dict with all exploration summary domain objects that are
at least editable by given user. The dict is keyed by exploration id."""
return _get_exploration_summary_dicts_from_models(
exp_models.ExpSummaryModel.get_at_least_editable(
user_id=user_id))
def count_explorations():
"""Returns the total number of explorations."""
return exp_models.ExplorationModel.get_exploration_count()
# Methods for exporting states and explorations to other formats.
def export_to_zip_file(exploration_id, version=None):
"""Returns a ZIP archive of the exploration."""
exploration = get_exploration_by_id(exploration_id, version=version)
yaml_repr = exploration.to_yaml()
o = StringIO.StringIO()
with zipfile.ZipFile(o, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
zf.writestr('%s.yaml' % exploration.title, yaml_repr)
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
dir_list = fs.listdir('')
for filepath in dir_list:
# Currently, the version number of all files is 1, since they are
# not modifiable post-upload.
# TODO(sll): When allowing editing of files, implement versioning
# for them.
file_contents = fs.get(filepath, version=1)
str_filepath = 'assets/%s' % filepath
assert isinstance(str_filepath, str)
unicode_filepath = str_filepath.decode('utf-8')
zf.writestr(unicode_filepath, file_contents)
return o.getvalue()
def export_states_to_yaml(exploration_id, version=None, width=80):
"""Returns a python dictionary of the exploration, whose keys are state
names and values are yaml strings representing the state contents with
lines wrapped at 'width' characters."""
exploration = get_exploration_by_id(exploration_id, version=version)
exploration_dict = {}
for state in exploration.states:
exploration_dict[state] = utils.yaml_from_dict(
exploration.states[state].to_dict(), width=width)
return exploration_dict
# Repository SAVE and DELETE methods.
def apply_change_list(exploration_id, change_list):
"""Applies a changelist to a pristine exploration and returns the result.
Each entry in change_list is a dict that represents an ExplorationChange
object.
Returns:
the resulting exploration domain object.
"""
exploration = get_exploration_by_id(exploration_id)
try:
changes = [exp_domain.ExplorationChange(change_dict)
for change_dict in change_list]
for change in changes:
if change.cmd == exp_domain.CMD_ADD_STATE:
exploration.add_states([change.state_name])
elif change.cmd == exp_domain.CMD_RENAME_STATE:
exploration.rename_state(
change.old_state_name, change.new_state_name)
elif change.cmd == exp_domain.CMD_DELETE_STATE:
exploration.delete_state(change.state_name)
elif change.cmd == exp_domain.CMD_EDIT_STATE_PROPERTY:
state = exploration.states[change.state_name]
if (change.property_name ==
exp_domain.STATE_PROPERTY_PARAM_CHANGES):
state.update_param_changes(change.new_value)
elif change.property_name == exp_domain.STATE_PROPERTY_CONTENT:
state.update_content(change.new_value)
elif (change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_ID):
state.update_interaction_id(change.new_value)
elif (change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS):
state.update_interaction_customization_args(
change.new_value)
elif (change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_HANDLERS):
raise utils.InvalidInputException(
'Editing interaction handlers is no longer supported')
elif (change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS):
state.update_interaction_answer_groups(change.new_value)
elif (change.property_name ==
exp_domain.STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME):
state.update_interaction_default_outcome(change.new_value)
elif change.cmd == exp_domain.CMD_EDIT_EXPLORATION_PROPERTY:
if change.property_name == 'title':
exploration.update_title(change.new_value)
elif change.property_name == 'category':
exploration.update_category(change.new_value)
elif change.property_name == 'objective':
exploration.update_objective(change.new_value)
elif change.property_name == 'language_code':
exploration.update_language_code(change.new_value)
elif change.property_name == 'tags':
exploration.update_tags(change.new_value)
elif change.property_name == 'blurb':
exploration.update_blurb(change.new_value)
elif change.property_name == 'author_notes':
exploration.update_author_notes(change.new_value)
elif change.property_name == 'param_specs':
exploration.update_param_specs(change.new_value)
elif change.property_name == 'param_changes':
exploration.update_param_changes(change.new_value)
elif change.property_name == 'default_skin_id':
exploration.update_default_skin_id(change.new_value)
elif change.property_name == 'init_state_name':
exploration.update_init_state_name(change.new_value)
elif (change.cmd ==
exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION):
# Loading the exploration model from the datastore into an
                # Exploration domain object automatically converts it to use the
# latest states schema version. As a result, simply resaving the
# exploration is sufficient to apply the states schema update.
continue
return exploration
except Exception as e:
logging.error(
'%s %s %s %s' % (
e.__class__.__name__, e, exploration_id, change_list)
)
raise
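def _example_change_list():  # pragma: no cover
    # Illustrative sketch only: the shape of a change_list that
    # apply_change_list() expects. The state name and title below are
    # arbitrary sample values, not taken from any real exploration.
    return [
        {'cmd': exp_domain.CMD_ADD_STATE, 'state_name': 'First Card'},
        {'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
         'property_name': 'title', 'new_value': 'Fractions for beginners'},
    ]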
def get_summary_of_change_list(base_exploration, change_list):
"""Applies a changelist to a pristine exploration and returns a summary.
Each entry in change_list is a dict that represents an ExplorationChange
object.
Returns:
a dict with five keys:
exploration_property_changes: a dict, where each key is a property_name
of the exploration, and the corresponding values are dicts with keys
old_value and new_value.
state_property_changes: a dict, where each key is a state name, and the
corresponding values are dicts; the keys of these dicts represent
properties of the state, and the corresponding values are dicts with
keys old_value and new_value. If a state name is changed, this is
listed as a property name change under the old state name in the
outer dict.
changed_states: a list of state names. This indicates that the state
has changed but we do not know what the changes are. This can happen
for complicated operations like removing a state and later adding a
new state with the same name as the removed state.
added_states: a list of added state names.
deleted_states: a list of deleted state names.
"""
# TODO(sll): This really needs tests, especially the diff logic. Probably
# worth comparing with the actual changed exploration.
# Ensure that the original exploration does not get altered.
exploration = copy.deepcopy(base_exploration)
changes = [
exp_domain.ExplorationChange(change_dict)
for change_dict in change_list]
exploration_property_changes = {}
state_property_changes = {}
changed_states = []
added_states = []
deleted_states = []
original_state_names = {
state_name: state_name for state_name in exploration.states.keys()
}
for change in changes:
if change.cmd == exp_domain.CMD_ADD_STATE:
if change.state_name in changed_states:
continue
elif change.state_name in deleted_states:
changed_states.append(change.state_name)
del state_property_changes[change.state_name]
deleted_states.remove(change.state_name)
else:
added_states.append(change.state_name)
original_state_names[change.state_name] = change.state_name
elif change.cmd == exp_domain.CMD_RENAME_STATE:
orig_state_name = original_state_names[change.old_state_name]
original_state_names[change.new_state_name] = orig_state_name
if orig_state_name in changed_states:
continue
if orig_state_name not in state_property_changes:
state_property_changes[orig_state_name] = {}
if 'name' not in state_property_changes[orig_state_name]:
state_property_changes[orig_state_name]['name'] = {
'old_value': change.old_state_name
}
state_property_changes[orig_state_name]['name']['new_value'] = (
change.new_state_name)
elif change.cmd == exp_domain.CMD_DELETE_STATE:
orig_state_name = original_state_names[change.state_name]
if orig_state_name in changed_states:
continue
elif orig_state_name in added_states:
added_states.remove(orig_state_name)
else:
deleted_states.append(orig_state_name)
elif change.cmd == exp_domain.CMD_EDIT_STATE_PROPERTY:
orig_state_name = original_state_names[change.state_name]
if orig_state_name in changed_states:
continue
property_name = change.property_name
if orig_state_name not in state_property_changes:
state_property_changes[orig_state_name] = {}
if property_name not in state_property_changes[orig_state_name]:
state_property_changes[orig_state_name][property_name] = {
'old_value': change.old_value
}
state_property_changes[orig_state_name][property_name][
'new_value'] = change.new_value
elif change.cmd == exp_domain.CMD_EDIT_EXPLORATION_PROPERTY:
property_name = change.property_name
if property_name not in exploration_property_changes:
exploration_property_changes[property_name] = {
'old_value': change.old_value
}
exploration_property_changes[property_name]['new_value'] = (
change.new_value)
elif (change.cmd ==
exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION):
continue
unchanged_exploration_properties = []
for property_name in exploration_property_changes:
if (exploration_property_changes[property_name]['old_value'] ==
exploration_property_changes[property_name]['new_value']):
unchanged_exploration_properties.append(property_name)
for property_name in unchanged_exploration_properties:
del exploration_property_changes[property_name]
unchanged_state_names = []
for state_name in state_property_changes:
unchanged_state_properties = []
changes = state_property_changes[state_name]
for property_name in changes:
if (changes[property_name]['old_value'] ==
changes[property_name]['new_value']):
unchanged_state_properties.append(property_name)
for property_name in unchanged_state_properties:
del changes[property_name]
if len(changes) == 0:
unchanged_state_names.append(state_name)
for state_name in unchanged_state_names:
del state_property_changes[state_name]
return {
'exploration_property_changes': exploration_property_changes,
'state_property_changes': state_property_changes,
'changed_states': changed_states,
'added_states': added_states,
'deleted_states': deleted_states,
}
def _save_exploration(
committer_id, exploration, commit_message, change_list):
"""Validates an exploration and commits it to persistent storage.
If successful, increments the version number of the incoming exploration
domain object by 1.
"""
if change_list is None:
change_list = []
exploration_rights = rights_manager.get_exploration_rights(exploration.id)
if exploration_rights.status != rights_manager.EXPLORATION_STATUS_PRIVATE:
exploration.validate(strict=True)
else:
exploration.validate()
exploration_model = exp_models.ExplorationModel.get(
exploration.id, strict=False)
if exploration_model is None:
exploration_model = exp_models.ExplorationModel(id=exploration.id)
else:
if exploration.version > exploration_model.version:
raise Exception(
'Unexpected error: trying to update version %s of exploration '
'from version %s. Please reload the page and try again.'
% (exploration_model.version, exploration.version))
elif exploration.version < exploration_model.version:
raise Exception(
'Trying to update version %s of exploration from version %s, '
'which is too old. Please reload the page and try again.'
% (exploration_model.version, exploration.version))
exploration_model.category = exploration.category
exploration_model.title = exploration.title
exploration_model.objective = exploration.objective
exploration_model.language_code = exploration.language_code
exploration_model.tags = exploration.tags
exploration_model.blurb = exploration.blurb
exploration_model.author_notes = exploration.author_notes
exploration_model.default_skin = exploration.default_skin
exploration_model.skin_customizations = (
exploration.skin_instance.to_dict()['skin_customizations'])
exploration_model.states_schema_version = exploration.states_schema_version
exploration_model.init_state_name = exploration.init_state_name
exploration_model.states = {
state_name: state.to_dict()
for (state_name, state) in exploration.states.iteritems()}
exploration_model.param_specs = exploration.param_specs_dict
exploration_model.param_changes = exploration.param_change_dicts
exploration_model.commit(
committer_id, commit_message, change_list)
memcache_services.delete(_get_exploration_memcache_key(exploration.id))
event_services.ExplorationContentChangeEventHandler.record(exploration.id)
index_explorations_given_ids([exploration.id])
exploration.version += 1
def _create_exploration(
committer_id, exploration, commit_message, commit_cmds):
"""Ensures that rights for a new exploration are saved first.
This is because _save_exploration() depends on the rights object being
present to tell it whether to do strict validation or not.
"""
# This line is needed because otherwise a rights object will be created,
# but the creation of an exploration object will fail.
exploration.validate()
rights_manager.create_new_exploration_rights(exploration.id, committer_id)
model = exp_models.ExplorationModel(
id=exploration.id,
category=exploration.category,
title=exploration.title,
objective=exploration.objective,
language_code=exploration.language_code,
tags=exploration.tags,
blurb=exploration.blurb,
author_notes=exploration.author_notes,
default_skin=exploration.default_skin,
skin_customizations=exploration.skin_instance.to_dict(
)['skin_customizations'],
states_schema_version=exploration.states_schema_version,
init_state_name=exploration.init_state_name,
states={
state_name: state.to_dict()
for (state_name, state) in exploration.states.iteritems()},
param_specs=exploration.param_specs_dict,
param_changes=exploration.param_change_dicts,
)
model.commit(committer_id, commit_message, commit_cmds)
event_services.ExplorationContentChangeEventHandler.record(exploration.id)
exploration.version += 1
create_exploration_summary(exploration.id)
def save_new_exploration(committer_id, exploration):
commit_message = (
'New exploration created with title \'%s\'.' % exploration.title)
_create_exploration(committer_id, exploration, commit_message, [{
'cmd': CMD_CREATE_NEW,
'title': exploration.title,
'category': exploration.category,
}])
def delete_exploration(committer_id, exploration_id, force_deletion=False):
"""Deletes the exploration with the given exploration_id.
IMPORTANT: Callers of this function should ensure that committer_id has
permissions to delete this exploration, prior to calling this function.
If force_deletion is True the exploration and its history are fully deleted
and are unrecoverable. Otherwise, the exploration and all its history are
marked as deleted, but the corresponding models are still retained in the
datastore. This last option is the preferred one.
"""
# TODO(sll): Delete the files too?
exploration_rights_model = exp_models.ExplorationRightsModel.get(
exploration_id)
exploration_rights_model.delete(
committer_id, '', force_deletion=force_deletion)
exploration_model = exp_models.ExplorationModel.get(exploration_id)
exploration_model.delete(
committer_id, feconf.COMMIT_MESSAGE_EXPLORATION_DELETED,
force_deletion=force_deletion)
# This must come after the exploration is retrieved. Otherwise the memcache
# key will be reinstated.
exploration_memcache_key = _get_exploration_memcache_key(exploration_id)
memcache_services.delete(exploration_memcache_key)
    # Delete the exploration from the search index.
delete_documents_from_search_index([exploration_id])
    # Delete the exploration summary.
delete_exploration_summary(exploration_id, force_deletion=force_deletion)
# Operations on exploration snapshots.
def _get_simple_changelist_summary(
exploration_id, version_number, change_list):
"""Returns an auto-generated changelist summary for the history logs."""
# TODO(sll): Get this from memcache where possible. It won't change, so we
# can keep it there indefinitely.
base_exploration = get_exploration_by_id(
exploration_id, version=version_number)
if (len(change_list) == 1 and change_list[0]['cmd'] in
['create_new', 'AUTO_revert_version_number']):
# An automatic summary is not needed here, because the original commit
# message is sufficiently descriptive.
return ''
else:
full_summary = get_summary_of_change_list(
base_exploration, change_list)
short_summary_fragments = []
if full_summary['added_states']:
short_summary_fragments.append(
'added \'%s\'' % '\', \''.join(full_summary['added_states']))
if full_summary['deleted_states']:
short_summary_fragments.append(
'deleted \'%s\'' % '\', \''.join(
full_summary['deleted_states']))
if (full_summary['changed_states'] or
full_summary['state_property_changes']):
affected_states = (
full_summary['changed_states'] +
full_summary['state_property_changes'].keys())
short_summary_fragments.append(
'edited \'%s\'' % '\', \''.join(affected_states))
if full_summary['exploration_property_changes']:
short_summary_fragments.append(
'edited exploration properties %s' % ', '.join(
full_summary['exploration_property_changes'].keys()))
return '; '.join(short_summary_fragments)
def get_exploration_snapshots_metadata(exploration_id):
"""Returns the snapshots for this exploration, as dicts.
Args:
exploration_id: str. The id of the exploration in question.
Returns:
list of dicts, each representing a recent snapshot. Each dict has the
following keys: committer_id, commit_message, commit_cmds, commit_type,
created_on_ms, version_number. The version numbers are consecutive and
in ascending order. There are exploration.version_number items in the
returned list.
"""
exploration = get_exploration_by_id(exploration_id)
current_version = exploration.version
version_nums = range(1, current_version + 1)
return exp_models.ExplorationModel.get_snapshots_metadata(
exploration_id, version_nums)
def update_exploration(
committer_id, exploration_id, change_list, commit_message):
"""Update an exploration. Commits changes.
Args:
- committer_id: str. The id of the user who is performing the update
action.
- exploration_id: str. The exploration id.
- change_list: list of dicts, each representing a _Change object. These
changes are applied in sequence to produce the resulting exploration.
- commit_message: str or None. A description of changes made to the state.
For published explorations, this must be present; for unpublished
explorations, it should be equal to None.
"""
is_public = rights_manager.is_exploration_public(exploration_id)
if is_public and not commit_message:
raise ValueError(
'Exploration is public so expected a commit message but '
'received none.')
exploration = apply_change_list(exploration_id, change_list)
_save_exploration(committer_id, exploration, commit_message, change_list)
    # Update the summary of the changed exploration.
update_exploration_summary(exploration.id)
def create_exploration_summary(exploration_id):
"""Create summary of an exploration and store in datastore."""
exploration = get_exploration_by_id(exploration_id)
exp_summary = get_summary_of_exploration(exploration)
save_exploration_summary(exp_summary)
def update_exploration_summary(exploration_id):
"""Update the summary of an exploration."""
exploration = get_exploration_by_id(exploration_id)
exp_summary = get_summary_of_exploration(exploration)
save_exploration_summary(exp_summary)
def get_summary_of_exploration(exploration):
"""Create ExplorationSummary domain object for a given Exploration
domain object and return it.
"""
exp_rights = exp_models.ExplorationRightsModel.get_by_id(exploration.id)
exp_summary_model = exp_models.ExpSummaryModel.get_by_id(exploration.id)
if exp_summary_model:
old_exp_summary = get_exploration_summary_from_model(exp_summary_model)
ratings = old_exp_summary.ratings or feconf.get_empty_ratings()
else:
ratings = feconf.get_empty_ratings()
exploration_model_last_updated = exploration.last_updated
exploration_model_created_on = exploration.created_on
exp_summary = exp_domain.ExplorationSummary(
exploration.id, exploration.title, exploration.category,
exploration.objective, exploration.language_code,
exploration.tags, ratings, exp_rights.status,
exp_rights.community_owned, exp_rights.owner_ids,
exp_rights.editor_ids, exp_rights.viewer_ids, exploration.version,
exploration_model_created_on, exploration_model_last_updated
)
return exp_summary
def save_exploration_summary(exp_summary):
"""Save exploration summary domain object as ExpSummaryModel
entity in datastore."""
exp_summary_model = exp_models.ExpSummaryModel(
id=exp_summary.id,
title=exp_summary.title,
category=exp_summary.category,
objective=exp_summary.objective,
language_code=exp_summary.language_code,
tags=exp_summary.tags,
        ratings=exp_summary.ratings,
status=exp_summary.status,
community_owned=exp_summary.community_owned,
owner_ids=exp_summary.owner_ids,
editor_ids=exp_summary.editor_ids,
viewer_ids=exp_summary.viewer_ids,
version=exp_summary.version,
exploration_model_last_updated=(
exp_summary.exploration_model_last_updated),
exploration_model_created_on=(
exp_summary.exploration_model_created_on)
)
exp_summary_model.put()
def delete_exploration_summary(exploration_id, force_deletion=False):
"""Delete an exploration summary model."""
exp_models.ExpSummaryModel.get(exploration_id).delete()
def revert_exploration(
committer_id, exploration_id, current_version, revert_to_version):
"""Reverts an exploration to the given version number. Commits changes."""
exploration_model = exp_models.ExplorationModel.get(
exploration_id, strict=False)
if current_version > exploration_model.version:
raise Exception(
'Unexpected error: trying to update version %s of exploration '
'from version %s. Please reload the page and try again.'
% (exploration_model.version, current_version))
elif current_version < exploration_model.version:
raise Exception(
'Trying to update version %s of exploration from version %s, '
'which is too old. Please reload the page and try again.'
% (exploration_model.version, current_version))
# Validate the previous version of the exploration before committing the
# change.
exploration = get_exploration_by_id(
exploration_id, version=revert_to_version)
exploration_rights = rights_manager.get_exploration_rights(exploration.id)
if exploration_rights.status != rights_manager.EXPLORATION_STATUS_PRIVATE:
exploration.validate(strict=True)
else:
exploration.validate()
exp_models.ExplorationModel.revert(exploration_model,
committer_id, 'Reverted exploration to version %s' % revert_to_version,
revert_to_version)
memcache_services.delete(_get_exploration_memcache_key(exploration_id))
update_exploration_summary(exploration_id)
# Creation and deletion methods.
def get_demo_exploration_components(demo_path):
"""Gets the content of `demo_path` in the sample explorations folder.
Args:
demo_path: the file or folder path for the content of an exploration
in SAMPLE_EXPLORATIONS_DIR. E.g.: 'adventure.yaml' or 'tar/'.
Returns:
a 2-tuple, the first element of which is a yaml string, and the second
element of which is a list of (filepath, content) 2-tuples. The filepath
does not include the assets/ prefix.
"""
demo_filepath = os.path.join(feconf.SAMPLE_EXPLORATIONS_DIR, demo_path)
if demo_filepath.endswith('yaml'):
file_contents = utils.get_file_contents(demo_filepath)
return file_contents, []
elif os.path.isdir(demo_filepath):
return utils.get_exploration_components_from_dir(demo_filepath)
else:
raise Exception('Unrecognized file path: %s' % demo_path)
def save_new_exploration_from_yaml_and_assets(
committer_id, yaml_content, title, category, exploration_id,
assets_list):
if assets_list is None:
assets_list = []
exploration = exp_domain.Exploration.from_yaml(
exploration_id, title, category, yaml_content)
commit_message = (
'New exploration created from YAML file with title \'%s\'.'
% exploration.title)
_create_exploration(committer_id, exploration, commit_message, [{
'cmd': CMD_CREATE_NEW,
'title': exploration.title,
'category': exploration.category,
}])
for (asset_filename, asset_content) in assets_list:
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
fs.commit(committer_id, asset_filename, asset_content)
def delete_demo(exploration_id):
"""Deletes a single demo exploration."""
if not (0 <= int(exploration_id) < len(feconf.DEMO_EXPLORATIONS)):
raise Exception('Invalid demo exploration id %s' % exploration_id)
exploration = get_exploration_by_id(exploration_id, strict=False)
if not exploration:
logging.info('Exploration with id %s was not deleted, because it '
'does not exist.' % exploration_id)
else:
delete_exploration(
feconf.SYSTEM_COMMITTER_ID, exploration_id, force_deletion=True)
def load_demo(exploration_id):
"""Loads a demo exploration.
    The resulting exploration will have version 2 (one for its initial
    creation and one for its subsequent modification).
"""
# TODO(sll): Speed this method up. It is too slow.
delete_demo(exploration_id)
if not (0 <= int(exploration_id) < len(feconf.DEMO_EXPLORATIONS)):
raise Exception('Invalid demo exploration id %s' % exploration_id)
exploration_info = feconf.DEMO_EXPLORATIONS[int(exploration_id)]
if len(exploration_info) == 3:
(exp_filename, title, category) = exploration_info
else:
raise Exception('Invalid demo exploration: %s' % exploration_info)
yaml_content, assets_list = get_demo_exploration_components(exp_filename)
save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, title, category,
exploration_id, assets_list)
rights_manager.publish_exploration(
feconf.SYSTEM_COMMITTER_ID, exploration_id)
# Release ownership of all explorations.
rights_manager.release_ownership(
feconf.SYSTEM_COMMITTER_ID, exploration_id)
index_explorations_given_ids([exploration_id])
logging.info('Exploration with id %s was loaded.' % exploration_id)
def get_next_page_of_all_commits(
page_size=feconf.COMMIT_LIST_PAGE_SIZE, urlsafe_start_cursor=None):
"""Returns a page of commits to all explorations in reverse time order.
The return value is a triple (results, cursor, more) as described in
fetch_page() at:
https://developers.google.com/appengine/docs/python/ndb/queryclass
"""
results, new_urlsafe_start_cursor, more = (
exp_models.ExplorationCommitLogEntryModel.get_all_commits(
page_size, urlsafe_start_cursor))
return ([exp_domain.ExplorationCommitLogEntry(
entry.created_on, entry.last_updated, entry.user_id, entry.username,
entry.exploration_id, entry.commit_type, entry.commit_message,
entry.commit_cmds, entry.version, entry.post_commit_status,
entry.post_commit_community_owned, entry.post_commit_is_private
) for entry in results], new_urlsafe_start_cursor, more)
def get_next_page_of_all_non_private_commits(
page_size=feconf.COMMIT_LIST_PAGE_SIZE, urlsafe_start_cursor=None,
max_age=None):
"""Returns a page of non-private commits in reverse time order. If max_age
is given, it should be a datetime.timedelta instance.
The return value is a triple (results, cursor, more) as described in
fetch_page() at:
https://developers.google.com/appengine/docs/python/ndb/queryclass
"""
if max_age is not None and not isinstance(max_age, datetime.timedelta):
raise ValueError(
"max_age must be a datetime.timedelta instance. or None.")
results, new_urlsafe_start_cursor, more = (
exp_models.ExplorationCommitLogEntryModel.get_all_non_private_commits(
page_size, urlsafe_start_cursor, max_age=max_age))
return ([exp_domain.ExplorationCommitLogEntry(
entry.created_on, entry.last_updated, entry.user_id, entry.username,
entry.exploration_id, entry.commit_type, entry.commit_message,
entry.commit_cmds, entry.version, entry.post_commit_status,
entry.post_commit_community_owned, entry.post_commit_is_private
) for entry in results], new_urlsafe_start_cursor, more)
def _exp_rights_to_search_dict(rights):
# Allow searches like "is:featured".
doc = {}
if rights.status == rights_manager.EXPLORATION_STATUS_PUBLICIZED:
doc['is'] = 'featured'
return doc
def _should_index(exp):
rights = rights_manager.get_exploration_rights(exp.id)
return rights.status != rights_manager.EXPLORATION_STATUS_PRIVATE
def _get_search_rank(exp_id):
"""Returns an integer determining the document's rank in search.
Featured explorations get a ranking bump, and so do explorations that
have been more recently updated. Good ratings will increase the ranking
and bad ones will lower it.
"""
# TODO(sll): Improve this calculation.
_STATUS_PUBLICIZED_BONUS = 30
# This is done to prevent the rank hitting 0 too easily. Note that
# negative ranks are disallowed in the Search API.
_DEFAULT_RANK = 20
exploration = get_exploration_by_id(exp_id)
rights = rights_manager.get_exploration_rights(exp_id)
summary = get_exploration_summary_by_id(exp_id)
rank = _DEFAULT_RANK + (
_STATUS_PUBLICIZED_BONUS
if rights.status == rights_manager.EXPLORATION_STATUS_PUBLICIZED
else 0)
if summary.ratings:
RATING_WEIGHTINGS = {'1': -5, '2': -2, '3': 2, '4': 5, '5': 10}
for rating_value in summary.ratings:
rank += (
summary.ratings[rating_value] *
RATING_WEIGHTINGS[rating_value])
# Iterate backwards through the exploration history metadata until we find
# the most recent snapshot that was committed by a human.
last_human_update_ms = 0
snapshots_metadata = get_exploration_snapshots_metadata(exp_id)
for snapshot_metadata in reversed(snapshots_metadata):
if snapshot_metadata['committer_id'] != feconf.MIGRATION_BOT_USER_ID:
last_human_update_ms = snapshot_metadata['created_on_ms']
break
_TIME_NOW_MS = utils.get_current_time_in_millisecs()
_MS_IN_ONE_DAY = 24 * 60 * 60 * 1000
time_delta_days = int(
(_TIME_NOW_MS - last_human_update_ms) / _MS_IN_ONE_DAY)
if time_delta_days == 0:
rank += 80
elif time_delta_days == 1:
rank += 50
elif 2 <= time_delta_days <= 7:
rank += 35
# Ranks must be non-negative.
return max(rank, 0)
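def _example_search_rank_arithmetic():  # pragma: no cover
    # Worked illustration of the ranking arithmetic above, using made-up
    # numbers: a publicized exploration with ratings {'4': 2, '5': 1} whose
    # last human edit was three days ago.
    rank = 20 + 30            # _DEFAULT_RANK plus the publicized bonus
    rank += 2 * 5 + 1 * 10    # two 4-star ratings and one 5-star rating
    rank += 35                # last human update between 2 and 7 days ago
    return rank               # 105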
def _exp_to_search_dict(exp):
rights = rights_manager.get_exploration_rights(exp.id)
doc = {
'id': exp.id,
'language_code': exp.language_code,
'title': exp.title,
'category': exp.category,
'tags': exp.tags,
'blurb': exp.blurb,
'objective': exp.objective,
'author_notes': exp.author_notes,
'rank': _get_search_rank(exp.id),
}
doc.update(_exp_rights_to_search_dict(rights))
return doc
def clear_search_index():
"""WARNING: This runs in-request, and may therefore fail if there are too
many entries in the index.
"""
search_services.clear_index(SEARCH_INDEX_EXPLORATIONS)
def index_explorations_given_ids(exp_ids):
# We pass 'strict=False' so as not to index deleted explorations.
exploration_models = get_multiple_explorations_by_id(exp_ids, strict=False)
search_services.add_documents_to_index([
_exp_to_search_dict(exp) for exp in exploration_models.values()
if _should_index(exp)
], SEARCH_INDEX_EXPLORATIONS)
def patch_exploration_search_document(exp_id, update):
"""Patches an exploration's current search document, with the values
from the 'update' dictionary."""
doc = search_services.get_document_from_index(
exp_id, SEARCH_INDEX_EXPLORATIONS)
doc.update(update)
search_services.add_documents_to_index([doc], SEARCH_INDEX_EXPLORATIONS)
def update_exploration_status_in_search(exp_id):
rights = rights_manager.get_exploration_rights(exp_id)
if rights.status == rights_manager.EXPLORATION_STATUS_PRIVATE:
delete_documents_from_search_index([exp_id])
else:
patch_exploration_search_document(
rights.id, _exp_rights_to_search_dict(rights))
def delete_documents_from_search_index(exploration_ids):
search_services.delete_documents_from_index(
exploration_ids, SEARCH_INDEX_EXPLORATIONS)
def search_explorations(query, limit, sort=None, cursor=None):
"""Searches through the available explorations.
args:
    - query: the query string to search for.
- sort: a string indicating how to sort results. This should be a string
of space separated values. Each value should start with a '+' or a
'-' character indicating whether to sort in ascending or descending
order respectively. This character should be followed by a field name
to sort on. When this is None, results are based on 'rank'. See
_get_search_rank to see how rank is determined.
- limit: the maximum number of results to return.
- cursor: A cursor, used to get the next page of results.
If there are more documents that match the query than 'limit', this
function will return a cursor to get the next page.
returns: a tuple:
- a list of exploration ids that match the query.
- a cursor if there are more matching explorations to fetch, None
otherwise. If a cursor is returned, it will be a web-safe string that
can be used in URLs.
"""
return search_services.search(
query, SEARCH_INDEX_EXPLORATIONS, cursor, limit, sort, ids_only=True)
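def _example_search_explorations_call():  # pragma: no cover
    # Illustrative sketch only; the query string and limit are arbitrary.
    # The sort string follows the format described above: descending rank,
    # then ascending title.
    return search_explorations('fractions', 10, sort='-rank +title')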
| {
"content_hash": "2d6418dc5b052f60fed7f75d1a10a39a",
"timestamp": "",
"source": "github",
"line_count": 1284,
"max_line_length": 80,
"avg_line_length": 41.06464174454829,
"alnum_prop": 0.6691069091736681,
"repo_name": "Dev4X/oppia",
"id": "327a158dd1fd56f4af044a4ff1ace868e640f4f6",
"size": "53350",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "core/domain/exp_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "363"
},
{
"name": "CSS",
"bytes": "46680"
},
{
"name": "HTML",
"bytes": "290469"
},
{
"name": "JavaScript",
"bytes": "1362092"
},
{
"name": "Python",
"bytes": "1621039"
},
{
"name": "Shell",
"bytes": "26864"
}
],
"symlink_target": ""
} |
"""
Utility functions for manipulation with Python dictionaries.
"""
def group_keys_by_values(d):
"""
    Take a dict (A -> B) and return another one (B -> [A]). It groups keys
from the original dict by their values.
.. testsetup::
from proso.dict import group_keys_by_values
from pprint import pprint
.. doctest::
>>> pprint(group_keys_by_values({1: True, 2: False, 3: True, 4: True}))
{False: [2], True: [1, 3, 4]}
Args:
d (dict): original dictionary which will be transformed.
Returns:
dict: new keys are taken from original values, each new key points to a
list where all values are original keys pointing to the same value
"""
result = {}
for k, v in d.items():
saved = result.get(v, [])
saved.append(k)
result[v] = saved
return result
def group_keys_by_value_lists(d):
"""
    Take a dict (A -> [B]) and return another one (B -> [A]). It groups keys
from the original dict by their values in lists.
.. testsetup::
from proso.dict import group_keys_by_value_lists
from pprint import pprint
.. doctest::
>>> pprint(group_keys_by_value_lists({1: [True], 2: [False], 3: [True], 4: [True, False]}))
{False: [2, 4], True: [1, 3, 4]}
Args:
d (dict): original dictionary which will be transformed.
Returns:
dict: new keys are taken from original values, each new key points to a
list where all values are original keys pointing to the same value
"""
result = {}
for k, values in d.items():
for v in values:
saved = result.get(v, [])
saved.append(k)
result[v] = saved
return result
| {
"content_hash": "100f86844260e7e229842b9954c10eac",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 99,
"avg_line_length": 28.403225806451612,
"alnum_prop": 0.5820556501987507,
"repo_name": "adaptive-learning/proso-apps",
"id": "db34397ee4c98153c5003ae628e87b5041667d74",
"size": "1761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proso/dict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4739"
},
{
"name": "HTML",
"bytes": "35781"
},
{
"name": "JavaScript",
"bytes": "865"
},
{
"name": "Makefile",
"bytes": "4125"
},
{
"name": "Python",
"bytes": "645104"
}
],
"symlink_target": ""
} |
def test_modflow():
import flopy
mf = flopy.modflow.Modflow()
assert isinstance(mf, flopy.modflow.Modflow)
assert not mf.has_package("DIS") # not yet
dis = flopy.modflow.ModflowDis(mf)
assert mf.has_package("DIS")
assert mf.has_package("dis") # case-insensitive
assert not mf.has_package("DISU") # not here
assert isinstance(dis, flopy.modflow.ModflowDis)
bas = flopy.modflow.ModflowBas(mf)
assert isinstance(bas, flopy.modflow.ModflowBas)
lpf = flopy.modflow.ModflowLpf(mf)
assert isinstance(lpf, flopy.modflow.ModflowLpf)
wel = flopy.modflow.ModflowWel(mf)
assert isinstance(wel, flopy.modflow.ModflowWel)
oc = flopy.modflow.ModflowOc(mf)
assert isinstance(oc, flopy.modflow.ModflowOc)
pcg = flopy.modflow.ModflowPcg(mf)
assert isinstance(pcg, flopy.modflow.ModflowPcg)
return
if __name__ == "__main__":
test_modflow()
| {
"content_hash": "257bd59c658c123d2279c5bc040f2cd9",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 52,
"avg_line_length": 35.03846153846154,
"alnum_prop": 0.6948408342480791,
"repo_name": "jentjr/flopy",
"id": "a508e748bcebbd7ec54f59da0d293617864128e5",
"size": "951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autotest/t002_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "832"
},
{
"name": "CSS",
"bytes": "321"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "6353118"
},
{
"name": "Shell",
"bytes": "292"
}
],
"symlink_target": ""
} |
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, StringIO
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
return text.replace('\\', '\x00'). \
replace('{', '\x01'). \
replace('}', '\x02'). \
replace('\x00', r'\%sZbs{}' % commandprefix). \
replace('\x01', r'\%sZob{}' % commandprefix). \
replace('\x02', r'\%sZcb{}' % commandprefix). \
replace('^', r'\%sZca{}' % commandprefix). \
replace('_', r'\%sZus{}' % commandprefix). \
replace('&', r'\%sZam{}' % commandprefix). \
replace('<', r'\%sZlt{}' % commandprefix). \
replace('>', r'\%sZgt{}' % commandprefix). \
replace('#', r'\%sZsh{}' % commandprefix). \
replace('%', r'\%sZpc{}' % commandprefix). \
replace('$', r'\%sZdl{}' % commandprefix). \
replace('-', r'\%sZhy{}' % commandprefix). \
replace("'", r'\%sZsq{}' % commandprefix). \
replace('"', r'\%sZdq{}' % commandprefix). \
replace('~', r'\%sZti{}' % commandprefix)
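def _example_escape_tex():  # pragma: no cover
    # Illustrative sketch with the default command prefix 'PY'; the input
    # string is arbitrary. Expected result: 'foo\PYZus{}bar \PYZsh{}1'.
    return escape_tex('foo_bar #1', 'PY')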
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
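# For example, Token.Keyword maps to the short name 'k' and
# Token.Name.Function to 'nf' via STANDARD_TYPES; a token type without an
# entry falls back to its closest ancestor's short name with the remaining
# subtype names appended.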
class LatexFormatter(Formatter):
r"""
Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
standard packages.
Without the `full` option, code is formatted as one ``Verbatim``
environment, like this:
.. sourcecode:: latex
\begin{Verbatim}[commandchars=\\\{\}]
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
\PY{k}{pass}
\end{Verbatim}
The special command used here (``\PY``) and all the other macros it needs
are output by the `get_style_defs` method.
With the `full` option, a complete LaTeX document is output, including
the command definitions in the preamble.
The `get_style_defs()` method of a `LatexFormatter` returns a string
containing ``\def`` commands defining the macros needed inside the
``Verbatim`` environments.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`docclass`
If the `full` option is enabled, this is the document class to use
(default: ``'article'``).
`preamble`
If the `full` option is enabled, this can be further preamble commands,
e.g. ``\usepackage`` (default: ``''``).
`linenos`
If set to ``True``, output line numbers (default: ``False``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`verboptions`
Additional options given to the Verbatim environment (see the *fancyvrb*
docs for possible values) (default: ``''``).
`commandprefix`
The LaTeX commands used to produce colored output are constructed
using this prefix and some letters (default: ``'PY'``).
*New in Pygments 0.7.*
*New in Pygments 0.10:* the default is now ``'PY'`` instead of ``'C'``.
`texcomments`
        If set to ``True``, enables LaTeX comment lines. That is, LaTeX markup
in comment tokens is not escaped so that LaTeX can render it (default:
``False``). *New in Pygments 1.2.*
`mathescape`
If set to ``True``, enables LaTeX math mode escape in comments. That
is, ``'$...$'`` inside a comment will trigger math mode (default:
``False``). *New in Pygments 1.2.*
"""
name = 'LaTeX'
aliases = ['latex', 'tex']
filenames = ['*.tex']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.docclass = options.get('docclass', 'article')
self.preamble = options.get('preamble', '')
self.linenos = get_bool_opt(options, 'linenos', False)
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.verboptions = options.get('verboptions', '')
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.commandprefix = options.get('commandprefix', 'PY')
self.texcomments = get_bool_opt(options, 'texcomments', False)
self.mathescape = get_bool_opt(options, 'mathescape', False)
self._create_stylesheet()
def _create_stylesheet(self):
t2n = self.ttype2name = {Token: ''}
c2d = self.cmd2def = {}
cp = self.commandprefix
def rgbcolor(col):
if col:
return ','.join(['%.2f' %(int(col[i] + col[i + 1], 16) / 255.0)
for i in (0, 2, 4)])
else:
return '1,1,1'
for ttype, ndef in self.style:
name = _get_ttype_name(ttype)
cmndef = ''
if ndef['bold']:
cmndef += r'\let\$$@bf=\textbf'
if ndef['italic']:
cmndef += r'\let\$$@it=\textit'
if ndef['underline']:
cmndef += r'\let\$$@ul=\underline'
if ndef['roman']:
cmndef += r'\let\$$@ff=\textrm'
if ndef['sans']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['mono']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['color']:
cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
rgbcolor(ndef['color']))
if ndef['border']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}' %
(rgbcolor(ndef['border']),
rgbcolor(ndef['bgcolor'])))
elif ndef['bgcolor']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\colorbox[rgb]{%s}{\strut ##1}}' %
rgbcolor(ndef['bgcolor']))
if cmndef == '':
continue
cmndef = cmndef.replace('$$', cp)
t2n[ttype] = name
c2d[name] = cmndef
def get_style_defs(self, arg=''):
"""
Return the command sequences needed to define the commands
used to format text in the verbatim environment. ``arg`` is ignored.
"""
cp = self.commandprefix
styles = []
for name, definition in self.cmd2def.iteritems():
styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
(cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
'styles': '\n'.join(styles)}
def format_unencoded(self, tokensource, outfile):
# TODO: add support for background colors
t2n = self.ttype2name
cp = self.commandprefix
if self.full:
realoutfile = outfile
outfile = StringIO()
outfile.write(ur'\begin{Verbatim}[commandchars=\\\{\}')
if self.linenos:
start, step = self.linenostart, self.linenostep
outfile.write(u',numbers=left' +
(start and u',firstnumber=%d' % start or u'') +
(step and u',stepnumber=%d' % step or u''))
if self.mathescape or self.texcomments:
outfile.write(ur',codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8}')
if self.verboptions:
outfile.write(u',' + self.verboptions)
outfile.write(u']\n')
for ttype, value in tokensource:
if ttype in Token.Comment:
if self.texcomments:
# Try to guess comment starting lexeme and escape it ...
start = value[0:1]
for i in xrange(1, len(value)):
if start[0] != value[i]:
break
start += value[i]
value = value[len(start):]
start = escape_tex(start, self.commandprefix)
# ... but do not escape inside comment.
value = start + value
elif self.mathescape:
# Only escape parts not inside a math environment.
parts = value.split('$')
in_math = False
for i, part in enumerate(parts):
if not in_math:
parts[i] = escape_tex(part, self.commandprefix)
in_math = not in_math
value = '$'.join(parts)
else:
value = escape_tex(value, self.commandprefix)
else:
value = escape_tex(value, self.commandprefix)
styles = []
while ttype is not Token:
try:
styles.append(t2n[ttype])
except KeyError:
# not in current style
styles.append(_get_ttype_name(ttype))
ttype = ttype.parent
styleval = '+'.join(reversed(styles))
if styleval:
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
outfile.write('\n')
if spl[-1]:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
else:
outfile.write(value)
outfile.write(u'\\end{Verbatim}\n')
if self.full:
realoutfile.write(DOC_TEMPLATE %
dict(docclass = self.docclass,
preamble = self.preamble,
title = self.title,
encoding = self.encoding or 'latin1',
styledefs = self.get_style_defs(),
code = outfile.getvalue()))
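def _example_latex_formatter_usage():  # pragma: no cover
    # Minimal usage sketch; the source snippet below is arbitrary. With
    # full=True the formatter emits a complete LaTeX document using
    # DOC_TEMPLATE and the style definitions above.
    from pygments import highlight
    from pygments.lexers import PythonLexer
    return highlight('print 42', PythonLexer(), LatexFormatter(full=True))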
| {
"content_hash": "4f811b07742b55f4bfa62514da058fea",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 80,
"avg_line_length": 36.93103448275862,
"alnum_prop": 0.5437046613517201,
"repo_name": "blighli/SublimeHighlight",
"id": "15c13d81b71b46def26bf1f9d6666ce2e418a94f",
"size": "13947",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pygments/formatters/latex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6160"
},
{
"name": "Python",
"bytes": "2314350"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import optparse
import re
import socket
from swift.common.utils import expand_ipv6, is_valid_ip, is_valid_ipv4, \
is_valid_ipv6
def tiers_for_dev(dev):
"""
Returns a tuple of tiers for a given device in ascending order by
length.
:returns: tuple of tiers
"""
t1 = dev['region']
t2 = dev['zone']
t3 = dev['ip']
t4 = dev['id']
return ((t1,),
(t1, t2),
(t1, t2, t3),
(t1, t2, t3, t4))
def build_tier_tree(devices):
"""
Construct the tier tree from the zone layout.
The tier tree is a dictionary that maps tiers to their child tiers.
A synthetic root node of () is generated so that there's one tree,
not a forest.
Example:
region 1 -+---- zone 1 -+---- 192.168.101.1 -+---- device id 0
| | |
| | +---- device id 1
| | |
| | +---- device id 2
| |
| +---- 192.168.101.2 -+---- device id 3
| |
| +---- device id 4
| |
| +---- device id 5
|
+---- zone 2 -+---- 192.168.102.1 -+---- device id 6
| |
| +---- device id 7
| |
| +---- device id 8
|
+---- 192.168.102.2 -+---- device id 9
|
+---- device id 10
region 2 -+---- zone 1 -+---- 192.168.201.1 -+---- device id 12
| |
| +---- device id 13
| |
| +---- device id 14
|
+---- 192.168.201.2 -+---- device id 15
|
+---- device id 16
|
+---- device id 17
The tier tree would look like:
{
(): [(1,), (2,)],
(1,): [(1, 1), (1, 2)],
(2,): [(2, 1)],
(1, 1): [(1, 1, 192.168.101.1),
(1, 1, 192.168.101.2)],
(1, 2): [(1, 2, 192.168.102.1),
(1, 2, 192.168.102.2)],
(2, 1): [(2, 1, 192.168.201.1),
(2, 1, 192.168.201.2)],
(1, 1, 192.168.101.1): [(1, 1, 192.168.101.1, 0),
(1, 1, 192.168.101.1, 1),
(1, 1, 192.168.101.1, 2)],
(1, 1, 192.168.101.2): [(1, 1, 192.168.101.2, 3),
(1, 1, 192.168.101.2, 4),
(1, 1, 192.168.101.2, 5)],
(1, 2, 192.168.102.1): [(1, 2, 192.168.102.1, 6),
(1, 2, 192.168.102.1, 7),
(1, 2, 192.168.102.1, 8)],
(1, 2, 192.168.102.2): [(1, 2, 192.168.102.2, 9),
(1, 2, 192.168.102.2, 10)],
(2, 1, 192.168.201.1): [(2, 1, 192.168.201.1, 12),
(2, 1, 192.168.201.1, 13),
(2, 1, 192.168.201.1, 14)],
(2, 1, 192.168.201.2): [(2, 1, 192.168.201.2, 15),
(2, 1, 192.168.201.2, 16),
(2, 1, 192.168.201.2, 17)],
}
:devices: device dicts from which to generate the tree
:returns: tier tree
"""
tier2children = defaultdict(set)
for dev in devices:
for tier in tiers_for_dev(dev):
if len(tier) > 1:
tier2children[tier[0:-1]].add(tier)
else:
tier2children[()].add(tier)
return tier2children
def validate_and_normalize_ip(ip):
"""
Return normalized ip if the ip is a valid ip.
Otherwise raise ValueError Exception. The hostname is
normalized to all lower case. IPv6-addresses are converted to
lowercase and fully expanded.
"""
# first convert to lower case
new_ip = ip.lower()
if is_valid_ipv4(new_ip):
return new_ip
elif is_valid_ipv6(new_ip):
return expand_ipv6(new_ip)
else:
raise ValueError('Invalid ip %s' % ip)
def validate_and_normalize_address(address):
"""
Return normalized address if the address is a valid ip or hostname.
Otherwise raise ValueError Exception. The hostname is
normalized to all lower case. IPv6-addresses are converted to
lowercase and fully expanded.
    RFC1123 2.1 Host Names and Numbers
DISCUSSION
This last requirement is not intended to specify the complete
syntactic form for entering a dotted-decimal host number;
that is considered to be a user-interface issue. For
example, a dotted-decimal number must be enclosed within
"[ ]" brackets for SMTP mail (see Section 5.2.17). This
notation could be made universal within a host system,
simplifying the syntactic checking for a dotted-decimal
number.
If a dotted-decimal number can be entered without such
identifying delimiters, then a full syntactic check must be
made, because a segment of a host domain name is now allowed
to begin with a digit and could legally be entirely numeric
(see Section 6.1.2.4). However, a valid host name can never
have the dotted-decimal form #.#.#.#, since at least the
highest-level component label will be alphabetic.
"""
new_address = address.lstrip('[').rstrip(']')
if address.startswith('[') and address.endswith(']'):
return validate_and_normalize_ip(new_address)
new_address = new_address.lower()
if is_valid_ipv4(new_address):
return new_address
elif is_valid_ipv6(new_address):
return expand_ipv6(new_address)
elif is_valid_hostname(new_address):
return new_address
else:
raise ValueError('Invalid address %s' % address)
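# For example, a hostname like 'StorageNode01.Example.COM' (arbitrary) is
# returned as 'storagenode01.example.com', while '[::1]' is unbracketed and
# normalized as an IPv6 address.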
def is_valid_hostname(hostname):
"""
Return True if the provided hostname is a valid hostname
"""
if len(hostname) < 1 or len(hostname) > 255:
return False
if hostname.endswith('.'):
# strip exactly one dot from the right, if present
hostname = hostname[:-1]
allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split("."))
def is_local_device(my_ips, my_port, dev_ip, dev_port):
"""
Return True if the provided dev_ip and dev_port are among the IP
addresses specified in my_ips and my_port respectively.
To support accurate locality determination in the server-per-port
deployment, when my_port is None, only IP addresses are used for
determining locality (dev_port is ignored).
If dev_ip is a hostname then it is first translated to an IP
address before checking it against my_ips.
"""
candidate_ips = []
if not is_valid_ip(dev_ip) and is_valid_hostname(dev_ip):
try:
# get the ip for this host; use getaddrinfo so that
# it works for both ipv4 and ipv6 addresses
addrinfo = socket.getaddrinfo(dev_ip, dev_port)
for addr in addrinfo:
family = addr[0]
dev_ip = addr[4][0] # get the ip-address
if family == socket.AF_INET6:
dev_ip = expand_ipv6(dev_ip)
candidate_ips.append(dev_ip)
except socket.gaierror:
return False
else:
if is_valid_ipv6(dev_ip):
dev_ip = expand_ipv6(dev_ip)
candidate_ips = [dev_ip]
for dev_ip in candidate_ips:
if dev_ip in my_ips and (my_port is None or dev_port == my_port):
return True
return False
def parse_search_value(search_value):
"""The <search-value> can be of the form::
d<device_id>r<region>z<zone>-<ip>:<port>R<r_ip>:<r_port>/
<device_name>_<meta>
Where <r_ip> and <r_port> are replication ip and port.
Any part is optional, but you must include at least one part.
Examples::
d74 Matches the device id 74
r4 Matches devices in region 4
z1 Matches devices in zone 1
z1-1.2.3.4 Matches devices in zone 1 with the ip 1.2.3.4
1.2.3.4 Matches devices in any zone with the ip 1.2.3.4
z1:5678 Matches devices in zone 1 using port 5678
:5678 Matches devices that use port 5678
R5.6.7.8 Matches devices that use replication ip 5.6.7.8
R:5678 Matches devices that use replication port 5678
1.2.3.4R5.6.7.8 Matches devices that use ip 1.2.3.4 and replication ip
5.6.7.8
/sdb1 Matches devices with the device name sdb1
_shiny Matches devices with shiny in the meta data
_"snet: 5.6.7.8" Matches devices with snet: 5.6.7.8 in the meta data
[::1] Matches devices in any zone with the ip ::1
z1-[::1]:5678 Matches devices in zone 1 with ip ::1 and port 5678
Most specific example::
d74r4z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8"
Nerd explanation:
All items require their single character prefix except the ip, in which
case the - is optional unless the device id or zone is also included.
"""
orig_search_value = search_value
match = {}
if search_value.startswith('d'):
i = 1
while i < len(search_value) and search_value[i].isdigit():
i += 1
match['id'] = int(search_value[1:i])
search_value = search_value[i:]
if search_value.startswith('r'):
i = 1
while i < len(search_value) and search_value[i].isdigit():
i += 1
match['region'] = int(search_value[1:i])
search_value = search_value[i:]
if search_value.startswith('z'):
i = 1
while i < len(search_value) and search_value[i].isdigit():
i += 1
match['zone'] = int(search_value[1:i])
search_value = search_value[i:]
if search_value.startswith('-'):
search_value = search_value[1:]
if search_value and search_value[0].isdigit():
i = 1
while i < len(search_value) and search_value[i] in '0123456789.':
i += 1
match['ip'] = search_value[:i]
search_value = search_value[i:]
elif search_value and search_value.startswith('['):
i = 1
while i < len(search_value) and search_value[i] != ']':
i += 1
i += 1
match['ip'] = search_value[:i].lstrip('[').rstrip(']')
search_value = search_value[i:]
if 'ip' in match:
# ipv6 addresses are converted to all lowercase
# and use the fully expanded representation
match['ip'] = validate_and_normalize_ip(match['ip'])
if search_value.startswith(':'):
i = 1
while i < len(search_value) and search_value[i].isdigit():
i += 1
match['port'] = int(search_value[1:i])
search_value = search_value[i:]
# replication parameters
if search_value.startswith('R'):
search_value = search_value[1:]
if search_value and search_value[0].isdigit():
i = 1
while (i < len(search_value) and
search_value[i] in '0123456789.'):
i += 1
match['replication_ip'] = search_value[:i]
search_value = search_value[i:]
elif search_value and search_value.startswith('['):
i = 1
while i < len(search_value) and search_value[i] != ']':
i += 1
i += 1
match['replication_ip'] = search_value[:i].lstrip('[').rstrip(']')
search_value = search_value[i:]
if 'replication_ip' in match:
# ipv6 addresses are converted to all lowercase
# and use the fully expanded representation
match['replication_ip'] = \
validate_and_normalize_ip(match['replication_ip'])
if search_value.startswith(':'):
i = 1
while i < len(search_value) and search_value[i].isdigit():
i += 1
match['replication_port'] = int(search_value[1:i])
search_value = search_value[i:]
if search_value.startswith('/'):
i = 1
while i < len(search_value) and search_value[i] != '_':
i += 1
match['device'] = search_value[1:i]
search_value = search_value[i:]
if search_value.startswith('_'):
match['meta'] = search_value[1:]
search_value = ''
if search_value:
raise ValueError('Invalid <search-value>: %s' %
repr(orig_search_value))
return match
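# For example, parse_search_value('d74z1-1.2.3.4:5678') returns
# {'id': 74, 'zone': 1, 'ip': '1.2.3.4', 'port': 5678}.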
def parse_search_values_from_opts(opts):
"""
Convert optparse style options into a dictionary for searching.
:param opts: optparse style options
:returns: a dictionary with search values to filter devices,
supported parameters are id, region, zone, ip, port,
replication_ip, replication_port, device, weight, meta
"""
search_values = {}
for key in ('id', 'region', 'zone', 'ip', 'port', 'replication_ip',
'replication_port', 'device', 'weight', 'meta'):
value = getattr(opts, key, None)
if value:
if key == 'ip' or key == 'replication_ip':
value = validate_and_normalize_address(value)
search_values[key] = value
return search_values
def parse_change_values_from_opts(opts):
"""
Convert optparse style options into a dictionary for changing.
:param opts: optparse style options
    :returns: a dictionary with change values for devices,
              supported parameters are ip, port, replication_ip,
              replication_port, device, meta
"""
change_values = {}
for key in ('change_ip', 'change_port', 'change_replication_ip',
'change_replication_port', 'change_device', 'change_meta'):
value = getattr(opts, key, None)
if value:
if key == 'change_ip' or key == 'change_replication_ip':
value = validate_and_normalize_address(value)
change_values[key.replace('change_', '')] = value
return change_values
def parse_add_value(add_value):
"""
Convert an add value, like 'r1z2-10.1.2.3:7878/sdf', to a dictionary.
If the string does not start with 'r<N>', then the value of 'region' in
the returned dictionary will be None. Callers should check for this and
set a reasonable default. This is done so callers can emit errors or
warnings if desired.
Similarly, 'replication_ip' and 'replication_port' will be None if not
specified.
:returns: dictionary with keys 'region', 'zone', 'ip', 'port', 'device',
'replication_ip', 'replication_port', 'meta'
:raises: ValueError if add_value is malformed
"""
region = None
rest = add_value
if add_value.startswith('r'):
i = 1
while i < len(add_value) and add_value[i].isdigit():
i += 1
region = int(add_value[1:i])
rest = add_value[i:]
if not rest.startswith('z'):
raise ValueError('Invalid add value: %s' % add_value)
i = 1
while i < len(rest) and rest[i].isdigit():
i += 1
zone = int(rest[1:i])
rest = rest[i:]
if not rest.startswith('-'):
raise ValueError('Invalid add value: %s' % add_value)
ip, port, rest = parse_address(rest[1:])
replication_ip = replication_port = None
if rest.startswith('R'):
replication_ip, replication_port, rest = \
parse_address(rest[1:])
if not rest.startswith('/'):
raise ValueError(
'Invalid add value: %s' % add_value)
i = 1
while i < len(rest) and rest[i] != '_':
i += 1
device_name = rest[1:i]
if not validate_device_name(device_name):
raise ValueError('Invalid device name')
rest = rest[i:]
meta = ''
if rest.startswith('_'):
meta = rest[1:]
return {'region': region, 'zone': zone, 'ip': ip, 'port': port,
'device': device_name, 'replication_ip': replication_ip,
'replication_port': replication_port, 'meta': meta}
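# For example, parse_add_value('r1z2-10.1.2.3:7878/sdf') returns
# {'region': 1, 'zone': 2, 'ip': '10.1.2.3', 'port': 7878, 'device': 'sdf',
#  'replication_ip': None, 'replication_port': None, 'meta': ''}.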
def parse_address(rest):
if rest.startswith('['):
# remove first [] for ip
rest = rest.replace('[', '', 1).replace(']', '', 1)
pos = 0
while (pos < len(rest) and
not (rest[pos] == 'R' or rest[pos] == '/')):
pos += 1
address = rest[:pos]
rest = rest[pos:]
port_start = address.rfind(':')
if port_start == -1:
raise ValueError('Invalid port in add value')
ip = address[:port_start]
try:
port = int(address[(port_start + 1):])
except (TypeError, ValueError):
raise ValueError(
'Invalid port %s in add value' % address[port_start:])
# if this is an ipv6 address then we want to convert it
# to all lowercase and use its fully expanded representation
# to make searches easier
ip = validate_and_normalize_ip(ip)
return (ip, port, rest)
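# Illustrative sketch: parse_address('10.1.2.3:6200/sdb1') returns
# ('10.1.2.3', 6200, '/sdb1').  A bracketed IPv6 literal such as
# '[::1]:6200/sdb1' has its brackets stripped before the port is split off
# on the last ':', and validate_and_normalize_ip expands the address to its
# full lowercase form as described in the comment above.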
def validate_args(argvish):
"""
    Build OptionParser and check whether the arguments use the new
    command-line format.
"""
opts, args = parse_args(argvish)
# id can be 0 (swift starts generating id from 0),
# also zone, region and weight can be set to zero.
new_cmd_format = opts.id is not None or opts.region is not None or \
opts.zone is not None or opts.ip or opts.port or \
opts.replication_ip or opts.replication_port or \
opts.device or opts.weight is not None or opts.meta
return (new_cmd_format, opts, args)
def parse_args(argvish):
"""
Build OptionParser and evaluate command line arguments.
"""
parser = optparse.OptionParser()
parser.add_option('-u', '--id', type="int",
help="Device ID")
parser.add_option('-r', '--region', type="int",
help="Region")
parser.add_option('-z', '--zone', type="int",
help="Zone")
parser.add_option('-i', '--ip', type="string",
help="IP address")
parser.add_option('-p', '--port', type="int",
help="Port number")
parser.add_option('-j', '--replication-ip', type="string",
help="Replication IP address")
parser.add_option('-q', '--replication-port', type="int",
help="Replication port number")
parser.add_option('-d', '--device', type="string",
help="Device name (e.g. md0, sdb1)")
parser.add_option('-w', '--weight', type="float",
help="Device weight")
parser.add_option('-m', '--meta', type="string", default="",
help="Extra device info (just a string)")
parser.add_option('-I', '--change-ip', type="string",
help="IP address for change")
parser.add_option('-P', '--change-port', type="int",
help="Port number for change")
parser.add_option('-J', '--change-replication-ip', type="string",
help="Replication IP address for change")
parser.add_option('-Q', '--change-replication-port', type="int",
help="Replication port number for change")
parser.add_option('-D', '--change-device', type="string",
help="Device name (e.g. md0, sdb1) for change")
parser.add_option('-M', '--change-meta', type="string", default="",
help="Extra device info (just a string) for change")
parser.add_option('-y', '--yes', default=False, action="store_true",
help="Assume a yes response to all questions")
return parser.parse_args(argvish)
def parse_builder_ring_filename_args(argvish):
first_arg = argvish[1]
if first_arg.endswith('.ring.gz'):
ring_file = first_arg
builder_file = first_arg[:-len('.ring.gz')] + '.builder'
else:
builder_file = first_arg
if not builder_file.endswith('.builder'):
ring_file = first_arg
else:
ring_file = builder_file[:-len('.builder')]
ring_file += '.ring.gz'
return builder_file, ring_file
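# Illustrative sketch (hypothetical argv): both
# parse_builder_ring_filename_args(['swift-ring-builder', 'object.builder'])
# and parse_builder_ring_filename_args(['swift-ring-builder', 'object.ring.gz'])
# return ('object.builder', 'object.ring.gz'); an argument without either
# suffix is used as the builder file name as-is and '.ring.gz' is appended
# to derive the ring file name.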
def build_dev_from_opts(opts):
"""
    Convert optparse style options into a device dictionary.
"""
for attribute, shortopt, longopt in (['region', '-r', '--region'],
['zone', '-z', '--zone'],
['ip', '-i', '--ip'],
['port', '-p', '--port'],
['device', '-d', '--device'],
['weight', '-w', '--weight']):
if getattr(opts, attribute, None) is None:
raise ValueError('Required argument %s/%s not specified.' %
(shortopt, longopt))
ip = validate_and_normalize_address(opts.ip)
replication_ip = validate_and_normalize_address(
(opts.replication_ip or opts.ip))
replication_port = opts.replication_port or opts.port
if not validate_device_name(opts.device):
raise ValueError('Invalid device name')
return {'region': opts.region, 'zone': opts.zone, 'ip': ip,
'port': opts.port, 'device': opts.device, 'meta': opts.meta,
'replication_ip': replication_ip,
'replication_port': replication_port, 'weight': opts.weight}
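# Illustrative sketch (hypothetical values): options parsed from
# ``-r 1 -z 2 -i 10.1.2.3 -p 6200 -d sdb1 -w 100`` build the device
# {'region': 1, 'zone': 2, 'ip': '10.1.2.3', 'port': 6200, 'device': 'sdb1',
#  'meta': '', 'replication_ip': '10.1.2.3', 'replication_port': 6200,
#  'weight': 100.0}; replication_ip and replication_port fall back to the
# primary ip and port when not given explicitly.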
def dispersion_report(builder, search_filter=None, verbose=False):
if not builder._dispersion_graph:
builder._build_dispersion_graph()
max_allowed_replicas = builder._build_max_replicas_by_tier()
worst_tier = None
max_dispersion = 0.0
sorted_graph = []
for tier, replica_counts in sorted(builder._dispersion_graph.items()):
tier_name = get_tier_name(tier, builder)
if search_filter and not re.match(search_filter, tier_name):
continue
max_replicas = int(max_allowed_replicas[tier])
at_risk_parts = sum(replica_counts[max_replicas + 1:])
placed_parts = sum(replica_counts[1:])
tier_dispersion = 100.0 * at_risk_parts / placed_parts
        if tier_dispersion > max_dispersion:
            max_dispersion = tier_dispersion
            worst_tier = tier_name
if not verbose:
continue
tier_report = {
'max_replicas': max_replicas,
'placed_parts': placed_parts,
'dispersion': tier_dispersion,
'replicas': replica_counts,
}
sorted_graph.append((tier_name, tier_report))
return {
'max_dispersion': max_dispersion,
'worst_tier': worst_tier,
'graph': sorted_graph,
}
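# Worked example of the dispersion math above (illustrative numbers): for a
# tier whose replica_counts are [0, 90, 10] with max_replicas == 1, 100
# partitions have at least one replica in the tier (sum of replica_counts[1:])
# and 10 of them hold more replicas there than desired (sum of
# replica_counts[max_replicas + 1:]), giving a tier dispersion of
# 100.0 * 10 / 100 == 10.0 percent.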
def get_tier_name(tier, builder):
if len(tier) == 1:
return "r%s" % (tier[0], )
if len(tier) == 2:
return "r%sz%s" % (tier[0], tier[1])
if len(tier) == 3:
return "r%sz%s-%s" % (tier[0], tier[1], tier[2])
if len(tier) == 4:
device = builder.devs[tier[3]] or {}
return "r%sz%s-%s/%s" % (tier[0], tier[1], tier[2],
device.get('device', 'IDd%s' % tier[3]))
def validate_device_name(device_name):
return not (
device_name.startswith(' ') or
device_name.endswith(' ') or
len(device_name) == 0)
| {
"content_hash": "ac789b6bd96e3d750a057f7f42cf8aed",
"timestamp": "",
"source": "github",
"line_count": 649,
"max_line_length": 79,
"avg_line_length": 37.21109399075501,
"alnum_prop": 0.5290269151138717,
"repo_name": "larsbutler/swift",
"id": "a59fb3115ca85d1294a1561d6f15080795f0cddd",
"size": "24744",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "swift/common/ring/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7772621"
},
{
"name": "Shell",
"bytes": "1975"
}
],
"symlink_target": ""
} |
import base64
import random
from pathlib import Path
from ... import options as opts
from ... import types
from ...charts.chart import Chart
from ...commons.utils import JsCode
from ...exceptions import WordCloudMaskImageException
from ...globals import ChartType
SHAPES = ("cardioid", "diamond", "triangle-forward", "triangle", "pentagon", "star")
def gen_color():
"""
generate random color for WordCloud
"""
return "rgb(%s,%s,%s)" % (
random.randint(0, 160),
random.randint(0, 160),
random.randint(0, 160),
)
class WordCloud(Chart):
"""
<<< WordCloud >>>
Word cloud is to visually highlight the keywords that
appear frequently in the text.
"""
def __init__(self, init_opts: types.Init = opts.InitOpts()):
super().__init__(init_opts=init_opts)
self.js_dependencies.add("echarts-wordcloud")
self._mask_image_suffix: types.Sequence = ["jpg", "jpeg", "png", "ico"]
def _create_mask_image_variable(self, data: str) -> JsCode:
image_str = self._encode_image_to_base64(image_or_path=data)
if image_str is None:
raise WordCloudMaskImageException(data=data)
current_chart_id = self.chart_id
self.add_js_funcs(
f"""
var maskImage_{current_chart_id} = new Image();
maskImage_{current_chart_id}.src = '{image_str}';
"""
)
return JsCode(f"maskImage_{current_chart_id}")
def _encode_image_to_base64(self, image_or_path: str) -> types.Optional[str]:
try:
            # Check whether this looks like an image path (a very long base64
            # string would make pathlib raise, hence the except clause below)
is_image_file = Path(image_or_path).is_file()
is_image_file_exist = Path(image_or_path).exists()
if is_image_file and is_image_file_exist:
ext = Path(image_or_path).suffix[1:]
if ext in self._mask_image_suffix:
with open(Path(image_or_path), "rb") as f:
data = base64.b64encode(f.read()).decode()
image_str = f"data:image/{ext};base64,{data}"
return image_str
except OSError:
return image_or_path
def add(
self,
series_name: str,
data_pair: types.Sequence,
*,
shape: str = "circle",
mask_image: types.Optional[str] = None,
word_gap: types.Numeric = 20,
word_size_range: types.Optional[types.Sequence] = None,
rotate_step: types.Numeric = 45,
pos_left: types.Optional[str] = None,
pos_top: types.Optional[str] = None,
pos_right: types.Optional[str] = None,
pos_bottom: types.Optional[str] = None,
width: types.Optional[str] = None,
height: types.Optional[str] = None,
is_draw_out_of_bound: bool = False,
tooltip_opts: types.Tooltip = None,
itemstyle_opts: types.ItemStyle = None,
textstyle_opts: types.TextStyle = None,
emphasis_shadow_blur: types.Optional[types.Numeric] = None,
emphasis_shadow_color: types.Optional[str] = None,
):
data = []
for n, v in data_pair:
data.append(
{"name": n, "value": v, "textStyle": {"normal": {"color": gen_color()}}}
)
word_size_range = word_size_range or (12, 60)
_rmin, _rmax = -90, 90
        # Make sure the configured shape is valid; the word rotation angle
        # should stay within [-90, 90]
if shape in SHAPES:
_rmin = _rmax = 0
else:
shape = "circle"
if mask_image is not None:
shape = None
mask_image = self._create_mask_image_variable(data=mask_image)
self.options.get("series").append(
{
"type": ChartType.WORDCLOUD,
"name": series_name,
"shape": shape,
"maskImage": mask_image,
"rotationRange": [_rmin, _rmax],
"rotationStep": rotate_step,
"girdSize": word_gap,
"sizeRange": word_size_range,
"data": data,
"tooltip": tooltip_opts,
"itemStyle": itemstyle_opts,
"left": pos_left,
"right": pos_right,
"top": pos_top,
"bottom": pos_bottom,
"width": width,
"height": height,
"drawOutOfBound": is_draw_out_of_bound,
"textStyle": {
"normal": textstyle_opts,
"emphasis": {
"shadowBlur": emphasis_shadow_blur,
"shadowColor": emphasis_shadow_color,
},
},
}
)
return self
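# Usage sketch (illustrative, not part of the original module); assumes the
# public pyecharts import paths that this package exposes:
#
#   from pyecharts import options as opts
#   from pyecharts.charts import WordCloud
#
#   words = [("Python", 100), ("ECharts", 80), ("pyecharts", 60)]
#   chart = (
#       WordCloud()
#       .add("", words, word_size_range=[20, 80], shape="diamond")
#       .set_global_opts(title_opts=opts.TitleOpts(title="WordCloud demo"))
#   )
#   chart.render("wordcloud.html")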
| {
"content_hash": "919db075f857287c81ba5179fa7bd4ff",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 88,
"avg_line_length": 35.26086956521739,
"alnum_prop": 0.512741471434443,
"repo_name": "chenjiandongx/pyecharts",
"id": "09842779642eba16a41ee56f104cd1e32a55ed7f",
"size": "4952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyecharts/charts/basic_charts/wordcloud.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1866"
},
{
"name": "Makefile",
"bytes": "33"
},
{
"name": "Python",
"bytes": "397526"
},
{
"name": "Shell",
"bytes": "146"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import traceback
import errno
import filecmp
import os
import random
import re
import shutil
import stat
import subprocess
import sys
import tarfile
import tempfile
import time
from color import Coloring
from git_command import GitCommand, git_require
from git_config import GitConfig, IsId, GetSchemeFromUrl, ID_RE
from error import GitError, HookError, UploadError
from error import ManifestInvalidRevisionError
from error import NoManifestException
from trace import IsTrace, Trace
from git_refs import GitRefs, HEAD, R_HEADS, R_TAGS, R_PUB, R_M
from pyversion import is_python3
if not is_python3():
# pylint:disable=W0622
input = raw_input
# pylint:enable=W0622
def _lwrite(path, content):
lock = '%s.lock' % path
fd = open(lock, 'wb')
try:
fd.write(content)
finally:
fd.close()
try:
os.rename(lock, path)
except OSError:
os.remove(lock)
raise
def _error(fmt, *args):
msg = fmt % args
print('error: %s' % msg, file=sys.stderr)
def not_rev(r):
return '^' + r
def sq(r):
return "'" + r.replace("'", "'\''") + "'"
_project_hook_list = None
def _ProjectHooks():
"""List the hooks present in the 'hooks' directory.
These hooks are project hooks and are copied to the '.git/hooks' directory
of all subprojects.
This function caches the list of hooks (based on the contents of the
'repo/hooks' directory) on the first call.
Returns:
A list of absolute paths to all of the files in the hooks directory.
"""
global _project_hook_list
if _project_hook_list is None:
d = os.path.realpath(os.path.abspath(os.path.dirname(__file__)))
d = os.path.join(d , 'hooks')
_project_hook_list = [os.path.join(d, x) for x in os.listdir(d)]
return _project_hook_list
class DownloadedChange(object):
_commit_cache = None
def __init__(self, project, base, change_id, ps_id, commit):
self.project = project
self.base = base
self.change_id = change_id
self.ps_id = ps_id
self.commit = commit
@property
def commits(self):
if self._commit_cache is None:
self._commit_cache = self.project.bare_git.rev_list(
'--abbrev=8',
'--abbrev-commit',
'--pretty=oneline',
'--reverse',
'--date-order',
not_rev(self.base),
self.commit,
'--')
return self._commit_cache
class ReviewableBranch(object):
_commit_cache = None
def __init__(self, project, branch, base):
self.project = project
self.branch = branch
self.base = base
@property
def name(self):
return self.branch.name
@property
def commits(self):
if self._commit_cache is None:
self._commit_cache = self.project.bare_git.rev_list(
'--abbrev=8',
'--abbrev-commit',
'--pretty=oneline',
'--reverse',
'--date-order',
not_rev(self.base),
R_HEADS + self.name,
'--')
return self._commit_cache
@property
def unabbrev_commits(self):
r = dict()
for commit in self.project.bare_git.rev_list(
not_rev(self.base),
R_HEADS + self.name,
'--'):
r[commit[0:8]] = commit
return r
@property
def date(self):
return self.project.bare_git.log(
'--pretty=format:%cd',
'-n', '1',
R_HEADS + self.name,
'--')
def UploadForReview(self, people, auto_topic=False, draft=False, dest_branch=None):
self.project.UploadForReview(self.name,
people,
auto_topic=auto_topic,
draft=draft,
dest_branch=dest_branch)
def GetPublishedRefs(self):
refs = {}
output = self.project.bare_git.ls_remote(
self.branch.remote.SshReviewUrl(self.project.UserEmail),
'refs/changes/*')
for line in output.split('\n'):
try:
(sha, ref) = line.split()
refs[sha] = ref
except ValueError:
pass
return refs
class StatusColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'status')
self.project = self.printer('header', attr = 'bold')
self.branch = self.printer('header', attr = 'bold')
self.nobranch = self.printer('nobranch', fg = 'red')
self.important = self.printer('important', fg = 'red')
self.added = self.printer('added', fg = 'green')
self.changed = self.printer('changed', fg = 'red')
self.untracked = self.printer('untracked', fg = 'red')
class DiffColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'diff')
self.project = self.printer('header', attr = 'bold')
class _Annotation:
def __init__(self, name, value, keep):
self.name = name
self.value = value
self.keep = keep
class _CopyFile:
def __init__(self, src, dest, abssrc, absdest):
self.src = src
self.dest = dest
self.abs_src = abssrc
self.abs_dest = absdest
def _Copy(self):
src = self.abs_src
dest = self.abs_dest
# copy file if it does not exist or is out of date
if not os.path.exists(dest) or not filecmp.cmp(src, dest):
try:
# remove existing file first, since it might be read-only
if os.path.exists(dest):
os.remove(dest)
else:
dest_dir = os.path.dirname(dest)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
shutil.copy(src, dest)
# make the file read-only
mode = os.stat(dest)[stat.ST_MODE]
mode = mode & ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
os.chmod(dest, mode)
except IOError:
_error('Cannot copy file %s to %s', src, dest)
class _LinkFile:
def __init__(self, src, dest, abssrc, absdest):
self.src = src
self.dest = dest
self.abs_src = abssrc
self.abs_dest = absdest
def _Link(self):
src = self.abs_src
dest = self.abs_dest
# link file if it does not exist or is out of date
if not os.path.islink(dest) or os.readlink(dest) != src:
try:
# remove existing file first, since it might be read-only
if os.path.exists(dest):
os.remove(dest)
else:
dest_dir = os.path.dirname(dest)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
os.symlink(src, dest)
except IOError:
_error('Cannot link file %s to %s', src, dest)
class RemoteSpec(object):
def __init__(self,
name,
url = None,
review = None):
self.name = name
self.url = url
self.review = review
class RepoHook(object):
"""A RepoHook contains information about a script to run as a hook.
Hooks are used to run a python script before running an upload (for instance,
to run presubmit checks). Eventually, we may have hooks for other actions.
This shouldn't be confused with files in the 'repo/hooks' directory. Those
files are copied into each '.git/hooks' folder for each project. Repo-level
hooks are associated instead with repo actions.
Hooks are always python. When a hook is run, we will load the hook into the
interpreter and execute its main() function.
"""
def __init__(self,
hook_type,
hooks_project,
topdir,
abort_if_user_denies=False):
"""RepoHook constructor.
Params:
hook_type: A string representing the type of hook. This is also used
to figure out the name of the file containing the hook. For
example: 'pre-upload'.
hooks_project: The project containing the repo hooks. If you have a
manifest, this is manifest.repo_hooks_project. OK if this is None,
which will make the hook a no-op.
topdir: Repo's top directory (the one containing the .repo directory).
Scripts will run with CWD as this directory. If you have a manifest,
this is manifest.topdir
abort_if_user_denies: If True, we'll throw a HookError() if the user
doesn't allow us to run the hook.
"""
self._hook_type = hook_type
self._hooks_project = hooks_project
self._topdir = topdir
self._abort_if_user_denies = abort_if_user_denies
# Store the full path to the script for convenience.
if self._hooks_project:
self._script_fullpath = os.path.join(self._hooks_project.worktree,
self._hook_type + '.py')
else:
self._script_fullpath = None
def _GetHash(self):
"""Return a hash of the contents of the hooks directory.
We'll just use git to do this. This hash has the property that if anything
    changes in the directory we will return a different hash.
SECURITY CONSIDERATION:
This hash only represents the contents of files in the hook directory, not
any other files imported or called by hooks. Changes to imported files
can change the script behavior without affecting the hash.
Returns:
A string representing the hash. This will always be ASCII so that it can
be printed to the user easily.
"""
assert self._hooks_project, "Must have hooks to calculate their hash."
# We will use the work_git object rather than just calling GetRevisionId().
# That gives us a hash of the latest checked in version of the files that
# the user will actually be executing. Specifically, GetRevisionId()
# doesn't appear to change even if a user checks out a different version
# of the hooks repo (via git checkout) nor if a user commits their own revs.
#
# NOTE: Local (non-committed) changes will not be factored into this hash.
# I think this is OK, since we're really only worried about warning the user
# about upstream changes.
return self._hooks_project.work_git.rev_parse('HEAD')
def _GetMustVerb(self):
"""Return 'must' if the hook is required; 'should' if not."""
if self._abort_if_user_denies:
return 'must'
else:
return 'should'
def _CheckForHookApproval(self):
"""Check to see whether this hook has been approved.
We'll look at the hash of all of the hooks. If this matches the hash that
the user last approved, we're done. If it doesn't, we'll ask the user
about approval.
Note that we ask permission for each individual hook even though we use
the hash of all hooks when detecting changes. We'd like the user to be
able to approve / deny each hook individually. We only use the hash of all
hooks because there is no other easy way to detect changes to local imports.
Returns:
True if this hook is approved to run; False otherwise.
Raises:
HookError: Raised if the user doesn't approve and abort_if_user_denies
          was passed to the constructor.
"""
hooks_config = self._hooks_project.config
git_approval_key = 'repo.hooks.%s.approvedhash' % self._hook_type
# Get the last hash that the user approved for this hook; may be None.
old_hash = hooks_config.GetString(git_approval_key)
# Get the current hash so we can tell if scripts changed since approval.
new_hash = self._GetHash()
if old_hash is not None:
# User previously approved hook and asked not to be prompted again.
if new_hash == old_hash:
# Approval matched. We're done.
return True
else:
# Give the user a reason why we're prompting, since they last told
# us to "never ask again".
prompt = 'WARNING: Scripts have changed since %s was allowed.\n\n' % (
self._hook_type)
else:
prompt = ''
    # Prompt the user if we're on a tty; otherwise we'll assume "no".
if sys.stdout.isatty():
prompt += ('Repo %s run the script:\n'
' %s\n'
'\n'
'Do you want to allow this script to run '
'(yes/yes-never-ask-again/NO)? ') % (
self._GetMustVerb(), self._script_fullpath)
response = input(prompt).lower()
print()
# User is doing a one-time approval.
if response in ('y', 'yes'):
return True
elif response == 'yes-never-ask-again':
hooks_config.SetString(git_approval_key, new_hash)
return True
# For anything else, we'll assume no approval.
if self._abort_if_user_denies:
raise HookError('You must allow the %s hook or use --no-verify.' %
self._hook_type)
return False
def _ExecuteHook(self, **kwargs):
"""Actually execute the given hook.
This will run the hook's 'main' function in our python interpreter.
Args:
kwargs: Keyword arguments to pass to the hook. These are often specific
to the hook type. For instance, pre-upload hooks will contain
a project_list.
"""
# Keep sys.path and CWD stashed away so that we can always restore them
# upon function exit.
orig_path = os.getcwd()
orig_syspath = sys.path
try:
# Always run hooks with CWD as topdir.
os.chdir(self._topdir)
# Put the hook dir as the first item of sys.path so hooks can do
# relative imports. We want to replace the repo dir as [0] so
# hooks can't import repo files.
sys.path = [os.path.dirname(self._script_fullpath)] + sys.path[1:]
# Exec, storing global context in the context dict. We catch exceptions
# and convert to a HookError w/ just the failing traceback.
context = {}
try:
execfile(self._script_fullpath, context)
except Exception:
raise HookError('%s\nFailed to import %s hook; see traceback above.' % (
traceback.format_exc(), self._hook_type))
# Running the script should have defined a main() function.
if 'main' not in context:
raise HookError('Missing main() in: "%s"' % self._script_fullpath)
# Add 'hook_should_take_kwargs' to the arguments to be passed to main.
# We don't actually want hooks to define their main with this argument--
# it's there to remind them that their hook should always take **kwargs.
# For instance, a pre-upload hook should be defined like:
# def main(project_list, **kwargs):
#
# This allows us to later expand the API without breaking old hooks.
kwargs = kwargs.copy()
kwargs['hook_should_take_kwargs'] = True
# Call the main function in the hook. If the hook should cause the
      # build to fail, it will raise an Exception. We'll catch that and
      # convert it to a HookError w/ just the failing traceback.
try:
context['main'](**kwargs)
except Exception:
raise HookError('%s\nFailed to run main() for %s hook; see traceback '
'above.' % (
traceback.format_exc(), self._hook_type))
finally:
# Restore sys.path and CWD.
sys.path = orig_syspath
os.chdir(orig_path)
def Run(self, user_allows_all_hooks, **kwargs):
"""Run the hook.
If the hook doesn't exist (because there is no hooks project or because
this particular hook is not enabled), this is a no-op.
Args:
user_allows_all_hooks: If True, we will never prompt about running the
hook--we'll just assume it's OK to run it.
kwargs: Keyword arguments to pass to the hook. These are often specific
to the hook type. For instance, pre-upload hooks will contain
a project_list.
Raises:
HookError: If there was a problem finding the hook or the user declined
to run a required hook (from _CheckForHookApproval).
"""
# No-op if there is no hooks project or if hook is disabled.
if ((not self._hooks_project) or
(self._hook_type not in self._hooks_project.enabled_repo_hooks)):
return
# Bail with a nice error if we can't find the hook.
if not os.path.isfile(self._script_fullpath):
raise HookError('Couldn\'t find repo hook: "%s"' % self._script_fullpath)
# Make sure the user is OK with running the hook.
if (not user_allows_all_hooks) and (not self._CheckForHookApproval()):
return
# Run the hook with the same version of python we're using.
self._ExecuteHook(**kwargs)
class Project(object):
def __init__(self,
manifest,
name,
remote,
gitdir,
objdir,
worktree,
relpath,
revisionExpr,
revisionId,
rebase = True,
groups = None,
sync_c = False,
sync_s = False,
clone_depth = None,
upstream = None,
parent = None,
is_derived = False,
dest_branch = None):
"""Init a Project object.
Args:
manifest: The XmlManifest object.
name: The `name` attribute of manifest.xml's project element.
remote: RemoteSpec object specifying its remote's properties.
gitdir: Absolute path of git directory.
objdir: Absolute path of directory to store git objects.
worktree: Absolute path of git working tree.
relpath: Relative path of git working tree to repo's top directory.
revisionExpr: The `revision` attribute of manifest.xml's project element.
revisionId: git commit id for checking out.
rebase: The `rebase` attribute of manifest.xml's project element.
groups: The `groups` attribute of manifest.xml's project element.
sync_c: The `sync-c` attribute of manifest.xml's project element.
sync_s: The `sync-s` attribute of manifest.xml's project element.
upstream: The `upstream` attribute of manifest.xml's project element.
parent: The parent Project object.
is_derived: False if the project was explicitly defined in the manifest;
True if the project is a discovered submodule.
dest_branch: The branch to which to push changes for review by default.
"""
self.manifest = manifest
self.name = name
self.remote = remote
self.gitdir = gitdir.replace('\\', '/')
self.objdir = objdir.replace('\\', '/')
if worktree:
self.worktree = worktree.replace('\\', '/')
else:
self.worktree = None
self.relpath = relpath
self.revisionExpr = revisionExpr
if revisionId is None \
and revisionExpr \
and IsId(revisionExpr):
self.revisionId = revisionExpr
else:
self.revisionId = revisionId
self.rebase = rebase
self.groups = groups
self.sync_c = sync_c
self.sync_s = sync_s
self.clone_depth = clone_depth
self.upstream = upstream
self.parent = parent
self.is_derived = is_derived
self.subprojects = []
self.snapshots = {}
self.copyfiles = []
self.linkfiles = []
self.annotations = []
self.config = GitConfig.ForRepository(
gitdir = self.gitdir,
defaults = self.manifest.globalConfig)
if self.worktree:
self.work_git = self._GitGetByExec(self, bare=False, gitdir=gitdir)
else:
self.work_git = None
self.bare_git = self._GitGetByExec(self, bare=True, gitdir=gitdir)
self.bare_ref = GitRefs(gitdir)
self.bare_objdir = self._GitGetByExec(self, bare=True, gitdir=objdir)
self.dest_branch = dest_branch
# This will be filled in if a project is later identified to be the
# project containing repo hooks.
self.enabled_repo_hooks = []
@property
def Derived(self):
return self.is_derived
@property
def Exists(self):
return os.path.isdir(self.gitdir)
@property
def CurrentBranch(self):
"""Obtain the name of the currently checked out branch.
The branch name omits the 'refs/heads/' prefix.
None is returned if the project is on a detached HEAD.
"""
b = self.work_git.GetHead()
if b.startswith(R_HEADS):
return b[len(R_HEADS):]
return None
def IsRebaseInProgress(self):
w = self.worktree
g = os.path.join(w, '.git')
return os.path.exists(os.path.join(g, 'rebase-apply')) \
or os.path.exists(os.path.join(g, 'rebase-merge')) \
or os.path.exists(os.path.join(w, '.dotest'))
def IsDirty(self, consider_untracked=True):
"""Is the working directory modified in some way?
"""
self.work_git.update_index('-q',
'--unmerged',
'--ignore-missing',
'--refresh')
if self.work_git.DiffZ('diff-index', '-M', '--cached', HEAD):
return True
if self.work_git.DiffZ('diff-files'):
return True
if consider_untracked and self.work_git.LsOthers():
return True
return False
_userident_name = None
_userident_email = None
@property
def UserName(self):
"""Obtain the user's personal name.
"""
if self._userident_name is None:
self._LoadUserIdentity()
return self._userident_name
@property
def UserEmail(self):
"""Obtain the user's email address. This is very likely
to be their Gerrit login.
"""
if self._userident_email is None:
self._LoadUserIdentity()
return self._userident_email
def _LoadUserIdentity(self):
u = self.bare_git.var('GIT_COMMITTER_IDENT')
m = re.compile("^(.*) <([^>]*)> ").match(u)
if m:
self._userident_name = m.group(1)
self._userident_email = m.group(2)
else:
self._userident_name = ''
self._userident_email = ''
def GetRemote(self, name):
"""Get the configuration for a single remote.
"""
return self.config.GetRemote(name)
def GetBranch(self, name):
"""Get the configuration for a single branch.
"""
return self.config.GetBranch(name)
def GetBranches(self):
"""Get all existing local branches.
"""
current = self.CurrentBranch
all_refs = self._allrefs
heads = {}
for name, ref_id in all_refs.items():
if name.startswith(R_HEADS):
name = name[len(R_HEADS):]
b = self.GetBranch(name)
b.current = name == current
b.published = None
b.revision = ref_id
heads[name] = b
for name, ref_id in all_refs.items():
if name.startswith(R_PUB):
name = name[len(R_PUB):]
b = heads.get(name)
if b:
b.published = ref_id
return heads
def MatchesGroups(self, manifest_groups):
"""Returns true if the manifest groups specified at init should cause
this project to be synced.
Prefixing a manifest group with "-" inverts the meaning of a group.
All projects are implicitly labelled with "all".
    Labels are resolved in order. In the example case of
project_groups: "all,group1,group2"
manifest_groups: "-group1,group2"
the project will be matched.
The special manifest group "default" will match any project that
does not have the special project group "notdefault"
"""
expanded_manifest_groups = manifest_groups or ['default']
expanded_project_groups = ['all'] + (self.groups or [])
    if 'notdefault' not in expanded_project_groups:
expanded_project_groups += ['default']
matched = False
for group in expanded_manifest_groups:
if group.startswith('-') and group[1:] in expanded_project_groups:
matched = False
elif group in expanded_project_groups:
matched = True
return matched
## Status Display ##
def HasChanges(self):
"""Returns true if there are uncommitted changes.
"""
self.work_git.update_index('-q',
'--unmerged',
'--ignore-missing',
'--refresh')
if self.IsRebaseInProgress():
return True
if self.work_git.DiffZ('diff-index', '--cached', HEAD):
return True
if self.work_git.DiffZ('diff-files'):
return True
if self.work_git.LsOthers():
return True
return False
def PrintWorkTreeStatus(self, output_redir=None):
"""Prints the status of the repository to stdout.
Args:
      output_redir: If specified, redirect the output to this object.
"""
if not os.path.isdir(self.worktree):
      if output_redir is None:
output_redir = sys.stdout
print(file=output_redir)
print('project %s/' % self.relpath, file=output_redir)
print(' missing (run "repo sync")', file=output_redir)
return
self.work_git.update_index('-q',
'--unmerged',
'--ignore-missing',
'--refresh')
rb = self.IsRebaseInProgress()
di = self.work_git.DiffZ('diff-index', '-M', '--cached', HEAD)
df = self.work_git.DiffZ('diff-files')
do = self.work_git.LsOthers()
if not rb and not di and not df and not do and not self.CurrentBranch:
return 'CLEAN'
out = StatusColoring(self.config)
    if output_redir is not None:
out.redirect(output_redir)
out.project('project %-40s', self.relpath + '/')
branch = self.CurrentBranch
if branch is None:
out.nobranch('(*** NO BRANCH ***)')
else:
out.branch('branch %s', branch)
out.nl()
if rb:
out.important('prior sync failed; rebase still in progress')
out.nl()
paths = list()
paths.extend(di.keys())
paths.extend(df.keys())
paths.extend(do)
for p in sorted(set(paths)):
try:
i = di[p]
except KeyError:
i = None
try:
f = df[p]
except KeyError:
f = None
if i:
i_status = i.status.upper()
else:
i_status = '-'
if f:
f_status = f.status.lower()
else:
f_status = '-'
if i and i.src_path:
line = ' %s%s\t%s => %s (%s%%)' % (i_status, f_status,
i.src_path, p, i.level)
else:
line = ' %s%s\t%s' % (i_status, f_status, p)
if i and not f:
out.added('%s', line)
elif (i and f) or (not i and f):
out.changed('%s', line)
elif not i and not f:
out.untracked('%s', line)
else:
out.write('%s', line)
out.nl()
return 'DIRTY'
def PrintWorkTreeDiff(self, absolute_paths=False):
"""Prints the status of the repository to stdout.
"""
out = DiffColoring(self.config)
cmd = ['diff']
if out.is_on:
cmd.append('--color')
cmd.append(HEAD)
if absolute_paths:
cmd.append('--src-prefix=a/%s/' % self.relpath)
cmd.append('--dst-prefix=b/%s/' % self.relpath)
cmd.append('--')
p = GitCommand(self,
cmd,
capture_stdout = True,
capture_stderr = True)
has_diff = False
for line in p.process.stdout:
if not has_diff:
out.nl()
out.project('project %s/' % self.relpath)
out.nl()
has_diff = True
print(line[:-1])
p.Wait()
## Publish / Upload ##
def WasPublished(self, branch, all_refs=None):
"""Was the branch published (uploaded) for code review?
If so, returns the SHA-1 hash of the last published
state for the branch.
"""
key = R_PUB + branch
if all_refs is None:
try:
return self.bare_git.rev_parse(key)
except GitError:
return None
else:
try:
return all_refs[key]
except KeyError:
return None
def CleanPublishedCache(self, all_refs=None):
"""Prunes any stale published refs.
"""
if all_refs is None:
all_refs = self._allrefs
heads = set()
canrm = {}
for name, ref_id in all_refs.items():
if name.startswith(R_HEADS):
heads.add(name)
elif name.startswith(R_PUB):
canrm[name] = ref_id
for name, ref_id in canrm.items():
n = name[len(R_PUB):]
if R_HEADS + n not in heads:
self.bare_git.DeleteRef(name, ref_id)
def GetUploadableBranches(self, selected_branch=None):
"""List any branches which can be uploaded for review.
"""
heads = {}
pubed = {}
for name, ref_id in self._allrefs.items():
if name.startswith(R_HEADS):
heads[name[len(R_HEADS):]] = ref_id
elif name.startswith(R_PUB):
pubed[name[len(R_PUB):]] = ref_id
ready = []
for branch, ref_id in heads.items():
if branch in pubed and pubed[branch] == ref_id:
continue
if selected_branch and branch != selected_branch:
continue
rb = self.GetUploadableBranch(branch)
if rb:
ready.append(rb)
return ready
def GetUploadableBranch(self, branch_name):
"""Get a single uploadable branch, or None.
"""
branch = self.GetBranch(branch_name)
base = branch.LocalMerge
if branch.LocalMerge:
rb = ReviewableBranch(self, branch, base)
if rb.commits:
return rb
return None
def UploadForReview(self, branch=None,
people=([],[]),
auto_topic=False,
draft=False,
dest_branch=None):
"""Uploads the named branch for code review.
"""
if branch is None:
branch = self.CurrentBranch
if branch is None:
raise GitError('not currently on a branch')
branch = self.GetBranch(branch)
if not branch.LocalMerge:
raise GitError('branch %s does not track a remote' % branch.name)
if not branch.remote.review:
raise GitError('remote %s has no review url' % branch.remote.name)
if dest_branch is None:
dest_branch = self.dest_branch
if dest_branch is None:
dest_branch = branch.merge
if not dest_branch.startswith(R_HEADS):
dest_branch = R_HEADS + dest_branch
if not branch.remote.projectname:
branch.remote.projectname = self.name
branch.remote.Save()
url = branch.remote.ReviewUrl(self.UserEmail)
if url is None:
raise UploadError('review not configured')
cmd = ['push']
if url.startswith('ssh://'):
rp = ['gerrit receive-pack']
for e in people[0]:
rp.append('--reviewer=%s' % sq(e))
for e in people[1]:
rp.append('--cc=%s' % sq(e))
cmd.append('--receive-pack=%s' % " ".join(rp))
cmd.append(url)
if dest_branch.startswith(R_HEADS):
dest_branch = dest_branch[len(R_HEADS):]
upload_type = 'for'
if draft:
upload_type = 'drafts'
ref_spec = '%s:refs/%s/%s' % (R_HEADS + branch.name, upload_type,
dest_branch)
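    # Illustrative example: a local branch 'foo' destined for 'master' yields
    # the push ref spec 'refs/heads/foo:refs/for/master' (or
    # 'refs/heads/foo:refs/drafts/master' when uploading as a draft).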
if auto_topic:
ref_spec = ref_spec + '/' + branch.name
if not url.startswith('ssh://'):
rp = ['r=%s' % p for p in people[0]] + \
['cc=%s' % p for p in people[1]]
if rp:
ref_spec = ref_spec + '%' + ','.join(rp)
cmd.append(ref_spec)
if GitCommand(self, cmd, bare = True).Wait() != 0:
raise UploadError('Upload failed')
msg = "posted to %s for %s" % (branch.remote.review, dest_branch)
self.bare_git.UpdateRef(R_PUB + branch.name,
R_HEADS + branch.name,
message = msg)
## Sync ##
def _ExtractArchive(self, tarpath, path=None):
"""Extract the given tar on its current location
Args:
- tarpath: The path to the actual tar file
"""
try:
with tarfile.open(tarpath, 'r') as tar:
tar.extractall(path=path)
return True
except (IOError, tarfile.TarError) as e:
print("error: Cannot extract archive %s: "
"%s" % (tarpath, str(e)), file=sys.stderr)
return False
def Sync_NetworkHalf(self,
quiet=False,
is_new=None,
current_branch_only=False,
clone_bundle=True,
no_tags=False,
archive=False):
"""Perform only the network IO portion of the sync process.
Local working directory/branch state is not affected.
"""
if archive and not isinstance(self, MetaProject):
if self.remote.url.startswith(('http://', 'https://')):
print("error: %s: Cannot fetch archives from http/https "
"remotes." % self.name, file=sys.stderr)
return False
name = self.relpath.replace('\\', '/')
name = name.replace('/', '_')
tarpath = '%s.tar' % name
topdir = self.manifest.topdir
try:
self._FetchArchive(tarpath, cwd=topdir)
except GitError as e:
print('error: %s' % str(e), file=sys.stderr)
return False
# From now on, we only need absolute tarpath
tarpath = os.path.join(topdir, tarpath)
if not self._ExtractArchive(tarpath, path=topdir):
return False
try:
os.remove(tarpath)
except OSError as e:
print("warn: Cannot remove archive %s: "
"%s" % (tarpath, str(e)), file=sys.stderr)
self._CopyAndLinkFiles()
return True
if is_new is None:
is_new = not self.Exists
if is_new:
self._InitGitDir()
else:
self._UpdateHooks()
self._InitRemote()
if is_new:
alt = os.path.join(self.gitdir, 'objects/info/alternates')
try:
fd = open(alt, 'rb')
try:
alt_dir = fd.readline().rstrip()
finally:
fd.close()
except IOError:
alt_dir = None
else:
alt_dir = None
if clone_bundle \
and alt_dir is None \
and self._ApplyCloneBundle(initial=is_new, quiet=quiet):
is_new = False
if not current_branch_only:
if self.sync_c:
current_branch_only = True
elif not self.manifest._loaded:
# Manifest cannot check defaults until it syncs.
current_branch_only = False
elif self.manifest.default.sync_c:
current_branch_only = True
has_sha1 = ID_RE.match(self.revisionExpr) and self._CheckForSha1()
    if (not has_sha1  # Need to fetch since we don't already have this revision
and not self._RemoteFetch(initial=is_new, quiet=quiet, alt_dir=alt_dir,
current_branch_only=current_branch_only,
no_tags=no_tags)):
return False
if self.worktree:
self._InitMRef()
else:
self._InitMirrorHead()
try:
os.remove(os.path.join(self.gitdir, 'FETCH_HEAD'))
except OSError:
pass
return True
def PostRepoUpgrade(self):
self._InitHooks()
def _CopyAndLinkFiles(self):
for copyfile in self.copyfiles:
copyfile._Copy()
for linkfile in self.linkfiles:
linkfile._Link()
def GetCommitRevisionId(self):
"""Get revisionId of a commit.
Use this method instead of GetRevisionId to get the id of the commit rather
than the id of the current git object (for example, a tag)
"""
if not self.revisionExpr.startswith(R_TAGS):
return self.GetRevisionId(self._allrefs)
try:
return self.bare_git.rev_list(self.revisionExpr, '-1')[0]
except GitError:
raise ManifestInvalidRevisionError(
'revision %s in %s not found' % (self.revisionExpr,
self.name))
def GetRevisionId(self, all_refs=None):
if self.revisionId:
return self.revisionId
rem = self.GetRemote(self.remote.name)
rev = rem.ToLocal(self.revisionExpr)
if all_refs is not None and rev in all_refs:
return all_refs[rev]
try:
return self.bare_git.rev_parse('--verify', '%s^0' % rev)
except GitError:
raise ManifestInvalidRevisionError(
'revision %s in %s not found' % (self.revisionExpr,
self.name))
def Sync_LocalHalf(self, syncbuf):
"""Perform only the local IO portion of the sync process.
Network access is not required.
"""
self._InitWorkTree()
all_refs = self.bare_ref.all
self.CleanPublishedCache(all_refs)
revid = self.GetRevisionId(all_refs)
def _doff():
self._FastForward(revid)
self._CopyAndLinkFiles()
head = self.work_git.GetHead()
if head.startswith(R_HEADS):
branch = head[len(R_HEADS):]
try:
head = all_refs[head]
except KeyError:
head = None
else:
branch = None
if branch is None or syncbuf.detach_head:
# Currently on a detached HEAD. The user is assumed to
# not have any local modifications worth worrying about.
#
if self.IsRebaseInProgress():
syncbuf.fail(self, _PriorSyncFailedError())
return
if head == revid:
# No changes; don't do anything further.
# Except if the head needs to be detached
#
if not syncbuf.detach_head:
return
else:
lost = self._revlist(not_rev(revid), HEAD)
if lost:
syncbuf.info(self, "discarding %d commits", len(lost))
try:
self._Checkout(revid, quiet=True)
except GitError as e:
syncbuf.fail(self, e)
return
self._CopyAndLinkFiles()
return
if head == revid:
# No changes; don't do anything further.
#
return
branch = self.GetBranch(branch)
if not branch.LocalMerge:
# The current branch has no tracking configuration.
# Jump off it to a detached HEAD.
#
syncbuf.info(self,
"leaving %s; does not track upstream",
branch.name)
try:
self._Checkout(revid, quiet=True)
except GitError as e:
syncbuf.fail(self, e)
return
self._CopyAndLinkFiles()
return
upstream_gain = self._revlist(not_rev(HEAD), revid)
pub = self.WasPublished(branch.name, all_refs)
if pub:
not_merged = self._revlist(not_rev(revid), pub)
if not_merged:
if upstream_gain:
# The user has published this branch and some of those
# commits are not yet merged upstream. We do not want
# to rewrite the published commits so we punt.
#
syncbuf.fail(self,
"branch %s is published (but not merged) and is now %d commits behind"
% (branch.name, len(upstream_gain)))
return
elif pub == head:
# All published commits are merged, and thus we are a
# strict subset. We can fast-forward safely.
#
syncbuf.later1(self, _doff)
return
# Examine the local commits not in the remote. Find the
# last one attributed to this user, if any.
#
local_changes = self._revlist(not_rev(revid), HEAD, format='%H %ce')
last_mine = None
cnt_mine = 0
for commit in local_changes:
commit_id, committer_email = commit.decode('utf-8').split(' ', 1)
if committer_email == self.UserEmail:
last_mine = commit_id
cnt_mine += 1
if not upstream_gain and cnt_mine == len(local_changes):
return
if self.IsDirty(consider_untracked=False):
syncbuf.fail(self, _DirtyError())
return
# If the upstream switched on us, warn the user.
#
if branch.merge != self.revisionExpr:
if branch.merge and self.revisionExpr:
syncbuf.info(self,
'manifest switched %s...%s',
branch.merge,
self.revisionExpr)
elif branch.merge:
syncbuf.info(self,
'manifest no longer tracks %s',
branch.merge)
if cnt_mine < len(local_changes):
# Upstream rebased. Not everything in HEAD
# was created by this user.
#
syncbuf.info(self,
"discarding %d commits removed from upstream",
len(local_changes) - cnt_mine)
branch.remote = self.GetRemote(self.remote.name)
if not ID_RE.match(self.revisionExpr):
# in case of manifest sync the revisionExpr might be a SHA1
branch.merge = self.revisionExpr
branch.Save()
if cnt_mine > 0 and self.rebase:
def _dorebase():
self._Rebase(upstream = '%s^1' % last_mine, onto = revid)
self._CopyAndLinkFiles()
syncbuf.later2(self, _dorebase)
elif local_changes:
try:
self._ResetHard(revid)
self._CopyAndLinkFiles()
except GitError as e:
syncbuf.fail(self, e)
return
else:
syncbuf.later1(self, _doff)
def AddCopyFile(self, src, dest, absdest):
# dest should already be an absolute path, but src is project relative
# make src an absolute path
abssrc = os.path.join(self.worktree, src)
self.copyfiles.append(_CopyFile(src, dest, abssrc, absdest))
def AddLinkFile(self, src, dest, absdest):
# dest should already be an absolute path, but src is project relative
# make src an absolute path
abssrc = os.path.join(self.worktree, src)
self.linkfiles.append(_LinkFile(src, dest, abssrc, absdest))
def AddAnnotation(self, name, value, keep):
self.annotations.append(_Annotation(name, value, keep))
def DownloadPatchSet(self, change_id, patch_id):
"""Download a single patch set of a single change to FETCH_HEAD.
"""
remote = self.GetRemote(self.remote.name)
cmd = ['fetch', remote.name]
cmd.append('refs/changes/%2.2d/%d/%d' \
% (change_id % 100, change_id, patch_id))
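    # Illustrative example: change_id=12345 with patch_id=2 expands to the
    # Gerrit ref 'refs/changes/45/12345/2' (the last two digits of the change
    # number form the leading shard directory).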
if GitCommand(self, cmd, bare=True).Wait() != 0:
return None
return DownloadedChange(self,
self.GetRevisionId(),
change_id,
patch_id,
self.bare_git.rev_parse('FETCH_HEAD'))
## Branch Management ##
def StartBranch(self, name):
"""Create a new branch off the manifest's revision.
"""
head = self.work_git.GetHead()
if head == (R_HEADS + name):
return True
all_refs = self.bare_ref.all
if (R_HEADS + name) in all_refs:
return GitCommand(self,
['checkout', name, '--'],
capture_stdout = True,
capture_stderr = True).Wait() == 0
branch = self.GetBranch(name)
branch.remote = self.GetRemote(self.remote.name)
branch.merge = self.revisionExpr
revid = self.GetRevisionId(all_refs)
if head.startswith(R_HEADS):
try:
head = all_refs[head]
except KeyError:
head = None
if revid and head and revid == head:
ref = os.path.join(self.gitdir, R_HEADS + name)
try:
os.makedirs(os.path.dirname(ref))
except OSError:
pass
_lwrite(ref, '%s\n' % revid)
_lwrite(os.path.join(self.worktree, '.git', HEAD),
'ref: %s%s\n' % (R_HEADS, name))
branch.Save()
return True
if GitCommand(self,
['checkout', '-b', branch.name, revid],
capture_stdout = True,
capture_stderr = True).Wait() == 0:
branch.Save()
return True
return False
def CheckoutBranch(self, name):
"""Checkout a local topic branch.
Args:
name: The name of the branch to checkout.
Returns:
True if the checkout succeeded; False if it didn't; None if the branch
didn't exist.
"""
rev = R_HEADS + name
head = self.work_git.GetHead()
if head == rev:
# Already on the branch
#
return True
all_refs = self.bare_ref.all
try:
revid = all_refs[rev]
except KeyError:
# Branch does not exist in this project
#
return None
if head.startswith(R_HEADS):
try:
head = all_refs[head]
except KeyError:
head = None
if head == revid:
# Same revision; just update HEAD to point to the new
# target branch, but otherwise take no other action.
#
_lwrite(os.path.join(self.worktree, '.git', HEAD),
'ref: %s%s\n' % (R_HEADS, name))
return True
return GitCommand(self,
['checkout', name, '--'],
capture_stdout = True,
capture_stderr = True).Wait() == 0
def AbandonBranch(self, name):
"""Destroy a local topic branch.
Args:
name: The name of the branch to abandon.
Returns:
True if the abandon succeeded; False if it didn't; None if the branch
didn't exist.
"""
rev = R_HEADS + name
all_refs = self.bare_ref.all
if rev not in all_refs:
# Doesn't exist
return None
head = self.work_git.GetHead()
if head == rev:
# We can't destroy the branch while we are sitting
# on it. Switch to a detached HEAD.
#
head = all_refs[head]
revid = self.GetRevisionId(all_refs)
if head == revid:
_lwrite(os.path.join(self.worktree, '.git', HEAD),
'%s\n' % revid)
else:
self._Checkout(revid, quiet=True)
return GitCommand(self,
['branch', '-D', name],
capture_stdout = True,
capture_stderr = True).Wait() == 0
def PruneHeads(self):
"""Prune any topic branches already merged into upstream.
"""
cb = self.CurrentBranch
kill = []
left = self._allrefs
for name in left.keys():
if name.startswith(R_HEADS):
name = name[len(R_HEADS):]
if cb is None or name != cb:
kill.append(name)
rev = self.GetRevisionId(left)
if cb is not None \
and not self._revlist(HEAD + '...' + rev) \
and not self.IsDirty(consider_untracked = False):
self.work_git.DetachHead(HEAD)
kill.append(cb)
if kill:
old = self.bare_git.GetHead()
if old is None:
old = 'refs/heads/please_never_use_this_as_a_branch_name'
try:
self.bare_git.DetachHead(rev)
b = ['branch', '-d']
b.extend(kill)
b = GitCommand(self, b, bare=True,
capture_stdout=True,
capture_stderr=True)
b.Wait()
finally:
self.bare_git.SetHead(old)
left = self._allrefs
for branch in kill:
if (R_HEADS + branch) not in left:
self.CleanPublishedCache()
break
if cb and cb not in kill:
kill.append(cb)
kill.sort()
kept = []
for branch in kill:
if (R_HEADS + branch) in left:
branch = self.GetBranch(branch)
base = branch.LocalMerge
if not base:
base = rev
kept.append(ReviewableBranch(self, branch, base))
return kept
## Submodule Management ##
def GetRegisteredSubprojects(self):
result = []
def rec(subprojects):
if not subprojects:
return
result.extend(subprojects)
for p in subprojects:
rec(p.subprojects)
rec(self.subprojects)
return result
def _GetSubmodules(self):
# Unfortunately we cannot call `git submodule status --recursive` here
# because the working tree might not exist yet, and it cannot be used
# without a working tree in its current implementation.
def get_submodules(gitdir, rev):
# Parse .gitmodules for submodule sub_paths and sub_urls
sub_paths, sub_urls = parse_gitmodules(gitdir, rev)
if not sub_paths:
return []
      # Run `git ls-tree` to read the SHAs of the submodule objects, which
      # happen to be the revisions of the submodule repositories
sub_revs = git_ls_tree(gitdir, rev, sub_paths)
submodules = []
for sub_path, sub_url in zip(sub_paths, sub_urls):
try:
sub_rev = sub_revs[sub_path]
except KeyError:
          # Ignore submodules that are not present in this revision
continue
submodules.append((sub_rev, sub_path, sub_url))
return submodules
re_path = re.compile(r'^submodule\.([^.]+)\.path=(.*)$')
re_url = re.compile(r'^submodule\.([^.]+)\.url=(.*)$')
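    # Illustrative sketch of the input these patterns match: `git config
    # --file <gitmodules> --list` emits flattened lines such as
    #   submodule.libfoo.path=third_party/libfoo
    #   submodule.libfoo.url=https://example.com/libfoo.git
    # (hypothetical names/URL) which parse_gitmodules below splits into
    # parallel path and url lists keyed by submodule name.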
def parse_gitmodules(gitdir, rev):
cmd = ['cat-file', 'blob', '%s:.gitmodules' % rev]
try:
p = GitCommand(None, cmd, capture_stdout = True, capture_stderr = True,
bare = True, gitdir = gitdir)
except GitError:
return [], []
if p.Wait() != 0:
return [], []
gitmodules_lines = []
fd, temp_gitmodules_path = tempfile.mkstemp()
try:
os.write(fd, p.stdout)
os.close(fd)
cmd = ['config', '--file', temp_gitmodules_path, '--list']
p = GitCommand(None, cmd, capture_stdout = True, capture_stderr = True,
bare = True, gitdir = gitdir)
if p.Wait() != 0:
return [], []
gitmodules_lines = p.stdout.split('\n')
except GitError:
return [], []
finally:
os.remove(temp_gitmodules_path)
names = set()
paths = {}
urls = {}
for line in gitmodules_lines:
if not line:
continue
m = re_path.match(line)
if m:
names.add(m.group(1))
paths[m.group(1)] = m.group(2)
continue
m = re_url.match(line)
if m:
names.add(m.group(1))
urls[m.group(1)] = m.group(2)
continue
names = sorted(names)
return ([paths.get(name, '') for name in names],
[urls.get(name, '') for name in names])
def git_ls_tree(gitdir, rev, paths):
cmd = ['ls-tree', rev, '--']
cmd.extend(paths)
try:
p = GitCommand(None, cmd, capture_stdout = True, capture_stderr = True,
bare = True, gitdir = gitdir)
except GitError:
return []
if p.Wait() != 0:
return []
objects = {}
for line in p.stdout.split('\n'):
if not line.strip():
continue
object_rev, object_path = line.split()[2:4]
objects[object_path] = object_rev
return objects
try:
rev = self.GetRevisionId()
except GitError:
return []
return get_submodules(self.gitdir, rev)
def GetDerivedSubprojects(self):
result = []
if not self.Exists:
# If git repo does not exist yet, querying its submodules will
# mess up its states; so return here.
return result
for rev, path, url in self._GetSubmodules():
name = self.manifest.GetSubprojectName(self, path)
relpath, worktree, gitdir, objdir = \
self.manifest.GetSubprojectPaths(self, name, path)
project = self.manifest.paths.get(relpath)
if project:
result.extend(project.GetDerivedSubprojects())
continue
remote = RemoteSpec(self.remote.name,
url = url,
review = self.remote.review)
subproject = Project(manifest = self.manifest,
name = name,
remote = remote,
gitdir = gitdir,
objdir = objdir,
worktree = worktree,
relpath = relpath,
revisionExpr = self.revisionExpr,
revisionId = rev,
rebase = self.rebase,
groups = self.groups,
sync_c = self.sync_c,
sync_s = self.sync_s,
parent = self,
is_derived = True)
result.append(subproject)
result.extend(subproject.GetDerivedSubprojects())
return result
## Direct Git Commands ##
def _CheckForSha1(self):
try:
      # If the revision (sha or tag) is not present, the following call
      # throws an error.
self.bare_git.rev_parse('--verify', '%s^0' % self.revisionExpr)
return True
except GitError:
# There is no such persistent revision. We have to fetch it.
return False
def _FetchArchive(self, tarpath, cwd=None):
cmd = ['archive', '-v', '-o', tarpath]
cmd.append('--remote=%s' % self.remote.url)
cmd.append('--prefix=%s/' % self.relpath)
cmd.append(self.revisionExpr)
command = GitCommand(self, cmd, cwd=cwd,
capture_stdout=True,
capture_stderr=True)
if command.Wait() != 0:
raise GitError('git archive %s: %s' % (self.name, command.stderr))
def _RemoteFetch(self, name=None,
current_branch_only=False,
initial=False,
quiet=False,
alt_dir=None,
no_tags=False):
is_sha1 = False
tag_name = None
depth = None
# The depth should not be used when fetching to a mirror because
# it will result in a shallow repository that cannot be cloned or
# fetched from.
if not self.manifest.IsMirror:
if self.clone_depth:
depth = self.clone_depth
else:
depth = self.manifest.manifestProject.config.GetString('repo.depth')
if depth:
current_branch_only = True
if current_branch_only:
if ID_RE.match(self.revisionExpr) is not None:
is_sha1 = True
elif self.revisionExpr.startswith(R_TAGS):
# this is a tag and its sha1 value should never change
tag_name = self.revisionExpr[len(R_TAGS):]
if is_sha1 or tag_name is not None:
if self._CheckForSha1():
return True
if is_sha1 and (not self.upstream or ID_RE.match(self.upstream)):
current_branch_only = False
if not name:
name = self.remote.name
ssh_proxy = False
remote = self.GetRemote(name)
if remote.PreConnectFetch():
ssh_proxy = True
if initial:
if alt_dir and 'objects' == os.path.basename(alt_dir):
ref_dir = os.path.dirname(alt_dir)
packed_refs = os.path.join(self.gitdir, 'packed-refs')
remote = self.GetRemote(name)
all_refs = self.bare_ref.all
ids = set(all_refs.values())
tmp = set()
for r, ref_id in GitRefs(ref_dir).all.items():
if r not in all_refs:
if r.startswith(R_TAGS) or remote.WritesTo(r):
all_refs[r] = ref_id
ids.add(ref_id)
continue
if ref_id in ids:
continue
r = 'refs/_alt/%s' % ref_id
all_refs[r] = ref_id
ids.add(ref_id)
tmp.add(r)
tmp_packed = ''
old_packed = ''
for r in sorted(all_refs):
line = '%s %s\n' % (all_refs[r], r)
tmp_packed += line
if r not in tmp:
old_packed += line
_lwrite(packed_refs, tmp_packed)
else:
alt_dir = None
cmd = ['fetch']
# The --depth option only affects the initial fetch; after that we'll do
# full fetches of changes.
if depth and initial:
cmd.append('--depth=%s' % depth)
if quiet:
cmd.append('--quiet')
if not self.worktree:
cmd.append('--update-head-ok')
cmd.append(name)
# If using depth then we should not get all the tags since they may
# be outside of the depth.
if no_tags or depth:
cmd.append('--no-tags')
else:
cmd.append('--tags')
if not current_branch_only:
# Fetch whole repo
cmd.append(str((u'+refs/heads/*:') + remote.ToLocal('refs/heads/*')))
elif tag_name is not None:
cmd.append('tag')
cmd.append(tag_name)
else:
branch = self.revisionExpr
if is_sha1:
branch = self.upstream
if branch.startswith(R_HEADS):
branch = branch[len(R_HEADS):]
cmd.append(str((u'+refs/heads/%s:' % branch) + remote.ToLocal('refs/heads/%s' % branch)))
ok = False
for _i in range(2):
ret = GitCommand(self, cmd, bare=True, ssh_proxy=ssh_proxy).Wait()
if ret == 0:
ok = True
break
elif current_branch_only and is_sha1 and ret == 128:
# Exit code 128 means "couldn't find the ref you asked for"; if we're in sha1
# mode, we just tried sync'ing from the upstream field; it doesn't exist, thus
# abort the optimization attempt and do a full sync.
break
time.sleep(random.randint(30, 45))
if initial:
# Ensure that some refs exist. Otherwise, we probably aren't looking
# at a real git repository and may have a bad url.
if not self.bare_ref.all:
ok = False
if alt_dir:
if old_packed != '':
_lwrite(packed_refs, old_packed)
else:
os.remove(packed_refs)
self.bare_git.pack_refs('--all', '--prune')
if is_sha1 and current_branch_only and self.upstream:
# We just synced the upstream given branch; verify we
# got what we wanted, else trigger a second run of all
# refs.
if not self._CheckForSha1():
return self._RemoteFetch(name=name, current_branch_only=False,
initial=False, quiet=quiet, alt_dir=alt_dir)
return ok
def _ApplyCloneBundle(self, initial=False, quiet=False):
if initial and (self.manifest.manifestProject.config.GetString('repo.depth') or self.clone_depth):
return False
remote = self.GetRemote(self.remote.name)
bundle_url = remote.url + '/clone.bundle'
bundle_url = GitConfig.ForUser().UrlInsteadOf(bundle_url)
if GetSchemeFromUrl(bundle_url) not in (
'http', 'https', 'persistent-http', 'persistent-https'):
return False
bundle_dst = os.path.join(self.gitdir, 'clone.bundle')
bundle_tmp = os.path.join(self.gitdir, 'clone.bundle.tmp')
exist_dst = os.path.exists(bundle_dst)
exist_tmp = os.path.exists(bundle_tmp)
if not initial and not exist_dst and not exist_tmp:
return False
if not exist_dst:
exist_dst = self._FetchBundle(bundle_url, bundle_tmp, bundle_dst, quiet)
if not exist_dst:
return False
cmd = ['fetch']
if quiet:
cmd.append('--quiet')
if not self.worktree:
cmd.append('--update-head-ok')
cmd.append(bundle_dst)
for f in remote.fetch:
cmd.append(str(f))
cmd.append('refs/tags/*:refs/tags/*')
ok = GitCommand(self, cmd, bare=True).Wait() == 0
if os.path.exists(bundle_dst):
os.remove(bundle_dst)
if os.path.exists(bundle_tmp):
os.remove(bundle_tmp)
return ok
def _FetchBundle(self, srcUrl, tmpPath, dstPath, quiet):
if os.path.exists(dstPath):
os.remove(dstPath)
cmd = ['curl', '--fail', '--output', tmpPath, '--netrc', '--location']
if quiet:
cmd += ['--silent']
if os.path.exists(tmpPath):
size = os.stat(tmpPath).st_size
if size >= 1024:
cmd += ['--continue-at', '%d' % (size,)]
else:
os.remove(tmpPath)
if 'http_proxy' in os.environ and 'darwin' == sys.platform:
cmd += ['--proxy', os.environ['http_proxy']]
cookiefile = self._GetBundleCookieFile(srcUrl)
if cookiefile:
cmd += ['--cookie', cookiefile]
if srcUrl.startswith('persistent-'):
srcUrl = srcUrl[len('persistent-'):]
cmd += [srcUrl]
if IsTrace():
Trace('%s', ' '.join(cmd))
try:
proc = subprocess.Popen(cmd)
except OSError:
return False
curlret = proc.wait()
if curlret == 22:
# From curl man page:
# 22: HTTP page not retrieved. The requested url was not found or
# returned another error with the HTTP error code being 400 or above.
# This return code only appears if -f, --fail is used.
if not quiet:
print("Server does not provide clone.bundle; ignoring.",
file=sys.stderr)
return False
if os.path.exists(tmpPath):
if curlret == 0 and self._IsValidBundle(tmpPath):
os.rename(tmpPath, dstPath)
return True
else:
os.remove(tmpPath)
return False
else:
return False
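# --- Illustrative sketch (not part of the original project.py) ---------------
# _FetchBundle() above resumes a partial clone.bundle download by handing curl
# the size of the existing temp file via --continue-at.  A minimal standalone
# version of the same idea (function name and paths are assumptions):
#
#   import os, subprocess
#
#   def resume_download(url, tmp_path):
#       cmd = ['curl', '--fail', '--location', '--output', tmp_path]
#       if os.path.exists(tmp_path):
#           size = os.stat(tmp_path).st_size
#           if size >= 1024:
#               cmd += ['--continue-at', '%d' % size]   # keep what we have
#           else:
#               os.remove(tmp_path)                     # too small, start over
#       return subprocess.call(cmd + [url]) == 0
# ------------------------------------------------------------------------------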
def _IsValidBundle(self, path):
try:
with open(path) as f:
if f.read(16) == '# v2 git bundle\n':
return True
else:
print("Invalid clone.bundle file; ignoring.", file=sys.stderr)
return False
except OSError:
return False
def _GetBundleCookieFile(self, url):
if url.startswith('persistent-'):
try:
p = subprocess.Popen(
['git-remote-persistent-https', '-print_config', url],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.stdin.close() # Tell subprocess it's ok to close.
prefix = 'http.cookiefile='
cookiefile = None
for line in p.stdout:
line = line.strip()
if line.startswith(prefix):
cookiefile = line[len(prefix):]
break
if p.wait():
err_msg = p.stderr.read()
if ' -print_config' in err_msg:
pass # Persistent proxy doesn't support -print_config.
else:
print(err_msg, file=sys.stderr)
if cookiefile:
return cookiefile
except OSError as e:
if e.errno == errno.ENOENT:
pass # No persistent proxy.
raise
return GitConfig.ForUser().GetString('http.cookiefile')
def _Checkout(self, rev, quiet=False):
cmd = ['checkout']
if quiet:
cmd.append('-q')
cmd.append(rev)
cmd.append('--')
if GitCommand(self, cmd).Wait() != 0:
if self._allrefs:
raise GitError('%s checkout %s ' % (self.name, rev))
def _CherryPick(self, rev, quiet=False):
cmd = ['cherry-pick']
cmd.append(rev)
cmd.append('--')
if GitCommand(self, cmd).Wait() != 0:
if self._allrefs:
raise GitError('%s cherry-pick %s ' % (self.name, rev))
def _Revert(self, rev, quiet=False):
cmd = ['revert']
cmd.append('--no-edit')
cmd.append(rev)
cmd.append('--')
if GitCommand(self, cmd).Wait() != 0:
if self._allrefs:
raise GitError('%s revert %s ' % (self.name, rev))
def _ResetHard(self, rev, quiet=True):
cmd = ['reset', '--hard']
if quiet:
cmd.append('-q')
cmd.append(rev)
if GitCommand(self, cmd).Wait() != 0:
raise GitError('%s reset --hard %s ' % (self.name, rev))
def _Rebase(self, upstream, onto = None):
cmd = ['rebase']
if onto is not None:
cmd.extend(['--onto', onto])
cmd.append(upstream)
if GitCommand(self, cmd).Wait() != 0:
raise GitError('%s rebase %s ' % (self.name, upstream))
def _FastForward(self, head, ffonly=False):
cmd = ['merge', head]
if ffonly:
cmd.append("--ff-only")
if GitCommand(self, cmd).Wait() != 0:
raise GitError('%s merge %s ' % (self.name, head))
def _InitGitDir(self, mirror_git=None):
if not os.path.exists(self.gitdir):
# Initialize the bare repository, which contains all of the objects.
if not os.path.exists(self.objdir):
os.makedirs(self.objdir)
self.bare_objdir.init()
# If we have a separate directory to hold refs, initialize it as well.
if self.objdir != self.gitdir:
os.makedirs(self.gitdir)
self._ReferenceGitDir(self.objdir, self.gitdir, share_refs=False,
copy_all=True)
mp = self.manifest.manifestProject
ref_dir = mp.config.GetString('repo.reference') or ''
if ref_dir or mirror_git:
if not mirror_git:
mirror_git = os.path.join(ref_dir, self.name + '.git')
repo_git = os.path.join(ref_dir, '.repo', 'projects',
self.relpath + '.git')
if os.path.exists(mirror_git):
ref_dir = mirror_git
elif os.path.exists(repo_git):
ref_dir = repo_git
else:
ref_dir = None
if ref_dir:
_lwrite(os.path.join(self.gitdir, 'objects/info/alternates'),
os.path.join(ref_dir, 'objects') + '\n')
self._UpdateHooks()
m = self.manifest.manifestProject.config
for key in ['user.name', 'user.email']:
if m.Has(key, include_defaults = False):
self.config.SetString(key, m.GetString(key))
if self.manifest.IsMirror:
self.config.SetString('core.bare', 'true')
else:
self.config.SetString('core.bare', None)
def _UpdateHooks(self):
if os.path.exists(self.gitdir):
# Always recreate hooks since they may have changed
# since the last update.
hooks = self._gitdir_path('hooks')
try:
to_rm = os.listdir(hooks)
except OSError:
to_rm = []
for old_hook in to_rm:
os.remove(os.path.join(hooks, old_hook))
self._InitHooks()
def _InitHooks(self):
hooks = os.path.realpath(self._gitdir_path('hooks'))
if not os.path.exists(hooks):
os.makedirs(hooks)
for stock_hook in _ProjectHooks():
name = os.path.basename(stock_hook)
if name in ('commit-msg',) and not self.remote.review \
and not self is self.manifest.manifestProject:
# Don't install a Gerrit Code Review hook if this
# project does not appear to use it for reviews.
#
# The manifest project is one such project, but since it is
# still managed through Gerrit, it is excluded from this skip.
continue
dst = os.path.join(hooks, name)
if os.path.islink(dst):
continue
if os.path.exists(dst):
if filecmp.cmp(stock_hook, dst, shallow=False):
os.remove(dst)
else:
_error("%s: Not replacing %s hook", self.relpath, name)
continue
try:
os.symlink(os.path.relpath(stock_hook, os.path.dirname(dst)), dst)
except OSError as e:
if e.errno == errno.EPERM:
raise GitError('filesystem must support symlinks')
else:
raise
def _InitRemote(self):
if self.remote.url:
remote = self.GetRemote(self.remote.name)
remote.url = self.remote.url
remote.review = self.remote.review
remote.projectname = self.name
if self.worktree:
remote.ResetFetch(mirror=False)
else:
remote.ResetFetch(mirror=True)
remote.Save()
def _InitMRef(self):
if self.manifest.branch:
self._InitAnyMRef(R_M + self.manifest.branch)
def _InitMirrorHead(self):
self._InitAnyMRef(HEAD)
def _InitAnyMRef(self, ref):
cur = self.bare_ref.symref(ref)
if self.revisionId:
if cur != '' or self.bare_ref.get(ref) != self.revisionId:
msg = 'manifest set to %s' % self.revisionId
dst = self.revisionId + '^0'
self.bare_git.UpdateRef(ref, dst, message = msg, detach = True)
else:
remote = self.GetRemote(self.remote.name)
dst = remote.ToLocal(self.revisionExpr)
if cur != dst:
msg = 'manifest set to %s' % self.revisionExpr
self.bare_git.symbolic_ref('-m', msg, ref, dst)
def _ReferenceGitDir(self, gitdir, dotgit, share_refs, copy_all):
"""Update |dotgit| to reference |gitdir|, using symlinks where possible.
Args:
gitdir: The bare git repository. Must already be initialized.
dotgit: The repository you would like to initialize.
share_refs: If true, |dotgit| will store its refs under |gitdir|.
Only one work tree can store refs under a given |gitdir|.
copy_all: If true, copy all remaining files from |gitdir| -> |dotgit|.
This saves you the effort of initializing |dotgit| yourself.
"""
# These objects can be shared between several working trees.
symlink_files = ['description', 'info']
symlink_dirs = ['hooks', 'objects', 'rr-cache', 'svn']
if share_refs:
# These objects can only be used by a single working tree.
symlink_files += ['config', 'packed-refs', 'shallow']
symlink_dirs += ['logs', 'refs']
to_symlink = symlink_files + symlink_dirs
to_copy = []
if copy_all:
to_copy = os.listdir(gitdir)
for name in set(to_copy).union(to_symlink):
try:
src = os.path.realpath(os.path.join(gitdir, name))
dst = os.path.realpath(os.path.join(dotgit, name))
if os.path.lexists(dst) and not os.path.islink(dst):
raise GitError('cannot overwrite a local work tree')
# If the source dir doesn't exist, create an empty dir.
if name in symlink_dirs and not os.path.lexists(src):
os.makedirs(src)
if name in to_symlink:
os.symlink(os.path.relpath(src, os.path.dirname(dst)), dst)
elif copy_all and not os.path.islink(dst):
if os.path.isdir(src):
shutil.copytree(src, dst)
elif os.path.isfile(src):
shutil.copy(src, dst)
except OSError as e:
if e.errno == errno.EPERM:
raise GitError('filesystem must support symlinks')
else:
raise
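# --- Illustrative sketch (not part of the original project.py) ---------------
# _ReferenceGitDir() shares state between a work tree's .git and the bare
# gitdir using *relative* symlinks, so the pair can be relocated together.
# The essential os.path.relpath + os.symlink pattern, in isolation:
#
#   import os
#
#   def link_relative(src, dst):
#       # dst becomes a symlink to src, expressed relative to dst's directory.
#       os.symlink(os.path.relpath(src, os.path.dirname(dst)), dst)
# ------------------------------------------------------------------------------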
def _InitWorkTree(self):
dotgit = os.path.join(self.worktree, '.git')
if not os.path.exists(dotgit):
os.makedirs(dotgit)
self._ReferenceGitDir(self.gitdir, dotgit, share_refs=True,
copy_all=False)
_lwrite(os.path.join(dotgit, HEAD), '%s\n' % self.GetRevisionId())
cmd = ['read-tree', '--reset', '-u']
cmd.append('-v')
cmd.append(HEAD)
if GitCommand(self, cmd).Wait() != 0:
raise GitError("cannot initialize work tree")
self._CopyAndLinkFiles()
def _gitdir_path(self, path):
return os.path.realpath(os.path.join(self.gitdir, path))
def _revlist(self, *args, **kw):
a = []
a.extend(args)
a.append('--')
return self.work_git.rev_list(*a, **kw)
@property
def _allrefs(self):
return self.bare_ref.all
def _getLogs(self, rev1, rev2, oneline=False, color=True):
"""Get logs between two revisions of this project."""
comp = '..'
if rev1:
revs = [rev1]
if rev2:
revs.extend([comp, rev2])
cmd = ['log', ''.join(revs)]
out = DiffColoring(self.config)
if out.is_on and color:
cmd.append('--color')
if oneline:
cmd.append('--oneline')
try:
log = GitCommand(self, cmd, capture_stdout=True, capture_stderr=True)
if log.Wait() == 0:
return log.stdout
except GitError:
# worktree may not exist if groups changed for example. In that case,
# try in gitdir instead.
if not os.path.exists(self.worktree):
return self.bare_git.log(*cmd[1:])
else:
raise
return None
def getAddedAndRemovedLogs(self, toProject, oneline=False, color=True):
"""Get the list of logs from this revision to given revisionId"""
logs = {}
selfId = self.GetRevisionId(self._allrefs)
toId = toProject.GetRevisionId(toProject._allrefs)
logs['added'] = self._getLogs(selfId, toId, oneline=oneline, color=color)
logs['removed'] = self._getLogs(toId, selfId, oneline=oneline, color=color)
return logs
class _GitGetByExec(object):
def __init__(self, project, bare, gitdir):
self._project = project
self._bare = bare
self._gitdir = gitdir
def LsOthers(self):
p = GitCommand(self._project,
['ls-files',
'-z',
'--others',
'--exclude-standard'],
bare = False,
gitdir=self._gitdir,
capture_stdout = True,
capture_stderr = True)
if p.Wait() == 0:
out = p.stdout
if out:
return out[:-1].split('\0') # pylint: disable=W1401
# Backslash is not anomalous
return []
def DiffZ(self, name, *args):
cmd = [name]
cmd.append('-z')
cmd.extend(args)
p = GitCommand(self._project,
cmd,
gitdir=self._gitdir,
bare = False,
capture_stdout = True,
capture_stderr = True)
try:
out = p.process.stdout.read()
r = {}
if out:
out = iter(out[:-1].split('\0')) # pylint: disable=W1401
while out:
try:
info = out.next()
path = out.next()
except StopIteration:
break
class _Info(object):
def __init__(self, path, omode, nmode, oid, nid, state):
self.path = path
self.src_path = None
self.old_mode = omode
self.new_mode = nmode
self.old_id = oid
self.new_id = nid
if len(state) == 1:
self.status = state
self.level = None
else:
self.status = state[:1]
self.level = state[1:]
while self.level.startswith('0'):
self.level = self.level[1:]
info = info[1:].split(' ')
info = _Info(path, *info)
if info.status in ('R', 'C'):
info.src_path = info.path
info.path = out.next()
r[info.path] = info
return r
finally:
p.Wait()
def GetHead(self):
if self._bare:
path = os.path.join(self._project.gitdir, HEAD)
else:
path = os.path.join(self._project.worktree, '.git', HEAD)
try:
fd = open(path, 'rb')
except IOError as e:
raise NoManifestException(path, str(e))
try:
line = fd.read()
finally:
fd.close()
try:
line = line.decode()
except AttributeError:
pass
if line.startswith('ref: '):
return line[5:-1]
return line[:-1]
def SetHead(self, ref, message=None):
cmdv = []
if message is not None:
cmdv.extend(['-m', message])
cmdv.append(HEAD)
cmdv.append(ref)
self.symbolic_ref(*cmdv)
def DetachHead(self, new, message=None):
cmdv = ['--no-deref']
if message is not None:
cmdv.extend(['-m', message])
cmdv.append(HEAD)
cmdv.append(new)
self.update_ref(*cmdv)
def UpdateRef(self, name, new, old=None,
message=None,
detach=False):
cmdv = []
if message is not None:
cmdv.extend(['-m', message])
if detach:
cmdv.append('--no-deref')
cmdv.append(name)
cmdv.append(new)
if old is not None:
cmdv.append(old)
self.update_ref(*cmdv)
def DeleteRef(self, name, old=None):
if not old:
old = self.rev_parse(name)
self.update_ref('-d', name, old)
self._project.bare_ref.deleted(name)
def rev_list(self, *args, **kw):
if 'format' in kw:
cmdv = ['log', '--pretty=format:%s' % kw['format']]
else:
cmdv = ['rev-list']
cmdv.extend(args)
p = GitCommand(self._project,
cmdv,
bare = self._bare,
gitdir=self._gitdir,
capture_stdout = True,
capture_stderr = True)
r = []
for line in p.process.stdout:
if line[-1] == '\n':
line = line[:-1]
r.append(line)
if p.Wait() != 0:
raise GitError('%s rev-list %s: %s' % (
self._project.name,
str(args),
p.stderr))
return r
def __getattr__(self, name):
"""Allow arbitrary git commands using pythonic syntax.
This allows you to do things like:
git_obj.rev_parse('HEAD')
Since we don't have a 'rev_parse' method defined, the __getattr__ will
run. We'll replace the '_' with a '-' and try to run a git command.
Any other positional arguments will be passed to the git command, and the
following keyword arguments are supported:
config: An optional dict of git config options to be passed with '-c'.
Args:
name: The name of the git command to call. Any '_' characters will
be replaced with '-'.
Returns:
A callable object that will try to call git with the named command.
"""
name = name.replace('_', '-')
def runner(*args, **kwargs):
cmdv = []
config = kwargs.pop('config', None)
for k in kwargs:
raise TypeError('%s() got an unexpected keyword argument %r'
% (name, k))
if config is not None:
if not git_require((1, 7, 2)):
raise ValueError('cannot set config on command line for %s()'
% name)
for k, v in config.items():
cmdv.append('-c')
cmdv.append('%s=%s' % (k, v))
cmdv.append(name)
cmdv.extend(args)
p = GitCommand(self._project,
cmdv,
bare = self._bare,
gitdir=self._gitdir,
capture_stdout = True,
capture_stderr = True)
if p.Wait() != 0:
raise GitError('%s %s: %s' % (
self._project.name,
name,
p.stderr))
r = p.stdout
try:
r = r.decode('utf-8')
except AttributeError:
pass
if r.endswith('\n') and r.index('\n') == len(r) - 1:
return r[:-1]
return r
return runner
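# --- Illustrative sketch (not part of the original project.py) ---------------
# The __getattr__ above turns attribute access into git subcommands, e.g.
# bare_git.rev_parse('HEAD') runs `git rev-parse HEAD`.  The core trick, shown
# with a tiny hypothetical wrapper that shells out directly (class and helper
# names are assumptions; it reuses this module's existing subprocess import):
class _GitAttrSketch(object):
    def __getattr__(self, name):
        subcommand = name.replace('_', '-')
        def runner(*args):
            out = subprocess.Popen(['git', subcommand] + list(args),
                                   stdout=subprocess.PIPE).communicate()[0]
            return out.decode('utf-8').rstrip('\n')
        return runner
# Example (inside any git checkout):  _GitAttrSketch().rev_parse('HEAD')
# ------------------------------------------------------------------------------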
class _PriorSyncFailedError(Exception):
def __str__(self):
return 'prior sync failed; rebase still in progress'
class _DirtyError(Exception):
def __str__(self):
return 'contains uncommitted changes'
class _InfoMessage(object):
def __init__(self, project, text):
self.project = project
self.text = text
def Print(self, syncbuf):
syncbuf.out.info('%s/: %s', self.project.relpath, self.text)
syncbuf.out.nl()
class _Failure(object):
def __init__(self, project, why):
self.project = project
self.why = why
def Print(self, syncbuf):
syncbuf.out.fail('error: %s/: %s',
self.project.relpath,
str(self.why))
syncbuf.out.nl()
class _Later(object):
def __init__(self, project, action):
self.project = project
self.action = action
def Run(self, syncbuf):
out = syncbuf.out
out.project('project %s/', self.project.relpath)
out.nl()
try:
self.action()
out.nl()
return True
except GitError:
out.nl()
return False
class _SyncColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'reposync')
self.project = self.printer('header', attr = 'bold')
self.info = self.printer('info')
self.fail = self.printer('fail', fg='red')
class SyncBuffer(object):
def __init__(self, config, detach_head=False):
self._messages = []
self._failures = []
self._later_queue1 = []
self._later_queue2 = []
self.out = _SyncColoring(config)
self.out.redirect(sys.stderr)
self.detach_head = detach_head
self.clean = True
def info(self, project, fmt, *args):
self._messages.append(_InfoMessage(project, fmt % args))
def fail(self, project, err=None):
self._failures.append(_Failure(project, err))
self.clean = False
def later1(self, project, what):
self._later_queue1.append(_Later(project, what))
def later2(self, project, what):
self._later_queue2.append(_Later(project, what))
def Finish(self):
self._PrintMessages()
self._RunLater()
self._PrintMessages()
return self.clean
def _RunLater(self):
for q in ['_later_queue1', '_later_queue2']:
if not self._RunQueue(q):
return
def _RunQueue(self, queue):
for m in getattr(self, queue):
if not m.Run(self):
self.clean = False
return False
setattr(self, queue, [])
return True
def _PrintMessages(self):
for m in self._messages:
m.Print(self)
for m in self._failures:
m.Print(self)
self._messages = []
self._failures = []
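# --- Illustrative sketch (not part of the original project.py) ---------------
# SyncBuffer defers work through later1()/later2() and only runs it in
# Finish(); the first failing action marks the buffer dirty and the rest of
# that queue is skipped.  A stripped-down, hypothetical version of that flow:
def _run_deferred_sketch(actions):
    """Run callables in order; stop and report failure on the first error."""
    for action in actions:
        try:
            action()
        except GitError:
            return False
    return True
# Example: _run_deferred_sketch([lambda: None]) -> True
# ------------------------------------------------------------------------------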
class MetaProject(Project):
"""A special project housed under .repo.
"""
def __init__(self, manifest, name, gitdir, worktree):
Project.__init__(self,
manifest = manifest,
name = name,
gitdir = gitdir,
objdir = gitdir,
worktree = worktree,
remote = RemoteSpec('origin'),
relpath = '.repo/%s' % name,
revisionExpr = 'refs/heads/master',
revisionId = None,
groups = None)
def PreSync(self):
if self.Exists:
cb = self.CurrentBranch
if cb:
base = self.GetBranch(cb).merge
if base:
self.revisionExpr = base
self.revisionId = None
def MetaBranchSwitch(self, target):
""" Prepare MetaProject for manifest branch switch
"""
# detach and delete manifest branch, allowing a new
# branch to take over
syncbuf = SyncBuffer(self.config, detach_head = True)
self.Sync_LocalHalf(syncbuf)
syncbuf.Finish()
return GitCommand(self,
['update-ref', '-d', 'refs/heads/default'],
capture_stdout = True,
capture_stderr = True).Wait() == 0
@property
def LastFetch(self):
try:
fh = os.path.join(self.gitdir, 'FETCH_HEAD')
return os.path.getmtime(fh)
except OSError:
return 0
@property
def HasChanges(self):
"""Has the remote received new commits not yet checked out?
"""
if not self.remote or not self.revisionExpr:
return False
all_refs = self.bare_ref.all
revid = self.GetRevisionId(all_refs)
head = self.work_git.GetHead()
if head.startswith(R_HEADS):
try:
head = all_refs[head]
except KeyError:
head = None
if revid == head:
return False
elif self._revlist(not_rev(HEAD), revid):
return True
return False
| {
"content_hash": "7f829e14365331bd473ad2ddb9e6f7ed",
"timestamp": "",
"source": "github",
"line_count": 2657,
"max_line_length": 102,
"avg_line_length": 30.825366955212647,
"alnum_prop": 0.5796002588427774,
"repo_name": "lshain/repo",
"id": "127176e54b6c5f38c3ebc258947a493c4de3df8a",
"size": "82504",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "project.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "346948"
},
{
"name": "Shell",
"bytes": "5988"
}
],
"symlink_target": ""
} |
import io
import os
import re
import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
# intentionally *not* adding an encoding option to open, See:
# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
return io.open(os.path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
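# --- Illustrative sketch (not part of the original setup.py) -----------------
# find_version() above greps the package source for a `__version__ = '...'`
# line.  This hypothetical helper applies the same regex to an in-memory
# string; it is defined only and never called during the build:
def _find_version_in_text(text):
    match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', text, re.M)
    return match.group(1) if match else None
# Example: _find_version_in_text("__version__ = '2.0.5'\n") -> '2.0.5'
# ------------------------------------------------------------------------------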
# requirements
setup_requirements = ['pytest-runner'] if {'pytest', 'test', 'ptr'}.intersection(sys.argv) else []
install_requirements = ['guessit>=3.0.0', 'babelfish>=0.5.2', 'enzyme>=0.4.1', 'beautifulsoup4>=4.4.0',
'requests>=2.0', 'click>=4.0', 'dogpile.cache>=0.6.0', 'stevedore>=1.20.0',
'chardet>=2.3.0', 'pysrt>=1.0.1', 'six>=1.9.0', 'appdirs>=1.3', 'rarfile>=2.7',
'pytz>=2012c']
if sys.version_info < (3, 2):
install_requirements.append('futures>=3.0')
test_requirements = ['sympy', 'vcrpy>=1.6.1', 'pytest', 'pytest-flakes', 'pytest-cov']
if sys.version_info < (3, 3):
test_requirements.append('mock')
dev_requirements = ['tox', 'sphinx', 'sphinx_rtd_theme', 'sphinxcontrib-programoutput', 'wheel']
setup(name='subliminal',
version=find_version('subliminal', '__init__.py'),
license='MIT',
description='Subtitles, faster than your thoughts',
long_description=read('README.rst') + '\n\n' + read('HISTORY.rst'),
keywords='subtitle subtitles video movie episode tv show series',
url='https://github.com/Diaoul/subliminal',
author='Antoine Bertin',
author_email='[email protected]',
packages=find_packages(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Multimedia :: Video'
],
entry_points={
'subliminal.providers': [
'addic7ed = subliminal.providers.addic7ed:Addic7edProvider',
'argenteam = subliminal.providers.argenteam:ArgenteamProvider',
'legendastv = subliminal.providers.legendastv:LegendasTVProvider',
'opensubtitles = subliminal.providers.opensubtitles:OpenSubtitlesProvider',
'podnapisi = subliminal.providers.podnapisi:PodnapisiProvider',
'shooter = subliminal.providers.shooter:ShooterProvider',
'thesubdb = subliminal.providers.thesubdb:TheSubDBProvider',
'tvsubtitles = subliminal.providers.tvsubtitles:TVsubtitlesProvider'
],
'subliminal.refiners': [
'hash = subliminal.refiners.hash:refine',
'metadata = subliminal.refiners.metadata:refine',
'omdb = subliminal.refiners.omdb:refine',
'tvdb = subliminal.refiners.tvdb:refine'
],
'babelfish.language_converters': [
'addic7ed = subliminal.converters.addic7ed:Addic7edConverter',
'legendastv = subliminal.converters.legendastv:LegendasTVConverter',
'shooter = subliminal.converters.shooter:ShooterConverter',
'thesubdb = subliminal.converters.thesubdb:TheSubDBConverter',
'tvsubtitles = subliminal.converters.tvsubtitles:TVsubtitlesConverter'
],
'console_scripts': [
'subliminal = subliminal.cli:subliminal'
]
},
setup_requires=setup_requirements,
install_requires=install_requirements,
tests_require=test_requirements,
extras_require={
'test': test_requirements,
'dev': dev_requirements
})
| {
"content_hash": "ea0ee6b69bdf57a59543250fd181ffb7",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 103,
"avg_line_length": 43.18627450980392,
"alnum_prop": 0.6161180476730987,
"repo_name": "h3llrais3r/subliminal",
"id": "ccc1cdab7a0346d472e13a05744a841dbc8621c8",
"size": "4451",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "331"
},
{
"name": "Python",
"bytes": "356989"
}
],
"symlink_target": ""
} |
import itertools
import logging
import os
import posixpath
import urllib.parse
from typing import List
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.models.index import PyPI
from pip._internal.utils.compat import has_tls
from pip._internal.utils.misc import normalize_path, redact_auth_from_url
logger = logging.getLogger(__name__)
class SearchScope:
"""
Encapsulates the locations that pip is configured to search.
"""
__slots__ = ["find_links", "index_urls", "no_index"]
@classmethod
def create(
cls,
find_links: List[str],
index_urls: List[str],
no_index: bool,
) -> "SearchScope":
"""
Create a SearchScope object after normalizing the `find_links`.
"""
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
built_find_links: List[str] = []
for link in find_links:
if link.startswith("~"):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
built_find_links.append(link)
# If we don't have TLS enabled, then WARN if anyplace we're looking
# relies on TLS.
if not has_tls():
for link in itertools.chain(index_urls, built_find_links):
parsed = urllib.parse.urlparse(link)
if parsed.scheme == "https":
logger.warning(
"pip is configured with locations that require "
"TLS/SSL, however the ssl module in Python is not "
"available."
)
break
return cls(
find_links=built_find_links,
index_urls=index_urls,
no_index=no_index,
)
def __init__(
self,
find_links: List[str],
index_urls: List[str],
no_index: bool,
) -> None:
self.find_links = find_links
self.index_urls = index_urls
self.no_index = no_index
def get_formatted_locations(self) -> str:
lines = []
redacted_index_urls = []
if self.index_urls and self.index_urls != [PyPI.simple_url]:
for url in self.index_urls:
redacted_index_url = redact_auth_from_url(url)
# Parse the URL
purl = urllib.parse.urlsplit(redacted_index_url)
# A URL is generally invalid if both scheme and netloc are
# missing; there are issues with Python's URL parsing, so this
# test is a bit crude. See bpo-20271, bpo-23505. Python doesn't
# always parse invalid URLs correctly - ideally it would raise
# exceptions for malformed URLs.
if not purl.scheme and not purl.netloc:
logger.warning(
'The index url "%s" seems invalid, please provide a scheme.',
redacted_index_url,
)
redacted_index_urls.append(redacted_index_url)
lines.append(
"Looking in indexes: {}".format(", ".join(redacted_index_urls))
)
if self.find_links:
lines.append(
"Looking in links: {}".format(
", ".join(redact_auth_from_url(url) for url in self.find_links)
)
)
return "\n".join(lines)
def get_index_urls_locations(self, project_name: str) -> List[str]:
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
use this url_name to produce all locations
"""
def mkurl_pypi_url(url: str) -> str:
loc = posixpath.join(
url, urllib.parse.quote(canonicalize_name(project_name))
)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith("/"):
loc = loc + "/"
return loc
return [mkurl_pypi_url(url) for url in self.index_urls]
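# --- Illustrative sketch (not part of the original pip module) ---------------
# Rough usage of the class above; the values are made up for demonstration and
# this helper is never called by pip itself:
def _search_scope_usage_sketch() -> List[str]:
    scope = SearchScope.create(
        find_links=[],
        index_urls=["https://pypi.org/simple"],
        no_index=False,
    )
    logger.debug("%s", scope.get_formatted_locations())
    # For "requests" this yields ["https://pypi.org/simple/requests/"].
    return scope.get_index_urls_locations("requests")
# ------------------------------------------------------------------------------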
| {
"content_hash": "932b68daaa2bd30c35558fa9b42be9fd",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 85,
"avg_line_length": 34.91729323308271,
"alnum_prop": 0.5546942291128337,
"repo_name": "sbidoul/pip",
"id": "a64af73899de1a292ebadebb3cf4ad9bae27d6fc",
"size": "4644",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "src/pip/_internal/models/search_scope.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3137"
},
{
"name": "PowerShell",
"bytes": "2137"
},
{
"name": "Python",
"bytes": "7107244"
}
],
"symlink_target": ""
} |
"""
Methods and interface objects used to interact with external APIs.
API method calls return objects whose attributes are, in many cases,
direct maps of the data returned from the underlying API HTTP call.
Unfortunately, these objects are also often constructed dynamically, making
it difficult to know what data is available from the API object. Because of
this, all API calls should wrap their returned object in one defined here,
using only explicitly defined attributes and/or methods.
In other words, Horizon developers not working on openstack_dashboard.api
shouldn't need to understand the finer details of APIs for
Keystone/Nova/Glance/Swift et al.
"""
from openstack_dashboard.api import base
from openstack_dashboard.api import ceilometer
from openstack_dashboard.api import cinder
from openstack_dashboard.api import fwaas
from openstack_dashboard.api import glance
from openstack_dashboard.api import heat
from openstack_dashboard.api import keystone
from openstack_dashboard.api import lbaas
from openstack_dashboard.api import network
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.api import swift
from openstack_dashboard.api import vpn
__all__ = [
"base",
"cinder",
"fwaas",
"glance",
"heat",
"keystone",
"lbaas",
"network",
"neutron",
"nova",
"swift",
"ceilometer",
"vpn",
]
| {
"content_hash": "67638e54f9e2d0e4479e7c6141f5c79e",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 76,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.7476190476190476,
"repo_name": "zhaogaolong/oneFinger",
"id": "66d0285ca754642aab98df656293801ea357436a",
"size": "2286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack/test/api/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "436003"
},
{
"name": "HTML",
"bytes": "2963802"
},
{
"name": "JavaScript",
"bytes": "2960983"
},
{
"name": "Python",
"bytes": "546184"
}
],
"symlink_target": ""
} |
"""
Decorators used for testing.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from nose import SkipTest
from functools import wraps
from unittest import TestCase
from chevah.compat import process_capabilities
def skipOnCondition(callback, message):
"""
Helper for decorating a class or method so that it is skipped based on
the callback result.
This is inspired by the Python unittest implementation.
"""
def inner(test_item):
if not (
isinstance(test_item, type) and
issubclass(test_item, TestCase)
):
# Only raise SkipTest in methods.
@wraps(test_item)
def wrapper(*args, **kwargs):
if callback():
raise SkipTest(message)
return test_item(*args, **kwargs)
result = wrapper
else:
result = test_item
if callback():
result.__unittest_skip__ = True
result.__unittest_skip_why__ = message
return result
return inner
def onOSFamily(family):
"""
Run test only if current os is from `family`.
"""
def check_os_family():
return process_capabilities.os_family != family.lower()
return skipOnCondition(
check_os_family, 'OS family "%s" not available.' % family)
def onOSName(name):
"""
Run test only if the current OS is `name` or is one of the names in the `name` list.
"""
if not isinstance(name, list) and not isinstance(name, tuple):
name = [name.lower()]
else:
name = [item.lower() for item in name]
def check_os_name():
return process_capabilities.os_name not in name
return skipOnCondition(
check_os_name, 'OS name "%s" not available.' % name)
def onCapability(name, value):
"""
Run test only if capability with `name` equals `value`.
"""
capability = getattr(process_capabilities, name)
def check_capability():
return capability != value
return skipOnCondition(
check_capability, 'Capability "%s" not present.' % name)
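# --- Illustrative sketch (not part of the original module) -------------------
# Hypothetical usage of the decorators defined above inside a test module;
# the class, test and capability names are made up:
#
#   from unittest import TestCase
#
#   class TestExample(TestCase):
#
#       @onOSFamily('posix')
#       def test_only_on_posix(self):
#           """Runs only when the OS family is posix."""
#
#       @onOSName(['linux', 'aix'])
#       def test_only_on_linux_or_aix(self):
#           """Runs only on Linux or AIX."""
#
#       @onCapability('symbolic_link', True)
#       def test_needs_symlinks(self):
#           """Runs only when the named capability is True."""
# ------------------------------------------------------------------------------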
| {
"content_hash": "29f58945320a6464f7509d3d32bb476c",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 72,
"avg_line_length": 26.024390243902438,
"alnum_prop": 0.6091846298031866,
"repo_name": "chevah/empirical",
"id": "9d14a8b7ffec85188ce778636f7a9f7868a30e9b",
"size": "2194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chevah/empirical/conditionals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "147316"
},
{
"name": "Shell",
"bytes": "17849"
}
],
"symlink_target": ""
} |
import os
import cv2
import numpy as np
import sqlite3
class face_rec():
def getProfile(self,id):
conn=sqlite3.connect("email_sys")
cmd="select Name from User where ID=" +str(id)
cursor=conn.execute(cmd)
data = cursor.fetchone()
conn.close()
return data
def rec(self):
if os.path.isfile('recognizer/trainingData.yml') is False:
return "0"
rec=cv2.createLBPHFaceRecognizer();
rec.load("recognizer/trainingData.yml")
detector=cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
cam = cv2.VideoCapture(0)
font=cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_SIMPLEX,1,1,0,1,1)
while(True):
ret, img = cam.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = detector.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
id,conf=rec.predict(gray[y:y+h,x:x+w])
profile=self.getProfile(id)
if( profile!=None):
return profile[0]
else:
return "0"
cv2.imshow('frame',img)
break
cam.release()
cv2.destroyAllWindows()
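# --- Illustrative sketch (not part of the original file) ---------------------
# Hypothetical caller: run the webcam recognizer and handle the "0" sentinel
# returned above when no trained model exists or no known face is seen.
#
#   recognizer = face_rec()
#   name = recognizer.rec()
#   if name == "0":
#       print("No match (recognizer not trained or face unknown).")
#   else:
#       print("Recognized user: %s" % name)
# ------------------------------------------------------------------------------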
| {
"content_hash": "83e7dde8dd403bb3aed4b282828c2ee0",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 75,
"avg_line_length": 27.90909090909091,
"alnum_prop": 0.5822475570032574,
"repo_name": "yahya-idriss/Python-Personal-assistant",
"id": "188f60fb65cb284290da1f93b14afe5b4136dbbe",
"size": "1251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "detect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16146"
}
],
"symlink_target": ""
} |
"""Script of running base tests on utils."""
from simplified_models import TrueModel
from utils import Coordinate2d
def create_input_data():
"""Utility function for creating input data."""
position = Coordinate2d(3.0, 4.0)
speed = Coordinate2d(2.0, 5.0)
sample_time = 0.1
return {'position': position,
'speed': speed,
'sample_time': sample_time}
def test_base():
"""Check we can create the model."""
input_data = create_input_data()
base_model = TrueModel(input_data['position'],
input_data['speed'],
input_data['sample_time'])
assert(True)
def test_get_position_velocity():
"""Check we can get the right value of velocity."""
input_data = create_input_data()
base_model = TrueModel(input_data['position'],
input_data['speed'],
input_data['sample_time'])
assert(base_model.get_position() == input_data['position'])
assert(base_model.get_speed() == input_data['speed'])
def test_get_state():
"""Verify we return the correct state."""
input_data = create_input_data()
base_model = TrueModel(input_data['position'],
input_data['speed'],
input_data['sample_time'])
state = base_model.get_state()
assert(state[0] == input_data['position'].get_x())
assert(state[1] == input_data['speed'].get_x())
assert(state[2] == input_data['position'].get_y())
assert(state[3] == input_data['speed'].get_y())
def test_set_state():
"""Verify we set the state correctly."""
input_data = create_input_data()
base_model = TrueModel(input_data['position'],
input_data['speed'],
input_data['sample_time'])
state = [0, 1, 2, 3]
base_model.set_state(state)
new_state = base_model.get_state()
for idx in range(len(state)):
assert(state[idx] == new_state[idx])
assert(base_model.get_position() == Coordinate2d(0, 2))
assert(base_model.get_speed() == Coordinate2d(1, 3))
def test_update_step():
"""Verify update step."""
initial_position = Coordinate2d(0, 0)
initial_speed = Coordinate2d(3, 4)
sample_time = 0.1
base_model = TrueModel(initial_position,
initial_speed,
sample_time)
acceleration = Coordinate2d(0, 0)
base_model.update_state(acceleration)
new_state = base_model.get_state()
assert abs(new_state[0] - 0.3) < 1e-8
assert(new_state[1] == 3)
assert abs(new_state[2] - 0.4) < 1e-8
assert(new_state[3] == 4)
def compute_new_state(initial_position, initial_speed, acceleration,
sample_time):
expected_position_x = initial_position.get_x() + \
sample_time * initial_speed.get_x() + \
(sample_time**2) / 2 * acceleration.get_x()
expected_position_y = initial_position.get_y() + \
sample_time * initial_speed.get_y() + \
(sample_time**2) / 2 * acceleration.get_y()
expected_speed_x = initial_speed.get_x() + \
sample_time * acceleration.get_x()
expected_speed_y = initial_speed.get_y() + \
sample_time * acceleration.get_y()
return (expected_position_x, expected_speed_x,
expected_position_y, expected_speed_y)
def test_update_step_acceleration():
"""Verify update step with non zero acceleration."""
initial_position = Coordinate2d(0, 0)
initial_speed = Coordinate2d(0, 0)
sample_time = 0.1
base_model = TrueModel(initial_position,
initial_speed,
sample_time)
for iteration in range(5):
acceleration = Coordinate2d(1*iteration, 2*iteration)
current_position = base_model.get_position()
current_speed = base_model.get_speed()
base_model.update_state(acceleration)
new_state = base_model.get_state()
expected_state = compute_new_state(
current_position, current_speed,
acceleration, sample_time)
assert all(abs(new_state[i] - expected_state[i]) < 1e-8
           for i in range(len(new_state)))
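# --- Illustrative sketch (not part of the original test module) --------------
# compute_new_state() above encodes the standard constant-acceleration update
#     p' = p + v*dt + a*dt**2/2,    v' = v + a*dt
# applied per axis.  A tiny hypothetical scalar version of the same step:
def _kinematic_step_sketch(p, v, a, dt):
    return p + v * dt + a * dt ** 2 / 2, v + a * dt
# Example: _kinematic_step_sketch(0.0, 3.0, 0.0, 0.1) -> (0.3, 3.0), matching
# the x axis of test_update_step above (up to floating point rounding).
# ------------------------------------------------------------------------------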
| {
"content_hash": "677cf5b02693eac65b94df8c05004311",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 69,
"avg_line_length": 37.05042016806723,
"alnum_prop": 0.5622590156498072,
"repo_name": "lparolin/state_estimation",
"id": "260e7f08a64fe4578e6b487f26be3c5c01838ccb",
"size": "4409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/test_simplified_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "365322"
},
{
"name": "Python",
"bytes": "28431"
},
{
"name": "TeX",
"bytes": "44901"
}
],
"symlink_target": ""
} |
import sys
import unittest
from stratuslab.VersionChecker import VersionChecker
class VersionCheckTest(unittest.TestCase):
MIN_SAVE = ()
VERSION_INFO_SAVE = ()
def setUp(self):
VersionCheckTest.VERSION_INFO_SAVE = sys.version_info  # save on the class so tearDown() can restore it
sys.version_info = (2, 6, 5, 'final', 0)
VersionCheckTest.MIN_SAVE = VersionChecker.MINIMUM_VERSION
def tearDown(self):
sys.version_info = VersionCheckTest.VERSION_INFO_SAVE
VersionChecker.MINIMUM_VERSION = VersionCheckTest.MIN_SAVE
def testGoodVersions(self):
VersionChecker().check()
VersionChecker.MINIMUM_VERSION = (2, 5)
VersionChecker().check()
def testBadVersions(self):
min = VersionChecker.MINIMUM_VERSION
VersionChecker.MINIMUM_VERSION = (3, 6)
try:
VersionChecker().check()
except:
pass
else:
self.fail('should have raised')
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "cda79785f032e87fb51a08101854882e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 66,
"avg_line_length": 24.341463414634145,
"alnum_prop": 0.6132264529058116,
"repo_name": "StratusLab/client",
"id": "856fd044659acf946ccfeb333b387067f91dc801",
"size": "1734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/code/src/test/python/VersionCheckerTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "30262"
},
{
"name": "HTML",
"bytes": "399451"
},
{
"name": "JavaScript",
"bytes": "26512"
},
{
"name": "Python",
"bytes": "2417913"
},
{
"name": "Shell",
"bytes": "5806"
},
{
"name": "Smarty",
"bytes": "34040"
}
],
"symlink_target": ""
} |
"""EXRIOTensor"""
import tensorflow as tf
from tensorflow_io.python.ops import io_tensor_ops
from tensorflow_io.python.ops import core_ops
class EXRPartIOTensor(io_tensor_ops._TableIOTensor): # pylint: disable=protected-access
"""EXRPartIOTensor"""
# =============================================================================
# Constructor (private)
# =============================================================================
def __init__(self, spec, columns, values, internal=False):
with tf.name_scope("EXRPartIOTensor"):
super().__init__(spec, columns, values, internal=internal)
class EXRIOTensor(
io_tensor_ops._CollectionIOTensor
): # pylint: disable=protected-access
"""EXRIOTensor"""
# =============================================================================
# Constructor (private)
# =============================================================================
def __init__(self, filename, internal=False):
with tf.name_scope("EXRIOTensor"):
data = tf.io.read_file(filename)
shapes, dtypes, channels = core_ops.io_decode_exr_info(data)
parts = []
index = 0
for (shape, dtypes, channels) in zip(
shapes.numpy(), dtypes.numpy(), channels.numpy()
):
# Remove trailing 0 from dtypes
while dtypes[-1] == 0:
dtypes.pop()
channels.pop()
spec = tuple(
tf.TensorSpec(tf.TensorShape(shape), dtype) for dtype in dtypes
)
columns = [channel.decode() for channel in channels]
elements = [
io_tensor_ops.TensorIOTensor(
core_ops.io_decode_exr(data, index, channel, dtype=dtype),
internal=internal,
)
for (channel, dtype) in zip(columns, dtypes)
]
parts.append(
EXRPartIOTensor(spec, columns, elements, internal=internal)
)
index += 1
spec = tuple(part.spec for part in parts)
columns = [i for i, _ in enumerate(parts)]
super().__init__(spec, columns, parts, internal=internal)
| {
"content_hash": "e8ec1667d307a0507668828035d2bdfa",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 88,
"avg_line_length": 41.01754385964912,
"alnum_prop": 0.4632164242942686,
"repo_name": "tensorflow/io",
"id": "a36e29c8f74c06ad6a8f122359fec19adab6eacf",
"size": "3027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_io/python/experimental/openexr_io_tensor_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1583693"
},
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "Dockerfile",
"bytes": "3938"
},
{
"name": "Go",
"bytes": "3757"
},
{
"name": "JavaScript",
"bytes": "6794"
},
{
"name": "Python",
"bytes": "1380386"
},
{
"name": "R",
"bytes": "82002"
},
{
"name": "Shell",
"bytes": "36295"
},
{
"name": "Starlark",
"bytes": "74322"
},
{
"name": "Swift",
"bytes": "19103"
}
],
"symlink_target": ""
} |
"""Utility functions for LipidFinder."""
from __future__ import print_function
import os
def normalise_path(path):
# type: (str) -> str
"""Return a valid path for the current OS.
Keyword Arguments:
path -- path to normalize
"""
return os.path.normpath(os.path.realpath(path))
def traceless_warning(message, category, filename, lineno, file=None,
line=None):
# type: (str, Warning, str, int, file, str) -> str
"""Return a warning message without the traceback information.
Keyword Arguments:
message -- warning message
category -- Warning instance
filename -- name of the file where the warning was raised
lineno -- line number where the warning was raised
file -- file instance
line -- line object
"""
return 'Warning{0}{1}{1}'.format(message, os.linesep)
def mz_delta(mz, fixederr, ppmerr, precision=5):
# type: (float, float, float, int) -> float
"""Return the delta tolerance for the given m/z.
Keyword Arguments:
mz -- m/z reference value
fixederr -- allowed fixed error
ppmerr -- mass-dependant PPM error to add to the fixed error
precision -- number of decimal digits to use with floats (e.g. a
precision of 2 forces a difference of 0.01 between
any two consecutive float numbers) [default: 5]
"""
return round(fixederr + (mz * ppmerr * 1e-6), precision)
def mz_tol_range(mz, fixederr, ppmerr, precision=5):
# type: (float, float, float, int) -> float
"""Return lower and upper tolerance limits for the given m/z.
Keyword Arguments:
mz -- m/z reference value
fixederr -- allowed fixed error
ppmerr -- mass-dependant PPM error to add to the fixed error
precision -- number of decimal digits to use with floats (e.g. a
precision of 2 forces a difference of 0.01 between
any two consecutive float numbers) [default: 5]
"""
delta = mz_delta(mz, fixederr, ppmerr, precision)
return (round(mz - delta, precision), round(mz + delta, precision))
def rt_delta(maxdiff, precision=5):
# type: (float, int) -> float
"""Return the delta tolerance for the given retention time.
Keyword Arguments:
maxdiff -- maximum time difference between a feature edge and
an adjacent frame to be considered part of the same
feature
precision -- number of decimal digits to use with floats (e.g. a
precision of 2 forces a difference of 0.01 between
any two consecutive float numbers) [default: 5]
"""
return round(maxdiff, precision)
def rt_tol_range(rt, maxdiff, precision=5):
# type: (float, float, int) -> float
"""Return lower and upper tolerance limits for the given retention
time.
Keyword Arguments:
rt -- retention time (RT) reference value
maxdiff -- maximum time difference between a feature edge and
an adjacent frame to be considered part of the same
feature
precision -- number of decimal digits to use with floats (e.g. a
precision of 2 forces a difference of 0.01 between
any two consecutive float numbers) [default: 5]
"""
delta = rt_delta(maxdiff, precision)
return (round(rt - delta, precision), round(rt + delta, precision))
def print_progress_bar(iteration, total, prefix='', suffix='Completed',
length=34):
# type: (int, int, str, str, int) -> None
"""Call in a loop to create terminal progress bar.
Extracted from first answer at: https://stackoverflow.com/questions/
3173320/text-progress-bar-in-the-console
Keyword Arguments:
iteration -- current iteration
total -- total iterations
prefix -- prefix string [default: no prefix]
suffix -- suffix string [default: "Completed"]
length -- character length of bar [default: 34]
"""
percent = "{0:.1f}".format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = '#' * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if (iteration == total):
print()
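# --- Illustrative sketch (not part of the original module) -------------------
# Hypothetical usage of the helpers above: build one (lower, upper) m/z window
# per reference mass while reporting progress (the error values are made up).
def _mz_window_demo(mz_values, fixederr=0.004, ppmerr=5.0):
    windows = []
    for i, mz in enumerate(mz_values, start=1):
        windows.append(mz_tol_range(mz, fixederr, ppmerr))
        print_progress_bar(i, len(mz_values), prefix='Windows')
    return windows
# Example: _mz_window_demo([760.5851]) returns a single window of roughly
# +/- 0.0078 around the mass with the default errors above.
# ------------------------------------------------------------------------------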
| {
"content_hash": "ead15c139b15fb135b941235ab559138",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 75,
"avg_line_length": 37.773109243697476,
"alnum_prop": 0.6093437152391546,
"repo_name": "cjbrasher/LipidFinder",
"id": "fc05f0fbeb642c29b2d5ec81825a7206eaf34abb",
"size": "4755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LipidFinder/_utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "211378"
}
],
"symlink_target": ""
} |
"""
sentry.rules.conditions.regression_event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from sentry.rules.conditions.base import EventCondition
class RegressionEventCondition(EventCondition):
label = 'An issue changes state from resolved to unresolved'
def passes(self, event, state):
return state.is_regression
| {
"content_hash": "f19e82d44d2d5a1c96730a29a9a9e523",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 27.555555555555557,
"alnum_prop": 0.6915322580645161,
"repo_name": "looker/sentry",
"id": "6227697ae5bac482a7d2081b9d1cdc28f938df96",
"size": "496",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/rules/conditions/regression_event.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "289931"
},
{
"name": "HTML",
"bytes": "241322"
},
{
"name": "JavaScript",
"bytes": "3112298"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "36341504"
},
{
"name": "Ruby",
"bytes": "204"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
} |
from gearshift import view, config
import unittest
class TestView(unittest.TestCase):
def setUp(self):
#if not view.engines:
# view.load_engines()
pass
def test_cycle(self):
oe = view.base.cycle(('odd','even'))
assert str(oe) == str(None)
assert oe.next() == 'odd'
assert str(oe) == 'odd'
assert oe.next() == 'even'
assert oe.value == 'even'
assert oe.next() == 'odd'
assert oe.value == 'odd'
def test_selector(self):
assert view.base.selector(False) is None
assert view.base.selector(True) == 'selected'
def test_checker(self):
assert view.base.checker(False) is None
assert view.base.checker(True) == 'checked'
def test_ipeek(self):
seq = xrange(3, 6)
assert view.base.ipeek(seq)
assert list(seq) == range(3, 6)
seq = xrange(3, 3)
assert not view.base.ipeek(seq)
assert list(seq) == []
def test_UnicodeValueAppearingInATemplateIsFine(self):
ustr = u"micro-eXtreme Programming ( µ XP): Embedding XP Within Standard Projects"
info = dict(someval=ustr)
val = view.render(info, template="gearshift.tests.simple")
self.failUnless(u"Paging all " + ustr in val.decode("utf-8"))
def test_templateRetrievalByPath(self):
config.update({'server.environment' : 'development'})
from turbokid import kidsupport
ks = kidsupport.KidSupport()
cls = ks.load_template("gearshift.tests.simple")
assert cls is not None
t = cls()
t.someval = "hello"
filled = str(t)
assert "groovy" in filled
assert "html" in filled
# the following import should not fail, if everything is working correctly:
import gearshift.tests.simple
def test_default_output_encoding(self):
info = dict(someval="someval")
template = "gearshift.tests.simple"
headers = {}
# default encoding is utf-8
val = view.render(info, template, headers=headers)
assert headers.get('Content-Type') == 'text/html; charset=utf-8'
# encoding can be changed and will be added to existing content type
try:
config.update({'kid.encoding': 'iso-8859-1'})
headers['Content-Type'] = 'text/html'
view.render(info, template, headers=headers)
assert headers.get('Content-Type') == 'text/html; charset=iso-8859-1'
finally:
config.update({'kid.encoding': 'utf-8'})
def test_content_types(self):
info = dict(someval="someval")
template = "gearshift.tests.simple"
headers = {}
view.render(info, template, headers=headers)
assert headers.get('Content-Type') == 'text/html; charset=utf-8'
headers = {}
view.render(info, template, headers=headers, format='html')
assert headers.get('Content-Type') == 'text/html; charset=utf-8'
headers = {}
view.render(info, template, headers=headers, format='html-strict')
assert headers.get('Content-Type') == 'text/html; charset=utf-8'
headers = {}
view.render(info, template, headers=headers, format='xhtml')
assert headers.get('Content-Type') == 'text/html; charset=utf-8'
headers = {}
view.render(info, template, headers=headers, format='xhtml-strict')
assert headers.get('Content-Type') == 'text/html; charset=utf-8'
headers = {}
view.render(info, template, headers=headers, format='xml')
assert headers.get('Content-Type') == 'text/xml; charset=utf-8'
headers = {}
view.render(info, template, headers=headers, format='json')
assert headers.get('Content-Type') == 'application/json'
config.update({'global':
{'tg.format_mime_types': {'xhtml': 'application/xhtml+xml'}}})
headers = {}
view.render(info, template, headers=headers, format='xhtml')
assert headers.get('Content-Type') == 'application/xhtml+xml; charset=utf-8'
headers = {}
view.render(info, template, headers=headers, format='xhtml-strict')
assert headers.get('Content-Type') == 'application/xhtml+xml; charset=utf-8'
config.update({'global': {'tg.format_mime_types': {}}})
def test_plain_format(self):
info = dict(someval="dumbos")
template = "gearshift.tests.simple"
headers = {}
plain = view.render(info, template, headers=headers, format='plain')
assert headers.get('Content-Type') == 'text/plain; charset=utf-8'
assert plain.strip() == ('This is the groovy test template.'
' Paging all dumbos.')
headers = {}
text = view.render(info, template, headers=headers, format='text')
assert headers.get('Content-Type') == 'text/plain; charset=utf-8'
assert text == plain
try:
view.render(info, template, headers=headers, format='dumbo')
except KeyError:
pass
else:
assert False, "'dumbo' should not be accepted as format"
| {
"content_hash": "a979c8deafafe9c91f85ea651152aa93",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 90,
"avg_line_length": 41.088,
"alnum_prop": 0.6016355140186916,
"repo_name": "dbrattli/python-gearshift",
"id": "623b3d321489616b10866883da59d2d5c9a49895",
"size": "5162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gearshift/tests/test_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7127"
},
{
"name": "Python",
"bytes": "606764"
}
],
"symlink_target": ""
} |
from pprint import pprint
import systextil.models
import lotes.models
def sync_regra_colecao():
colecoes = systextil.models.Colecao.objects.exclude(
colecao=0).order_by('colecao')
regra_colecao = lotes.models.RegraColecao.objects.all(
).order_by('colecao')
acoes = {}
inter_col = colecoes.iterator()
inter_regra = regra_colecao.iterator()
walk = 'b'  # which iterator(s) to advance next: 'f' = from (colecao), 't' = to (regra), 'b' = both
while True:
if walk in ['f', 'b']:
try:
col = next(inter_col)
except StopIteration:
col = None
if walk in ['t', 'b']:
try:
rc = next(inter_regra)
except StopIteration:
rc = None
if rc is None and col is None:
break
rec = {}
acao_definida = False
if rc is not None:
if col is None or col.colecao > rc.colecao:
acao_definida = True
rec['status'] = 'd'
rec['colecao'] = rc.colecao
walk = 't'
if not acao_definida:
rec['colecao'] = col.colecao
if rc is None or col.colecao < rc.colecao:
acao_definida = True
rec['status'] = 'i'
walk = 'f'
if not acao_definida:
rec['status'] = 'u'
walk = 'b'
acoes[rec['colecao']] = rec
for colecao in acoes:
if acoes[colecao]['status'] == 'd':
try:
lotes.models.RegraColecao.objects.filter(colecao=colecao).delete()
except lotes.models.RegraColecao.DoesNotExist:
return 'Erro apagando regra de coleção'
continue
if acoes[colecao]['status'] == 'i':
try:
rc = lotes.models.RegraColecao()
rc.colecao = colecao
rc.save()
except Exception:
return 'Erro inserindo regra de coleção '
return None
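# --- Illustrative sketch (not part of the original module) -------------------
# The loop above is a merge walk over two sources sorted by `colecao`: keys
# only on the Systextil side become inserts ('i'), keys only in RegraColecao
# become deletes ('d'), and keys on both sides become updates ('u').  The same
# classification over plain key collections, as a hypothetical helper:
def _merge_diff_sketch(source_keys, target_keys):
    source, target = set(source_keys), set(target_keys)
    actions = {}
    for key in sorted(source | target):
        if key not in target:
            actions[key] = 'i'   # only in source -> insert
        elif key not in source:
            actions[key] = 'd'   # only in target -> delete
        else:
            actions[key] = 'u'   # in both -> update
    return actions
# Example: _merge_diff_sketch([1, 2, 4], [2, 3, 4])
#          -> {1: 'i', 2: 'u', 3: 'd', 4: 'u'}
# ------------------------------------------------------------------------------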
| {
"content_hash": "84826f2edca733925e778bbaa9d828fb",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 82,
"avg_line_length": 26.972972972972972,
"alnum_prop": 0.49749498997995995,
"repo_name": "anselmobd/fo2",
"id": "a4bfc4be3786dea9cbc6dcbc5a972c0a91583d20",
"size": "2000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lotes/models/functions/sync_regra_colecao.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
} |
from app import db
from app.models.base_model import BaseEntity
class Location(db.Model, BaseEntity):
__tablename__ = 'location'
city = db.Column(db.String(256))
country = db.Column(db.String(256))
address = db.Column(db.String(256))
zip = db.Column(db.String(32))
postoffice_box = db.Column(db.String(32))
email = db.Column(db.String(256))
phone_nr = db.Column(db.String(64))
def __init__(self, city='', country='', address='', zip='',
postoffice_box='', email='', phone_nr=''):
self.city = city
self.country = country
self.address = address
self.zip = zip
self.postoffice_box = postoffice_box
self.email = email
self.phone_nr = phone_nr
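# --- Illustrative sketch (not part of the original module) -------------------
# Hypothetical usage inside an application context; the values are made up and
# persistence follows the usual Flask-SQLAlchemy add/commit pattern:
#
#   location = Location(city='Amsterdam', country='Netherlands',
#                       address='Example Street 1', zip='1234 AB',
#                       email='[email protected]', phone_nr='+31 20 000 0000')
#   db.session.add(location)
#   db.session.commit()
# ------------------------------------------------------------------------------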
| {
"content_hash": "3329ef8ae89dca0409a7f763a59c9112",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 63,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.6023936170212766,
"repo_name": "viaict/viaduct",
"id": "040ec36a791dee593664b0cbb7c6aaf0249c982d",
"size": "752",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "app/models/location.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1583078"
},
{
"name": "Dockerfile",
"bytes": "1131"
},
{
"name": "HTML",
"bytes": "227955"
},
{
"name": "JavaScript",
"bytes": "63026"
},
{
"name": "Makefile",
"bytes": "896"
},
{
"name": "Python",
"bytes": "770976"
},
{
"name": "Shell",
"bytes": "3004"
},
{
"name": "TypeScript",
"bytes": "3288"
},
{
"name": "Vue",
"bytes": "27869"
}
],
"symlink_target": ""
} |
""" (disabled by default) support for testing pytest and pytest plugins. """
import inspect
import sys
import os
import codecs
import re
import time
import platform
from fnmatch import fnmatch
import subprocess
import py
import pytest
from py.builtin import print_
from _pytest.core import HookRelay
from _pytest.main import Session, EXIT_OK
def get_public_names(l):
"""Only return names from iterator l without a leading underscore."""
return [x for x in l if x[0] != "_"]
def pytest_addoption(parser):
group = parser.getgroup("pylib")
group.addoption('--no-tools-on-path',
action="store_true", dest="notoolsonpath", default=False,
help=("discover tools on PATH instead of going through py.cmdline.")
)
def pytest_configure(config):
# This might be called multiple times. Only take the first.
global _pytest_fullpath
try:
_pytest_fullpath
except NameError:
_pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
_pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
def pytest_funcarg___pytest(request):
return PytestArg(request)
class PytestArg:
def __init__(self, request):
self.request = request
def gethookrecorder(self, hook):
hookrecorder = HookRecorder(hook._pm)
hookrecorder.start_recording(hook._hookspecs)
self.request.addfinalizer(hookrecorder.finish_recording)
return hookrecorder
class ParsedCall:
def __init__(self, name, locals):
assert '_name' not in locals
self.__dict__.update(locals)
self.__dict__.pop('self')
self._name = name
def __repr__(self):
d = self.__dict__.copy()
del d['_name']
return "<ParsedCall %r(**%r)>" %(self._name, d)
class HookRecorder:
def __init__(self, pluginmanager):
self._pluginmanager = pluginmanager
self.calls = []
self._recorders = {}
def start_recording(self, hookspecs):
if not isinstance(hookspecs, (list, tuple)):
hookspecs = [hookspecs]
for hookspec in hookspecs:
assert hookspec not in self._recorders
class RecordCalls:
_recorder = self
for name, method in vars(hookspec).items():
if name[0] != "_":
setattr(RecordCalls, name, self._makecallparser(method))
recorder = RecordCalls()
self._recorders[hookspec] = recorder
self._pluginmanager.register(recorder)
self.hook = HookRelay(hookspecs, pm=self._pluginmanager,
prefix="pytest_")
def finish_recording(self):
for recorder in self._recorders.values():
if self._pluginmanager.isregistered(recorder):
self._pluginmanager.unregister(recorder)
self._recorders.clear()
def _makecallparser(self, method):
name = method.__name__
args, varargs, varkw, default = inspect.getargspec(method)
if not args or args[0] != "self":
args.insert(0, 'self')
fspec = inspect.formatargspec(args, varargs, varkw, default)
# we use exec because we want to have early type
# errors on wrong input arguments, using
# *args/**kwargs delays this and gives errors
# elsewhere
exec (py.code.compile("""
def %(name)s%(fspec)s:
self._recorder.calls.append(
ParsedCall(%(name)r, locals()))
""" % locals()))
return locals()[name]
def getcalls(self, names):
if isinstance(names, str):
names = names.split()
for name in names:
for cls in self._recorders:
if name in vars(cls):
break
else:
raise ValueError("callname %r not found in %r" %(
name, self._recorders.keys()))
l = []
for call in self.calls:
if call._name in names:
l.append(call)
return l
def contains(self, entries):
__tracebackhide__ = True
i = 0
entries = list(entries)
backlocals = sys._getframe(1).f_locals
while entries:
name, check = entries.pop(0)
for ind, call in enumerate(self.calls[i:]):
if call._name == name:
print_("NAMEMATCH", name, call)
if eval(check, backlocals, call.__dict__):
print_("CHECKERMATCH", repr(check), "->", call)
else:
print_("NOCHECKERMATCH", repr(check), "-", call)
continue
i += ind + 1
break
print_("NONAMEMATCH", name, "with", call)
else:
pytest.fail("could not find %r check %r" % (name, check))
def popcall(self, name):
__tracebackhide__ = True
for i, call in enumerate(self.calls):
if call._name == name:
del self.calls[i]
return call
lines = ["could not find call %r, in:" % (name,)]
lines.extend([" %s" % str(x) for x in self.calls])
pytest.fail("\n".join(lines))
def getcall(self, name):
l = self.getcalls(name)
assert len(l) == 1, (name, l)
return l[0]
def pytest_funcarg__linecomp(request):
return LineComp()
def pytest_funcarg__LineMatcher(request):
return LineMatcher
def pytest_funcarg__testdir(request):
tmptestdir = TmpTestdir(request)
return tmptestdir
rex_outcome = re.compile("(\d+) (\w+)")
class RunResult:
def __init__(self, ret, outlines, errlines, duration):
self.ret = ret
self.outlines = outlines
self.errlines = errlines
self.stdout = LineMatcher(outlines)
self.stderr = LineMatcher(errlines)
self.duration = duration
def parseoutcomes(self):
for line in reversed(self.outlines):
if 'seconds' in line:
outcomes = rex_outcome.findall(line)
if outcomes:
d = {}
for num, cat in outcomes:
d[cat] = int(num)
return d
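# Illustrative sketch: given a captured summary line such as
# "== 1 failed, 2 passed in 3 seconds ==", parseoutcomes() scans it with
# rex_outcome and returns the counts per category; note that the trailing
# "3 seconds" also matches the pattern:
#
#     {'failed': 1, 'passed': 2, 'seconds': 3}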
class TmpTestdir:
def __init__(self, request):
self.request = request
self.Config = request.config.__class__
self._pytest = request.getfuncargvalue("_pytest")
# XXX remove duplication with tmpdir plugin
basetmp = request.config._tmpdirhandler.ensuretemp("testdir")
name = request.function.__name__
for i in range(100):
try:
tmpdir = basetmp.mkdir(name + str(i))
except py.error.EEXIST:
continue
break
self.tmpdir = tmpdir
self.plugins = []
self._syspathremove = []
self.chdir() # always chdir
self.request.addfinalizer(self.finalize)
def __repr__(self):
return "<TmpTestdir %r>" % (self.tmpdir,)
def finalize(self):
for p in self._syspathremove:
sys.path.remove(p)
if hasattr(self, '_olddir'):
self._olddir.chdir()
# delete modules that have been loaded from tmpdir
for name, mod in list(sys.modules.items()):
if mod:
fn = getattr(mod, '__file__', None)
if fn and fn.startswith(str(self.tmpdir)):
del sys.modules[name]
def getreportrecorder(self, obj):
if hasattr(obj, 'config'):
obj = obj.config
if hasattr(obj, 'hook'):
obj = obj.hook
assert hasattr(obj, '_hookspecs'), obj
reprec = ReportRecorder(obj)
reprec.hookrecorder = self._pytest.gethookrecorder(obj)
reprec.hook = reprec.hookrecorder.hook
return reprec
def chdir(self):
old = self.tmpdir.chdir()
if not hasattr(self, '_olddir'):
self._olddir = old
def _makefile(self, ext, args, kwargs):
items = list(kwargs.items())
if args:
source = py.builtin._totext("\n").join(
map(py.builtin._totext, args)) + py.builtin._totext("\n")
basename = self.request.function.__name__
items.insert(0, (basename, source))
ret = None
for name, value in items:
p = self.tmpdir.join(name).new(ext=ext)
source = py.code.Source(value)
def my_totext(s, encoding="utf-8"):
if py.builtin._isbytes(s):
s = py.builtin._totext(s, encoding=encoding)
return s
source_unicode = "\n".join([my_totext(line) for line in source.lines])
source = py.builtin._totext(source_unicode)
content = source.strip().encode("utf-8") # + "\n"
#content = content.rstrip() + "\n"
p.write(content, "wb")
if ret is None:
ret = p
return ret
def makefile(self, ext, *args, **kwargs):
return self._makefile(ext, args, kwargs)
def makeconftest(self, source):
return self.makepyfile(conftest=source)
def makeini(self, source):
return self.makefile('.ini', tox=source)
def getinicfg(self, source):
p = self.makeini(source)
return py.iniconfig.IniConfig(p)['pytest']
def makepyfile(self, *args, **kwargs):
return self._makefile('.py', args, kwargs)
def maketxtfile(self, *args, **kwargs):
return self._makefile('.txt', args, kwargs)
def syspathinsert(self, path=None):
if path is None:
path = self.tmpdir
sys.path.insert(0, str(path))
self._syspathremove.append(str(path))
def mkdir(self, name):
return self.tmpdir.mkdir(name)
def mkpydir(self, name):
p = self.mkdir(name)
p.ensure("__init__.py")
return p
Session = Session
def getnode(self, config, arg):
session = Session(config)
assert '::' not in str(arg)
p = py.path.local(arg)
x = session.fspath.bestrelpath(p)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([x], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def getpathnode(self, path):
config = self.parseconfigure(path)
session = Session(config)
x = session.fspath.bestrelpath(path)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([x], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def genitems(self, colitems):
session = colitems[0].session
result = []
for colitem in colitems:
result.extend(session.genitems(colitem))
return result
def runitem(self, source):
# used from runner functional tests
item = self.getitem(source)
# the test class where we are called from wants to provide the runner
testclassinstance = self.request.instance
runner = testclassinstance.getrunner()
return runner(item)
def inline_runsource(self, source, *cmdlineargs):
p = self.makepyfile(source)
l = list(cmdlineargs) + [p]
return self.inline_run(*l)
def inline_runsource1(self, *args):
args = list(args)
source = args.pop()
p = self.makepyfile(source)
l = list(args) + [p]
reprec = self.inline_run(*l)
reports = reprec.getreports("pytest_runtest_logreport")
assert len(reports) == 3, reports # setup/call/teardown
return reports[1]
def inline_genitems(self, *args):
return self.inprocess_run(list(args) + ['--collectonly'])
def inline_run(self, *args):
items, rec = self.inprocess_run(args)
return rec
def inprocess_run(self, args, plugins=None):
rec = []
items = []
class Collect:
def pytest_configure(x, config):
rec.append(self.getreportrecorder(config))
def pytest_itemcollected(self, item):
items.append(item)
if not plugins:
plugins = []
plugins.append(Collect())
ret = pytest.main(list(args), plugins=plugins)
reprec = rec[0]
reprec.ret = ret
assert len(rec) == 1
return items, reprec
def parseconfig(self, *args):
args = [str(x) for x in args]
for x in args:
if str(x).startswith('--basetemp'):
break
else:
args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp'))
import _pytest.config
config = _pytest.config._prepareconfig(args, self.plugins)
# we don't know what the test will do with this half-setup config
# object and thus we make sure it gets unconfigured properly in any
# case (otherwise capturing could still be active, for example)
def ensure_unconfigure():
if hasattr(config.pluginmanager, "_config"):
config.pluginmanager.do_unconfigure(config)
config.pluginmanager.ensure_shutdown()
self.request.addfinalizer(ensure_unconfigure)
return config
def parseconfigure(self, *args):
config = self.parseconfig(*args)
config.do_configure()
self.request.addfinalizer(lambda:
config.do_unconfigure())
return config
def getitem(self, source, funcname="test_func"):
items = self.getitems(source)
for item in items:
if item.name == funcname:
return item
assert 0, "%r item not found in module:\n%s\nitems: %s" %(
funcname, source, items)
def getitems(self, source):
modcol = self.getmodulecol(source)
return self.genitems([modcol])
def getmodulecol(self, source, configargs=(), withinit=False):
kw = {self.request.function.__name__: py.code.Source(source).strip()}
path = self.makepyfile(**kw)
if withinit:
self.makepyfile(__init__ = "#")
self.config = config = self.parseconfigure(path, *configargs)
node = self.getnode(config, path)
return node
def collect_by_name(self, modcol, name):
for colitem in modcol._memocollect():
if colitem.name == name:
return colitem
def popen(self, cmdargs, stdout, stderr, **kw):
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(filter(None, [
str(os.getcwd()), env.get('PYTHONPATH', '')]))
kw['env'] = env
return subprocess.Popen(cmdargs,
stdout=stdout, stderr=stderr, **kw)
def run(self, *cmdargs):
return self._run(*cmdargs)
def _run(self, *cmdargs):
cmdargs = [str(x) for x in cmdargs]
p1 = self.tmpdir.join("stdout")
p2 = self.tmpdir.join("stderr")
print_("running", cmdargs, "curdir=", py.path.local())
f1 = codecs.open(str(p1), "w", encoding="utf8")
f2 = codecs.open(str(p2), "w", encoding="utf8")
try:
now = time.time()
popen = self.popen(cmdargs, stdout=f1, stderr=f2,
close_fds=(sys.platform != "win32"))
ret = popen.wait()
finally:
f1.close()
f2.close()
f1 = codecs.open(str(p1), "r", encoding="utf8")
f2 = codecs.open(str(p2), "r", encoding="utf8")
try:
out = f1.read().splitlines()
err = f2.read().splitlines()
finally:
f1.close()
f2.close()
self._dump_lines(out, sys.stdout)
self._dump_lines(err, sys.stderr)
return RunResult(ret, out, err, time.time()-now)
def _dump_lines(self, lines, fp):
try:
for line in lines:
py.builtin.print_(line, file=fp)
except UnicodeEncodeError:
print("couldn't print to %s because of encoding" % (fp,))
def runpybin(self, scriptname, *args):
fullargs = self._getpybinargs(scriptname) + args
return self.run(*fullargs)
def _getpybinargs(self, scriptname):
if not self.request.config.getvalue("notoolsonpath"):
# XXX we rely on script referring to the correct environment
# we cannot use "(sys.executable,script)"
# because on windows the script is e.g. a py.test.exe
return (sys.executable, _pytest_fullpath,) # noqa
else:
pytest.skip("cannot run %r with --no-tools-on-path" % scriptname)
def runpython(self, script, prepend=True):
if prepend:
s = self._getsysprepend()
if s:
script.write(s + "\n" + script.read())
return self.run(sys.executable, script)
def _getsysprepend(self):
if self.request.config.getvalue("notoolsonpath"):
s = "import sys;sys.path.insert(0,%r);" % str(py._pydir.dirpath())
else:
s = ""
return s
def runpython_c(self, command):
command = self._getsysprepend() + command
return self.run(sys.executable, "-c", command)
def runpytest(self, *args):
p = py.path.local.make_numbered_dir(prefix="runpytest-",
keep=None, rootdir=self.tmpdir)
args = ('--basetemp=%s' % p, ) + args
#for x in args:
# if '--confcutdir' in str(x):
# break
#else:
# pass
# args = ('--confcutdir=.',) + args
plugins = [x for x in self.plugins if isinstance(x, str)]
if plugins:
args = ('-p', plugins[0]) + args
return self.runpybin("py.test", *args)
def spawn_pytest(self, string, expect_timeout=10.0):
if self.request.config.getvalue("notoolsonpath"):
pytest.skip("--no-tools-on-path prevents running pexpect-spawn tests")
basetemp = self.tmpdir.mkdir("pexpect")
invoke = " ".join(map(str, self._getpybinargs("py.test")))
cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
return self.spawn(cmd, expect_timeout=expect_timeout)
def spawn(self, cmd, expect_timeout=10.0):
pexpect = pytest.importorskip("pexpect", "3.0")
if hasattr(sys, 'pypy_version_info') and '64' in platform.machine():
pytest.skip("pypy-64 bit not supported")
if sys.platform == "darwin":
pytest.xfail("pexpect does not work reliably on darwin?!")
if sys.platform.startswith("freebsd"):
pytest.xfail("pexpect does not work reliably on freebsd")
logfile = self.tmpdir.join("spawn.out").open("wb")
child = pexpect.spawn(cmd, logfile=logfile)
self.request.addfinalizer(logfile.close)
child.timeout = expect_timeout
return child
def getdecoded(out):
try:
return out.decode("utf-8")
except UnicodeDecodeError:
return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
py.io.saferepr(out),)
class ReportRecorder(object):
def __init__(self, hook):
self.hook = hook
self.pluginmanager = hook._pm
self.pluginmanager.register(self)
def getcall(self, name):
return self.hookrecorder.getcall(name)
def popcall(self, name):
return self.hookrecorder.popcall(name)
def getcalls(self, names):
""" return list of ParsedCall instances matching the given eventname. """
return self.hookrecorder.getcalls(names)
# functionality for test reports
def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
return [x.report for x in self.getcalls(names)]
def matchreport(self, inamepart="",
names="pytest_runtest_logreport pytest_collectreport", when=None):
""" return a testreport whose dotted import path matches """
l = []
for rep in self.getreports(names=names):
try:
if not when and rep.when != "call" and rep.passed:
# setup/teardown passing reports - let's ignore those
continue
except AttributeError:
pass
if when and getattr(rep, 'when', None) != when:
continue
if not inamepart or inamepart in rep.nodeid.split("::"):
l.append(rep)
if not l:
raise ValueError("could not find test report matching %r: no test reports at all!" %
(inamepart,))
if len(l) > 1:
raise ValueError("found more than one testreport matching %r: %s" %(
inamepart, l))
return l[0]
def getfailures(self, names='pytest_runtest_logreport pytest_collectreport'):
return [rep for rep in self.getreports(names) if rep.failed]
def getfailedcollections(self):
return self.getfailures('pytest_collectreport')
def listoutcomes(self):
passed = []
skipped = []
failed = []
for rep in self.getreports(
"pytest_collectreport pytest_runtest_logreport"):
if rep.passed:
if getattr(rep, "when", None) == "call":
passed.append(rep)
elif rep.skipped:
skipped.append(rep)
elif rep.failed:
failed.append(rep)
return passed, skipped, failed
def countoutcomes(self):
return [len(x) for x in self.listoutcomes()]
def assertoutcome(self, passed=0, skipped=0, failed=0):
realpassed, realskipped, realfailed = self.listoutcomes()
assert passed == len(realpassed)
assert skipped == len(realskipped)
assert failed == len(realfailed)
def clear(self):
self.hookrecorder.calls[:] = []
def unregister(self):
self.pluginmanager.unregister(self)
self.hookrecorder.finish_recording()
class LineComp:
def __init__(self):
self.stringio = py.io.TextIO()
def assert_contains_lines(self, lines2):
""" assert that lines2 are contained (linearly) in lines1.
return a list of extralines found.
"""
__tracebackhide__ = True
val = self.stringio.getvalue()
self.stringio.truncate(0)
self.stringio.seek(0)
lines1 = val.split("\n")
return LineMatcher(lines1).fnmatch_lines(lines2)
class LineMatcher:
def __init__(self, lines):
self.lines = lines
def str(self):
return "\n".join(self.lines)
def _getlines(self, lines2):
if isinstance(lines2, str):
lines2 = py.code.Source(lines2)
if isinstance(lines2, py.code.Source):
lines2 = lines2.strip().lines
return lines2
def fnmatch_lines_random(self, lines2):
lines2 = self._getlines(lines2)
for line in lines2:
for x in self.lines:
if line == x or fnmatch(x, line):
print_("matched: ", repr(line))
break
else:
raise ValueError("line %r not found in output" % line)
def get_lines_after(self, fnline):
for i, line in enumerate(self.lines):
if fnline == line or fnmatch(line, fnline):
return self.lines[i+1:]
raise ValueError("line %r not found in output" % fnline)
def fnmatch_lines(self, lines2):
def show(arg1, arg2):
py.builtin.print_(arg1, arg2, file=sys.stderr)
lines2 = self._getlines(lines2)
lines1 = self.lines[:]
nextline = None
extralines = []
__tracebackhide__ = True
for line in lines2:
nomatchprinted = False
while lines1:
nextline = lines1.pop(0)
if line == nextline:
show("exact match:", repr(line))
break
elif fnmatch(nextline, line):
show("fnmatch:", repr(line))
show(" with:", repr(nextline))
break
else:
if not nomatchprinted:
show("nomatch:", repr(line))
nomatchprinted = True
show(" and:", repr(nextline))
extralines.append(nextline)
else:
pytest.fail("remains unmatched: %r, see stderr" % (line,))
| {
"content_hash": "f224ac0e967ee3e3bc602feecff50be2",
"timestamp": "",
"source": "github",
"line_count": 702,
"max_line_length": 96,
"avg_line_length": 35.03703703703704,
"alnum_prop": 0.5642380874939015,
"repo_name": "mitodl/xanalytics",
"id": "9e987ae039ecd15f11cd112ddd07e53aecb859c1",
"size": "24596",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pytest/pytester.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "93098"
},
{
"name": "HTML",
"bytes": "148637"
},
{
"name": "JavaScript",
"bytes": "4636968"
},
{
"name": "Python",
"bytes": "2675424"
}
],
"symlink_target": ""
} |
"""Tools for testing."""
# pylint: disable=too-many-lines
from __future__ import absolute_import, print_function, division
import time
import gzip
import struct
import traceback
import numbers
import subprocess
import sys
import os
import errno
import logging
import bz2
from contextlib import contextmanager
import numpy as np
import numpy.testing as npt
import numpy.random as rnd
try:
import requests
except ImportError:
    # in rare cases requests may not be installed
pass
import mxnet as mx
from .context import Context
from .ndarray.ndarray import _STORAGE_TYPE_STR_TO_ID
from .ndarray import array
from .symbol import Symbol
_rng = np.random.RandomState(1234)
def default_context():
"""Get default context for regression test."""
# _TODO: get context from environment variable to support
# testing with GPUs
return Context.default_ctx
def set_default_context(ctx):
"""Set default context."""
Context.default_ctx = ctx
def default_dtype():
"""Get default data type for regression test."""
# _TODO: get default dtype from environment variable
return np.float32
def get_atol(atol=None):
"""Get default numerical threshold for regression test."""
# _TODO: get from env variable, different threshold might
# be needed for different device and dtype
return 1e-20 if atol is None else atol
def get_rtol(rtol=None):
"""Get default numerical threshold for regression test."""
# _TODO: get from env variable, different threshold might
# be needed for different device and dtype
return 1e-5 if rtol is None else rtol
def random_arrays(*shapes):
"""Generate some random numpy arrays."""
arrays = [np.random.randn(*s).astype(default_dtype())
for s in shapes]
if len(arrays) == 1:
return arrays[0]
return arrays
def random_sample(population, k):
"""Return a k length list of the elements chosen from the population sequence."""
assert 0 <= k <= len(population)
population_copy = population[:]
np.random.shuffle(population_copy)
return population_copy[0:k]
def _validate_csr_generation_inputs(num_rows, num_cols, density,
distribution="uniform"):
"""Validates inputs for csr generation helper functions
"""
total_nnz = int(num_rows * num_cols * density)
if density < 0 or density > 1:
raise ValueError("density has to be between 0 and 1")
if num_rows <= 0 or num_cols <= 0:
raise ValueError("num_rows or num_cols should be greater than 0")
if distribution == "powerlaw":
if total_nnz < 2 * num_rows:
raise ValueError("not supported for this density: %s"
" for this shape (%s, %s)"
" Please keep :"
" num_rows * num_cols * density >= 2 * num_rows"
% (density, num_rows, num_cols))
def shuffle_csr_column_indices(csr):
"""Shuffle CSR column indices per row
This allows validation of unordered column indices, which is not a requirement
for a valid CSR matrix
"""
row_count = len(csr.indptr) - 1
for i in range(row_count):
start_index = csr.indptr[i]
end_index = csr.indptr[i + 1]
sublist = np.array(csr.indices[start_index : end_index])
np.random.shuffle(sublist)
csr.indices[start_index : end_index] = sublist
def _get_uniform_dataset_csr(num_rows, num_cols, density=0.1, dtype=None,
data_init=None, shuffle_csr_indices=False):
"""Returns CSRNDArray with uniform distribution
This generates a csr matrix with totalnnz unique randomly chosen numbers
from num_rows*num_cols and arranges them in the 2d array in the
following way:
row_index = (random_number_generated / num_rows)
col_index = random_number_generated - row_index * num_cols
"""
_validate_csr_generation_inputs(num_rows, num_cols, density,
distribution="uniform")
try:
from scipy import sparse as spsp
csr = spsp.rand(num_rows, num_cols, density, dtype=dtype, format="csr")
if data_init is not None:
csr.data.fill(data_init)
if shuffle_csr_indices is True:
shuffle_csr_column_indices(csr)
result = mx.nd.sparse.csr_matrix(csr.data, csr.indptr, csr.indices,
(num_rows, num_cols), dtype=dtype)
except ImportError:
assert(data_init is None), \
"data_init option is not supported when scipy is absent"
assert(not shuffle_csr_indices), \
"shuffle_csr_indices option is not supported when scipy is absent"
# scipy not available. try to generate one from a dense array
dns = mx.nd.random.uniform(shape=(num_rows, num_cols), dtype=dtype)
masked_dns = dns * (dns < density)
result = masked_dns.tostype('csr')
return result
def _get_powerlaw_dataset_csr(num_rows, num_cols, density=0.1, dtype=None):
"""Returns CSRNDArray with powerlaw distribution
with exponentially increasing number of non zeros in each row.
Not supported for cases where total_nnz < 2*num_rows. This is because
    the algorithm first tries to ensure that no row is empty by
    putting a non-zero at the beginning of each row.
"""
_validate_csr_generation_inputs(num_rows, num_cols, density,
distribution="powerlaw")
total_nnz = int(num_rows * num_cols * density)
unused_nnz = total_nnz
output_arr = np.zeros((num_rows, num_cols), dtype=dtype)
# Start with ones on each row so that no row is empty
for row in range(num_rows):
output_arr[row][0] = 1 + rnd.uniform(0.001, 2)
unused_nnz = unused_nnz - 1
if unused_nnz <= 0:
return mx.nd.array(output_arr).tostype("csr")
# Populate rest of matrix with 2^i items in ith row.
# if we have used all total nnz return the sparse matrix
# else if we reached max column size then fill up full columns until we use all nnz
col_max = 2
for row in range(num_rows):
col_limit = min(num_cols, col_max)
        # If col_limit has reached num_cols, assign the same value to all remaining elements, which is much faster
if col_limit == num_cols and unused_nnz > col_limit:
output_arr[row] = 1 + rnd.uniform(0.001, 2)
unused_nnz = unused_nnz - col_limit + 1
if unused_nnz <= 0:
return mx.nd.array(output_arr).tostype("csr")
else:
continue
for col_index in range(1, col_limit):
output_arr[row][col_index] = 1 + rnd.uniform(0.001, 2)
unused_nnz = unused_nnz - 1
if unused_nnz <= 0:
return mx.nd.array(output_arr).tostype("csr")
col_max = col_max * 2
if unused_nnz > 0:
raise ValueError("not supported for this density: %s"
" for this shape (%s,%s)" % (density, num_rows, num_cols))
else:
return mx.nd.array(output_arr).tostype("csr")
def assign_each(the_input, function):
"""Return ndarray composed of passing each array value through some function"""
if function is not None:
it_input = np.nditer(the_input, flags=['f_index'])
output = np.zeros(the_input.shape)
it_out = np.nditer(output, flags=['f_index'], op_flags=['writeonly'])
while not it_input.finished:
val_input = it_input[0]
it_out[0] = function(val_input)
it_input.iternext()
it_out.iternext()
return output
else:
return np.array(the_input)
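# Illustrative example: the function is applied element-wise and the result is
# collected into a new float ndarray.
#
#     >>> assign_each(np.array([1, 2, 3]), lambda x: x * 2)
#     array([2., 4., 6.])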
def assign_each2(input1, input2, function):
"""Return ndarray composed of passing two array values through some function"""
if function is not None:
assert input1.shape == input2.shape
it_input1 = np.nditer(input1, flags=['f_index'])
it_input2 = np.nditer(input2, flags=['f_index'])
output = np.zeros(input1.shape)
it_out = np.nditer(output, flags=['f_index'], op_flags=['writeonly'])
while not it_input1.finished:
val_input1 = it_input1[0]
val_input2 = it_input2[0]
it_out[0] = function(val_input1, val_input2)
it_input1.iternext()
it_input2.iternext()
it_out.iternext()
return output
else:
return np.array(input1)
# TODO(haibin) also include types in arguments
def rand_sparse_ndarray(shape, stype, density=None, dtype=None, distribution=None,
data_init=None, rsp_indices=None, modifier_func=None,
shuffle_csr_indices=False):
"""Generate a random sparse ndarray. Returns the ndarray, value(np) and indices(np)
Parameters
----------
shape: list or tuple
stype: str, valid values: "csr" or "row_sparse"
density, optional: float, should be between 0 and 1
distribution, optional: str, valid values: "uniform" or "powerlaw"
dtype, optional: numpy.dtype, default value is None
Returns
-------
Result of type CSRNDArray or RowSparseNDArray
Examples
--------
Below is an example of the powerlaw distribution with csr as the stype.
It calculates the nnz using the shape and density.
It fills up the ndarray with exponentially increasing number of elements.
    If there are enough unused nnzs, the (n+1)th row will have twice as many nnzs as the nth row;
    otherwise, the remaining unused nnzs are placed in the (n+1)th row.
    If the number of columns is too small and the maximum column size has already been reached,
    it fills up full columns in all following rows until the required density is reached.
>>> csr_arr, _ = rand_sparse_ndarray(shape=(5, 16), stype="csr",
density=0.50, distribution="powerlaw")
>>> indptr = csr_arr.indptr.asnumpy()
>>> indices = csr_arr.indices.asnumpy()
>>> data = csr_arr.data.asnumpy()
>>> row2nnz = len(data[indptr[1]:indptr[2]])
>>> row3nnz = len(data[indptr[2]:indptr[3]])
>>> assert(row3nnz == 2*row2nnz)
>>> row4nnz = len(data[indptr[3]:indptr[4]])
>>> assert(row4nnz == 2*row3nnz)
"""
density = rnd.rand() if density is None else density
dtype = default_dtype() if dtype is None else dtype
distribution = "uniform" if distribution is None else distribution
if stype == 'row_sparse':
assert (distribution == "uniform"), \
"Distribution %s not supported for row_sparse" % (distribution)
# sample index
if rsp_indices is not None:
indices = rsp_indices
assert(len(indices) <= shape[0])
else:
idx_sample = rnd.rand(shape[0])
indices = np.argwhere(idx_sample < density).flatten()
if indices.shape[0] == 0:
result = mx.nd.zeros(shape, stype='row_sparse', dtype=dtype)
return result, (np.array([], dtype=dtype), np.array([], dtype='int64'))
# generate random values
val = rnd.rand(indices.shape[0], *shape[1:]).astype(dtype)
# Allow caller to override or adjust random values
if data_init is not None:
val.fill(data_init)
if modifier_func is not None:
val = assign_each(val, modifier_func)
arr = mx.nd.sparse.row_sparse_array(val, indices, shape, indices_type=np.int64, dtype=dtype)
return arr, (val, indices)
elif stype == 'csr':
assert len(shape) == 2
if distribution == "uniform":
csr = _get_uniform_dataset_csr(shape[0], shape[1], density,
data_init=data_init,
shuffle_csr_indices=shuffle_csr_indices, dtype=dtype)
return csr, (csr.indptr, csr.indices, csr.data)
elif distribution == "powerlaw":
csr = _get_powerlaw_dataset_csr(shape[0], shape[1], density=density, dtype=dtype)
return csr, (csr.indptr, csr.indices, csr.data)
else:
assert(False), "Distribution not supported: %s" % (distribution)
else:
assert(False), "unknown storage type"
def rand_ndarray(shape, stype, density=None, dtype=None,
modifier_func=None, shuffle_csr_indices=False, distribution=None):
if stype == 'default':
arr = mx.nd.array(random_arrays(shape), dtype=dtype)
else:
arr, _ = rand_sparse_ndarray(shape, stype, density=density,
modifier_func=modifier_func, dtype=dtype,
shuffle_csr_indices=shuffle_csr_indices,
distribution=distribution)
return arr
def create_sparse_array(shape, stype, data_init=None, rsp_indices=None,
dtype=None, modifier_func=None, density=.5,
shuffle_csr_indices=False):
"""Create a sparse array, For Rsp, assure indices are in a canonical format"""
if stype == 'row_sparse':
if rsp_indices is not None:
arr_indices = np.asarray(rsp_indices)
arr_indices.sort()
else:
arr_indices = None
arr_data, (_, _) = rand_sparse_ndarray(shape, stype,
density=density,
data_init=data_init,
rsp_indices=arr_indices,
dtype=dtype,
modifier_func=modifier_func)
elif stype == 'csr':
arr_data, (_, _, _) = rand_sparse_ndarray(shape,
stype,
density=density,
data_init=data_init,
dtype=dtype,
modifier_func=modifier_func,
shuffle_csr_indices=shuffle_csr_indices)
else:
msg = "Unknown storage type: " + stype
raise AssertionError(msg)
return arr_data
def create_sparse_array_zd(shape, stype, density, data_init=None,
rsp_indices=None, dtype=None, modifier_func=None,
shuffle_csr_indices=False):
"""Create sparse array, using only rsp_indices to determine density"""
if stype == 'row_sparse':
density = 0.0
if rsp_indices is not None:
assert len(rsp_indices) <= shape[0]
return create_sparse_array(shape, stype,
data_init=data_init,
rsp_indices=rsp_indices,
dtype=dtype,
modifier_func=modifier_func,
density=density,
shuffle_csr_indices=shuffle_csr_indices)
def rand_shape_2d(dim0=10, dim1=10):
return rnd.randint(1, dim0 + 1), rnd.randint(1, dim1 + 1)
def rand_shape_3d(dim0=10, dim1=10, dim2=10):
return rnd.randint(1, dim0 + 1), rnd.randint(1, dim1 + 1), rnd.randint(1, dim2 + 1)
def rand_shape_nd(num_dim, dim=10):
return tuple(rnd.randint(1, dim+1, size=num_dim))
def np_reduce(dat, axis, keepdims, numpy_reduce_func):
"""Compatible reduce for old version of NumPy.
Parameters
----------
dat : np.ndarray
Same as NumPy.
axis : None or int or list-like
Same as NumPy.
keepdims : bool
Same as NumPy.
numpy_reduce_func : function
A NumPy reducing function like ``np.sum`` or ``np.max``.
"""
if isinstance(axis, int):
axis = [axis]
else:
axis = list(axis) if axis is not None else range(len(dat.shape))
ret = dat
for i in reversed(sorted(axis)):
ret = numpy_reduce_func(ret, axis=i)
if keepdims:
keepdims_shape = list(dat.shape)
for i in axis:
keepdims_shape[i] = 1
ret = ret.reshape(tuple(keepdims_shape))
return ret
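# Illustrative example, assuming the default float32 test dtype:
#
#     >>> dat = np.arange(6).reshape(2, 3).astype(np.float32)
#     >>> np_reduce(dat, axis=1, keepdims=True, numpy_reduce_func=np.sum)
#     array([[ 3.],
#            [12.]], dtype=float32)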
def find_max_violation(a, b, rtol=None, atol=None):
"""Finds and returns the location of maximum violation."""
rtol = get_rtol(rtol)
atol = get_atol(atol)
diff = np.abs(a-b)
tol = atol + rtol*np.abs(b)
violation = diff/(tol+1e-20)
loc = np.argmax(violation)
idx = np.unravel_index(loc, violation.shape)
return idx, np.max(violation)
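# Illustrative example: with the default tolerances (rtol=1e-5, atol=1e-20) the
# violation at each position is |a-b| / (atol + rtol*|b|), and the index of the
# largest violation is returned.
#
#     >>> find_max_violation(np.array([1.0, 2.0]), np.array([1.0, 2.5]))
#     ((1,), 20000.0)   # approximately 0.5 / 2.5e-5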
def same(a, b):
"""Test if two NumPy arrays are the same.
Parameters
----------
a : np.ndarray
b : np.ndarray
"""
return np.array_equal(a, b)
def almost_equal(a, b, rtol=None, atol=None, equal_nan=False):
"""Test if two numpy arrays are almost equal."""
return np.allclose(a, b, rtol=get_rtol(rtol), atol=get_atol(atol), equal_nan=equal_nan)
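# Illustrative example: with the default rtol of 1e-5, a relative error of 1e-7
# is accepted while 1e-3 is not.
#
#     >>> almost_equal(np.array([1.0]), np.array([1.0 + 1e-7]))
#     True
#     >>> almost_equal(np.array([1.0]), np.array([1.001]))
#     False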
def assert_almost_equal(a, b, rtol=None, atol=None, names=('a', 'b'), equal_nan=False):
"""Test that two numpy arrays are almost equal. Raise exception message if not.
Parameters
----------
a : np.ndarray
b : np.ndarray
    rtol : None or float
        The relative threshold. Default threshold will be used if set to ``None``.
    atol : None or float
        The absolute threshold. Default threshold will be used if set to ``None``.
"""
rtol = get_rtol(rtol)
atol = get_atol(atol)
if almost_equal(a, b, rtol, atol, equal_nan=equal_nan):
return
index, rel = find_max_violation(a, b, rtol, atol)
np.set_printoptions(threshold=4, suppress=True)
msg = npt.build_err_msg([a, b],
err_msg="Error %f exceeds tolerance rtol=%f, atol=%f. "
" Location of maximum error:%s, a=%f, b=%f"
% (rel, rtol, atol, str(index), a[index], b[index]),
names=names)
raise AssertionError(msg)
def almost_equal_ignore_nan(a, b, rtol=None, atol=None):
"""Test that two NumPy arrays are almost equal (ignoring NaN in either array).
    Combines a relative and absolute measure of approximate equality.
If either the relative or absolute check passes, the arrays are considered equal.
Including an absolute check resolves issues with the relative check where all
array values are close to zero.
Parameters
----------
a : np.ndarray
b : np.ndarray
rtol : None or float
The relative threshold. Default threshold will be used if set to ``None``.
atol : None or float
The absolute threshold. Default threshold will be used if set to ``None``.
"""
a = np.copy(a)
b = np.copy(b)
nan_mask = np.logical_or(np.isnan(a), np.isnan(b))
a[nan_mask] = 0
b[nan_mask] = 0
return almost_equal(a, b, rtol, atol)
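# Illustrative example: NaN entries are masked out of both arrays before comparing.
#
#     >>> almost_equal_ignore_nan(np.array([1.0, np.nan]), np.array([1.0, 5.0]))
#     True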
def assert_almost_equal_ignore_nan(a, b, rtol=None, atol=None, names=('a', 'b')):
"""Test that two NumPy arrays are almost equal (ignoring NaN in either array).
    Combines a relative and absolute measure of approximate equality.
If either the relative or absolute check passes, the arrays are considered equal.
Including an absolute check resolves issues with the relative check where all
array values are close to zero.
Parameters
----------
a : np.ndarray
b : np.ndarray
rtol : None or float
The relative threshold. Default threshold will be used if set to ``None``.
atol : None or float
The absolute threshold. Default threshold will be used if set to ``None``.
"""
a = np.copy(a)
b = np.copy(b)
nan_mask = np.logical_or(np.isnan(a), np.isnan(b))
a[nan_mask] = 0
b[nan_mask] = 0
assert_almost_equal(a, b, rtol, atol, names)
def assert_exception(f, exception_type, *args, **kwargs):
"""Test that function f will throw an exception of type given by `exception_type`"""
try:
f(*args, **kwargs)
assert(False)
except exception_type:
return
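# Illustrative example:
#
#     assert_exception(int, ValueError, "not-a-number")  # passes: int() raises ValueError
#     assert_exception(int, ValueError, "42")            # fails: no exception is raised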
def retry(n):
"""Retry n times before failing for stochastic test cases."""
assert n > 0
def decorate(f):
"""Decorate a test case."""
def wrapper(*args, **kwargs):
"""Wrapper for tests function."""
for _ in range(n):
try:
f(*args, **kwargs)
except AssertionError as e:
err = e
continue
return
raise err
return wrapper
return decorate
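# Illustrative usage sketch for a stochastic test case (the test body is made up);
# the test is re-run up to 3 times and the last AssertionError is raised if all
# attempts fail:
#
#     @retry(3)
#     def test_random_draw():
#         assert rnd.rand() < 0.9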
def simple_forward(sym, ctx=None, is_train=False, **inputs):
"""A simple forward function for a symbol.
Primarily used in doctest to test the functionality of a symbol.
Takes NumPy arrays as inputs and outputs are also converted to NumPy arrays.
Parameters
----------
ctx : Context
If ``None``, will take the default context.
inputs : keyword arguments
Mapping each input name to a NumPy array.
Returns
-------
The result as a numpy array. Multiple results will
be returned as a list of NumPy arrays.
"""
ctx = ctx or default_context()
inputs = {k: array(v) for k, v in inputs.items()}
exe = sym.bind(ctx, args=inputs)
exe.forward(is_train=is_train)
outputs = [x.asnumpy() for x in exe.outputs]
if len(outputs) == 1:
outputs = outputs[0]
return outputs
def _parse_location(sym, location, ctx, dtype=default_dtype()):
"""Parses the given location to a dictionary.
Arguments of the provided op `sym` are used as dictionary keys
and elements of `location` are used as values.
Parameters
----------
sym : Symbol
Symbol containing op
location : list or tuple or dict
Argument values location
- if type is list or tuple of `np.ndarray`
            inner elements are arrays corresponding to
``sym.list_arguments()``.
- if type is dict of str -> `np.ndarray`
maps the name of arguments to the corresponding `np.ndarray`.
*In either case, value of all the arguments must be provided.*
ctx : Context
Device context.
dtype: np.float32 or np.float64
Datatype for mx.nd.array.
Returns
-------
dict
Dictionary with `sym` arguments as keys and `location` elements as
values.
Examples
-------
>>> a = mx.symbol.Variable('a')
>>> b = mx.symbol.Variable('b')
>>> l1 = np.ndarray([2,3])
>>> l2 = np.ndarray([3,4])
>>> _parse_location(a * b, [l1, l2], None)
{'a': <NDArray 2x3 @cpu(0)>, 'b': <NDArray 3x4 @cpu(0)>}
>>> _parse_location(a * b, {'a': l1, 'b': l2}, None)
{'a': <NDArray 2x3 @cpu(0)>, 'b': <NDArray 3x4 @cpu(0)>}
>>> _parse_location(a * b, {'a': l1}, None)
ValueError: Symbol arguments and keys of the given location do not match.
"""
assert isinstance(location, (dict, list, tuple))
assert dtype == np.float32 or dtype == np.float64
if isinstance(location, dict):
if set(location.keys()) != set(sym.list_arguments()):
raise ValueError("Symbol arguments and keys of the given location do not match."
"symbol args:%s, location.keys():%s"
% (str(set(sym.list_arguments())), str(set(location.keys()))))
else:
location = {k: v for k, v in zip(sym.list_arguments(), location)}
location = {k: mx.nd.array(v, ctx=ctx, dtype=dtype) if isinstance(v, np.ndarray) \
else v for k, v in location.items()}
return location
def _parse_aux_states(sym, aux_states, ctx, dtype=default_dtype()):
"""Parses the given auxiliary states to a dictionary.
Auxiliary states of the provided op `sym` are used as dictionary
keys and elements of `aux_states` are used as values.
Parameters
----------
sym : Symbol
Symbol containing op
aux_states : None or list or dict
Aux states
- if type is list or tuple of `np.ndarray`
            inner elements are arrays corresponding to
``sym.list_auxiliary_states()``.
- if type is dict of str -> `np.ndarray`
maps the name of arguments to the corresponding `np.ndarray`.
*In either case, all aux states of `sym` must be provided.*
ctx : Context
Device context.
dtype: np.float32 or np.float64
Datatype for mx.nd.array.
Returns
-------
dict
Dictionary with `sym` aux states as keys and `aux_states` elements
as values.
Examples
-------
>>> data = mx.symbol.Variable('data')
>>> weight = mx.sym.Variable(name='fc1_weight')
>>> fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128)
>>> fc2 = mx.symbol.BatchNorm(fc1, name='batchnorm0')
>>> mean_states = np.ones(3)
>>> var_states = np.ones(3)
>>> _parse_aux_states(fc2, [mean_states, var_states], None)
{'batchnorm0_moving_var': <NDArray 3 @cpu(0)>, 'batchnorm0_moving_mean': <NDArray 3 @cpu(0)>}
>>> _parse_aux_states(fc2, {'batchnorm0_moving_var': mean_states,
... 'batchnorm0_moving_mean': var_states}, None)
{'batchnorm0_moving_var': <NDArray 3 @cpu(0)>, 'batchnorm0_moving_mean': <NDArray 3 @cpu(0)>}
>>> _parse_aux_states(fc2, {'batchnorm0_moving_var': mean_states}, None)
ValueError: Symbol aux_states names and given aux_states do not match.
"""
assert dtype == np.float32 or dtype == np.float64
if aux_states is not None:
if isinstance(aux_states, dict):
if set(aux_states.keys()) != set(sym.list_auxiliary_states()):
raise ValueError("Symbol aux_states names and given aux_states do not match."
"symbol aux_names:%s, aux_states.keys:%s"
% (str(set(sym.list_auxiliary_states())),
str(set(aux_states.keys()))))
elif isinstance(aux_states, (list, tuple)):
aux_names = sym.list_auxiliary_states()
aux_states = {k:v for k, v in zip(aux_names, aux_states)}
aux_states = {k: mx.nd.array(v, ctx=ctx, dtype=dtype) for k, v in aux_states.items()}
return aux_states
def numeric_grad(executor, location, aux_states=None, eps=1e-4,
use_forward_train=True, dtype=default_dtype()):
"""Calculates a numeric gradient via finite difference method.
    Based on Theano's `theano.gradient.numeric_grad` [1]
Parameters
----------
executor : Executor
Executor that computes the forward pass.
location : list of numpy.ndarray or dict of str to numpy.ndarray
Argument values used as location to compute gradient
Maps the name of arguments to the corresponding numpy.ndarray.
Value of all the arguments must be provided.
aux_states : None or list of numpy.ndarray or dict of str to numpy.ndarray, optional
Auxiliary states values used as location to compute gradient
Maps the name of aux_states to the corresponding numpy.ndarray.
Value of all the auxiliary arguments must be provided.
eps : float, optional
Epsilon for the finite-difference method.
use_forward_train : bool, optional
Whether to use `is_train=True` in testing.
dtype: np.float32 or np.float64
Datatype for mx.nd.array.
References
---------
..[1] https://github.com/Theano/Theano/blob/master/theano/gradient.py
"""
def as_stype(var, stype, dtype):
return mx.nd.cast_storage(mx.nd.array(var, dtype=dtype), stype=stype)
assert dtype == np.float32 or dtype == np.float64
approx_grads = {k: np.zeros(v.shape, dtype=dtype)
for k, v in location.items()}
for k, v in location.items():
stype = executor.arg_dict[k].stype
if stype == 'default':
executor.arg_dict[k][:] = as_stype(v, stype, dtype=dtype)
for k in location:
location[k] = np.ascontiguousarray(location[k])
for k, v in location.items():
if v.dtype.kind != 'f':
continue
stype = executor.arg_dict[k].stype
old_value = v.copy()
for i in range(np.prod(v.shape)):
# inplace update
v.ravel()[i] += eps/2.0
executor.arg_dict[k][:] = as_stype(v, stype, dtype=dtype)
if aux_states is not None:
for key, val in aux_states.items():
executor.aux_dict[key][:] = val
executor.forward(is_train=use_forward_train)
f_peps = executor.outputs[0].asnumpy()
v.ravel()[i] -= eps
executor.arg_dict[k][:] = as_stype(v, stype, dtype=dtype)
if aux_states is not None:
for key, val in aux_states.items():
adstype = executor.aux_dict[key].stype
executor.aux_dict[key][:] = as_stype(val, adstype, dtype=dtype)
executor.forward(is_train=use_forward_train)
f_neps = executor.outputs[0].asnumpy()
approx_grad = (f_peps - f_neps).sum() / eps
approx_grads[k].ravel()[i] = approx_grad
v.ravel()[i] = old_value.ravel()[i]
# copy back the original value
executor.arg_dict[k][:] = as_stype(old_value, stype, dtype=dtype)
return approx_grads
def check_numeric_gradient(sym, location, aux_states=None, numeric_eps=1e-3, rtol=1e-2,
atol=None, grad_nodes=None, use_forward_train=True, ctx=None,
grad_stype_dict=None, dtype=default_dtype()):
"""Verify an operation by checking backward pass via finite difference method.
Based on Theano's `theano.gradient.verify_grad` [1]
Parameters
----------
sym : Symbol
Symbol containing op to test
location : list or tuple or dict
Argument values used as location to compute gradient
- if type is list of numpy.ndarray
inner elements should have the same order as mxnet.sym.list_arguments().
- if type is dict of str -> numpy.ndarray
maps the name of arguments to the corresponding numpy.ndarray.
*In either case, value of all the arguments must be provided.*
aux_states : list or tuple or dict, optional
The auxiliary states required when generating the executor for the symbol.
numeric_eps : float, optional
Delta for the finite difference method that approximates the gradient.
    rtol : float, optional
        Relative error threshold used when comparing the numeric and symbolic gradients.
    atol : float, optional
        Absolute error threshold used when comparing the numeric and symbolic gradients.
grad_nodes : None or list or tuple or dict, optional
Names of the nodes to check gradient on
use_forward_train : bool
Whether to use is_train=True when computing the finite-difference.
ctx : Context, optional
Check the gradient computation on the specified device.
grad_stype_dict : dict of str->str, optional
Storage type dictionary for gradient ndarrays.
dtype: np.float32 or np.float64
Datatype for mx.nd.array.
References
---------
..[1] https://github.com/Theano/Theano/blob/master/theano/gradient.py
"""
assert dtype == np.float32 or dtype == np.float64
if ctx is None:
ctx = default_context()
def random_projection(shape):
"""Get a random weight matrix with not too small elements
Parameters
----------
shape : list or tuple
"""
# random_projection should not have elements too small,
# otherwise too much precision is lost in numerical gradient
plain = _rng.rand(*shape) + 0.1
return plain
location = _parse_location(sym=sym, location=location, ctx=ctx, dtype=dtype)
location_npy = {k:v.asnumpy() for k, v in location.items()}
aux_states = _parse_aux_states(sym=sym, aux_states=aux_states, ctx=ctx,
dtype=dtype)
if aux_states is not None:
aux_states_npy = {k: v.asnumpy() for k, v in aux_states.items()}
else:
aux_states_npy = None
if grad_nodes is None:
grad_nodes = sym.list_arguments()
grad_req = {k: 'write' for k in grad_nodes}
elif isinstance(grad_nodes, (list, tuple)):
grad_nodes = list(grad_nodes)
grad_req = {k: 'write' for k in grad_nodes}
elif isinstance(grad_nodes, dict):
grad_req = grad_nodes.copy()
grad_nodes = grad_nodes.keys()
else:
raise ValueError
input_shape = {k: v.shape for k, v in location.items()}
_, out_shape, _ = sym.infer_shape(**input_shape)
proj = mx.sym.Variable("__random_proj")
out = sym * proj
out = mx.sym.MakeLoss(out)
location = dict(list(location.items()) +
[("__random_proj", mx.nd.array(random_projection(out_shape[0]),
ctx=ctx, dtype=dtype))])
args_grad_npy = dict([(k, _rng.normal(0, 0.01, size=location[k].shape)) for k in grad_nodes]
+ [("__random_proj", _rng.normal(0, 0.01, size=out_shape[0]))])
args_grad = {k: mx.nd.array(v, ctx=ctx, dtype=dtype) for k, v in args_grad_npy.items()}
if grad_stype_dict is not None:
assert isinstance(grad_stype_dict, dict), "grad_stype_dict must be a dict"
for k, v in grad_stype_dict.items():
if k in args_grad and v in _STORAGE_TYPE_STR_TO_ID and v != 'default':
# create an uninitialized sparse ndarray for executor
# if the symbolic grad is expected to be zero, it should not be initialized at all
args_grad[k] = mx.nd.zeros(args_grad[k].shape, args_grad[k].context,
args_grad[k].dtype, v)
executor = out.bind(ctx, grad_req=grad_req,
args=location, args_grad=args_grad, aux_states=aux_states)
inps = executor.arg_arrays
if len(inps) != len(location):
raise ValueError("Executor arg_arrays and and location len do not match."
"Got %d inputs and %d locations"%(len(inps), len(location)))
assert len(executor.outputs) == 1
executor.forward(is_train=True)
executor.backward()
symbolic_grads = {k:executor.grad_dict[k].asnumpy() for k in grad_nodes}
numeric_gradients = numeric_grad(
executor, location_npy, aux_states_npy,
eps=numeric_eps, use_forward_train=use_forward_train, dtype=dtype)
for name in grad_nodes:
fd_grad = numeric_gradients[name]
orig_grad = args_grad_npy[name]
sym_grad = symbolic_grads[name]
if grad_req[name] == 'write':
assert_almost_equal(fd_grad, sym_grad, rtol, atol,
("NUMERICAL_%s"%name, "BACKWARD_%s"%name))
elif grad_req[name] == 'add':
assert_almost_equal(fd_grad, sym_grad - orig_grad, rtol, atol,
("NUMERICAL_%s"%name, "BACKWARD_%s"%name))
elif grad_req[name] == 'null':
assert_almost_equal(orig_grad, sym_grad, rtol, atol,
("NUMERICAL_%s"%name, "BACKWARD_%s"%name))
else:
raise ValueError("Invalid grad_req %s for argument %s"%(grad_req[name], name))
def check_symbolic_forward(sym, location, expected, rtol=1E-4, atol=None,
aux_states=None, ctx=None, equal_nan=False,
dtype=default_dtype()):
"""Compares a symbol's forward results with the expected ones.
Prints error messages if the forward results are not the same as the expected ones.
Parameters
---------
sym : Symbol
output symbol
location : list of np.ndarray or dict of str to np.ndarray
The evaluation point
- if type is list of np.ndarray
Contains all the numpy arrays corresponding to `sym.list_arguments()`.
- if type is dict of str to np.ndarray
Contains the mapping between argument names and their values.
expected : list of np.ndarray or dict of str to np.ndarray
The expected output value
- if type is list of np.ndarray
Contains arrays corresponding to exe.outputs.
- if type is dict of str to np.ndarray
Contains mapping between sym.list_output() and exe.outputs.
    rtol : float, optional
        Relative error threshold.
    atol : float, optional
        Absolute error threshold.
aux_states : list of np.ndarray of dict, optional
- if type is list of np.ndarray
Contains all the NumPy arrays corresponding to sym.list_auxiliary_states
- if type is dict of str to np.ndarray
Contains the mapping between names of auxiliary states and their values.
ctx : Context, optional
running context
dtype: np.float32 or np.float64
Datatype for mx.nd.array.
equal_nan: Boolean
if True, `nan` is a valid value for checking equivalency (ie `nan` == `nan`)
Example
-------
>>> shape = (2, 2)
>>> lhs = mx.symbol.Variable('lhs')
>>> rhs = mx.symbol.Variable('rhs')
>>> sym_dot = mx.symbol.dot(lhs, rhs)
>>> mat1 = np.array([[1, 2], [3, 4]])
>>> mat2 = np.array([[5, 6], [7, 8]])
>>> ret_expected = np.array([[19, 22], [43, 50]])
>>> check_symbolic_forward(sym_dot, [mat1, mat2], [ret_expected])
"""
assert dtype == np.float32 or dtype == np.float64
if ctx is None:
ctx = default_context()
location = _parse_location(sym=sym, location=location, ctx=ctx, dtype=dtype)
aux_states = _parse_aux_states(sym=sym, aux_states=aux_states, ctx=ctx,
dtype=dtype)
if isinstance(expected, dict):
expected = [expected[k] for k in sym.list_outputs()]
args_grad_data = {k:mx.nd.empty(v.shape, ctx=ctx, dtype=dtype) for k, v in location.items()}
executor = sym.bind(ctx=ctx, args=location, args_grad=args_grad_data, aux_states=aux_states)
for g in executor.grad_arrays:
g[:] = 0
executor.forward(is_train=False)
outputs = [x.asnumpy() for x in executor.outputs]
for output_name, expect, output in zip(sym.list_outputs(), expected, outputs):
assert_almost_equal(expect, output, rtol, atol,
("EXPECTED_%s"%output_name, "FORWARD_%s"%output_name),
equal_nan=equal_nan)
return executor.outputs
def check_symbolic_backward(sym, location, out_grads, expected, rtol=1e-5, atol=None,
aux_states=None, grad_req='write', ctx=None, grad_stypes=None,
equal_nan=False, dtype=default_dtype()):
"""Compares a symbol's backward results with the expected ones.
Prints error messages if the backward results are not the same as the expected results.
Parameters
---------
sym : Symbol
output symbol
location : list of np.ndarray or dict of str to np.ndarray
The evaluation point
- if type is list of np.ndarray
Contains all the NumPy arrays corresponding to ``mx.sym.list_arguments``.
- if type is dict of str to np.ndarray
Contains the mapping between argument names and their values.
out_grads : None or list of np.ndarray or dict of str to np.ndarray
        NumPy arrays corresponding to sym.outputs for incoming gradient.
- if type is list of np.ndarray
Contains arrays corresponding to ``exe.outputs``.
- if type is dict of str to np.ndarray
contains mapping between mxnet.sym.list_output() and Executor.outputs
expected : list of np.ndarray or dict of str to np.ndarray
expected gradient values
- if type is list of np.ndarray
Contains arrays corresponding to exe.grad_arrays
- if type is dict of str to np.ndarray
Contains mapping between ``sym.list_arguments()`` and exe.outputs.
    rtol : float, optional
        Relative error threshold.
    atol : float, optional
        Absolute error threshold.
aux_states : list of np.ndarray or dict of str to np.ndarray
grad_req : str or list of str or dict of str to str, optional
Gradient requirements. 'write', 'add' or 'null'.
ctx : Context, optional
Running context.
grad_stypes: dict of str->str
dictionary of mapping argument name to stype for the gradient
equal_nan: Boolean
if True, `nan` is a valid value for checking equivalency (ie `nan` == `nan`)
dtype: np.float32 or np.float64
Datatype for mx.nd.array.
Example
-------
>>> lhs = mx.symbol.Variable('lhs')
>>> rhs = mx.symbol.Variable('rhs')
>>> sym_add = mx.symbol.elemwise_add(lhs, rhs)
>>> mat1 = np.array([[1, 2], [3, 4]])
>>> mat2 = np.array([[5, 6], [7, 8]])
>>> grad1 = mx.nd.zeros(shape)
>>> grad2 = mx.nd.zeros(shape)
>>> exec_add = sym_add.bind(default_context(), args={'lhs': mat1, 'rhs': mat2},
... args_grad={'lhs': grad1, 'rhs': grad2}, grad_req={'lhs': 'write', 'rhs': 'write'})
>>> exec_add.forward(is_train=True)
>>> ograd = mx.nd.ones(shape)
>>> grad_expected = ograd.copy().asnumpy()
>>> check_symbolic_backward(sym_add, [mat1, mat2], [ograd], [grad_expected, grad_expected])
"""
assert dtype == np.float32 or dtype == np.float64
if ctx is None:
ctx = default_context()
location = _parse_location(sym=sym, location=location, ctx=ctx, dtype=dtype)
aux_states = _parse_aux_states(sym=sym, aux_states=aux_states, ctx=ctx,
dtype=dtype)
if isinstance(expected, (list, tuple)):
expected = {k:v for k, v in zip(sym.list_arguments(), expected)}
args_grad_npy = {k:_rng.normal(size=v.shape) for k, v in expected.items()}
args_grad_data = {}
for k, v in args_grad_npy.items():
nd = mx.nd.array(v, ctx=ctx, dtype=dtype)
if grad_stypes is not None and k in grad_stypes:
stype = grad_stypes[k]
if stype is not None and stype != 'default':
out = create_sparse_array(v.shape, stype, density=0.0)
else:
out = nd
args_grad_data[k] = out
else:
args_grad_data[k] = nd
if isinstance(grad_req, str):
grad_req = {k:grad_req for k in sym.list_arguments()}
elif isinstance(grad_req, (list, tuple)):
grad_req = {k:v for k, v in zip(sym.list_arguments(), grad_req)}
executor = sym.bind(ctx=ctx, args=location, args_grad=args_grad_data,
aux_states=aux_states, grad_req=grad_req)
executor.forward(is_train=True)
if isinstance(out_grads, (tuple, list)):
outg = list()
for arr in out_grads:
if isinstance(arr, np.ndarray):
outg.append(mx.nd.array(arr, ctx=ctx, dtype=dtype))
else:
outg.append(arr)
out_grads = outg
elif isinstance(out_grads, dict):
outg = dict()
for k, v in out_grads.items():
if isinstance(v, np.ndarray):
outg[k] = mx.nd.array(v, ctx=ctx, dtype=dtype)
else:
outg[k] = v
out_grads = outg
else:
assert out_grads is None
executor.backward(out_grads)
grads = {k: v.asnumpy() for k, v in args_grad_data.items()}
for name in expected:
if grad_req[name] == 'write':
assert_almost_equal(expected[name], grads[name], rtol, atol,
("EXPECTED_%s"%name, "BACKWARD_%s"%name),
equal_nan=equal_nan)
elif grad_req[name] == 'add':
assert_almost_equal(expected[name], grads[name] - args_grad_npy[name],
rtol, atol, ("EXPECTED_%s"%name, "BACKWARD_%s"%name),
equal_nan=equal_nan)
elif grad_req[name] == 'null':
assert_almost_equal(args_grad_npy[name], grads[name],
rtol, atol, ("EXPECTED_%s"%name, "BACKWARD_%s"%name),
equal_nan=equal_nan)
else:
raise ValueError("Invalid grad_req %s for argument %s"%(grad_req[name], name))
return args_grad_data
def check_speed(sym, location=None, ctx=None, N=20, grad_req=None, typ="whole",
**kwargs):
"""Check the running speed of a symbol.
Parameters
----------
sym : Symbol
Symbol to run the speed test.
location : none or dict of str to np.ndarray
Location to evaluate the inner executor.
ctx : Context
Running context.
N : int, optional
Repeat times.
grad_req : None or str or list of str or dict of str to str, optional
Gradient requirements.
typ : str, optional
"whole" or "forward"
- "whole"
Test the forward_backward speed.
- "forward"
Only test the forward speed.
"""
if ctx is None:
ctx = default_context()
if grad_req is None:
grad_req = 'write'
if location is None:
exe = sym.simple_bind(grad_req=grad_req, ctx=ctx, **kwargs)
location = {k: _rng.normal(size=arr.shape, scale=1.0) for k, arr in
exe.arg_dict.items()}
else:
assert isinstance(location, dict), "Expect dict, get \"location\"=%s" %str(location)
exe = sym.simple_bind(grad_req=grad_req, ctx=ctx,
**{k: v.shape for k, v in location.items()})
for name, iarr in location.items():
exe.arg_dict[name][:] = iarr.astype(exe.arg_dict[name].dtype)
if typ == "whole":
# Warm up
exe.forward(is_train=True)
exe.backward(out_grads=exe.outputs)
for output in exe.outputs:
output.wait_to_read()
# Test forward + backward
tic = time.time()
for _ in range(N):
exe.forward(is_train=True)
exe.backward(out_grads=exe.outputs)
mx.nd.waitall()
toc = time.time()
forward_backward_time = (toc - tic) * 1.0 / N
return forward_backward_time
elif typ == "forward":
# Warm up
exe.forward(is_train=False)
for output in exe.outputs:
output.wait_to_read()
# Test forward only
tic = time.time()
for _ in range(N):
exe.forward(is_train=False)
mx.nd.waitall()
toc = time.time()
forward_time = (toc - tic) * 1.0 / N
return forward_time
else:
raise ValueError('typ can only be "whole" or "forward".')
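# Illustrative usage sketch (the symbol and shapes below are made up); shapes of
# the free variables are passed as keyword arguments and forwarded to simple_bind:
#
#     data = mx.sym.Variable('data')
#     net = mx.sym.FullyConnected(data=data, num_hidden=128, name='fc')
#     t = check_speed(net, ctx=mx.cpu(), N=10, typ="forward", data=(32, 64))
#     print("average forward time per run: %f s" % t)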
def check_consistency(sym, ctx_list, scale=1.0, grad_req='write',
arg_params=None, aux_params=None, tol=None,
raise_on_err=True, ground_truth=None, equal_nan=False):
"""Check symbol gives the same output for different running context
Parameters
----------
sym : Symbol or list of Symbols
Symbol(s) to run the consistency test.
ctx_list : list
Running context. See example for more detail.
scale : float, optional
Standard deviation of the inner normal distribution. Used in initialization.
grad_req : str or list of str or dict of str to str
Gradient requirement.
Examples
--------
>>> # create the symbol
>>> sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
>>> # initialize the running context
>>> ctx_list =\
[{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},\
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},\
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},\
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},\
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}}]
>>> check_consistency(sym, ctx_list)
>>> sym = mx.sym.Concat(name='concat', num_args=2)
>>> ctx_list = \
[{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},\
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},\
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},\
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},\
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
>>> check_consistency(sym, ctx_list)
"""
if tol is None:
tol = {np.dtype(np.float16): 1e-1,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
elif isinstance(tol, numbers.Number):
tol = {np.dtype(np.float16): tol,
np.dtype(np.float32): tol,
np.dtype(np.float64): tol,
np.dtype(np.uint8): tol,
np.dtype(np.int32): tol}
assert len(ctx_list) > 1
if isinstance(sym, Symbol):
sym = [sym]*len(ctx_list)
else:
assert len(sym) == len(ctx_list)
output_names = sym[0].list_outputs()
arg_names = sym[0].list_arguments()
exe_list = []
for s, ctx in zip(sym, ctx_list):
assert s.list_arguments() == arg_names
assert s.list_outputs() == output_names
exe_list.append(s.simple_bind(grad_req=grad_req, **ctx))
arg_params = {} if arg_params is None else arg_params
aux_params = {} if aux_params is None else aux_params
for n, arr in exe_list[0].arg_dict.items():
if n not in arg_params:
arg_params[n] = np.random.normal(size=arr.shape, scale=scale)
for n, arr in exe_list[0].aux_dict.items():
if n not in aux_params:
aux_params[n] = 0
for exe in exe_list:
for name, arr in exe.arg_dict.items():
arr[:] = arg_params[name]
for name, arr in exe.aux_dict.items():
arr[:] = aux_params[name]
dtypes = [np.dtype(exe.outputs[0].dtype) for exe in exe_list]
max_idx = np.argmax(dtypes)
gt = ground_truth
if gt is None:
gt = exe_list[max_idx].output_dict.copy()
if grad_req != 'null':
gt.update(exe_list[max_idx].grad_dict)
# test
for exe in exe_list:
exe.forward(is_train=False)
for i, exe in enumerate(exe_list):
if i == max_idx:
continue
for name, arr in zip(output_names, exe.outputs):
gtarr = gt[name].astype(dtypes[i]).asnumpy()
arr = arr.asnumpy()
try:
assert_almost_equal(arr, gtarr, rtol=tol[dtypes[i]], atol=tol[dtypes[i]],
equal_nan=equal_nan)
except AssertionError as e:
print('Predict Err: ctx %d vs ctx %d at %s'%(i, max_idx, name))
traceback.print_exc()
if raise_on_err:
raise e
else:
print(str(e))
# train
if grad_req != 'null':
for exe in exe_list:
exe.forward(is_train=True)
exe.backward(exe.outputs)
for i, exe in enumerate(exe_list):
if i == max_idx:
continue
curr = zip(output_names + arg_names, exe.outputs + exe.grad_arrays)
for name, arr in curr:
if gt[name] is None:
assert arr is None
continue
gtarr = gt[name].astype(dtypes[i]).asnumpy()
arr = arr.asnumpy()
try:
assert_almost_equal(arr, gtarr, rtol=tol[dtypes[i]], atol=tol[dtypes[i]],
equal_nan=equal_nan)
except AssertionError as e:
print('Train Err: ctx %d vs ctx %d at %s'%(i, max_idx, name))
traceback.print_exc()
if raise_on_err:
raise e
else:
print(str(e))
return gt
def list_gpus():
"""Return a list of GPUs
Returns
-------
list of int:
If there are n GPUs, then return a list [0, 1, ..., n-1]; otherwise return
an empty list.
"""
re = ''
nvidia_smi = ['nvidia-smi', '/usr/bin/nvidia-smi', '/usr/local/nvidia/bin/nvidia-smi']
for cmd in nvidia_smi:
try:
re = subprocess.check_output([cmd, "-L"], universal_newlines=True)
except OSError:
pass
return range(len([i for i in re.split('\n') if 'GPU' in i]))
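# Example (sketch): pick the first GPU if one is available, otherwise fall back
# to the CPU.
#   gpus = list_gpus()
#   ctx = mx.gpu(gpus[0]) if len(gpus) > 0 else mx.cpu()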
def download(url, fname=None, dirname=None, overwrite=False):
"""Download an given URL
Parameters
----------
url : str
URL to download
fname : str, optional
Filename of the downloaded file. If None, a filename is guessed from the
URL.
dirname : str, optional
Output directory name. If None, it is guessed from fname, or the current
directory is used.
overwrite : bool, optional
If False (the default), skip the download when the local file already
exists. If True, download the URL and overwrite the local file if it
exists.
Returns
-------
str
The filename of the downloaded file
"""
if fname is None:
fname = url.split('/')[-1]
if dirname is None:
dirname = os.path.dirname(fname)
else:
fname = os.path.join(dirname, fname)
if dirname != "":
if not os.path.exists(dirname):
try:
logging.info('create directory %s', dirname)
os.makedirs(dirname)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise OSError('failed to create ' + dirname)
if not overwrite and os.path.exists(fname):
logging.info("%s exists, skipping download", fname)
return fname
r = requests.get(url, stream=True)
assert r.status_code == 200, "failed to open %s" % url
with open(fname, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
logging.info("downloaded %s into %s successfully", url, fname)
return fname
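# Example (sketch; the URL is the MNIST mirror used by get_mnist below):
#   fname = download('http://data.mxnet.io/data/mnist/train-images-idx3-ubyte.gz',
#                    dirname='data', overwrite=False)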
def get_mnist():
"""Download and load the MNIST dataset
Returns
-------
dict
A dict with keys 'train_data', 'train_label', 'test_data' and
'test_label', containing the MNIST images and labels.
"""
def read_data(label_url, image_url):
with gzip.open(mx.test_utils.download(label_url)) as flbl:
struct.unpack(">II", flbl.read(8))
label = np.fromstring(flbl.read(), dtype=np.int8)
with gzip.open(mx.test_utils.download(image_url), 'rb') as fimg:
_, _, rows, cols = struct.unpack(">IIII", fimg.read(16))
image = np.fromstring(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
image = image.reshape(image.shape[0], 1, 28, 28).astype(np.float32)/255
return (label, image)
# changed to mxnet.io for more stable hosting
# path = 'http://yann.lecun.com/exdb/mnist/'
path = 'http://data.mxnet.io/data/mnist/'
(train_lbl, train_img) = read_data(
path+'train-labels-idx1-ubyte.gz', path+'train-images-idx3-ubyte.gz')
(test_lbl, test_img) = read_data(
path+'t10k-labels-idx1-ubyte.gz', path+'t10k-images-idx3-ubyte.gz')
return {'train_data':train_img, 'train_label':train_lbl,
'test_data':test_img, 'test_label':test_lbl}
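# Example (sketch): wrap the returned arrays in an NDArrayIter for training.
#   mnist = get_mnist()
#   train_iter = mx.io.NDArrayIter(mnist['train_data'], mnist['train_label'],
#                                  batch_size=100, shuffle=True)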
def get_bz2_data(data_dir, data_name, url, data_origin_name):
"""Download and extract bz2 data."""
download(url, dirname=data_dir, overwrite=False)
os.chdir(data_dir)
if not os.path.exists(data_name):
bz_file = bz2.BZ2File(data_origin_name, 'rb')
with open(data_name, 'wb') as fout:
try:
content = bz_file.read()
fout.write(content)
finally:
bz_file.close()
os.remove(data_origin_name)
os.chdir("..")
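# Example (sketch; the URL and file names below are placeholders for any
# bz2-compressed text dataset):
#   get_bz2_data('data', 'dataset.txt',
#                'http://example.com/dataset.txt.bz2', 'dataset.txt.bz2')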
def set_env_var(key, val, default_val=""):
"""Set environment variable
Parameters
----------
key : str
Env var to set
val : str
New value assigned to the env var
default_val : str, optional
Value reported as the previous value if the env var doesn't exist
Returns
-------
str
The value of env var before it is set to the new value
"""
prev_val = os.environ.get(key, default_val)
os.environ[key] = val
return prev_val
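# Example (sketch, assuming the MXNET_ENGINE_TYPE variable is meaningful in the
# test environment): temporarily switch the engine and restore it afterwards.
#   prev = set_env_var('MXNET_ENGINE_TYPE', 'NaiveEngine')
#   # ... run code under the new setting ...
#   set_env_var('MXNET_ENGINE_TYPE', prev)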
def same_array(array1, array2):
"""Check whether two NDArrays sharing the same memory block
Parameters
----------
array1 : NDArray
First NDArray to be checked
array2 : NDArray
Second NDArray to be checked
Returns
-------
bool
Whether two NDArrays share the same memory
"""
array1[:] += 1
if not same(array1.asnumpy(), array2.asnumpy()):
array1[:] -= 1
return False
array1[:] -= 1
return same(array1.asnumpy(), array2.asnumpy())
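# Example (sketch): an NDArray shares memory with itself but not with a copy.
#   x = mx.nd.ones((3, 3))
#   assert same_array(x, x)
#   assert not same_array(x, x.copy())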
@contextmanager
def discard_stderr():
"""
Discards error output of a routine if invoked as:
with discard_stderr():
...
"""
try:
stderr_fileno = sys.stderr.fileno()
old_stderr = os.dup(stderr_fileno)
bit_bucket = open(os.devnull, 'w')
os.dup2(bit_bucket.fileno(), stderr_fileno)
yield
finally:
os.dup2(old_stderr, stderr_fileno)
bit_bucket.close()
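# Example (sketch): suppress expected stderr noise from a single call.
#   with discard_stderr():
#       mx.nd.array([1, 2, 3]).asnumpy()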
| {
"content_hash": "693b5241005f03a6154b6399aa6446a7",
"timestamp": "",
"source": "github",
"line_count": 1504,
"max_line_length": 100,
"avg_line_length": 38.297872340425535,
"alnum_prop": 0.5834722222222222,
"repo_name": "ShownX/incubator-mxnet",
"id": "bc92257176bd5cc8d6347aa26cc1f63d4f70d95d",
"size": "58386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/mxnet/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12255"
},
{
"name": "C",
"bytes": "105566"
},
{
"name": "C++",
"bytes": "4156491"
},
{
"name": "CMake",
"bytes": "54362"
},
{
"name": "Cuda",
"bytes": "755527"
},
{
"name": "Groovy",
"bytes": "217"
},
{
"name": "Java",
"bytes": "20406"
},
{
"name": "Jupyter Notebook",
"bytes": "1318817"
},
{
"name": "Makefile",
"bytes": "36228"
},
{
"name": "Matlab",
"bytes": "30187"
},
{
"name": "Perl",
"bytes": "1010152"
},
{
"name": "Perl 6",
"bytes": "48378"
},
{
"name": "Protocol Buffer",
"bytes": "77256"
},
{
"name": "Python",
"bytes": "4254513"
},
{
"name": "R",
"bytes": "342965"
},
{
"name": "Scala",
"bytes": "904254"
},
{
"name": "Shell",
"bytes": "203921"
}
],
"symlink_target": ""
} |
"""Factory method for easily getting imdbs by name."""
__sets = {}
import datasets.pascal_voc
import datasets.nyud2_voc
import numpy as np
def _selective_search_IJCV_top_k(split, year, top_k):
"""Return an imdb that uses the top k proposals from the selective search
IJCV code.
"""
imdb = datasets.pascal_voc(split, year)
imdb.roidb_handler = imdb.selective_search_IJCV_roidb
imdb.config['top_k'] = top_k
return imdb
# Set up nyud2_<type>_<year>_<split> using selective search "fast" mode
for year in ['2015']:
for split in ['train', 'val', 'trainval', 'test']:
for typ in ['hha', 'images', 'images+hha']:
name = 'nyud2_{:s}_{:s}_{:s}'.format(typ, year, split)
__sets[name] = (lambda split=split, year=year, typ=typ :
datasets.nyud2_voc(split, year, image_type = typ))
# Set up voc_<year>_<split> using selective search "fast" mode
for year in ['2007', '2012']:
for split in ['train', 'val', 'trainval', 'test']:
name = 'voc_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year:
datasets.pascal_voc(split, year))
# Set up voc_<year>_<split>_top_<k> using selective search "quality" mode
# but only returning the first k boxes
for top_k in np.arange(1000, 11000, 1000):
for year in ['2007', '2012']:
for split in ['train', 'val', 'trainval', 'test']:
name = 'voc_{}_{}_top_{:d}'.format(year, split, top_k)
__sets[name] = (lambda split=split, year=year, top_k=top_k:
_selective_search_IJCV_top_k(split, year, top_k))
def get_imdb(name):
"""Get an imdb (image database) by name."""
if not __sets.has_key(name):
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]()
def list_imdbs():
"""List all registered imdbs."""
return __sets.keys()
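# Example (sketch): resolve one of the names registered above and list them all.
#   imdb = get_imdb('voc_2007_trainval')
#   print(list_imdbs())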
| {
"content_hash": "c789fc949e76a917dba4bf991dba347b",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 77,
"avg_line_length": 36.80392156862745,
"alnum_prop": 0.6004262120404902,
"repo_name": "s-gupta/fast-rcnn",
"id": "28099ac18616271e0b46ab33fe806c39ea509234",
"size": "2126",
"binary": false,
"copies": "2",
"ref": "refs/heads/distillation",
"path": "lib/datasets/factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "56"
},
{
"name": "Matlab",
"bytes": "10660"
},
{
"name": "Python",
"bytes": "103061"
},
{
"name": "Shell",
"bytes": "8303"
}
],
"symlink_target": ""
} |