repo_name | path | copies | size | content | license
---|---|---|---|---|---|
BirkbeckCTP/janeway | src/core/homepage_elements/carousel/plugin_settings.py | 1 | 2057 |
from django.db.utils import OperationalError
from django.contrib.contenttypes.models import ContentType
PLUGIN_NAME = 'Carousel'
DESCRIPTION = 'This is a homepage element that renders a carousel.'
AUTHOR = 'Martin Paul Eve'
def install():
import core.models as core_models
import journal.models as journal_models
import press.models as press_models
# check whether this homepage element has already been installed for all journals
journals = journal_models.Journal.objects.all()
for journal in journals:
content_type = ContentType.objects.get_for_model(journal)
element, created = core_models.HomepageElement.objects.get_or_create(
name=PLUGIN_NAME,
configure_url='carousel_settings',
template_path='journal/homepage_elements/carousel.html',
content_type=content_type,
object_id=journal.pk,
has_config=True,
defaults={'available_to_press': True})
element.save()
presses = press_models.Press.objects.all()
for press in presses:
content_type = ContentType.objects.get_for_model(press)
element, created = core_models.HomepageElement.objects.get_or_create(
name=PLUGIN_NAME,
configure_url='carousel_settings',
template_path='journal/homepage_elements/carousel.html',
content_type=content_type,
object_id=press.pk,
has_config=True,
defaults={'available_to_press': True})
element.save()
def hook_registry():
try:
install()
return {
'yield_homepage_element_context': {
'module': 'core.homepage_elements.carousel.hooks',
'function': 'yield_homepage_element_context',
'name': PLUGIN_NAME,
}
}
except OperationalError:
# if we get here the database hasn't yet been created
return {}
except BaseException: # if we get here, well, something has gone very wrong
return {}
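# --- Illustrative sketch (not part of the original plugin and not Janeway's
# actual loader): one way a caller could consume the hook_registry() output.
# The helper name `fire_hook` and the keyword arguments passed to the hook
# function are assumptions made for this example only.
import importlib

def fire_hook(hook_name, **kwargs):
    registry = hook_registry()  # returns {} if the database is not ready yet
    hook = registry.get(hook_name)
    if hook is None:
        return None
    module = importlib.import_module(hook['module'])
    hook_function = getattr(module, hook['function'])
    return hook_function(**kwargs)

# e.g. fire_hook('yield_homepage_element_context', request=request)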
| agpl-3.0 |
NeuralEnsemble/elephant | elephant/asset/asset.py | 2 | 102992 |
# -*- coding: utf-8 -*-
"""
ASSET is a statistical method :cite:`asset-Torre16_e1004939` for the detection
of repeating sequences of synchronous spiking events in parallel spike trains.
ASSET analysis class for finding patterns
-----------------------------------------------
.. autosummary::
:toctree: _toctree/asset/
ASSET
Patterns post-exploration
-------------------------
.. autosummary::
:toctree: _toctree/asset/
synchronous_events_intersection
synchronous_events_difference
synchronous_events_identical
synchronous_events_no_overlap
synchronous_events_contained_in
synchronous_events_contains_all
synchronous_events_overlap
Tutorial
--------
:doc:`View tutorial <../tutorials/asset>`
Run tutorial interactively:
.. image:: https://mybinder.org/badge.svg
:target: https://mybinder.org/v2/gh/NeuralEnsemble/elephant/master
?filepath=doc/tutorials/asset.ipynb
Examples
--------
In this example we
* simulate two noisy synfire chains;
* shuffle the neurons to destroy the visual appearance of the pattern;
* run ASSET analysis to recover the original neuron arrangement.
1. Simulate two noisy synfire chains, shuffle the neurons to destroy the
pattern visually, and store the shuffled activations in neo.SpikeTrains.
>>> import neo
>>> import numpy as np
>>> import quantities as pq
>>> np.random.seed(10)
>>> spiketrain = np.linspace(0, 50, num=10)
>>> np.random.shuffle(spiketrain)
>>> spiketrains = np.c_[spiketrain, spiketrain + 100]
>>> spiketrains += np.random.random_sample(spiketrains.shape) * 5
>>> spiketrains = [neo.SpikeTrain(st, units='ms', t_stop=1 * pq.s)
... for st in spiketrains]
2. Create an `ASSET` class object that holds the spike trains.
`ASSET` requires at least one argument - a list of spike trains. If
`spiketrains_j` is not provided, the same spike trains are used for both
axes of the intersection matrix.
>>> from elephant import asset
>>> asset_obj = asset.ASSET(spiketrains, bin_size=3*pq.ms)
3. Build the intersection matrix `imat`:
>>> imat = asset_obj.intersection_matrix()
4. Estimate the probability matrix `pmat`, using the analytical method:
>>> pmat = asset_obj.probability_matrix_analytical(imat,
... kernel_width=50*pq.ms)
5. Compute the joint probability matrix `jmat`, using a suitable filter:
>>> jmat = asset_obj.joint_probability_matrix(pmat, filter_shape=(5, 1),
... n_largest=3)
6. Create the masked version of the intersection matrix, `mmat`, from `pmat`
and `jmat`:
>>> mmat = asset_obj.mask_matrices([pmat, jmat], thresholds=.9)
7. Cluster significant elements of imat into diagonal structures:
>>> cmat = asset_obj.cluster_matrix_entries(mmat, max_distance=11,
... min_neighbors=3, stretch=5)
8. Extract sequences of synchronous events:
>>> sses = asset_obj.extract_synchronous_events(cmat)
ASSET found the following sequences of synchronous events:
>>> sses
{1: {(36, 2): {5},
(37, 4): {1},
(40, 6): {4},
(41, 7): {8},
(43, 9): {2},
(47, 14): {7},
(48, 15): {0},
(50, 17): {9}}}
To visualize them, refer to Viziphant documentation and an example plot
:func:`viziphant.asset.plot_synchronous_events`.
"""
from __future__ import division, print_function, unicode_literals
import math
import os
import subprocess
import sys
import tempfile
import warnings
from pathlib import Path
import neo
import numpy as np
import quantities as pq
import scipy.spatial
import scipy.stats
from sklearn.cluster import dbscan
from sklearn.metrics import pairwise_distances, pairwise_distances_chunked
from tqdm import trange, tqdm
import elephant.conversion as conv
from elephant import spike_train_surrogates
from elephant.utils import get_cuda_capability_major
try:
from mpi4py import MPI
mpi_accelerated = True
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
except ImportError:
mpi_accelerated = False
size = 1
rank = 0
__all__ = [
"ASSET",
"synchronous_events_intersection",
"synchronous_events_difference",
"synchronous_events_identical",
"synchronous_events_no_overlap",
"synchronous_events_contained_in",
"synchronous_events_contains_all",
"synchronous_events_overlap"
]
# =============================================================================
# Some Utility Functions to be dealt with in some way or another
# =============================================================================
def _signals_same_attribute(signals, attr_name):
"""
Check whether a list of signals (`neo.AnalogSignal` or `neo.SpikeTrain`)
have the same attribute `attr_name`. If so, return that value. Otherwise,
raise ValueError.
Parameters
----------
signals : list
A list of signals (e.g. `neo.AnalogSignal` or `neo.SpikeTrain`) having
attribute `attr_name`.
Returns
-------
pq.Quantity
The value of the common attribute `attr_name` of the list of signals.
Raises
------
ValueError
If `signals` is an empty list.
If `signals` have different `attr_name` attribute values.
"""
if len(signals) == 0:
raise ValueError('Empty signals list')
attribute = getattr(signals[0], attr_name)
for sig in signals[1:]:
if getattr(sig, attr_name) != attribute:
raise ValueError(
"Signals have different '{}' values".format(attr_name))
return attribute
def _quantities_almost_equal(x, y):
"""
Returns True if two quantities are almost equal, i.e., if `x - y` is
"very close to 0" (not larger than machine precision for floats).
Parameters
----------
x : pq.Quantity
First Quantity to compare.
y : pq.Quantity
Second Quantity to compare. Must have same unit type as `x`, but not
necessarily the same shape. Any shapes of `x` and `y` for which `x - y`
can be calculated are permitted.
Returns
-------
np.ndarray
Array of `bool`, which is True at any position where `x - y` is almost
zero.
Notes
-----
Not the same as `numpy.testing.assert_allclose` (which does not work
with Quantities) and `numpy.testing.assert_almost_equal` (which works only
with decimals)
"""
eps = np.finfo(float).eps
relative_diff = (x - y).magnitude
return np.all([-eps <= relative_diff, relative_diff <= eps], axis=0)
def _transactions(spiketrains, bin_size, t_start, t_stop, ids=None):
"""
Transform parallel spike trains into a list of sublists, called
transactions, each corresponding to a time bin and containing the list
of spikes in `spiketrains` falling into that bin.
To compute each transaction, the spike trains are binned (with adjacent
exclusive binning) and clipped (i.e., spikes from the same train falling
in the same bin are counted as one event). The list of spike IDs within
each bin form the corresponding transaction.
Parameters
----------
spiketrains : list of neo.SpikeTrain or list of tuple
A list of `neo.SpikeTrain` objects, or list of pairs
(Train_ID, `neo.SpikeTrain`), where `Train_ID` can be any hashable
object.
bin_size : pq.Quantity
Width of each time bin. Time is binned to determine synchrony.
t_start : pq.Quantity
The starting time. Only spikes occurring at times `t >= t_start` are
considered. The first transaction contains spikes falling into the
time segment `[t_start, t_start+bin_size]`.
If None, takes the value of `spiketrain.t_start`, common for all
input `spiketrains` (raises ValueError if it's not the case).
Default: None
t_stop : pq.Quantity
The ending time. Only spikes occurring at times `t < t_stop` are
considered.
If None, takes the value of `spiketrain.t_stop`, common for all
input `spiketrains` (raises ValueError if it's not the case).
Default: None
ids : list of int, optional
List of spike train IDs.
If None, the IDs `0` to `N-1` are used, where `N` is the number of
input spike trains.
Default: None
Returns
-------
list of list
A list of transactions, where each transaction corresponds to a time
bin and represents the list of spike train IDs having a spike in that
time bin.
Raises
------
TypeError
If `spiketrains` is not a list of `neo.SpikeTrain` or a list of tuples
(id, `neo.SpikeTrain`).
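Examples
--------
A minimal sketch with two hypothetical spike trains and 3 ms bins:
>>> import neo
>>> import quantities as pq
>>> sts = [neo.SpikeTrain([1, 4] * pq.ms, t_stop=6 * pq.ms),
...        neo.SpikeTrain([1] * pq.ms, t_stop=6 * pq.ms)]
>>> _transactions(sts, bin_size=3 * pq.ms, t_start=0 * pq.ms,
...               t_stop=6 * pq.ms)
[[0, 1], [0]]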
"""
if all(isinstance(st, neo.SpikeTrain) for st in spiketrains):
trains = spiketrains
if ids is None:
ids = range(len(spiketrains))
else:
# (id, SpikeTrain) pairs
try:
ids, trains = zip(*spiketrains)
except TypeError:
raise TypeError('spiketrains must be either a list of ' +
'SpikeTrains or a list of (id, SpikeTrain) pairs')
# Bin the spike trains and take for each of them the ids of filled bins
binned = conv.BinnedSpikeTrain(
trains, bin_size=bin_size, t_start=t_start, t_stop=t_stop)
filled_bins = binned.spike_indices
# Compute and return the transaction list
return [[train_id for train_id, b in zip(ids, filled_bins)
if bin_id in b] for bin_id in range(binned.n_bins)]
def _analog_signal_step_interp(signal, times):
"""
Compute the step-wise interpolation of a signal at desired times.
Given a signal (e.g. a `neo.AnalogSignal`) `s` taking values `s[t0]` and
`s[t1]` at two consecutive time points `t0` and `t1` (`t0 < t1`), the value
of the step-wise interpolation at time `t: t0 <= t < t1` is given by
`s[t] = s[t0]`.
Parameters
----------
signal : neo.AnalogSignal
The analog signal, containing the discretization of the function to
interpolate.
times : pq.Quantity
A vector of time points at which the step interpolation is computed.
Returns
-------
pq.Quantity
Object with same shape of `times` and containing
the values of the interpolated signal at the time points in `times`.
"""
dt = signal.sampling_period
# Compute the ids of the signal times to the left of each time in times
time_ids = np.floor(
((times - signal.t_start) / dt).rescale(
pq.dimensionless).magnitude).astype('i')
return (signal.magnitude[time_ids] * signal.units).rescale(signal.units)
# =============================================================================
# HERE ASSET STARTS
# =============================================================================
def _stretched_metric_2d(x, y, stretch, ref_angle, working_memory=None):
r"""
Given a list of points on the real plane, identified by their abscissa `x`
and ordinate `y`, compute a stretched transformation of the Euclidean
distance among each of them.
The classical euclidean distance `d` between points `(x1, y1)` and
`(x2, y2)`, i.e., :math:`\sqrt((x1-x2)^2 + (y1-y2)^2)`, is multiplied by a
factor
.. math::
1 + (\text{stretch} - 1) \cdot |\sin(\text{ref\_angle} - \theta)|,
where :math:`\theta` is the angle between the points and the 45 degree
direction (i.e., the line `y = x`).
The stretching factor thus steadily varies between 1 (if the line
connecting `(x1, y1)` and `(x2, y2)` has inclination `ref_angle`) and
`stretch` (if that line has inclination `90 + ref_angle`).
Parameters
----------
x : (n,) np.ndarray
Array of abscissas of all points among which to compute the distance.
y : (n,) np.ndarray
Array of ordinates of all points among which to compute the distance
(same shape as `x`).
stretch : float
Maximum stretching factor, applied if the line connecting the points
has inclination `90 + ref_angle`.
ref_angle : float
Reference angle in degrees (i.e., the inclination along which the
stretching factor is 1).
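working_memory : int, optional
The sought maximum memory (in MiB) for temporary distance-matrix chunks,
forwarded to `sklearn.metrics.pairwise_distances_chunked`. If None, the
full distance matrix is computed in one go without chunking.
Default: None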
Returns
-------
D : (n,n) np.ndarray
Square matrix of distances between all pairs of points.
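Examples
--------
A minimal sketch: for points lying on the line `y = x`, the angle between
them equals `ref_angle=45` degrees, the stretching factor is 1, and the
plain Euclidean distances are recovered.
>>> import numpy as np
>>> x = np.array([0., 1., 2.])
>>> D = _stretched_metric_2d(x, x, stretch=10, ref_angle=45)
>>> np.allclose(D, np.sqrt(2) * np.array([[0., 1., 2.],
...                                       [1., 0., 1.],
...                                       [2., 1., 0.]]))
True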
"""
alpha = np.deg2rad(ref_angle) # reference angle in radians
# Create the array of points (one per row) for which to compute the
# stretched distance
points = np.column_stack([x, y])
x_array = np.expand_dims(x, axis=0)
y_array = np.expand_dims(y, axis=0)
def calculate_stretch_mat(theta_mat, D_mat):
# Transform [-pi, pi] back to [-pi/2, pi/2]
theta_mat[theta_mat < -np.pi / 2] += np.pi
theta_mat[theta_mat > np.pi / 2] -= np.pi
# Compute the matrix of stretching factors for each pair of points.
# Equivalent to:
# stretch_mat = 1 + (stretch - 1.) * np.abs(np.sin(alpha - theta))
_stretch_mat = np.subtract(alpha, theta_mat, out=theta_mat)
_stretch_mat = np.sin(_stretch_mat, out=_stretch_mat)
_stretch_mat = np.abs(_stretch_mat, out=_stretch_mat)
_stretch_mat = np.multiply(stretch - 1, _stretch_mat, out=_stretch_mat)
_stretch_mat = np.add(1, _stretch_mat, out=_stretch_mat)
_stretch_mat = np.multiply(D_mat, _stretch_mat, out=_stretch_mat)
return _stretch_mat
if working_memory is None:
# Compute the matrix D[i, j] of euclidean distances among points
# i and j
D = pairwise_distances(points)
# Compute the angular coefficients of the line between each pair of
# points
# dX[i,j]: x difference between points i and j
# dY[i,j]: y difference between points i and j
dX = x_array.T - x_array
dY = y_array.T - y_array
# Compute the matrix Theta of angles between each pair of points
theta = np.arctan2(dY, dX, dtype=np.float32)
stretch_mat = calculate_stretch_mat(theta, D)
else:
start = 0
# x and y sizes are the same
stretch_mat = np.empty((len(x), len(y)), dtype=np.float32)
for D_chunk in pairwise_distances_chunked(
points, working_memory=working_memory):
chunk_size = D_chunk.shape[0]
dX = x_array[:, start: start + chunk_size].T - x_array
dY = y_array[:, start: start + chunk_size].T - y_array
theta_chunk = np.arctan2(
dY, dX, out=stretch_mat[start: start + chunk_size, :])
# stretch_mat (theta_chunk) is updated in-place here
calculate_stretch_mat(theta_chunk, D_chunk)
start += chunk_size
# Return the stretched distance matrix
return stretch_mat
def _interpolate_signals(signals, sampling_times, verbose=False):
"""
Interpolate signals at given sampling times.
"""
# Reshape all signals to one-dimensional array object (e.g. AnalogSignal)
for i, signal in enumerate(signals):
if signal.ndim == 2:
signals[i] = signal.flatten()
elif signal.ndim > 2:
raise ValueError('elements in fir_rates must have at most 2 dimensions')
if verbose:
print('create time slices of the rates...')
# Interpolate in the time bins
interpolated_signal = np.vstack([_analog_signal_step_interp(
signal, sampling_times).rescale('Hz').magnitude
for signal in signals]) * pq.Hz
return interpolated_signal
class _GPUBackend:
"""
Parameters
----------
max_chunk_size: int or None, optional
Defines the maximum chunk size used in the `_split_axis` function. The
users typically don't need to set this parameter manually - it's used
to simulate scenarios when the input matrix is so large that it cannot
fit into GPU memory. Setting this parameter manually can resolve GPU
memory errors in case automatic parameters adjustment fails.
Notes
-----
1. PyOpenCL backend takes some time to compile the kernel for the first
time - the caching will affect your benchmarks unless you run each
program twice.
2. Pinned Host Memory.
Host (CPU) data allocations are pageable by default. The GPU cannot
access data directly from pageable host memory, so when a data transfer
from pageable host memory to device memory is invoked, the CUDA driver
must first allocate a temporary page-locked, or "pinned", host array,
copy the host data to the pinned array, and then transfer the data from
the pinned array to device memory, as illustrated at
https://developer.nvidia.com/blog/how-optimize-data-transfers-cuda-cc/
Same for OpenCL. Therefore, Python memory analyzers show increments in
the used RAM each time an OpenCL/CUDA buffer is created. As with any
Python objects, PyOpenCL and PyCUDA clean up and free allocated memory
automatically when garbage collection is executed.
"""
def __init__(self, max_chunk_size=None):
self.max_chunk_size = max_chunk_size
def _choose_backend(self):
# If CUDA is detected, always use CUDA.
# If OpenCL is detected, don't use it by default to avoid the system
# becoming unresponsive until the program terminates.
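# For example (assumed usage, consistent with the checks below), setting
# both ELEPHANT_USE_CUDA=0 and ELEPHANT_USE_OPENCL=0 in the environment
# forces the pure NumPy (CPU) backend.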
use_cuda = int(os.getenv("ELEPHANT_USE_CUDA", '1'))
use_opencl = int(os.getenv("ELEPHANT_USE_OPENCL", '1'))
cuda_detected = get_cuda_capability_major() != 0
if use_cuda and cuda_detected:
return self.pycuda
if use_opencl:
return self.pyopencl
return self.cpu
def _split_axis(self, chunk_size, axis_size, min_chunk_size=None):
chunk_size = min(chunk_size, axis_size)
if self.max_chunk_size is not None:
chunk_size = min(chunk_size, self.max_chunk_size)
if min_chunk_size is not None and chunk_size < min_chunk_size:
raise ValueError(f"[GPU not enough memory] Impossible to split "
f"the array into chunks of size at least "
f"{min_chunk_size} to fit into GPU memory")
n_chunks = math.ceil(axis_size / chunk_size)
chunk_size = math.ceil(axis_size / n_chunks) # align in size
if min_chunk_size is not None:
chunk_size = max(chunk_size, min_chunk_size)
split_idx = list(range(0, axis_size, chunk_size))
last_id = split_idx[-1]
last_size = axis_size - last_id # last is the smallest
split_idx = list(zip(split_idx[:-1], split_idx[1:]))
if min_chunk_size is not None and last_size < min_chunk_size:
# Overlap the last chunk with the previous.
# The overlapped part (intersection) will be computed twice.
last_id = axis_size - min_chunk_size
split_idx.append((last_id, axis_size))
return chunk_size, split_idx
class _JSFUniformOrderStat3D(_GPUBackend):
def __init__(self, n, d, precision='float', verbose=False,
cuda_threads=64, cuda_cwr_loops=32, tolerance=1e-5,
max_chunk_size=None):
super().__init__(max_chunk_size=max_chunk_size)
if d > n:
raise ValueError(f"d ({d}) must be less or equal n ({n})")
self.n = n
self.d = d
self.precision = precision
self.verbose = verbose and rank == 0
self.cuda_threads = cuda_threads
self.cuda_cwr_loops = cuda_cwr_loops
self.map_iterations = self._create_iteration_table()
bits = 32 if precision == "float" else 64
self.dtype = np.dtype(f"float{bits}")
self.tolerance = tolerance
@property
def num_iterations(self):
# map_iterations table is populated with element indices, not counts;
# therefore, we add 1
return self.map_iterations[:, -1].sum() + 1
def _create_iteration_table(self):
# do not use numpy arrays - they are limited to uint64
map_iterations = [list(range(self.n))]
for row_id in range(1, self.d):
prev_row = map_iterations[row_id - 1]
curr_row = [0] * (row_id + 1)
for col_id in range(row_id + 1, self.n):
cumsum = prev_row[col_id] + curr_row[-1]
curr_row.append(cumsum)
map_iterations.append(curr_row)
# here we can wrap the resulting array in numpy:
# if at least one item is greater than 2<<63 - 1,
# the data type will be set to 'object'
map_iterations = np.vstack(map_iterations)
return map_iterations
def _combinations_with_replacement(self):
# Generate sequences of {a_i} such that
# a_0 >= a_1 >= ... >= a_(d-1) and
# d-i <= a_i <= n, for each i in [0, d-1].
#
# Almost equivalent to
# list(itertools.combinations_with_replacement(range(n, 0, -1), r=d))
# [::-1]
#
# Example:
# _combinations_with_replacement(n=13, d=3) -->
# (3, 2, 1), (3, 2, 2), (3, 3, 1), ... , (13, 13, 12), (13, 13, 13).
#
# The implementation follows the insertion sort algorithm:
# insert a new element a_i from right to left to keep the reverse
# sorted order. Now substitute increment operation for insert.
if self.d > self.n:
return
if self.d == 1:
for matrix_entry in range(1, self.n + 1):
yield (matrix_entry,)
return
sequence_sorted = list(range(self.d, 0, -1))
input_order = tuple(sequence_sorted) # fixed
while sequence_sorted[0] != self.n + 1:
for last_element in range(1, sequence_sorted[-2] + 1):
sequence_sorted[-1] = last_element
yield tuple(sequence_sorted)
increment_id = self.d - 2
while increment_id > 0 and sequence_sorted[increment_id - 1] == \
sequence_sorted[increment_id]:
increment_id -= 1
sequence_sorted[increment_id + 1:] = input_order[increment_id + 1:]
sequence_sorted[increment_id] += 1
def cpu(self, log_du):
log_1 = np.log(1.)
# Compute the log of the integral's coefficient
logK = np.sum(np.log(np.arange(1, self.n + 1)))
# Add to the 3D matrix u a bottom layer equal to 0 and a
# top layer equal to 1. Then compute the difference du along
# the first dimension.
# prepare arrays for usage inside the loop
di_scratch = np.empty_like(log_du, dtype=np.int32)
log_du_scratch = np.empty_like(log_du)
# precompute log(factorial)s
# pad with a zero to get 0! = 1
log_factorial = np.hstack((0, np.cumsum(np.log(range(1, self.n + 1)))))
# compute the probabilities for each unique row of du
# only loop over the indices and do all du entries at once
# using matrix algebra
# initialise probabilities to 0
P_total = np.zeros(
log_du.shape[0],
dtype=np.float32 if self.precision == 'float' else np.float64
)
for iter_id, matrix_entries in enumerate(
tqdm(self._combinations_with_replacement(),
total=self.num_iterations,
desc="Joint survival function",
disable=not self.verbose)):
# if we are running with MPI
if mpi_accelerated and iter_id % size != rank:
continue
# we only need the differences of the indices:
di = -np.diff((self.n,) + matrix_entries + (0,))
# reshape the matrix to be compatible with du
di_scratch[:, range(len(di))] = di
# use precomputed factorials
sum_log_di_factorial = log_factorial[di].sum()
# Compute for each i,j the contribution to the probability
# given by this step, and add it to the total probability
# Use precomputed log
np.copyto(log_du_scratch, log_du)
# for each a=0,1,...,A-1 and b=0,1,...,B-1, replace du with 1
# whenever di_scratch = 0, so that du ** di_scratch = 1 (this
# avoids nans when both du and di_scratch are 0, and is
# mathematically correct)
log_du_scratch[di_scratch == 0] = log_1
di_log_du = di_scratch * log_du_scratch
sum_di_log_du = di_log_du.sum(axis=1)
logP = sum_di_log_du - sum_log_di_factorial
P_total += np.exp(logP + logK)
if mpi_accelerated:
totals = np.zeros_like(P_total)
# exchange all the results
mpi_float_type = MPI.FLOAT \
if self.precision == 'float' else MPI.DOUBLE
comm.Allreduce(
[P_total, mpi_float_type],
[totals, mpi_float_type],
op=MPI.SUM)
# We need to return the collected totals instead of the local
# P_total
P_total = totals
return P_total
def _compile_template(self, template_name, **kwargs):
from jinja2 import Template
cu_template_path = Path(__file__).parent / template_name
cu_template = Template(cu_template_path.read_text())
asset_cu = cu_template.render(
precision=self.precision,
CWR_LOOPS=self.cuda_cwr_loops,
N=self.n, D=self.d, **kwargs)
return asset_cu
def pyopencl(self, log_du, device_id=0):
import pyopencl as cl
import pyopencl.array as cl_array
self._check_input(log_du)
it_todo = self.num_iterations
u_length = log_du.shape[0]
context = cl.create_some_context(interactive=False)
if self.verbose:
print("Available OpenCL devices:\n", context.devices)
device = context.devices[device_id]
# A queue bounded to the device
queue = cl.CommandQueue(context)
max_l_block = device.local_mem_size // (
self.dtype.itemsize * (self.d + 2))
n_threads = min(self.cuda_threads, max_l_block,
device.max_work_group_size)
if n_threads > 32:
# It's more efficient to make the number of threads
# a multiple of the warp size (32).
n_threads -= n_threads % 32
iteration_table_str = ", ".join(f"{val}LU" for val in
self.map_iterations.flatten())
iteration_table_str = "{%s}" % iteration_table_str
log_factorial = np.r_[0, np.cumsum(np.log(range(1, self.n + 1)))]
logK = log_factorial[-1]
log_factorial_str = ", ".join(f"{val:.10f}" for val in log_factorial)
log_factorial_str = "{%s}" % log_factorial_str
atomic_int = 'int' if self.precision == 'float' else 'long'
# GPU_MAX_HEAP_SIZE OpenCL flag is set to 2 Gb (1 << 31) by default
mem_avail = min(device.max_mem_alloc_size, device.global_mem_size,
1 << 31)
# 4 * (D + 1) * size + 8 * size == mem_avail
chunk_size = mem_avail // (4 * log_du.shape[1] + self.dtype.itemsize)
chunk_size, split_idx = self._split_axis(chunk_size=chunk_size,
axis_size=u_length)
P_total = np.empty(u_length, dtype=self.dtype)
P_total_gpu = cl_array.Array(queue, shape=chunk_size, dtype=self.dtype)
for i_start, i_end in split_idx:
log_du_gpu = cl_array.to_device(queue, log_du[i_start: i_end],
async_=True)
P_total_gpu.fill(0, queue=queue)
chunk_size = i_end - i_start
l_block = min(n_threads, chunk_size)
l_num_blocks = math.ceil(chunk_size / l_block)
grid_size = math.ceil(it_todo / (n_threads * self.cuda_cwr_loops))
if grid_size > l_num_blocks:
# make grid_size divisible by l_num_blocks
grid_size -= grid_size % l_num_blocks
else:
# grid_size must be at least l_num_blocks
grid_size = l_num_blocks
if self.verbose:
print(f"[Joint prob. matrix] it_todo={it_todo}, "
f"grid_size={grid_size}, L_BLOCK={l_block}, "
f"N_THREADS={n_threads}")
# OpenCL defines unsigned long as uint64, therefore we're adding
# the LU suffix, not LLU, which would indicate unsupported uint128
# data type format.
asset_cl = self._compile_template(
template_name="joint_pmat.cl",
L=f"{chunk_size}LU",
L_BLOCK=l_block,
L_NUM_BLOCKS=l_num_blocks,
ITERATIONS_TODO=f"{it_todo}LU",
logK=f"{logK:.10f}f",
iteration_table=iteration_table_str,
log_factorial=log_factorial_str,
ATOMIC_UINT=f"unsigned {atomic_int}",
ASSET_ENABLE_DOUBLE_SUPPORT=int(self.precision == "double")
)
program = cl.Program(context, asset_cl).build()
# synchronize
cl.enqueue_barrier(queue)
kernel = program.jsf_uniform_orderstat_3d_kernel
kernel(queue, (grid_size,), (n_threads,),
P_total_gpu.data, log_du_gpu.data, g_times_l=True)
P_total_gpu[:chunk_size].get(ary=P_total[i_start: i_end])
return P_total
def pycuda(self, log_du):
try:
# PyCuda should not be in requirements-extra because CPU limited
# users won't be able to install Elephant.
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import pycuda.driver as drv
from pycuda.compiler import SourceModule
except ImportError as err:
raise ImportError(
"Install pycuda with 'pip install pycuda'") from err
self._check_input(log_du)
it_todo = self.num_iterations
u_length = log_du.shape[0]
device = pycuda.autoinit.device
max_l_block = device.MAX_SHARED_MEMORY_PER_BLOCK // (
self.dtype.itemsize * (self.d + 2))
n_threads = min(self.cuda_threads, max_l_block,
device.MAX_THREADS_PER_BLOCK)
if n_threads > device.WARP_SIZE:
# It's more efficient to make the number of threads
# a multiple of the warp size (32).
n_threads -= n_threads % device.WARP_SIZE
log_factorial = np.r_[0, np.cumsum(np.log(range(1, self.n + 1)))]
log_factorial = log_factorial.astype(self.dtype)
logK = log_factorial[-1]
free, total = drv.mem_get_info()
# 4 * (D + 1) * size + 8 * size == mem_avail
chunk_size = free // (4 * log_du.shape[1] + self.dtype.itemsize)
chunk_size, split_idx = self._split_axis(chunk_size=chunk_size,
axis_size=u_length)
P_total = np.empty(u_length, dtype=self.dtype)
P_total_gpu = gpuarray.GPUArray(chunk_size, dtype=self.dtype)
log_du_gpu = drv.mem_alloc(4 * chunk_size * log_du.shape[1])
for i_start, i_end in split_idx:
drv.memcpy_htod_async(dest=log_du_gpu, src=log_du[i_start: i_end])
P_total_gpu.fill(0)
chunk_size = i_end - i_start
l_block = min(n_threads, chunk_size)
l_num_blocks = math.ceil(chunk_size / l_block)
grid_size = math.ceil(it_todo / (n_threads * self.cuda_cwr_loops))
grid_size = min(grid_size, device.MAX_GRID_DIM_X)
if grid_size > l_num_blocks:
# make grid_size divisible by l_num_blocks
grid_size -= grid_size % l_num_blocks
else:
# grid_size must be at least l_num_blocks
grid_size = l_num_blocks
if self.verbose:
print(f"[Joint prob. matrix] it_todo={it_todo}, "
f"grid_size={grid_size}, L_BLOCK={l_block}, "
f"N_THREADS={n_threads}")
asset_cu = self._compile_template(
template_name="joint_pmat.cu",
L=f"{chunk_size}LLU",
L_BLOCK=l_block,
L_NUM_BLOCKS=l_num_blocks,
ITERATIONS_TODO=f"{it_todo}LLU",
logK=f"{logK:.10f}f",
)
module = SourceModule(asset_cu)
iteration_table_gpu, _ = module.get_global("iteration_table")
iteration_table = self.map_iterations.astype(np.uint64)
drv.memcpy_htod(iteration_table_gpu, iteration_table)
log_factorial_gpu, _ = module.get_global("log_factorial")
drv.memcpy_htod(log_factorial_gpu, log_factorial)
drv.Context.synchronize()
kernel = module.get_function("jsf_uniform_orderstat_3d_kernel")
kernel(P_total_gpu.gpudata, log_du_gpu, grid=(grid_size, 1),
block=(n_threads, 1, 1))
P_total_gpu[:chunk_size].get(ary=P_total[i_start: i_end])
return P_total
def _cuda(self, log_du):
# Compile a self-contained joint_pmat_old.cu file and run it
# in a terminal. Having this function is useful to debug ASSET CUDA
# application because it's self-contained and the logic is documented.
# Don't use this backend when the 'log_du' arrays are huge because
# of the disk I/O operations.
# A note to developers: remove this backend in half a year once the
# pycuda backend proves to be stable.
self._check_input(log_du)
asset_cu = self._compile_template(
template_name="joint_pmat_old.cu",
L=f"{log_du.shape[0]}LLU",
N_THREADS=self.cuda_threads,
ITERATIONS_TODO=f"{self.num_iterations}LLU",
ASSET_DEBUG=int(self.verbose)
)
with tempfile.TemporaryDirectory() as asset_tmp_folder:
asset_cu_path = os.path.join(asset_tmp_folder, 'asset.cu')
asset_bin_path = os.path.join(asset_tmp_folder, 'asset.o')
with open(asset_cu_path, 'w') as f:
f.write(asset_cu)
# -O3 optimization flag is for the host code only;
# by default, GPU device code is optimized with -O3.
# -w to ignore warnings.
compile_cmd = ['nvcc', '-w', '-O3', '-o', asset_bin_path,
asset_cu_path]
if self.precision == 'double' and get_cuda_capability_major() >= 6:
# atomicAdd(double) requires compute capability 6.x
compile_cmd.extend(['-arch', 'sm_60'])
compile_status = subprocess.run(
compile_cmd,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if self.verbose:
print(compile_status.stdout.decode())
print(compile_status.stderr.decode(), file=sys.stderr)
compile_status.check_returncode()
log_du_path = os.path.join(asset_tmp_folder, "log_du.dat")
P_total_path = os.path.join(asset_tmp_folder, "P_total.dat")
with open(log_du_path, 'wb') as f:
log_du.tofile(f)
run_status = subprocess.run(
[asset_bin_path, log_du_path, P_total_path],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if self.verbose:
print(run_status.stdout.decode())
print(run_status.stderr.decode(), file=sys.stderr)
run_status.check_returncode()
with open(P_total_path, 'rb') as f:
P_total = np.fromfile(f, dtype=self.dtype)
return P_total
def _check_input(self, log_du):
it_todo = self.num_iterations
if it_todo > np.iinfo(np.uint64).max:
raise ValueError(f"it_todo ({it_todo}) is larger than MAX_UINT64."
" Only Python backend is supported.")
# Don't convert log_du to float32 transparently for the user to avoid
# situations when the user accidentally passes an array with float64.
# Doing so wastes memory for nothing.
if log_du.dtype != np.float32:
raise ValueError("'log_du' must be a float32 array")
if log_du.shape[1] != self.d + 1:
raise ValueError(f"log_du.shape[1] ({log_du.shape[1]}) must be "
f"equal to D+1 ({self.d + 1})")
def compute(self, u):
if u.shape[1] != self.d:
raise ValueError("Invalid input data shape axis 1: expected {}, "
"got {}".format(self.d, u.shape[1]))
# A faster and memory efficient implementation of
# du = np.diff(u, prepend=0, append=1, axis=1).astype(np.float32)
du = np.empty((u.shape[0], u.shape[1] + 1), dtype=np.float32)
du[:, 0] = u[:, 0]
np.subtract(u[:, 1:], u[:, :-1], out=du[:, 1:-1])
np.subtract(1, u[:, -1], out=du[:, -1])
# precompute logarithms
# ignore warnings about infinities, see inside the loop:
# we replace 0 * ln(0) by 1 to get exp(0 * ln(0)) = 0 ** 0 = 1
# the remaining infinities correctly evaluate to
# exp(ln(0)) = exp(-inf) = 0
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
log_du = np.log(du, out=du)
jsf_backend = self._choose_backend()
P_total = jsf_backend(log_du)
# Captures non-finite values like NaN, inf
inside = (P_total > -self.tolerance) & (P_total < 1 + self.tolerance)
outside_vals = P_total[~inside]
if len(outside_vals) > 0:
# A watchdog for unexpected results.
warnings.warn(f"{len(outside_vals)}/{P_total.shape[0]} values of "
"the computed joint prob. matrix lie outside of the "
f"valid [0, 1] interval:\n{outside_vals}\nIf you're "
"using PyOpenCL backend, make sure you've disabled "
"GPU Hangcheck as described here https://"
"software.intel.com/content/www/us/en/develop/"
"documentation/get-started-with-intel-oneapi-"
"base-linux/top/before-you-begin.html\n"
"Clipping the output array to 0 and 1.")
P_total = np.clip(P_total, a_min=0., a_max=1., out=P_total)
return P_total
class _PMatNeighbors(_GPUBackend):
"""
Parameters
----------
filter_shape : tuple of int
A pair of integers representing the kernel shape `(l, w)`.
n_largest : int
The number of largest neighbors to collect for each entry in `mat`.
"""
def __init__(self, filter_shape, n_largest, max_chunk_size=None):
super().__init__(max_chunk_size=max_chunk_size)
self.n_largest = n_largest
self.max_chunk_size = max_chunk_size
filter_size, filter_width = filter_shape
if filter_width >= filter_size:
raise ValueError('filter_shape width must be lower than length')
if not ((filter_width % 2) and (filter_size % 2)):
warnings.warn(
'The kernel is not centered on the datapoint in whose '
'calculation it is used. Consider using odd values '
'for both entries of filter_shape.')
# Construct the kernel
filt = np.ones((filter_size, filter_size), dtype=bool)
filt = np.triu(filt, -filter_width)
filt = np.tril(filt, filter_width)
if n_largest > len(filt.nonzero()[0]):
raise ValueError(f"Too small filter shape {filter_shape} to "
f"select {n_largest} largest elements.")
self.filter_kernel = filt
def _check_input(self, mat):
symmetric = np.all(np.diagonal(mat) == 0.5)
# Check consistent arguments
filter_size = self.filter_kernel.shape[0]
if (symmetric and mat.shape[0] < 2 * filter_size - 1) \
or (not symmetric and min(mat.shape) < filter_size):
raise ValueError(f"'filter_shape' {self.filter_kernel.shape} is "
f"too large for the input matrix of shape "
f"{mat.shape}")
if mat.dtype != np.float32:
raise ValueError("The input matrix dtype must be float32.")
def pyopencl(self, mat):
import pyopencl as cl
import pyopencl.array as cl_array
from jinja2 import Template
context = cl.create_some_context(interactive=False)
device = context.devices[0]
queue = cl.CommandQueue(context)
# if the matrix is symmetric the diagonal was set to 0.5
# when computing the probability matrix
symmetric = np.all(np.diagonal(mat) == 0.5)
self._check_input(mat)
filt_size = self.filter_kernel.shape[0] # filt is a square matrix
filt_rows, filt_cols = self.filter_kernel.nonzero()
filt_rows = "{%s}" % ", ".join(f"{row}U" for row in filt_rows)
filt_cols = "{%s}" % ", ".join(f"{col}U" for col in filt_cols)
lmat_padded = np.zeros((mat.shape[0], mat.shape[1], self.n_largest),
dtype=np.float32)
if symmetric:
mat = mat[filt_size:]
lmat = lmat_padded[filt_size + filt_size // 2: -filt_size // 2 + 1]
else:
lmat = lmat_padded[filt_size // 2: -filt_size // 2 + 1]
# GPU_MAX_HEAP_SIZE OpenCL flag is set to 2 Gb (1 << 31) by default
mem_avail = min(device.max_mem_alloc_size, device.global_mem_size,
1 << 31)
# 4 * size * n_cols * n_largest + 4 * (size + filt_size) * n_cols
chunk_size = (mem_avail // 4 - filt_size * lmat.shape[1]) // (
lmat.shape[1] * (self.n_largest + 1))
chunk_size, split_idx = self._split_axis(chunk_size=chunk_size,
axis_size=lmat.shape[0],
min_chunk_size=filt_size)
pmat_cl_path = Path(__file__).parent / "pmat_neighbors.cl"
pmat_cl_template = Template(pmat_cl_path.read_text())
lmat_gpu = cl_array.Array(
queue, shape=(chunk_size, lmat.shape[1], self.n_largest),
dtype=np.float32
)
for i_start, i_end in split_idx:
mat_gpu = cl_array.to_device(queue,
mat[i_start: i_end + filt_size],
async_=True)
lmat_gpu.fill(0, queue=queue)
chunk_size = i_end - i_start
it_todo = chunk_size * (lmat.shape[1] - filt_size + 1)
pmat_neighbors_cl = pmat_cl_template.render(
FILT_SIZE=filt_size,
N_LARGEST=self.n_largest,
PMAT_COLS=f"{lmat.shape[1]}LU",
Y_OFFSET=f"{i_start}LU",
NONZERO_SIZE=self.filter_kernel.sum(),
SYMMETRIC=int(symmetric),
filt_rows=filt_rows,
filt_cols=filt_cols
)
program = cl.Program(context, pmat_neighbors_cl).build()
# synchronize
cl.enqueue_barrier(queue)
kernel = program.pmat_neighbors
# When the grid size is set to the total number of work items to
# execute and the local size is set to None, PyOpenCL chooses the
# number of threads automatically such that the total number of
# work items exactly matches the desired number of iterations.
kernel(queue, (it_todo,), None, lmat_gpu.data, mat_gpu.data)
lmat_gpu[:chunk_size].get(ary=lmat[i_start: i_end])
return lmat_padded
def pycuda(self, mat):
from jinja2 import Template
try:
# PyCuda should not be in requirements-extra because CPU limited
# users won't be able to install Elephant.
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import pycuda.driver as drv
from pycuda.compiler import SourceModule
except ImportError as err:
raise ImportError(
"Install pycuda with 'pip install pycuda'") from err
# if the matrix is symmetric the diagonal was set to 0.5
# when computing the probability matrix
symmetric = np.all(np.diagonal(mat) == 0.5)
self._check_input(mat)
device = pycuda.autoinit.device
n_threads = device.MAX_THREADS_PER_BLOCK
filt_size = self.filter_kernel.shape[0]
filt_rows, filt_cols = self.filter_kernel.nonzero()
lmat_padded = np.zeros((mat.shape[0], mat.shape[1], self.n_largest),
dtype=np.float32)
if symmetric:
mat = mat[filt_size:]
lmat = lmat_padded[filt_size + filt_size // 2: -filt_size // 2 + 1]
else:
lmat = lmat_padded[filt_size // 2: -filt_size // 2 + 1]
free, total = drv.mem_get_info()
# 4 * size * n_cols * n_largest + 4 * (size + filt_size) * n_cols
chunk_size = (free // 4 - filt_size * lmat.shape[1]) // (
lmat.shape[1] * (self.n_largest + 1))
chunk_size, split_idx = self._split_axis(chunk_size=chunk_size,
axis_size=lmat.shape[0],
min_chunk_size=filt_size)
pmat_cu_path = Path(__file__).parent / "pmat_neighbors.cu"
pmat_cu_template = Template(pmat_cu_path.read_text())
lmat_gpu = gpuarray.GPUArray(
(chunk_size, lmat.shape[1], self.n_largest), dtype=np.float32)
mat_gpu = drv.mem_alloc(4 * (chunk_size + filt_size) * mat.shape[1])
for i_start, i_end in split_idx:
drv.memcpy_htod_async(dest=mat_gpu,
src=mat[i_start: i_end + filt_size])
lmat_gpu.fill(0)
chunk_size = i_end - i_start
it_todo = chunk_size * (lmat.shape[1] - filt_size + 1)
pmat_neighbors_cu = pmat_cu_template.render(
FILT_SIZE=filt_size,
N_LARGEST=self.n_largest,
PMAT_COLS=f"{lmat.shape[1]}LLU",
Y_OFFSET=f"{i_start}LLU",
NONZERO_SIZE=self.filter_kernel.sum(),
SYMMETRIC=int(symmetric),
IT_TODO=it_todo,
)
module = SourceModule(pmat_neighbors_cu)
filt_rows_gpu, _ = module.get_global("filt_rows")
drv.memcpy_htod(filt_rows_gpu, filt_rows.astype(np.uint32))
filt_cols_gpu, _ = module.get_global("filt_cols")
drv.memcpy_htod(filt_cols_gpu, filt_cols.astype(np.uint32))
drv.Context.synchronize()
grid_size = math.ceil(it_todo / n_threads)
if grid_size > device.MAX_GRID_DIM_X:
raise ValueError("Cannot launch a CUDA kernel with "
f"{grid_size} num. of blocks. Adjust the "
"'max_chunk_size' parameter.")
kernel = module.get_function("pmat_neighbors")
kernel(lmat_gpu.gpudata, mat_gpu, grid=(grid_size, 1),
block=(n_threads, 1, 1))
lmat_gpu[:chunk_size].get(ary=lmat[i_start: i_end])
return lmat_padded
def compute(self, mat):
"""
Build the 3D matrix `L` of largest neighbors of elements in a 2D matrix
`mat`.
For each entry `mat[i, j]`, collects the `n_largest` elements with
largest values around `mat[i, j]`, say `z_i, i=1,2,...,n_largest`,
and assigns them to `L[i, j, :]`.
The zone around `mat[i, j]` where largest neighbors are collected from
is a rectangular area (kernel) of shape `(l, w) = filter_shape`
centered around `mat[i, j]` and aligned along the diagonal.
If `mat` is symmetric, only the triangle below the diagonal is
considered.
Parameters
----------
mat : np.ndarray
A square matrix of real-valued elements.
Returns
-------
lmat : np.ndarray
A matrix of shape `(l, w, n_largest)` containing along the last
dimension `lmat[i, j, :]` the largest neighbors of `mat[i, j]`.
"""
backend = self._choose_backend()
lmat = backend(mat)
return lmat
def cpu(self, mat):
# if the matrix is symmetric the diagonal was set to 0.5
# when computing the probability matrix
symmetric = np.all(np.diagonal(mat) == 0.5)
self._check_input(mat)
filter_size = self.filter_kernel.shape[0]
# Initialize the matrix of d-largest values as a matrix of zeroes
lmat = np.zeros((mat.shape[0], mat.shape[1], self.n_largest),
dtype=np.float32)
N_bin_y = mat.shape[0]
N_bin_x = mat.shape[1]
# if the matrix is symmetric do not use kernel positions intersected
# by the diagonal
if symmetric:
bin_range_y = range(filter_size, N_bin_y - filter_size + 1)
else:
bin_range_y = range(N_bin_y - filter_size + 1)
bin_range_x = range(N_bin_x - filter_size + 1)
# compute matrix of largest values
for y in bin_range_y:
if symmetric:
# x range depends on y position
bin_range_x = range(y - filter_size + 1)
for x in bin_range_x:
patch = mat[y: y + filter_size, x: x + filter_size]
mskd = patch[self.filter_kernel]
largest_vals = np.sort(mskd)[-self.n_largest:]
lmat[y + (filter_size // 2), x + (filter_size // 2), :] = \
largest_vals
return lmat
def synchronous_events_intersection(sse1, sse2, intersection='linkwise'):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of positions `(iK, jK)` of matrix entries and
associated synchronous events `SK`, finds the intersection among them.
The intersection can be performed 'pixelwise' or 'linkwise'.
* if 'pixelwise', it yields a new SSE which retains only events in
`sse1` whose pixel position matches a pixel position in `sse2`. This
operation is not symmetric:
`intersection(sse1, sse2) != intersection(sse2, sse1)`.
* if 'linkwise', an additional step is performed where each retained
synchronous event `SK` in `sse1` is intersected with the
corresponding event in `sse2`. This yields a symmetric operation:
`intersection(sse1, sse2) = intersection(sse2, sse1)`.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Each is a dictionary of pixel positions `(i, j)` as keys and sets `S`
of synchronous events as values (see above).
intersection : {'pixelwise', 'linkwise'}, optional
The type of intersection to perform among the two SSEs (see above).
Default: 'linkwise'
Returns
-------
sse_new : dict
A new SSE (same structure as `sse1` and `sse2`) which retains only the
events of `sse1` associated to keys present both in `sse1` and `sse2`.
If `intersection = 'linkwise'`, such events are additionally
intersected with the associated events in `sse2`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
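Examples
--------
An illustrative sketch with two hypothetical SSE dictionaries:
>>> sse1 = {(1, 2): {1, 2, 3}, (3, 4): {5, 6}, (6, 7): {0, 1}}
>>> sse2 = {(1, 2): {1, 2}, (5, 6): {0}}
>>> synchronous_events_intersection(sse1, sse2, intersection='pixelwise')
{(1, 2): {1, 2, 3}}
>>> synchronous_events_intersection(sse1, sse2, intersection='linkwise')
{(1, 2): {1, 2}}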
"""
sse_new = sse1.copy()
for pixel1 in sse1.keys():
if pixel1 not in sse2.keys():
del sse_new[pixel1]
if intersection == 'linkwise':
# iterate over a static copy, since entries may be deleted from sse_new below
for pixel1, link1 in list(sse_new.items()):
sse_new[pixel1] = link1.intersection(sse2[pixel1])
if len(sse_new[pixel1]) == 0:
del sse_new[pixel1]
elif intersection == 'pixelwise':
pass
else:
raise ValueError(
"intersection (=%s) can only be" % intersection +
" 'pixelwise' or 'linkwise'")
return sse_new
def synchronous_events_difference(sse1, sse2, difference='linkwise'):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), computes the difference between `sse1` and `sse2`.
The difference can be performed 'pixelwise' or 'linkwise':
* if 'pixelwise', it yields a new SSE which contains all (and only) the
events in `sse1` whose pixel position doesn't match any pixel in
`sse2`.
* if 'linkwise', for each pixel `(i, j)` in `sse1` and corresponding
synchronous event `S1`, if `(i, j)` is a pixel in `sse2`
corresponding to the event `S2`, it retains the set difference
`S1 - S2`. If `(i, j)` is not a pixel in `sse2`, it retains the full
set `S1`.
Note that in either case the difference is a non-symmetric operation:
`difference(sse1, sse2) != difference(sse2, sse1)`.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values (see above).
difference : {'pixelwise', 'linkwise'}, optional
The type of difference to perform between `sse1` and `sse2` (see
above).
Default: 'linkwise'
Returns
-------
sse_new : dict
A new SSE (same structure as `sse1` and `sse2`) which retains the
difference between `sse1` and `sse2`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
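Examples
--------
An illustrative sketch with two hypothetical SSE dictionaries:
>>> sse1 = {(1, 2): {1, 2, 3}, (3, 4): {5, 6}}
>>> sse2 = {(1, 2): {1, 2}, (5, 6): {0}}
>>> synchronous_events_difference(sse1, sse2, difference='pixelwise')
{(3, 4): {5, 6}}
>>> synchronous_events_difference(sse1, sse2, difference='linkwise')
{(1, 2): {3}, (3, 4): {5, 6}}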
"""
sse_new = sse1.copy()
for pixel1 in sse1.keys():
if pixel1 in sse2.keys():
if difference == 'pixelwise':
del sse_new[pixel1]
elif difference == 'linkwise':
sse_new[pixel1] = sse_new[pixel1].difference(sse2[pixel1])
if len(sse_new[pixel1]) == 0:
del sse_new[pixel1]
else:
raise ValueError(
"difference (=%s) can only be" % difference +
" 'pixelwise' or 'linkwise'")
return sse_new
def _remove_empty_events(sse):
"""
Given a sequence of synchronous events (SSE) `sse` consisting of a pool of
pixel positions and associated synchronous events (see below), returns a
copy of `sse` where all empty events have been removed.
`sse` must be provided as a dictionary of type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse : dict
A dictionary of pixel positions `(i, j)` as keys, and sets `S` of
synchronous events as values (see above).
Returns
-------
sse_new : dict
A copy of `sse` where all empty events have been removed.
"""
sse_new = sse.copy()
for pixel, link in sse.items():
if link == set([]):
del sse_new[pixel]
return sse_new
def synchronous_events_identical(sse1, sse2):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), determines whether `sse1` is identical to `sse2`.
`sse1` is identical to `sse2` if both contain the same pixels and, for each
common pixel, the same associated synchronous events (empty events are
ignored in the comparison).
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values.
Returns
-------
bool
True if `sse1` is identical to `sse2`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
"""
# Remove empty links from sse11 and sse22, if any
sse11 = _remove_empty_events(sse1)
sse22 = _remove_empty_events(sse2)
# Return whether sse11 == sse22
return sse11 == sse22
def synchronous_events_no_overlap(sse1, sse2):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), determines whether `sse1` and `sse2` are disjoint.
Two SSEs are disjoint if they don't share pixels, or if the events
associated to common pixels are disjoint.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values.
Returns
-------
bool
True if `sse1` is disjoint from `sse2`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
"""
# Remove empty links from sse11 and sse22, if any
sse11 = _remove_empty_events(sse1)
sse22 = _remove_empty_events(sse2)
# If both SSEs are empty, return False (we consider them equal)
if sse11 == {} and sse22 == {}:
return False
common_pixels = set(sse11.keys()).intersection(set(sse22.keys()))
if len(common_pixels) == 0:
return True
if all(sse11[p].isdisjoint(sse22[p]) for p in common_pixels):
return True
return False
def synchronous_events_contained_in(sse1, sse2):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), determines whether `sse1` is strictly contained in `sse2`.
`sse1` is strictly contained in `sse2` if all its pixels are pixels of
`sse2`, if its associated events are subsets of the corresponding events
in `sse2`, and if `sse2` contains non-empty events, or neuron IDs in some
event, which do not belong to `sse1` (i.e., `sse1` and `sse2` are not
identical).
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values.
Returns
-------
bool
True if `sse1` is a subset of `sse2`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
"""
# Remove empty links from sse11 and sse22, if any
sse11 = _remove_empty_events(sse1)
sse22 = _remove_empty_events(sse2)
# Return False if sse11 and sse22 are identical
if synchronous_events_identical(sse11, sse22):
return False
# Return False if any pixel in sse1 is not contained in sse2, or if any
# link of sse1 is not a subset of the corresponding link in sse2.
# Otherwise (if sse1 is a subset of sse2) continue
for pixel1, link1 in sse11.items():
if pixel1 not in sse22.keys():
return False
if not link1.issubset(sse22[pixel1]):
return False
# Check that sse1 is a STRICT subset of sse2, i.e. that sse2 contains at
# least one pixel or neuron id not present in sse1.
return not synchronous_events_identical(sse11, sse22)
def synchronous_events_contains_all(sse1, sse2):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), determines whether `sse1` strictly contains `sse2`.
`sse1` strictly contains `sse2` if it contains all pixels of `sse2`, if all
associated events in `sse1` contain those in `sse2`, and if `sse1`
additionally contains other pixels / events not contained in `sse2`.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values.
Returns
-------
bool
True if `sse1` strictly contains `sse2`.
Notes
-----
`synchronous_events_contains_all(sse1, sse2)` is identical to
`synchronous_events_contained_in(sse2, sse1)`.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
"""
return synchronous_events_contained_in(sse2, sse1)
def synchronous_events_overlap(sse1, sse2):
"""
Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each
consisting of a pool of pixel positions and associated synchronous events
(see below), determines whether the two SSEs overlap.
The SSEs overlap if they are not equal, neither one is a superset of the
other, and they are not disjoint.
Both `sse1` and `sse2` must be provided as dictionaries of the type
.. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
where each `i`, `j` is an integer and each `S` is a set of neuron IDs.
Parameters
----------
sse1, sse2 : dict
Dictionaries of pixel positions `(i, j)` as keys and sets `S` of
synchronous events as values.
Returns
-------
bool
True if `sse1` and `sse2` overlap.
See Also
--------
ASSET.extract_synchronous_events : extract SSEs from given spike trains
"""
contained_in = synchronous_events_contained_in(sse1, sse2)
contains_all = synchronous_events_contains_all(sse1, sse2)
identical = synchronous_events_identical(sse1, sse2)
is_disjoint = synchronous_events_no_overlap(sse1, sse2)
return not (contained_in or contains_all or identical or is_disjoint)
def _signals_t_start_stop(signals, t_start=None, t_stop=None):
if t_start is None:
t_start = _signals_same_attribute(signals, 't_start')
if t_stop is None:
t_stop = _signals_same_attribute(signals, 't_stop')
return t_start, t_stop
def _intersection_matrix(spiketrains, spiketrains_y, bin_size, t_start_x,
t_start_y, t_stop_x, t_stop_y, normalization=None):
if spiketrains_y is None:
spiketrains_y = spiketrains
# Compute the binned spike train matrices, along both time axes
spiketrains_binned = conv.BinnedSpikeTrain(
spiketrains, bin_size=bin_size,
t_start=t_start_x, t_stop=t_stop_x)
spiketrains_binned_y = conv.BinnedSpikeTrain(
spiketrains_y, bin_size=bin_size,
t_start=t_start_y, t_stop=t_stop_y)
# Compute imat by matrix multiplication
bsts_x = spiketrains_binned.sparse_matrix
bsts_y = spiketrains_binned_y.sparse_matrix
# Compute the number of spikes in each bin, for both time axes
# 'A1' property returns self as a flattened ndarray.
spikes_per_bin_x = bsts_x.sum(axis=0).A1
spikes_per_bin_y = bsts_y.sum(axis=0).A1
# Compute the intersection matrix imat
imat = bsts_x.T.dot(bsts_y).toarray().astype(np.float32)
for ii in range(bsts_x.shape[1]):
# Normalize the row
col_sum = bsts_x[:, ii].sum()
if normalization is None or col_sum == 0:
norm_coef = 1.
elif normalization == 'intersection':
norm_coef = np.minimum(
spikes_per_bin_x[ii], spikes_per_bin_y)
elif normalization == 'mean':
# geometric mean
norm_coef = np.sqrt(
spikes_per_bin_x[ii] * spikes_per_bin_y)
elif normalization == 'union':
norm_coef = np.array([(bsts_x[:, ii]
+ bsts_y[:, jj]).count_nonzero()
for jj in range(bsts_y.shape[1])])
else:
raise ValueError(
"Invalid parameter 'normalization': {}".format(normalization))
# If normalization required, for each j such that bsts_y[j] is
# identically 0 the code above sets imat[:, j] to identically nan.
# Substitute 0s instead.
imat[ii, :] = np.divide(imat[ii, :], norm_coef,
out=np.zeros(imat.shape[1],
dtype=np.float32),
where=norm_coef != 0)
# Return the intersection matrix and the edges of the bins used for the
# x and y axes, respectively.
return imat
class ASSET(object):
"""
Analysis of Sequences of Synchronous EvenTs class.
Parameters
----------
spiketrains_i, spiketrains_j : list of neo.SpikeTrain
Input spike trains for the first and second time dimensions,
respectively, to compute the p-values from.
If `spiketrains_j` is None, it's set to `spiketrains_i`.
bin_size : pq.Quantity, optional
The width of the time bins used to compute the probability matrix.
t_start_i, t_start_j : pq.Quantity, optional
The start time of the binning for the first and second axes,
respectively.
If None, the attribute `t_start` of the spike trains is used
(if the same for all spike trains).
Default: None
t_stop_i, t_stop_j : pq.Quantity, optional
The stop time of the binning for the first and second axes,
respectively.
If None, the attribute `t_stop` of the spike trains is used
(if the same for all spike trains).
Default: None
verbose : bool, optional
If True, print messages and show progress bar.
Default: True
Raises
------
ValueError
If the `t_start` & `t_stop` times are not (one of):
perfectly aligned;
fully disjoint.
"""
def __init__(self, spiketrains_i, spiketrains_j=None, bin_size=3 * pq.ms,
t_start_i=None, t_start_j=None, t_stop_i=None, t_stop_j=None,
verbose=True):
self.spiketrains_i = spiketrains_i
if spiketrains_j is None:
spiketrains_j = spiketrains_i
self.spiketrains_j = spiketrains_j
self.bin_size = bin_size
self.t_start_i, self.t_stop_i = _signals_t_start_stop(
spiketrains_i,
t_start=t_start_i,
t_stop=t_stop_i)
self.t_start_j, self.t_stop_j = _signals_t_start_stop(
spiketrains_j,
t_start=t_start_j,
t_stop=t_stop_j)
self.verbose = verbose and rank == 0
msg = 'The time intervals for x and y need to be either identical ' \
'or fully disjoint, but they are:\n' \
'x: ({}, {}) and y: ({}, {}).'.format(self.t_start_i,
self.t_stop_i,
self.t_start_j,
self.t_stop_j)
# the starts have to be perfectly aligned for the binning to work
# the stops can differ without impacting the binning
if self.t_start_i == self.t_start_j:
if not _quantities_almost_equal(self.t_stop_i, self.t_stop_j):
raise ValueError(msg)
elif (self.t_start_i < self.t_start_j < self.t_stop_i) \
or (self.t_start_i < self.t_stop_j < self.t_stop_i):
raise ValueError(msg)
# Compute the binned spike train matrices, along both time axes
self.spiketrains_binned_i = conv.BinnedSpikeTrain(
self.spiketrains_i, bin_size=self.bin_size,
t_start=self.t_start_i, t_stop=self.t_stop_i)
self.spiketrains_binned_j = conv.BinnedSpikeTrain(
self.spiketrains_j, bin_size=self.bin_size,
t_start=self.t_start_j, t_stop=self.t_stop_j)
@property
def x_edges(self):
"""
A Quantity array of `n+1` edges of the bins used for the horizontal
axis of the intersection matrix, where `n` is the number of bins that
time was discretized in.
"""
return self.spiketrains_binned_i.bin_edges.rescale(self.bin_size.units)
@property
def y_edges(self):
"""
A Quantity array of `n+1` edges of the bins used for the vertical axis
of the intersection matrix, where `n` is the number of bins that
time was discretized in.
"""
return self.spiketrains_binned_j.bin_edges.rescale(self.bin_size.units)
def is_symmetric(self):
"""
Returns
-------
bool
Whether the intersection matrix is symmetric or not.
See Also
--------
ASSET.intersection_matrix
"""
return _quantities_almost_equal(self.x_edges[0], self.y_edges[0])
def intersection_matrix(self, normalization=None):
"""
Generates the intersection matrix from a list of spike trains.
Given a list of `neo.SpikeTrain`, consider two binned versions of them
differing for the starting and ending times of the binning:
`t_start_x`, `t_stop_x`, `t_start_y` and `t_stop_y` respectively (the
time intervals can be either identical or completely disjoint). Then
calculate the intersection matrix `M` of the two binned data, where
`M[i,j]` is the overlap of bin `i` in the first binned data and bin `j`
in the second binned data (i.e., the number of spike trains spiking at
both bin `i` and bin `j`).
The matrix entries can be normalized to values between `0` and `1` via
different normalizations (see "Parameters" section).
Parameters
----------
normalization : {'intersection', 'mean', 'union'} or None, optional
The normalization type to be applied to each entry `M[i,j]` of the
intersection matrix `M`. Given the sets `s_i` and `s_j` of neuron
IDs in the bins `i` and `j` respectively, the normalization
coefficient can be:
            * None: no normalisation (raw counts)
* 'intersection': `len(intersection(s_i, s_j))`
* 'mean': `sqrt(len(s_1) * len(s_2))`
* 'union': `len(union(s_i, s_j))`
Default: None
Returns
-------
imat : (n,n) np.ndarray
The floating point intersection matrix of a list of spike trains.
It has the shape `(n, n)`, where `n` is the number of bins that
time was discretized in.
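        Examples
        --------
        A minimal sketch, assuming `spiketrains` is a list of
        `neo.SpikeTrain` objects (e.g. generated as in the module-level
        example); the normalization choice is illustrative only:

        >>> asset_obj = ASSET(spiketrains, bin_size=3 * pq.ms)  # doctest: +SKIP
        >>> imat = asset_obj.intersection_matrix(
        ...     normalization='mean')  # doctest: +SKIP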
"""
imat = _intersection_matrix(self.spiketrains_i, self.spiketrains_j,
self.bin_size,
self.t_start_i, self.t_start_j,
self.t_stop_i, self.t_stop_j,
normalization=normalization)
return imat
def probability_matrix_montecarlo(self, n_surrogates, imat=None,
surrogate_method='dither_spikes',
surrogate_dt=None):
"""
Given a list of parallel spike trains, estimate the cumulative
probability of each entry in their intersection matrix by a Monte Carlo
approach using surrogate data.
Contrarily to the analytical version (see
:func:`ASSET.probability_matrix_analytical`) the Monte Carlo one does
not incorporate the assumptions of Poissonianity in the null
hypothesis.
The method produces surrogate spike trains (using one of several
methods at disposal, see "Parameters" section) and calculates their
intersection matrix `M`. For each entry `(i, j)`, the intersection CDF
`P[i, j]` is then given by:
.. centered:: P[i, j] = #(spike_train_surrogates such that
M[i, j] < I[i, j]) / #(spike_train_surrogates)
If `P[i, j]` is large (close to 1), `I[i, j]` is statistically
significant: the probability to observe an overlap equal to or larger
than `I[i, j]` under the null hypothesis is `1 - P[i, j]`, very small.
Parameters
----------
n_surrogates : int
The number of spike train surrogates to generate for the bootstrap
procedure.
imat : (n,n) np.ndarray or None, optional
The floating point intersection matrix of a list of spike trains.
It has the shape `(n, n)`, where `n` is the number of bins that
time was discretized in.
If None, the output of :func:`ASSET.intersection_matrix` is used.
Default: None
surrogate_method : {'dither_spike_train', 'dither_spikes',
'jitter_spikes',
'randomise_spikes', 'shuffle_isis',
'joint_isi_dithering'}, optional
The method to generate surrogate spike trains. Refer to the
:func:`spike_train_surrogates.surrogates` documentation for more
information about each surrogate method. Note that some of these
methods need `surrogate_dt` parameter, others ignore it.
            Default: 'dither_spikes'
surrogate_dt : pq.Quantity, optional
For surrogate methods shifting spike times randomly around their
original time ('dither_spike_train', 'dither_spikes') or replacing
them randomly within a certain window ('jitter_spikes'),
`surrogate_dt` represents the size of that shift (window). For
other methods, `surrogate_dt` is ignored.
If None, it's set to `self.bin_size * 5`.
Default: None
Returns
-------
pmat : np.ndarray
The cumulative probability matrix. `pmat[i, j]` represents the
estimated probability of having an overlap between bins `i` and `j`
STRICTLY LOWER than the observed overlap, under the null hypothesis
of independence of the input spike trains.
Notes
-----
We recommend playing with `surrogate_dt` parameter to see how it
influences the result matrix. For this, refer to the ASSET tutorial.
See Also
--------
ASSET.probability_matrix_analytical : analytical derivation of the
matrix
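        Examples
        --------
        A minimal sketch, assuming `asset_obj` is an `ASSET` instance built
        from a list of `neo.SpikeTrain` objects; the number of surrogates
        and the dither window are illustrative only (realistic analyses
        typically need many more surrogates):

        >>> pmat = asset_obj.probability_matrix_montecarlo(
        ...     n_surrogates=100,
        ...     surrogate_method='dither_spikes',
        ...     surrogate_dt=15 * pq.ms)  # doctest: +SKIP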
"""
if imat is None:
# Compute the intersection matrix of the original data
imat = self.intersection_matrix()
if surrogate_dt is None:
surrogate_dt = self.bin_size * 5
symmetric = self.is_symmetric()
# Generate surrogate spike trains as a list surrs
# Compute the p-value matrix pmat; pmat[i, j] counts the fraction of
        # surrogate data whose intersection value at (i, j) is strictly
        # lower than that of the original data
pmat = np.zeros(imat.shape, dtype=np.int32)
for surr_id in trange(n_surrogates, desc="pmat_bootstrap",
disable=not self.verbose):
if mpi_accelerated and surr_id % size != rank:
continue
surrogates = [spike_train_surrogates.surrogates(
st, n_surrogates=1,
method=surrogate_method,
dt=surrogate_dt,
decimals=None,
edges=True)[0]
for st in self.spiketrains_i]
if symmetric:
surrogates_y = surrogates
else:
surrogates_y = [spike_train_surrogates.surrogates(
st, n_surrogates=1, method=surrogate_method,
dt=surrogate_dt, decimals=None, edges=True)[0]
for st in self.spiketrains_j]
imat_surr = _intersection_matrix(surrogates, surrogates_y,
self.bin_size,
self.t_start_i, self.t_start_j,
self.t_stop_i, self.t_stop_j)
pmat += (imat_surr <= (imat - 1))
del imat_surr
if mpi_accelerated:
pmat = comm.allreduce(pmat, op=MPI.SUM)
pmat = pmat * 1. / n_surrogates
if symmetric:
np.fill_diagonal(pmat, 0.5)
return pmat
def probability_matrix_analytical(self, imat=None,
firing_rates_x='estimate',
firing_rates_y='estimate',
kernel_width=100 * pq.ms):
r"""
Given a list of spike trains, approximates the cumulative probability
of each entry in their intersection matrix.
The approximation is analytical and works under the assumptions that
the input spike trains are independent and Poisson. It works as
follows:
* Bin each spike train at the specified `bin_size`: this yields a
binary array of 1s (spike in bin) and 0s (no spike in bin;
clipping used);
* If required, estimate the rate profile of each spike train by
convolving the binned array with a boxcar kernel of user-defined
length;
* For each neuron `k` and each pair of bins `i` and `j`, compute
          the probability :math:`p_{ijk}` that neuron `k` fired in both bins
`i` and `j`.
* Approximate the probability distribution of the intersection
value at `(i, j)` by a Poisson distribution with mean parameter
          :math:`l = \sum_k p_{ijk}`,
justified by Le Cam's approximation of a sum of independent
          Bernoulli random variables with a Poisson distribution.
Parameters
----------
imat : (n,n) np.ndarray or None, optional
The intersection matrix of a list of spike trains.
It has the shape `(n, n)`, where `n` is the number of bins that
time was discretized in.
If None, the output of :func:`ASSET.intersection_matrix` is used.
Default: None
firing_rates_x, firing_rates_y : list of neo.AnalogSignal or 'estimate'
If a list, `firing_rates[i]` is the firing rate of the spike train
`spiketrains[i]`.
If 'estimate', firing rates are estimated by simple boxcar kernel
convolution, with the specified `kernel_width`.
Default: 'estimate'
kernel_width : pq.Quantity, optional
The total width of the kernel used to estimate the rate profiles
when `firing_rates` is 'estimate'.
Default: 100 * pq.ms
Returns
-------
pmat : np.ndarray
The cumulative probability matrix. `pmat[i, j]` represents the
estimated probability of having an overlap between bins `i` and `j`
STRICTLY LOWER than the observed overlap, under the null hypothesis
of independence of the input spike trains.
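        Examples
        --------
        A minimal sketch, assuming `asset_obj` is an `ASSET` instance; the
        kernel width is illustrative only:

        >>> pmat = asset_obj.probability_matrix_analytical(
        ...     kernel_width=50 * pq.ms)  # doctest: +SKIP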
"""
if imat is None:
# Compute the intersection matrix of the original data
imat = self.intersection_matrix()
symmetric = self.is_symmetric()
bsts_x_matrix = self.spiketrains_binned_i.to_bool_array()
if symmetric:
bsts_y_matrix = bsts_x_matrix
else:
bsts_y_matrix = self.spiketrains_binned_j.to_bool_array()
# Check that the nr. neurons is identical between the two axes
if bsts_x_matrix.shape[0] != bsts_y_matrix.shape[0]:
raise ValueError(
'Different number of neurons along the x and y axis!')
# Define the firing rate profiles
if firing_rates_x == 'estimate':
# If rates are to be estimated, create the rate profiles as
# Quantity objects obtained by boxcar-kernel convolution
fir_rate_x = self._rate_of_binned_spiketrain(bsts_x_matrix,
kernel_width)
elif isinstance(firing_rates_x, list):
# If rates provided as lists of AnalogSignals, create time slices
# for both axes, interpolate in the time bins of interest and
# convert to Quantity
fir_rate_x = _interpolate_signals(
firing_rates_x, self.spiketrains_binned_i.bin_edges[:-1],
self.verbose)
else:
raise ValueError(
'fir_rates_x must be a list or the string "estimate"')
if symmetric:
fir_rate_y = fir_rate_x
elif firing_rates_y == 'estimate':
fir_rate_y = self._rate_of_binned_spiketrain(bsts_y_matrix,
kernel_width)
elif isinstance(firing_rates_y, list):
# If rates provided as lists of AnalogSignals, create time slices
# for both axes, interpolate in the time bins of interest and
# convert to Quantity
fir_rate_y = _interpolate_signals(
firing_rates_y, self.spiketrains_binned_j.bin_edges[:-1],
self.verbose)
else:
raise ValueError(
'fir_rates_y must be a list or the string "estimate"')
# For each neuron, compute the prob. that that neuron spikes in any bin
if self.verbose:
print('compute the prob. that each neuron fires in each pair of '
'bins...')
rate_bins_x = (fir_rate_x * self.bin_size).simplified.magnitude
spike_probs_x = 1. - np.exp(-rate_bins_x)
if symmetric:
spike_probs_y = spike_probs_x
else:
rate_bins_y = (fir_rate_y * self.bin_size).simplified.magnitude
spike_probs_y = 1. - np.exp(-rate_bins_y)
# Compute the matrix Mu[i, j] of parameters for the Poisson
# distributions which describe, at each (i, j), the approximated
# overlap probability. This matrix is just the sum of the probability
# matrices p_ijk computed for each neuron k:
# p_ijk is the probability that neuron k spikes in both bins i and j.
# The sum of outer products is equivalent to a dot product.
if self.verbose:
print(
"compute the probability matrix by Le Cam's approximation...")
Mu = spike_probs_x.T.dot(spike_probs_y)
# A straightforward implementation is:
# pmat_shape = spike_probs_x.shape[1], spike_probs_y.shape[1]
# Mu = np.zeros(pmat_shape, dtype=np.float64)
# for probx, proby in zip(spike_probs_x, spike_probs_y):
# Mu += np.outer(probx, proby)
# Compute the probability matrix obtained from imat using the Poisson
# pdfs
pmat = scipy.stats.poisson.cdf(imat - 1, Mu)
if symmetric:
# Substitute 0.5 to the elements along the main diagonal
if self.verbose:
print("substitute 0.5 to elements along the main diagonal...")
np.fill_diagonal(pmat, 0.5)
return pmat
def joint_probability_matrix(self, pmat, filter_shape, n_largest,
min_p_value=1e-5, precision='float',
cuda_threads=64, cuda_cwr_loops=32,
tolerance=1e-5):
"""
Map a probability matrix `pmat` to a joint probability matrix `jmat`,
where `jmat[i, j]` is the joint p-value of the largest neighbors of
`pmat[i, j]`.
The values of `pmat` are assumed to be uniformly distributed in the
        range [0, 1]. A rectangular kernel of shape `filter_shape=(l, w)` is
        centered on each entry `pmat[i, j]` and aligned along the diagonal
        on which `pmat[i, j]` lies; the `n_largest` values falling within
        the kernel are extracted and their joint p-value `jmat[i, j]` is
        computed.
Parameters
----------
pmat : np.ndarray
A square matrix, the output of
:func:`ASSET.probability_matrix_montecarlo` or
:func:`ASSET.probability_matrix_analytical`, of cumulative
probability values between 0 and 1. The values are assumed
to be uniformly distributed in the said range.
filter_shape : tuple of int
A pair of integers representing the kernel shape `(l, w)`.
n_largest : int
The number of the largest neighbors to collect for each entry in
`jmat`.
min_p_value : float, optional
The minimum p-value in range `[0, 1)` for individual entries in
`pmat`. Each `pmat[i, j]` is set to
`min(pmat[i, j], 1-p_value_min)` to avoid that a single highly
significant value in `pmat` (extreme case: `pmat[i, j] = 1`) yields
joint significance of itself and its neighbors.
Default: 1e-5
precision : {'float', 'double'}, optional
Single or double floating-point precision for the resulting `jmat`
matrix.
* `'float'`: 32 bits; the tolerance error is ``≲1e-3``.
* `'double'`: 64 bits; the tolerance error is ``<1e-5``.
Double floating-point precision is typically x4 times slower than
the single floating-point equivalent.
Default: 'float'
cuda_threads : int, optional
[CUDA/OpenCL performance parameter that does not influence the
result.]
            The number of CUDA/OpenCL threads per block (in X axis), between
            1 and 1024; used only if the CUDA or OpenCL backend is enabled.
For performance reasons, it should be a multiple of 32.
Old GPUs (Tesla K80) perform faster with `cuda_threads` larger
than 64 while new series (Tesla T4) with capabilities 6.x and more
work best with 32 threads.
Default: 64
cuda_cwr_loops : int, optional
[CUDA/OpenCL performance parameter that does not influence the
result.]
A positive integer that defines the number of fast
'combinations_with_replacement' loops to run to reduce branch
divergence. This parameter influences the performance when the
number of iterations is huge (`>1e8`); in such cases, increase
the value.
Default: 32
tolerance : float, optional
Tolerance is used to catch unexpected behavior of billions of
floating point additions, when the number of iterations is huge
or the data arrays are large. A warning is thrown when the
resulting joint prob. matrix values are outside of the acceptable
range ``[-tolerance, 1.0 + tolerance]``.
Default: 1e-5
Returns
-------
jmat : np.ndarray
The joint probability matrix associated to `pmat`.
Notes
-----
1. By default, if CUDA is detected, CUDA acceleration is used. CUDA
backend is **~X1000** faster than the Python implementation.
           To turn off CUDA features, set the environment flag
           ``ELEPHANT_USE_CUDA`` to ``0``.
        2. Otherwise, if PyOpenCL is installed and detected, the PyOpenCL
           backend is used.
PyOpenCL backend is **~X100** faster than the Python implementation.
To turn off OpenCL features, set the environment flag
``ELEPHANT_USE_OPENCL`` to ``0``.
When using PyOpenCL backend, make sure you've disabled GPU Hangcheck
as described in the `Intel GPU developers documentation
<https://software.intel.com/content/www/us/en/develop/
documentation/get-started-with-intel-oneapi-base-linux/top/
before-you-begin.html>`_. Do it with caution - using your built-in
Intel graphics card to perform computations may make the system
unresponsive until the compute program terminates.
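        Examples
        --------
        A minimal sketch, assuming `asset_obj` is an `ASSET` instance and
        `pmat` the output of one of the probability-matrix methods; the
        filter shape and `n_largest` values are illustrative only:

        >>> jmat = asset_obj.joint_probability_matrix(
        ...     pmat, filter_shape=(11, 3), n_largest=3)  # doctest: +SKIP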
"""
l, w = filter_shape
# Find for each P_ij in the probability matrix its neighbors and
# maximize them by the maximum value 1-p_value_min
pmat = np.asarray(pmat, dtype=np.float32)
pmat_neighb_obj = _PMatNeighbors(filter_shape=filter_shape,
n_largest=n_largest)
pmat_neighb = pmat_neighb_obj.compute(pmat)
pmat_neighb = np.minimum(pmat_neighb, 1. - min_p_value,
out=pmat_neighb)
# in order to avoid doing the same calculation multiple times:
# find all unique sets of values in pmat_neighb
# and store the corresponding indices
# flatten the second and third dimension in order to use np.unique
pmat_neighb = pmat_neighb.reshape(pmat.size, n_largest)
pmat_neighb, pmat_neighb_indices = np.unique(pmat_neighb, axis=0,
return_inverse=True)
# Compute the joint p-value matrix jpvmat
n = l * (1 + 2 * w) - w * (
w + 1) # number of entries covered by kernel
jsf = _JSFUniformOrderStat3D(n=n, d=pmat_neighb.shape[1],
precision=precision,
verbose=self.verbose,
cuda_threads=cuda_threads,
cuda_cwr_loops=cuda_cwr_loops,
tolerance=tolerance)
jpvmat = jsf.compute(u=pmat_neighb)
# restore the original shape using the stored indices
jpvmat = jpvmat[pmat_neighb_indices].reshape(pmat.shape)
return 1. - jpvmat
@staticmethod
def mask_matrices(matrices, thresholds):
"""
Given a list of `matrices` and a list of `thresholds`, return a boolean
matrix `B` ("mask") such that `B[i,j]` is True if each input matrix in
the list strictly exceeds the corresponding threshold at that position.
        If multiple matrices are passed along with only one threshold, the
        same threshold is applied to all matrices.
Parameters
----------
matrices : list of np.ndarray
The matrices which are compared to the respective thresholds to
build the mask. All matrices must have the same shape.
Typically, it is a list `[pmat, jmat]`, i.e., the (cumulative)
probability and joint probability matrices.
thresholds : float or list of float
The significance thresholds for each matrix in `matrices`.
Returns
-------
mask : np.ndarray
Boolean mask matrix with the shape of the input matrices.
Raises
------
ValueError
If `matrices` or `thresholds` is an empty list.
If `matrices` and `thresholds` have different lengths.
See Also
--------
ASSET.probability_matrix_montecarlo : for `pmat` generation
ASSET.probability_matrix_analytical : for `pmat` generation
ASSET.joint_probability_matrix : for `jmat` generation
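        Examples
        --------
        A small sketch on a toy matrix (the values and the threshold are
        illustrative only):

        >>> import numpy as np
        >>> mat = np.array([[0.1, 0.97], [0.99, 0.5]])
        >>> mask = ASSET.mask_matrices([mat], thresholds=0.95)
        >>> mask.tolist()
        [[False, True], [True, False]]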
"""
if len(matrices) == 0:
raise ValueError("Empty list of matrices")
if isinstance(thresholds, float):
thresholds = np.full(shape=len(matrices), fill_value=thresholds)
if len(matrices) != len(thresholds):
raise ValueError(
'`matrices` and `thresholds` must have same length')
mask = np.ones_like(matrices[0], dtype=bool)
for (mat, thresh) in zip(matrices, thresholds):
mask &= mat > thresh
# Replace nans, coming from False * np.inf, with zeros
mask[np.isnan(mask)] = False
return mask
@staticmethod
def cluster_matrix_entries(mask_matrix, max_distance, min_neighbors,
stretch, working_memory=None):
r"""
Given a matrix `mask_matrix`, replaces its positive elements with
integers representing different cluster IDs. Each cluster comprises
close-by elements.
In ASSET analysis, `mask_matrix` is a thresholded ("masked") version
of the intersection matrix `imat`, whose values are those of `imat`
only if considered statistically significant, and zero otherwise.
A cluster is built by pooling elements according to their distance,
via the DBSCAN algorithm (see `sklearn.cluster.DBSCAN` class). Elements
form a neighbourhood if at least one of them has a distance not larger
than `max_distance` from the others, and if they are at least
`min_neighbors`. Overlapping neighborhoods form a cluster:
* Clusters are assigned integers from `1` to the total number `k`
of clusters;
* Unclustered ("isolated") positive elements of `mask_matrix` are
assigned value `-1`;
* Non-positive elements are assigned the value `0`.
The distance between the positions of two positive elements in
        `mask_matrix` is given by a Euclidean metric which is stretched the
        more the two positions deviate from the 45 degree direction (the
        main diagonal direction), with maximal stretching along the
        anti-diagonal. Specifically, the Euclidean distance between positions
`(i1, j1)` and `(i2, j2)` is stretched by a factor
.. math::
1 + (\mathtt{stretch} - 1.) *
\left|\sin((\pi / 4) - \theta)\right|,
where :math:`\theta` is the angle between the pixels and the 45 degree
direction. The stretching factor thus varies between 1 and `stretch`.
Parameters
----------
mask_matrix : np.ndarray
The boolean matrix, whose elements with positive values are to be
clustered. The output of :func:`ASSET.mask_matrices`.
max_distance : float
The maximum distance between two elements in `mask_matrix` to be
a part of the same neighbourhood in the DBSCAN algorithm.
min_neighbors : int
The minimum number of elements to form a neighbourhood.
stretch : float
The stretching factor of the euclidean metric for elements aligned
along the 135 degree direction (anti-diagonal). The actual
stretching increases from 1 to `stretch` as the direction of the
two elements moves from the 45 to the 135 degree direction.
`stretch` must be greater than 1.
working_memory : int or None, optional
The sought maximum memory in MiB for temporary distance matrix
chunks. When None (default), no chunking is performed. This
parameter is passed directly to
``sklearn.metrics.pairwise_distances_chunked`` function and it
            has no influence on the outcome matrix. Instead, it controls the
            memory vs. speed trade-off.
Default: None
Returns
-------
cluster_mat : np.ndarray
A matrix with the same shape of `mask_matrix`, each of whose
elements is either:
* a positive integer (cluster ID) if the element is part of a
cluster;
* `0` if the corresponding element in `mask_matrix` is
non-positive;
* `-1` if the element does not belong to any cluster.
See Also
--------
sklearn.cluster.DBSCAN
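        Examples
        --------
        A minimal sketch, assuming `mask` is the boolean output of
        :func:`ASSET.mask_matrices`; the clustering parameters are
        illustrative only:

        >>> cmat = ASSET.cluster_matrix_entries(
        ...     mask, max_distance=3, min_neighbors=3,
        ...     stretch=5)  # doctest: +SKIP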
"""
# Don't do anything if mat is identically zero
if np.all(mask_matrix == 0):
return mask_matrix
# List the significant pixels of mat in a 2-columns array
xpos_sgnf, ypos_sgnf = np.where(mask_matrix > 0)
# Compute the matrix D[i, j] of euclidean distances between pixels i
# and j
try:
D = _stretched_metric_2d(
xpos_sgnf, ypos_sgnf, stretch=stretch, ref_angle=45,
working_memory=working_memory
)
except MemoryError as err:
raise MemoryError("Set 'working_memory=100' or another value to "
"chunk the data") from err
# Cluster positions of significant pixels via dbscan
core_samples, config = dbscan(
D, eps=max_distance, min_samples=min_neighbors,
metric='precomputed')
# Construct the clustered matrix, where each element has value
# * i = 1 to k if it belongs to a cluster i,
# * 0 if it is not significant,
# * -1 if it is significant but does not belong to any cluster
cluster_mat = np.zeros_like(mask_matrix, dtype=np.int32)
cluster_mat[xpos_sgnf, ypos_sgnf] = \
config * (config == -1) + (config + 1) * (config >= 0)
return cluster_mat
def extract_synchronous_events(self, cmat, ids=None):
"""
Given a list of spike trains, a bin size, and a clustered
intersection matrix obtained from those spike trains via ASSET
analysis, extracts the sequences of synchronous events (SSEs)
corresponding to clustered elements in the cluster matrix.
Parameters
----------
cmat : (n,n) np.ndarray
The cluster matrix, the output of
:func:`ASSET.cluster_matrix_entries`.
ids : list, optional
A list of spike train IDs. If provided, `ids[i]` is the identity
of `spiketrains[i]`. If None, the IDs `0,1,...,n-1` are used.
Default: None
Returns
-------
sse_dict : dict
A dictionary `D` of SSEs, where each SSE is a sub-dictionary `Dk`,
`k=1,...,K`, where `K` is the max positive integer in `cmat` (i.e.,
the total number of clusters in `cmat`):
.. centered:: D = {1: D1, 2: D2, ..., K: DK}
Each sub-dictionary `Dk` represents the k-th diagonal structure
(i.e., the k-th cluster) in `cmat`, and is of the form
.. centered:: Dk = {(i1, j1): S1, (i2, j2): S2, ..., (iL, jL): SL}.
The keys `(i, j)` represent the positions (time bin IDs) of all
            elements in `cmat` that compose the SSE (i.e., that take value `k`
and therefore belong to the same cluster), and the values `Sk` are
sets of neuron IDs representing a repeated synchronous event (i.e.,
spiking at time bins `i` and `j`).
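        Examples
        --------
        A minimal sketch, assuming `asset_obj` is an `ASSET` instance and
        `cmat` the output of :func:`ASSET.cluster_matrix_entries` computed
        from the same spike trains:

        >>> sses = asset_obj.extract_synchronous_events(cmat)  # doctest: +SKIP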
"""
nr_worms = cmat.max() # number of different clusters ("worms") in cmat
if nr_worms <= 0:
return {}
# Compute the transactions associated to the two binnings
tracts_x = _transactions(
self.spiketrains_i, bin_size=self.bin_size, t_start=self.t_start_i,
t_stop=self.t_stop_i,
ids=ids)
if self.spiketrains_j is self.spiketrains_i:
diag_id = 0
tracts_y = tracts_x
else:
if self.is_symmetric():
diag_id = 0
tracts_y = tracts_x
else:
diag_id = None
tracts_y = _transactions(
self.spiketrains_j, bin_size=self.bin_size,
t_start=self.t_start_j, t_stop=self.t_stop_j, ids=ids)
# Reconstruct each worm, link by link
sse_dict = {}
for k in range(1, nr_worms + 1): # for each worm
# worm k is a list of links (each link will be 1 sublist)
worm_k = {}
pos_worm_k = np.array(
np.where(cmat == k)).T # position of all links
# if no link lies on the reference diagonal
if all([y - x != diag_id for (x, y) in pos_worm_k]):
for bin_x, bin_y in pos_worm_k: # for each link
# reconstruct the link
link_l = set(tracts_x[bin_x]).intersection(
tracts_y[bin_y])
# and assign it to its pixel
worm_k[(bin_x, bin_y)] = link_l
sse_dict[k] = worm_k
return sse_dict
def _rate_of_binned_spiketrain(self, binned_spiketrains, kernel_width):
"""
Calculate the rate of binned spiketrains using convolution with
a boxcar kernel.
"""
if self.verbose:
print('compute rates by boxcar-kernel convolution...')
# Create the boxcar kernel and convolve it with the binned spike trains
k = int((kernel_width / self.bin_size).simplified.item())
kernel = np.full(k, fill_value=1. / k)
rate = np.vstack([np.convolve(bst, kernel, mode='same')
for bst in binned_spiketrains])
# The convolution results in an array decreasing at the borders due
# to absence of spikes beyond the borders. Replace the first and last
# (k//2) elements with the (k//2)-th / (n-k//2)-th ones, respectively
k2 = k // 2
for i in range(rate.shape[0]):
rate[i, :k2] = rate[i, k2]
rate[i, -k2:] = rate[i, -k2 - 1]
# Multiply the firing rates by the proper unit
rate = rate * (1. / self.bin_size).rescale('Hz')
return rate
|
bsd-3-clause
|
tronlogistics/micro
|
flask/lib/python3.4/site-packages/pip/commands/show.py
|
344
|
2767
|
import os
from pip.basecommand import Command
from pip.log import logger
from pip._vendor import pkg_resources
class ShowCommand(Command):
"""Show information about one or more installed packages."""
name = 'show'
usage = """
%prog [options] <package> ..."""
summary = 'Show information about installed packages.'
def __init__(self, *args, **kw):
super(ShowCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-f', '--files',
dest='files',
action='store_true',
default=False,
help='Show the full list of installed files for each package.')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
logger.warn('ERROR: Please provide a package name or names.')
return
query = args
results = search_packages_info(query)
print_results(results, options.files)
def search_packages_info(query):
"""
Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Listing installed files requires
    a pip-generated 'installed-files.txt' in the distribution's '.egg-info'
    directory.
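    A hypothetical usage sketch (the package name is illustrative only)::

        for pkg in search_packages_info(['pip']):
            print(pkg['name'], pkg['version'])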
"""
installed_packages = dict(
[(p.project_name.lower(), p) for p in pkg_resources.working_set])
for name in query:
normalized_name = name.lower()
if normalized_name in installed_packages:
dist = installed_packages[normalized_name]
package = {
'name': dist.project_name,
'version': dist.version,
'location': dist.location,
'requires': [dep.project_name for dep in dist.requires()],
}
filelist = os.path.join(
dist.location,
dist.egg_name() + '.egg-info',
'installed-files.txt')
if os.path.isfile(filelist):
package['files'] = filelist
yield package
def print_results(distributions, list_all_files):
"""
    Print the information from the installed distributions found.
"""
for dist in distributions:
logger.notify("---")
logger.notify("Name: %s" % dist['name'])
logger.notify("Version: %s" % dist['version'])
logger.notify("Location: %s" % dist['location'])
logger.notify("Requires: %s" % ', '.join(dist['requires']))
if list_all_files:
logger.notify("Files:")
if 'files' in dist:
for line in open(dist['files']):
logger.notify(" %s" % line.strip())
else:
logger.notify("Cannot locate installed-files.txt")
|
bsd-3-clause
|
3nids/QGIS
|
tests/src/python/test_qgsdelimitedtextprovider_wanted.py
|
12
|
73024
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgsdelimitedtextprovider_wanted.py
---------------------
Date : May 2013
Copyright : (C) 2013 by Chris Crook
Email : ccrook at linz dot govt dot nz
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Chris Crook'
__date__ = 'May 2013'
__copyright__ = '(C) 2013, Chris Crook'
def test_002_load_csv_file():
wanted = {}
wanted['uri'] = 'file://test.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Basic unquoted record',
'data': 'Some data',
'info': 'Some info',
'field_5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Quoted field',
'data': 'Quoted data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
5: {
'id': '4',
'description': 'Quoted newlines',
'data': 'Line 1\nLine 2\n\nLine 4',
'info': 'No data',
'field_5': 'NULL',
'#fid': 5,
'#geometry': 'None',
},
9: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
10: {
'id': '6',
'description': 'Missing fields',
'data': 'NULL',
'info': 'NULL',
'field_5': 'NULL',
'#fid': 10,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_003_field_naming():
wanted = {}
wanted['uri'] = 'file://testfields.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Generation of field names',
'data': 'Some data',
'field_4': 'Some info',
'data_2': 'NULL',
'28': 'NULL',
'24.5': 'NULL',
'field_3_1': 'NULL',
'data_1': 'NULL',
'field_10': 'NULL',
'field_11': 'NULL',
'field_12': 'last data',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_004_max_fields():
wanted = {}
wanted['uri'] = 'file://testfields.csv?geomType=none&maxFields=7&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Generation of field names',
'data': 'Some data',
'field_4': 'Some info',
'data_1': 'NULL',
'28': 'NULL',
'24.5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_005_load_whitespace():
wanted = {}
wanted['uri'] = 'file://test.space?geomType=none&type=whitespace'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Simple_whitespace_file',
'data': 'data1',
'info': 'info1',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Whitespace_at_start_of_line',
'data': 'data2',
'info': 'info2',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Tab_whitespace',
'data': 'data3',
'info': 'info3',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
5: {
'id': '4',
'description': 'Multiple_whitespace_characters',
'data': 'data4',
'info': 'info4',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 5,
'#geometry': 'None',
},
6: {
'id': '5',
'description': 'Extra_fields',
'data': 'data5',
'info': 'info5',
'field_5': 'message5',
'field_6': 'rubbish5',
'#fid': 6,
'#geometry': 'None',
},
7: {
'id': '6',
'description': 'Missing_fields',
'data': 'NULL',
'info': 'NULL',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 7,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_006_quote_escape():
wanted = {}
wanted['uri'] = 'file://test.pipe?geomType=none"e="&delimiter=|&escape=\\'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Using pipe delimiter',
'data': 'data 1',
'info': 'info 1',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Using backslash escape on pipe',
'data': 'data 2 | piped',
'info': 'info2',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Backslash escaped newline',
'data': 'data3 \nline2 \nline3',
'info': 'info3',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
7: {
'id': '4',
'description': 'Empty field',
'data': 'NULL',
'info': 'info4',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 7,
'#geometry': 'None',
},
8: {
'id': '5',
'description': 'Quoted field',
'data': 'More | piped data',
'info': 'info5',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 8,
'#geometry': 'None',
},
9: {
'id': '6',
'description': 'Escaped quote',
'data': 'Field "citation" ',
'info': 'info6',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 9,
'#geometry': 'None',
},
10: {
'id': '7',
'description': 'Missing fields',
'data': 'NULL',
'info': 'NULL',
'field_5': 'NULL',
'field_6': 'NULL',
'#fid': 10,
'#geometry': 'None',
},
11: {
'id': '8',
'description': 'Extra fields',
'data': 'data8',
'info': 'info8',
'field_5': 'message8',
'field_6': 'more',
'#fid': 11,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_007_multiple_quote():
wanted = {}
wanted['uri'] = 'file://test.quote?geomType=none"e=\'"&type=csv&escape="\''
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Multiple quotes 1',
'data': 'Quoted,data1',
'info': 'info1',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Multiple quotes 2',
'data': 'Quoted,data2',
'info': 'info2',
'#fid': 3,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Leading and following whitespace',
'data': 'Quoted, data3',
'info': 'info3',
'#fid': 4,
'#geometry': 'None',
},
5: {
'id': '4',
'description': 'Embedded quotes 1',
'data': 'Quoted \'\'"\'\' data4',
'info': 'info4',
'#fid': 5,
'#geometry': 'None',
},
6: {
'id': '5',
'description': 'Embedded quotes 2',
'data': 'Quoted \'""\' data5',
'info': 'info5',
'#fid': 6,
'#geometry': 'None',
},
10: {
'id': '9',
'description': 'Final record',
'data': 'date9',
'info': 'info9',
'#fid': 10,
'#geometry': 'None',
},
}
wanted['log'] = [
'Errors in file test.quote',
'3 records discarded due to invalid format',
'The following lines were not loaded into QGIS due to errors:',
'Invalid record format at line 7',
'Invalid record format at line 8',
'Invalid record format at line 9',
]
return wanted
def test_008_badly_formed_quotes():
wanted = {}
wanted['uri'] = 'file://test.badquote?geomType=none"e="&type=csv&escape="'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
4: {
'id': '3',
'description': 'Recovered after unclosed quore',
'data': 'Data ok',
'info': 'inf3',
'#fid': 4,
'#geometry': 'None',
},
}
wanted['log'] = [
'Errors in file test.badquote',
'2 records discarded due to invalid format',
'The following lines were not loaded into QGIS due to errors:',
'Invalid record format at line 2',
'Invalid record format at line 5',
]
return wanted
def test_009_skip_lines():
wanted = {}
wanted['uri'] = 'file://test2.csv?geomType=none&skipLines=2&type=csv&useHeader=no'
wanted['fieldTypes'] = ['integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
3: {
'id': '3',
'description': 'Less data',
'field_1': '3',
'field_2': 'Less data',
'field_3': 'data3',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_010_read_coordinates():
wanted = {}
wanted['uri'] = 'file://testpt.csv?yField=geom_y&xField=geom_x&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'double', 'double']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'Basic point',
'geom_x': '10.5',
'geom_y': '20.82',
'#fid': 2,
'#geometry': 'Point (10.5 20.82)',
},
3: {
'id': '2',
'description': 'Integer point',
'geom_x': '11.0',
'geom_y': '22.0',
'#fid': 3,
'#geometry': 'Point (11 22)',
},
5: {
'id': '4',
'description': 'Final point',
'geom_x': '13.0',
'geom_y': '23.0',
'#fid': 5,
'#geometry': 'Point (13 23)',
},
}
wanted['log'] = [
'Errors in file testpt.csv',
'1 records discarded due to invalid geometry definitions',
'The following lines were not loaded into QGIS due to errors:',
'Invalid X or Y fields at line 4',
]
return wanted
def test_011_read_wkt():
wanted = {}
wanted['uri'] = 'file://testwkt.csv?delimiter=|&type=csv&wktField=geom_wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'Point wkt',
'#fid': 2,
'#geometry': 'Point (10 20)',
},
3: {
'id': '2',
'description': 'Multipoint wkt',
'#fid': 3,
'#geometry': 'MultiPoint ((10 20),(11 21))',
},
9: {
'id': '8',
'description': 'EWKT prefix',
'#fid': 9,
'#geometry': 'Point (10 10)',
},
10: {
'id': '9',
'description': 'Informix prefix',
'#fid': 10,
'#geometry': 'Point (10 10)',
},
11: {
'id': '10',
'description': 'Measure in point',
'#fid': 11,
'#geometry': 'PointM (10 20 30)',
},
}
wanted['log'] = [
'Errors in file testwkt.csv',
'1 records discarded due to invalid geometry definitions',
'10 records discarded due to incompatible geometry types',
'The following lines were not loaded into QGIS due to errors:',
'Invalid WKT at line 8',
]
return wanted
def test_012_read_wkt_point():
wanted = {}
wanted['uri'] = 'file://testwkt.csv?geomType=point&delimiter=|&type=csv&wktField=geom_wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'Point wkt',
'#fid': 2,
'#geometry': 'Point (10 20)',
},
3: {
'id': '2',
'description': 'Multipoint wkt',
'#fid': 3,
'#geometry': 'MultiPoint ((10 20),(11 21))',
},
9: {
'id': '8',
'description': 'EWKT prefix',
'#fid': 9,
'#geometry': 'Point (10 10)',
},
10: {
'id': '9',
'description': 'Informix prefix',
'#fid': 10,
'#geometry': 'Point (10 10)',
},
11: {
'id': '10',
'description': 'Measure in point',
'#fid': 11,
'#geometry': 'PointM (10 20 30)',
},
}
wanted['log'] = [
'Errors in file testwkt.csv',
'1 records discarded due to invalid geometry definitions',
'10 records discarded due to incompatible geometry types',
'The following lines were not loaded into QGIS due to errors:',
'Invalid WKT at line 8',
]
return wanted
def test_013_read_wkt_line():
wanted = {}
wanted['uri'] = 'file://testwkt.csv?geomType=line&delimiter=|&type=csv&wktField=geom_wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 1
wanted['data'] = {
4: {
'id': '3',
'description': 'Linestring wkt',
'#fid': 4,
'#geometry': 'LineString (10 20, 11 21)',
},
5: {
'id': '4',
'description': 'Multiline string wkt',
'#fid': 5,
'#geometry': 'MultiLineString ((10 20, 11 21), (20 30, 21 31))',
},
12: {
'id': '11',
'description': 'Measure in line',
'#fid': 12,
'#geometry': 'LineStringM (10 20 30, 11 21 31)',
},
13: {
'id': '12',
'description': 'Z in line',
'#fid': 13,
'#geometry': 'LineStringZ (10 20 30, 11 21 31)',
},
14: {
'id': '13',
'description': 'Measure and Z in line',
'#fid': 14,
'#geometry': 'LineStringZM (10 20 30 40, 11 21 31 41)',
},
15: {
'id': '14',
'description': 'CircularString',
'#fid': 15,
'#geometry': 'CircularString (268 415, 227 505, 227 406)',
},
17: {
'id': '16',
'description': 'CompoundCurve',
'#fid': 17,
'#geometry': 'CompoundCurve ((5 3, 5 13), CircularString(5 13, 7 15, 9 13), (9 13, 9 3), CircularString(9 3, 7 1, 5 3))',
},
}
wanted['log'] = [
'Errors in file testwkt.csv',
'1 records discarded due to invalid geometry definitions',
'8 records discarded due to incompatible geometry types',
'The following lines were not loaded into QGIS due to errors:',
'Invalid WKT at line 8',
]
return wanted
def test_014_read_wkt_polygon():
wanted = {}
wanted['uri'] = 'file://testwkt.csv?geomType=polygon&delimiter=|&type=csv&wktField=geom_wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 2
wanted['data'] = {
6: {
'id': '5',
'description': 'Polygon wkt',
'#fid': 6,
'#geometry': 'Polygon ((10 10,10 20,20 20,20 10,10 10),(14 14,14 16,16 16,14 14))',
},
7: {
'id': '6',
'description': 'MultiPolygon wkt',
'#fid': 7,
'#geometry': 'MultiPolygon (((10 10,10 20,20 20,20 10,10 10),(14 14,14 16,16 16,14 14)),((30 30,30 35,35 35,30 30)))',
},
16: {
'id': '15',
'description': 'CurvePolygon',
'#fid': 16,
'#geometry': 'CurvePolygon (CircularString (1 3, 3 5, 4 7, 7 3, 1 3))',
},
}
wanted['log'] = [
'Errors in file testwkt.csv',
'1 records discarded due to invalid geometry definitions',
'12 records discarded due to incompatible geometry types',
'The following lines were not loaded into QGIS due to errors:',
'Invalid WKT at line 8',
]
return wanted
def test_015_read_dms_xy():
wanted = {}
wanted['uri'] = 'file://testdms.csv?yField=lat&xField=lon&type=csv&xyDms=yes'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text']
wanted['geometryType'] = 0
wanted['data'] = {
3: {
'id': '1',
'description': 'Basic DMS string',
'lon': '1 5 30.6',
'lat': '35 51 20',
'#fid': 3,
'#geometry': 'Point (1.09183333 35.85555556)',
},
4: {
'id': '2',
'description': 'Basic DMS string 2',
'lon': '1 05 30.6005',
'lat': '035 51 20',
'#fid': 4,
'#geometry': 'Point (1.09183347 35.85555556)',
},
5: {
'id': '3',
'description': 'Basic DMS string 3',
'lon': '1 05 30.6',
'lat': '35 59 9.99',
'#fid': 5,
'#geometry': 'Point (1.09183333 35.98610833)',
},
7: {
'id': '4',
'description': 'Prefix sign 1',
'lon': 'n1 05 30.6',
'lat': 'e035 51 20',
'#fid': 7,
'#geometry': 'Point (1.09183333 35.85555556)',
},
8: {
'id': '5',
'description': 'Prefix sign 2',
'lon': 'N1 05 30.6',
'lat': 'E035 51 20',
'#fid': 8,
'#geometry': 'Point (1.09183333 35.85555556)',
},
9: {
'id': '6',
'description': 'Prefix sign 3',
'lon': 'N 1 05 30.6',
'lat': 'E 035 51 20',
'#fid': 9,
'#geometry': 'Point (1.09183333 35.85555556)',
},
10: {
'id': '7',
'description': 'Prefix sign 4',
'lon': 'S1 05 30.6',
'lat': 'W035 51 20',
'#fid': 10,
'#geometry': 'Point (-1.09183333 -35.85555556)',
},
11: {
'id': '8',
'description': 'Prefix sign 5',
'lon': '+1 05 30.6',
'lat': '+035 51 20',
'#fid': 11,
'#geometry': 'Point (1.09183333 35.85555556)',
},
12: {
'id': '9',
'description': 'Prefix sign 6',
'lon': '-1 05 30.6',
'lat': '-035 51 20',
'#fid': 12,
'#geometry': 'Point (-1.09183333 -35.85555556)',
},
14: {
'id': '10',
'description': 'Postfix sign 1',
'lon': '1 05 30.6n',
'lat': '035 51 20e',
'#fid': 14,
'#geometry': 'Point (1.09183333 35.85555556)',
},
15: {
'id': '11',
'description': 'Postfix sign 2',
'lon': '1 05 30.6N',
'lat': '035 51 20E',
'#fid': 15,
'#geometry': 'Point (1.09183333 35.85555556)',
},
16: {
'id': '12',
'description': 'Postfix sign 3',
'lon': '1 05 30.6 N',
'lat': '035 51 20 E',
'#fid': 16,
'#geometry': 'Point (1.09183333 35.85555556)',
},
17: {
'id': '13',
'description': 'Postfix sign 4',
'lon': '1 05 30.6S',
'lat': '035 51 20W',
'#fid': 17,
'#geometry': 'Point (-1.09183333 -35.85555556)',
},
18: {
'id': '14',
'description': 'Postfix sign 5',
'lon': '1 05 30.6+',
'lat': '035 51 20+',
'#fid': 18,
'#geometry': 'Point (1.09183333 35.85555556)',
},
19: {
'id': '15',
'description': 'Postfix sign 6',
'lon': '1 05 30.6-',
'lat': '035 51 20-',
'#fid': 19,
'#geometry': 'Point (-1.09183333 -35.85555556)',
},
21: {
'id': '16',
'description': 'Leading and trailing blanks 1',
'lon': ' 1 05 30.6',
'lat': '035 51 20 ',
'#fid': 21,
'#geometry': 'Point (1.09183333 35.85555556)',
},
22: {
'id': '17',
'description': 'Leading and trailing blanks 2',
'lon': ' N 1 05 30.6',
'lat': '035 51 20 E ',
'#fid': 22,
'#geometry': 'Point (1.09183333 35.85555556)',
},
24: {
'id': '18',
'description': 'Alternative characters for D,M,S',
'lon': '1d05m30.6s S',
'lat': "35d51'20",
'#fid': 24,
'#geometry': 'Point (-1.09183333 35.85555556)',
},
25: {
'id': '19',
'description': 'Degrees/minutes format',
'lon': '1 05.23',
'lat': '4 55.03',
'#fid': 25,
'#geometry': 'Point (1.08716667 4.91716667)',
},
}
wanted['log'] = [
'Errors in file testdms.csv',
'5 records discarded due to invalid geometry definitions',
'The following lines were not loaded into QGIS due to errors:',
'Invalid X or Y fields at line 27',
'Invalid X or Y fields at line 28',
'Invalid X or Y fields at line 29',
'Invalid X or Y fields at line 30',
'Invalid X or Y fields at line 31',
]
return wanted
def test_016_decimal_point():
wanted = {}
wanted['uri'] = 'file://testdp.csv?yField=geom_y&xField=geom_x&type=csv&delimiter=;&decimalPoint=,'
wanted['fieldTypes'] = ['integer', 'text', 'double', 'double', 'double', 'text']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'Comma as decimal point 1',
'geom_x': '10.0',
'geom_y': '20.0',
'other': '30.0',
'text field': 'Field with , in it',
'#fid': 2,
'#geometry': 'Point (10 20)',
},
3: {
'id': '2',
'description': 'Comma as decimal point 2',
'geom_x': '12.0',
'geom_y': '25.003',
'other': '-38.55',
'text field': 'Plain text field',
'#fid': 3,
'#geometry': 'Point (12 25.003)',
},
}
wanted['log'] = []
return wanted
def test_017_regular_expression_1():
wanted = {}
wanted['uri'] = 'file://testre.txt?geomType=none&trimFields=Y&delimiter=RE(?:GEXP)?&type=regexp'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Basic regular expression test',
'data': 'data1',
'info': 'info',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Basic regular expression test 2',
'data': 'data2',
'info': 'info2',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_018_regular_expression_2():
wanted = {}
wanted['uri'] = 'file://testre.txt?geomType=none&trimFields=Y&delimiter=(RE)((?:GEXP)?)&type=regexp'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'RE': 'RE',
'GEXP': 'GEXP',
'description': 'RE',
'RE_1': 'RE',
'GEXP_1': 'GEXP',
'data': 'data1',
'RE_2': 'RE',
'GEXP_2': 'GEXP',
'info': 'info',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'RE': 'RE',
'GEXP': 'GEXP',
'description': 'RE',
'RE_1': 'RE',
'GEXP_1': '',
'data': 'data2',
'RE_2': 'RE',
'GEXP_2': '',
'info': 'info2',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_019_regular_expression_3():
wanted = {}
wanted['uri'] = 'file://testre2.txt?geomType=none&trimFields=Y&delimiter=^(.{5})(.{30})(.{5,})&type=regexp'
wanted['fieldTypes'] = ['integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Anchored regexp',
'information': 'Some data',
'#fid': 2,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Anchored regexp recovered',
'information': 'Some data',
'#fid': 4,
'#geometry': 'None',
},
}
wanted['log'] = [
'Errors in file testre2.txt',
'1 records discarded due to invalid format',
'The following lines were not loaded into QGIS due to errors:',
'Invalid record format at line 3',
]
return wanted
def test_020_regular_expression_4():
wanted = {}
wanted['uri'] = 'file://testre3.txt?geomType=none&delimiter=x?&type=regexp'
wanted['fieldTypes'] = ['text', 'text', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': 'f',
'description': 'i',
's': 'f',
'm': 'i',
'a': '.',
'l': '.',
'l_1': 'i',
'field_6': 'l',
'field_7': 'e',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_021_regular_expression_5():
wanted = {}
wanted['uri'] = 'file://testre3.txt?geomType=none&delimiter=\\b&type=regexp'
wanted['fieldTypes'] = ['text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': 'fi',
'description': '..',
'small': 'fi',
'field_2': '..',
'field_3': 'ile',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_022_utf8_encoded_file():
wanted = {}
wanted['uri'] = 'file://testutf8.csv?geomType=none&delimiter=|&type=csv&encoding=utf-8'
wanted['fieldTypes'] = ['integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Correctly read UTF8 encoding',
'name': 'Field has \u0101cc\xe8nt\xe9d text',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_023_latin1_encoded_file():
wanted = {}
wanted['uri'] = 'file://testlatin1.csv?geomType=none&delimiter=|&type=csv&encoding=latin1'
wanted['fieldTypes'] = ['integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Correctly read latin1 encoding',
'name': 'This test is \xa9',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_024_filter_rect_xy():
wanted = {}
wanted['uri'] = 'file://testextpt.txt?yField=y&delimiter=|&type=csv&xField=x'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'integer']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
10: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
1002: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
1010: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
}
wanted['log'] = [
'Request 2 did not return any data',
]
return wanted
def test_025_filter_rect_wkt():
wanted = {}
wanted['uri'] = 'file://testextw.txt?delimiter=|&type=csv&wktField=wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 1
wanted['data'] = {
2: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
4: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
5: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
6: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
7: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
1002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
1004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
1006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
}
wanted['log'] = [
'Request 2 did not return any data',
]
return wanted
def test_026_filter_fid():
wanted = {}
wanted['uri'] = 'file://test.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
3: {
'id': '2',
'description': 'Quoted field',
'data': 'Quoted data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
1009: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
3003: {
'id': '2',
'description': 'Quoted field',
'data': 'Quoted data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = [
'Request 2 did not return any data',
]
return wanted
def test_027_filter_attributes():
wanted = {}
wanted['uri'] = 'file://test.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': 'None',
'description': 'Basic unquoted record',
'data': 'None',
'info': 'Some info',
'field_5': 'None',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': 'None',
'description': 'Quoted field',
'data': 'None',
'info': 'Unquoted',
'field_5': 'None',
'#fid': 3,
'#geometry': 'None',
},
4: {
'id': 'None',
'description': 'Escaped quotes',
'data': 'None',
'info': 'Unquoted',
'field_5': 'None',
'#fid': 4,
'#geometry': 'None',
},
5: {
'id': 'None',
'description': 'Quoted newlines',
'data': 'None',
'info': 'No data',
'field_5': 'None',
'#fid': 5,
'#geometry': 'None',
},
9: {
'id': 'None',
'description': 'Extra fields',
'data': 'None',
'info': 'info',
'field_5': 'None',
'#fid': 9,
'#geometry': 'None',
},
10: {
'id': 'None',
'description': 'Missing fields',
'data': 'None',
'info': 'NULL',
'field_5': 'None',
'#fid': 10,
'#geometry': 'None',
},
1009: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
2009: {
'id': 'None',
'description': 'Extra fields',
'data': 'None',
'info': 'info',
'field_5': 'None',
'#fid': 9,
'#geometry': 'None',
},
3009: {
'id': 'None',
'description': 'Extra fields',
'data': 'None',
'info': 'info',
'field_5': 'None',
'#fid': 9,
'#geometry': 'None',
},
4009: {
'id': 'None',
'description': 'Extra fields',
'data': 'None',
'info': 'info',
'field_5': 'None',
'#fid': 9,
'#geometry': 'None',
},
5009: {
'id': 'None',
'description': 'None',
'data': 'None',
'info': 'None',
'field_5': 'None',
'#fid': 9,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_028_substring_test():
wanted = {}
wanted['uri'] = 'file://test.csv?geomType=none&type=csv&subset=id%20%25%202%20%3D%201'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Basic unquoted record',
'data': 'Some data',
'info': 'Some info',
'field_5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
9: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_029_file_watcher():
wanted = {}
wanted['uri'] = 'file://file?geomType=none&type=csv&watchFile=yes'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
3: {
'id': '2',
'description': 'pooh',
'name': 'pooh',
'#fid': 3,
'#geometry': 'None',
},
1002: {
'id': '1',
'description': 'rabbit',
'name': 'rabbit',
'#fid': 2,
'#geometry': 'None',
},
1003: {
'id': '2',
'description': 'pooh',
'name': 'pooh',
'#fid': 3,
'#geometry': 'None',
},
4003: {
'id': '2',
'description': 'pooh',
'name': 'pooh',
'#fid': 3,
'#geometry': 'None',
},
5004: {
'id': '3',
'description': 'tiger',
'name': 'tiger',
'#fid': 4,
'#geometry': 'None',
},
6002: {
'id': '1',
'description': 'rabbit',
'name': 'rabbit',
'#fid': 2,
'#geometry': 'None',
},
6003: {
'id': '2',
'description': 'pooh',
'name': 'pooh',
'#fid': 3,
'#geometry': 'None',
},
6004: {
'id': '3',
'description': 'tiger',
'name': 'tiger',
'#fid': 4,
'#geometry': 'None',
},
9002: {
'id': '5',
'description': 'toad',
'name': 'toad',
'#fid': 2,
'#geometry': 'None',
},
10002: {
'id': '5',
'description': 'toad',
'name': 'toad',
'#fid': 2,
'#geometry': 'None',
},
10003: {
'id': '6',
'description': 'mole',
'name': 'mole',
'#fid': 3,
'#geometry': 'None',
},
10004: {
'id': '7',
'description': 'badger',
'name': 'badger',
'#fid': 4,
'#geometry': 'None',
},
16002: {
'id': '5',
'description': 'toad',
'name': 'toad',
'#fid': 2,
'#geometry': 'None',
},
}
wanted['log'] = [
'Request 2 did not return any data',
'Request 7 did not return any data',
'Request 11 did not return any data',
'Request 13 did not return any data',
'Request 14 did not return any data',
'Errors in file temp_file',
'The file has been updated by another application - reloading',
'Errors in file temp_file',
'The file has been updated by another application - reloading',
'Errors in file temp_file',
'The file has been updated by another application - reloading',
]
return wanted
def test_030_filter_rect_xy_spatial_index():
wanted = {}
wanted['uri'] = 'file://testextpt.txt?spatialIndex=Y&yField=y&delimiter=|&type=csv&xField=x'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'integer']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
10: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
1002: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
1010: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
3002: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
3003: {
'id': '2',
'description': 'Outside 1',
'x': '5',
'y': '35',
'#fid': 3,
'#geometry': 'Point (5 35)',
},
3004: {
'id': '3',
'description': 'Outside 2',
'x': '5',
'y': '55',
'#fid': 4,
'#geometry': 'Point (5 55)',
},
3005: {
'id': '4',
'description': 'Outside 3',
'x': '15',
'y': '55',
'#fid': 5,
'#geometry': 'Point (15 55)',
},
3006: {
'id': '5',
'description': 'Outside 4',
'x': '35',
'y': '55',
'#fid': 6,
'#geometry': 'Point (35 55)',
},
3007: {
'id': '6',
'description': 'Outside 5',
'x': '35',
'y': '45',
'#fid': 7,
'#geometry': 'Point (35 45)',
},
3008: {
'id': '7',
'description': 'Outside 7',
'x': '35',
'y': '25',
'#fid': 8,
'#geometry': 'Point (35 25)',
},
3009: {
'id': '8',
'description': 'Outside 8',
'x': '15',
'y': '25',
'#fid': 9,
'#geometry': 'Point (15 25)',
},
3010: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
4002: {
'id': '1',
'description': 'Inside',
'x': '15',
'y': '35',
'#fid': 2,
'#geometry': 'Point (15 35)',
},
4003: {
'id': '2',
'description': 'Outside 1',
'x': '5',
'y': '35',
'#fid': 3,
'#geometry': 'Point (5 35)',
},
4004: {
'id': '3',
'description': 'Outside 2',
'x': '5',
'y': '55',
'#fid': 4,
'#geometry': 'Point (5 55)',
},
4005: {
'id': '4',
'description': 'Outside 3',
'x': '15',
'y': '55',
'#fid': 5,
'#geometry': 'Point (15 55)',
},
4006: {
'id': '5',
'description': 'Outside 4',
'x': '35',
'y': '55',
'#fid': 6,
'#geometry': 'Point (35 55)',
},
4007: {
'id': '6',
'description': 'Outside 5',
'x': '35',
'y': '45',
'#fid': 7,
'#geometry': 'Point (35 45)',
},
4008: {
'id': '7',
'description': 'Outside 7',
'x': '35',
'y': '25',
'#fid': 8,
'#geometry': 'Point (35 25)',
},
4009: {
'id': '8',
'description': 'Outside 8',
'x': '15',
'y': '25',
'#fid': 9,
'#geometry': 'Point (15 25)',
},
4010: {
'id': '9',
'description': 'Inside 2',
'x': '25',
'y': '45',
'#fid': 10,
'#geometry': 'Point (25 45)',
},
}
wanted['log'] = [
'Request 2 did not return any data',
]
return wanted
def test_031_filter_rect_wkt_spatial_index():
wanted = {}
wanted['uri'] = 'file://testextw.txt?spatialIndex=Y&delimiter=|&type=csv&wktField=wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 1
wanted['data'] = {
2: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
4: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
5: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
6: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
7: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
1002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
1004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
1006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
3002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
3003: {
'id': '2',
'description': 'Outside',
'#fid': 3,
'#geometry': 'LineString (0 0, 0 10)',
},
3004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
3005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
3006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
3007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
4002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
4003: {
'id': '2',
'description': 'Outside',
'#fid': 3,
'#geometry': 'LineString (0 0, 0 10)',
},
4004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
4005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
4006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
4007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
}
wanted['log'] = [
'Request 2 did not return any data',
]
return wanted
def test_032_filter_rect_wkt_create_spatial_index():
wanted = {}
wanted['uri'] = 'file://testextw.txt?delimiter=|&type=csv&wktField=wkt'
wanted['fieldTypes'] = ['integer', 'text']
wanted['geometryType'] = 1
wanted['data'] = {
2: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
4: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
5: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
6: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
7: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
1002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
1003: {
'id': '2',
'description': 'Outside',
'#fid': 3,
'#geometry': 'LineString (0 0, 0 10)',
},
1004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
1005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
1006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
1007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
3002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
3004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
3005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
3006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
3007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
4002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
4004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
4006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
6002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
6003: {
'id': '2',
'description': 'Outside',
'#fid': 3,
'#geometry': 'LineString (0 0, 0 10)',
},
6004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
6005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
6006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
6007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
7002: {
'id': '1',
'description': 'Inside',
'#fid': 2,
'#geometry': 'LineString (12 32, 28 48)',
},
7003: {
'id': '2',
'description': 'Outside',
'#fid': 3,
'#geometry': 'LineString (0 0, 0 10)',
},
7004: {
'id': '3',
'description': 'Crossing',
'#fid': 4,
'#geometry': 'LineString (5 30, 30 55)',
},
7005: {
'id': '4',
'description': 'Bounding box overlap',
'#fid': 5,
'#geometry': 'LineString (5 30, 5 55, 30 55)',
},
7006: {
'id': '5',
'description': 'Crossing 2',
'#fid': 6,
'#geometry': 'LineString (25 35, 35 35)',
},
7007: {
'id': '6',
'description': 'Bounding box overlap 2',
'#fid': 7,
'#geometry': 'LineString (28 29, 31 29, 31 33)',
},
}
wanted['log'] = [
'Request 5 did not return any data',
]
return wanted
def test_033_reset_subset_string():
wanted = {}
wanted['uri'] = 'file://test.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Basic unquoted record',
'data': 'Some data',
'info': 'Some info',
'field_5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Quoted field',
'data': 'Quoted data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
4: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
5: {
'id': '4',
'description': 'Quoted newlines',
'data': 'Line 1\nLine 2\n\nLine 4',
'info': 'No data',
'field_5': 'NULL',
'#fid': 5,
'#geometry': 'None',
},
9: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
10: {
'id': '6',
'description': 'Missing fields',
'data': 'NULL',
'info': 'NULL',
'field_5': 'NULL',
'#fid': 10,
'#geometry': 'None',
},
2002: {
'id': '1',
'description': 'Basic unquoted record',
'data': 'Some data',
'info': 'Some info',
'field_5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
2004: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
2009: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
4010: {
'id': '6',
'description': 'Missing fields',
'data': 'NULL',
'info': 'NULL',
'field_5': 'NULL',
'#fid': 10,
'#geometry': 'None',
},
6004: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
8002: {
'id': '1',
'description': 'Basic unquoted record',
'data': 'Some data',
'info': 'Some info',
'field_5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
8004: {
'id': '3',
'description': 'Escaped quotes',
'data': 'Quoted "citation" data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
8009: {
'id': '5',
'description': 'Extra fields',
'data': 'data',
'info': 'info',
'field_5': 'message',
'#fid': 9,
'#geometry': 'None',
},
10003: {
'id': '2',
'description': 'Quoted field',
'data': 'Quoted data',
'info': 'Unquoted',
'field_5': 'NULL',
'#fid': 3,
'#geometry': 'None',
},
10005: {
'id': '4',
'description': 'Quoted newlines',
'data': 'Line 1\nLine 2\n\nLine 4',
'info': 'No data',
'field_5': 'NULL',
'#fid': 5,
'#geometry': 'None',
},
10010: {
'id': '6',
'description': 'Missing fields',
'data': 'NULL',
'info': 'NULL',
'field_5': 'NULL',
'#fid': 10,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_034_csvt_file():
wanted = {}
wanted['uri'] = 'file://testcsvt.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'double', 'text', 'text', 'text', 'text', 'text', 'text', 'longlong', 'longlong']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Test csvt 1',
'fint': '1',
'freal': '1.2',
'fstr': '1',
'fstr_1': 'text',
'fdatetime': '2015-03-02T12:30:00',
'fdate': '2014-12-30',
'ftime': '23:55',
'flong': '-456',
'flonglong': '-678',
'field_12': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Test csvt 2',
'fint': '3',
'freal': '1.5',
'fstr': '99',
'fstr_1': '23.5',
'fdatetime': '80',
'fdate': '2015-03-28',
'ftime': '2014-12-30',
'flong': '01:55',
'flonglong': '9189304972279762602',
'field_12': '-3123724580211819352',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_035_csvt_file2():
wanted = {}
wanted['uri'] = 'file://testcsvt2.txt?geomType=none&delimiter=|&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'double', 'integer', 'text', 'integer']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Test csvt 1',
'f1': '1',
'f2': '1.2',
'f3': '1',
'f4': 'text',
'f5': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Test csvt 2',
'f1': '3',
'f2': '1.5',
'f3': '99',
'f4': '23.5',
'f5': '80',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_036_csvt_file_invalid_types():
wanted = {}
wanted['uri'] = 'file://testcsvt3.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'double', 'integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Test csvt 1',
'f1': '1',
'f2': '1.2',
'f3': '1',
'f4': 'text',
'f5': 'times',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Test csvt 2',
'f1': '3',
'f2': '1.5',
'f3': '99',
'f4': '23.5',
'f5': '80',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = [
'Errors in file testcsvt3.csv',
'File type string in testcsvt3.csvt is not correctly formatted',
]
return wanted
def test_037_csvt_file_invalid_file():
wanted = {}
wanted['uri'] = 'file://testcsvt4.csv?geomType=none&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'double', 'integer', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Test csvt 1',
'f1': '1',
'f2': '1.2',
'f3': '1',
'f4': 'text',
'f5': 'times',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Test csvt 2',
'f1': '3',
'f2': '1.5',
'f3': '99',
'f4': '23.5',
'f5': '80',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = []
return wanted
def test_038_type_inference():
wanted = {}
wanted['uri'] = 'file://testtypes.csv?yField=lat&xField=lon&type=csv'
wanted['fieldTypes'] = ['text', 'double', 'double', 'text', 'text', 'integer', 'longlong', 'double', 'text']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': 'line1',
'description': '1.0',
'lon': '1.0',
'lat': '1.0',
'empty': 'NULL',
'text': 'NULL',
'int': '0',
'longlong': '0',
'real': 'NULL',
'text2': '1',
'#fid': 2,
'#geometry': 'Point (1 1)',
},
3: {
'id': 'line2',
'description': '1.0',
'lon': '1.0',
'lat': '5.0',
'empty': 'NULL',
'text': '1',
'int': 'NULL',
'longlong': '9189304972279762602',
'real': '1.3',
'text2': '-4',
'#fid': 3,
'#geometry': 'Point (1 5)',
},
4: {
'id': 'line3',
'description': '5.0',
'lon': '5.0',
'lat': '5.0',
'empty': 'NULL',
'text': '1xx',
'int': '2',
'longlong': '345',
'real': '2.0',
'text2': '1x',
'#fid': 4,
'#geometry': 'Point (5 5)',
},
5: {
'id': 'line4',
'description': '5.0',
'lon': '5.0',
'lat': '1.0',
'empty': 'NULL',
'text': 'A string',
'int': '-3456',
'longlong': '-3123724580211819352',
'real': '-123.56',
'text2': 'NULL',
'#fid': 5,
'#geometry': 'Point (5 1)',
},
6: {
'id': 'line5',
'description': '3.0',
'lon': '3.0',
'lat': '1.0',
'empty': 'NULL',
'text': 'NULL',
'int': 'NULL',
'longlong': 'NULL',
'real': '0.00023',
'text2': '23',
'#fid': 6,
'#geometry': 'Point (3 1)',
},
7: {
'id': 'line6',
'description': '1.0',
'lon': '1.0',
'lat': '3.0',
'empty': 'NULL',
'text': '1.5',
'int': '9',
'longlong': '42',
'real': '99.0',
'text2': '0',
'#fid': 7,
'#geometry': 'Point (1 3)',
},
}
wanted['log'] = []
return wanted
def test_039_issue_13749():
wanted = {}
wanted['uri'] = 'file://test13749.csv?yField=geom_y&xField=geom_x&type=csv'
wanted['fieldTypes'] = ['integer', 'text', 'double', 'double']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': 'No geom',
'geom_x': 'NULL',
'geom_y': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Point1',
'geom_x': '11.0',
'geom_y': '22.0',
'#fid': 3,
'#geometry': 'Point (11 22)',
},
4: {
'id': '3',
'description': 'Point2',
'geom_x': '15.0',
'geom_y': '23.0',
'#fid': 4,
'#geometry': 'Point (15 23)',
},
5: {
'id': '4',
'description': 'Point3',
'geom_x': '13.0',
'geom_y': '23.0',
'#fid': 5,
'#geometry': 'Point (13 23)',
},
}
wanted['log'] = [
'Errors in file test13749.csv',
'1 records have missing geometry definitions',
]
return wanted
def test_040_issue_14666():
wanted = {}
wanted['uri'] = 'file://test14666.csv?yField=y&xField=x&type=csv&delimiter=\\t'
wanted['fieldTypes'] = ['integer', 'double', 'double']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': '1',
'description': '7.15417',
'x': '7.15417',
'y': '50.680622',
'#fid': 2,
'#geometry': 'Point (7.1541699999999997 50.68062199999999962)',
},
3: {
'id': '2',
'description': '7.119219',
'x': '7.119219',
'y': '50.739814',
'#fid': 3,
'#geometry': 'Point (7.11921900000000019 50.73981400000000264)',
},
4: {
'id': '3',
'description': 'NULL',
'x': 'NULL',
'y': 'NULL',
'#fid': 4,
'#geometry': 'None',
},
5: {
'id': '4',
'description': 'NULL',
'x': 'NULL',
'y': 'NULL',
'#fid': 5,
'#geometry': 'None',
},
6: {
'id': '5',
'description': '7.129229',
'x': '7.129229',
'y': '50.703692',
'#fid': 6,
'#geometry': 'Point (7.12922899999999959 50.70369199999999665)',
},
}
wanted['log'] = [
'Errors in file test14666.csv',
'2 records have missing geometry definitions',
]
return wanted
def test_041_no_detect_type():
wanted = {}
wanted['uri'] = 'file://testtypes.csv?yField=lat&xField=lon&type=csv&detectTypes=no'
wanted['fieldTypes'] = ['text', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 0
wanted['data'] = {
2: {
'id': 'line1',
'description': '1.0',
'lon': '1.0',
'lat': '1.0',
'empty': 'NULL',
'text': 'NULL',
'int': '0',
'longlong': '0',
'real': 'NULL',
'text2': '1',
'#fid': 2,
'#geometry': 'Point (1 1)',
},
3: {
'id': 'line2',
'description': '1.0',
'lon': '1.0',
'lat': '5.0',
'empty': 'NULL',
'text': '1',
'int': 'NULL',
'longlong': '9189304972279762602',
'real': '1.3',
'text2': '-4',
'#fid': 3,
'#geometry': 'Point (1 5)',
},
4: {
'id': 'line3',
'description': '5.0',
'lon': '5.0',
'lat': '5.0',
'empty': 'NULL',
'text': '1xx',
'int': '2',
'longlong': '345',
'real': '2',
'text2': '1x',
'#fid': 4,
'#geometry': 'Point (5 5)',
},
5: {
'id': 'line4',
'description': '5.0',
'lon': '5.0',
'lat': '1.0',
'empty': 'NULL',
'text': 'A string',
'int': '-3456',
'longlong': '-3123724580211819352',
'real': '-123.56',
'text2': 'NULL',
'#fid': 5,
'#geometry': 'Point (5 1)',
},
6: {
'id': 'line5',
'description': '3.0',
'lon': '3.0',
'lat': '1.0',
'empty': 'NULL',
'text': 'NULL',
'int': 'NULL',
'longlong': 'NULL',
'real': '23e-5',
'text2': '23',
'#fid': 6,
'#geometry': 'Point (3 1)',
},
7: {
'id': 'line6',
'description': '1.0',
'lon': '1.0',
'lat': '3.0',
'empty': 'NULL',
'text': '1.5',
'int': '9',
'longlong': '42',
'real': '99',
'text2': '0',
'#fid': 7,
'#geometry': 'Point (1 3)',
},
}
wanted['log'] = [
]
return wanted
def test_042_no_detect_types_csvt():
wanted = {}
wanted['uri'] = 'file://testcsvt.csv?geomType=none&type=csv&detectTypes=no'
wanted['fieldTypes'] = ['integer', 'text', 'integer', 'double', 'text', 'text', 'text', 'text', 'text', 'text', 'text', 'text']
wanted['geometryType'] = 4
wanted['data'] = {
2: {
'id': '1',
'description': 'Test csvt 1',
'fint': '1',
'freal': '1.2',
'fstr': '1',
'fstr_1': 'text',
'fdatetime': '2015-03-02T12:30:00',
'fdate': '2014-12-30',
'ftime': '23:55',
'flong': '-456',
'flonglong': '-678',
'field_12': 'NULL',
'#fid': 2,
'#geometry': 'None',
},
3: {
'id': '2',
'description': 'Test csvt 2',
'fint': '3',
'freal': '1.5',
'fstr': '99',
'fstr_1': '23.5',
'fdatetime': '80',
'fdate': '2015-03-28',
'ftime': '2014-12-30',
'flong': '01:55',
'flonglong': '9189304972279762602',
'field_12': '-3123724580211819352',
'#fid': 3,
'#geometry': 'None',
},
}
wanted['log'] = [
]
return wanted
|
gpl-2.0
|
zubair-arbi/edx-platform
|
lms/envs/aws_migrate.py
|
288
|
1256
|
"""
A Django settings file for use on AWS while running
database migrations, since we don't normally want to run the
LMS with enough privileges to modify the database schema.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Import everything from .aws so that our settings are based on those.
from .aws import *
import os
from django.core.exceptions import ImproperlyConfigured
DB_OVERRIDES = dict(
PASSWORD=os.environ.get('DB_MIGRATION_PASS', None),
ENGINE=os.environ.get('DB_MIGRATION_ENGINE', DATABASES['default']['ENGINE']),
USER=os.environ.get('DB_MIGRATION_USER', DATABASES['default']['USER']),
NAME=os.environ.get('DB_MIGRATION_NAME', DATABASES['default']['NAME']),
HOST=os.environ.get('DB_MIGRATION_HOST', DATABASES['default']['HOST']),
PORT=os.environ.get('DB_MIGRATION_PORT', DATABASES['default']['PORT']),
)
if DB_OVERRIDES['PASSWORD'] is None:
raise ImproperlyConfigured("No database password was provided for running "
"migrations. This is fatal.")
for override, value in DB_OVERRIDES.iteritems():
DATABASES['default'][override] = value
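# Illustrative usage (editor's sketch, not part of the original settings file): a
# deploy script would export the DB_MIGRATION_* variables and point Django at this
# module only while running migrations, roughly like
#
#   DB_MIGRATION_USER=migrator DB_MIGRATION_PASS=s3cret \
#       ./manage.py lms migrate --settings=aws_migrate
#
# The manage.py invocation is an assumption for illustration; only the environment
# variable names are taken from DB_OVERRIDES above.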
|
agpl-3.0
|
kennedyshead/home-assistant
|
homeassistant/components/websocket_api/http.py
|
2
|
8634
|
"""View to accept incoming websocket connection."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from contextlib import suppress
import datetime as dt
import logging
from typing import Any, Final
from aiohttp import WSMsgType, web
import async_timeout
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import Event, HomeAssistant, callback
from homeassistant.helpers.event import async_call_later
from .auth import AuthPhase, auth_required_message
from .const import (
CANCELLATION_ERRORS,
DATA_CONNECTIONS,
MAX_PENDING_MSG,
PENDING_MSG_PEAK,
PENDING_MSG_PEAK_TIME,
SIGNAL_WEBSOCKET_CONNECTED,
SIGNAL_WEBSOCKET_DISCONNECTED,
URL,
)
from .error import Disconnect
from .messages import message_to_json
_WS_LOGGER: Final = logging.getLogger(f"{__name__}.connection")
class WebsocketAPIView(HomeAssistantView):
"""View to serve a websockets endpoint."""
name: str = "websocketapi"
url: str = URL
requires_auth: bool = False
async def get(self, request: web.Request) -> web.WebSocketResponse:
"""Handle an incoming websocket connection."""
return await WebSocketHandler(request.app["hass"], request).async_handle()
class WebSocketAdapter(logging.LoggerAdapter):
"""Add connection id to websocket messages."""
def process(self, msg: str, kwargs: Any) -> tuple[str, Any]:
"""Add connid to websocket log messages."""
return f'[{self.extra["connid"]}] {msg}', kwargs
class WebSocketHandler:
"""Handle an active websocket client connection."""
def __init__(self, hass: HomeAssistant, request: web.Request) -> None:
"""Initialize an active connection."""
self.hass = hass
self.request = request
self.wsock: web.WebSocketResponse | None = None
self._to_write: asyncio.Queue = asyncio.Queue(maxsize=MAX_PENDING_MSG)
self._handle_task: asyncio.Task | None = None
self._writer_task: asyncio.Task | None = None
self._logger = WebSocketAdapter(_WS_LOGGER, {"connid": id(self)})
self._peak_checker_unsub: Callable[[], None] | None = None
async def _writer(self) -> None:
"""Write outgoing messages."""
# Exceptions are expected if the socket disconnected or we were cancelled by the connection handler
assert self.wsock is not None
with suppress(RuntimeError, ConnectionResetError, *CANCELLATION_ERRORS):
while not self.wsock.closed:
message = await self._to_write.get()
if message is None:
break
self._logger.debug("Sending %s", message)
await self.wsock.send_str(message)
# Clean up the peak checker when we shut down the writer
if self._peak_checker_unsub is not None:
self._peak_checker_unsub()
self._peak_checker_unsub = None
@callback
def _send_message(self, message: str | dict[str, Any]) -> None:
"""Send a message to the client.
Closes connection if the client is not reading the messages.
Async friendly.
"""
if not isinstance(message, str):
message = message_to_json(message)
try:
self._to_write.put_nowait(message)
except asyncio.QueueFull:
self._logger.error(
"Client exceeded max pending messages [2]: %s", MAX_PENDING_MSG
)
self._cancel()
if self._to_write.qsize() < PENDING_MSG_PEAK:
if self._peak_checker_unsub:
self._peak_checker_unsub()
self._peak_checker_unsub = None
return
if self._peak_checker_unsub is None:
self._peak_checker_unsub = async_call_later(
self.hass, PENDING_MSG_PEAK_TIME, self._check_write_peak
)
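# Backpressure summary (descriptive comment added for clarity): _send_message drops
# the connection outright when the write queue is already full (MAX_PENDING_MSG);
# when the backlog merely exceeds PENDING_MSG_PEAK it schedules _check_write_peak
# after PENDING_MSG_PEAK_TIME seconds, and that check cancels the connection only
# if the queue is still above the peak by then.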
@callback
def _check_write_peak(self, _utc_time: dt.datetime) -> None:
"""Check that we are no longer above the write peak."""
self._peak_checker_unsub = None
if self._to_write.qsize() < PENDING_MSG_PEAK:
return
self._logger.error(
"Client unable to keep up with pending messages. Stayed over %s for %s seconds",
PENDING_MSG_PEAK,
PENDING_MSG_PEAK_TIME,
)
self._cancel()
@callback
def _cancel(self) -> None:
"""Cancel the connection."""
if self._handle_task is not None:
self._handle_task.cancel()
if self._writer_task is not None:
self._writer_task.cancel()
async def async_handle(self) -> web.WebSocketResponse:
"""Handle a websocket response."""
request = self.request
wsock = self.wsock = web.WebSocketResponse(heartbeat=55)
await wsock.prepare(request)
self._logger.debug("Connected from %s", request.remote)
self._handle_task = asyncio.current_task()
@callback
def handle_hass_stop(event: Event) -> None:
"""Cancel this connection."""
self._cancel()
unsub_stop = self.hass.bus.async_listen(
EVENT_HOMEASSISTANT_STOP, handle_hass_stop
)
# As the webserver is now started before the start
# event we do not want to block for websocket responses
self._writer_task = asyncio.create_task(self._writer())
auth = AuthPhase(self._logger, self.hass, self._send_message, request)
connection = None
disconnect_warn = None
try:
self._send_message(auth_required_message())
# Auth Phase
try:
with async_timeout.timeout(10):
msg = await wsock.receive()
except asyncio.TimeoutError as err:
disconnect_warn = "Did not receive auth message within 10 seconds"
raise Disconnect from err
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
raise Disconnect
if msg.type != WSMsgType.TEXT:
disconnect_warn = "Received non-Text message."
raise Disconnect
try:
msg_data = msg.json()
except ValueError as err:
disconnect_warn = "Received invalid JSON."
raise Disconnect from err
self._logger.debug("Received %s", msg_data)
connection = await auth.async_handle(msg_data)
self.hass.data[DATA_CONNECTIONS] = (
self.hass.data.get(DATA_CONNECTIONS, 0) + 1
)
self.hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_WEBSOCKET_CONNECTED
)
# Command phase
while not wsock.closed:
msg = await wsock.receive()
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
break
if msg.type != WSMsgType.TEXT:
disconnect_warn = "Received non-Text message."
break
try:
msg_data = msg.json()
except ValueError:
disconnect_warn = "Received invalid JSON."
break
self._logger.debug("Received %s", msg_data)
connection.async_handle(msg_data)
except asyncio.CancelledError:
self._logger.info("Connection closed by client")
except Disconnect:
pass
except Exception: # pylint: disable=broad-except
self._logger.exception("Unexpected error inside websocket API")
finally:
unsub_stop()
if connection is not None:
connection.async_close()
try:
self._to_write.put_nowait(None)
# Make sure all error messages are written before closing
await self._writer_task
await wsock.close()
except asyncio.QueueFull: # can be raised by put_nowait
self._writer_task.cancel()
finally:
if disconnect_warn is None:
self._logger.debug("Disconnected")
else:
self._logger.warning("Disconnected: %s", disconnect_warn)
if connection is not None:
self.hass.data[DATA_CONNECTIONS] -= 1
self.hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_WEBSOCKET_DISCONNECTED
)
return wsock
|
apache-2.0
|
prathik/thrift
|
lib/py/src/TSCons.py
|
237
|
1267
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from os import path
from SCons.Builder import Builder
def scons_env(env, add=''):
opath = path.dirname(path.abspath('$TARGET'))
lstr = 'thrift --gen cpp -o ' + opath + ' ' + add + ' $SOURCE'
cppbuild = Builder(action=lstr)
env.Append(BUILDERS={'ThriftCpp': cppbuild})
def gen_cpp(env, dir, file):
scons_env(env)
suffixes = ['_types.h', '_types.cpp']
targets = map(lambda s: 'gen-cpp/' + file + s, suffixes)
return env.ThriftCpp(targets, dir + file + '.thrift')
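# Illustrative SConstruct usage (editor's sketch; the environment and file names
# below are assumptions):
#
#   env = Environment()
#   sources = gen_cpp(env, 'interfaces/', 'Calculator')
#
# This registers the ThriftCpp builder on env and returns the generated
# gen-cpp/Calculator_types.h and gen-cpp/Calculator_types.cpp targets built from
# interfaces/Calculator.thrift.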
|
apache-2.0
|
ShakedY/ai-project
|
py2.5/lib/python2.5/curses/wrapper.py
|
19
|
1650
|
"""curses.wrapper
Contains one function, wrapper(), which runs another function which
should be the rest of your curses-based application. If the
application raises an exception, wrapper() will restore the terminal
to a sane state so you can read the resulting traceback.
"""
import sys, curses
def wrapper(func, *args, **kwds):
"""Wrapper function that initializes curses and calls another function,
restoring normal keyboard/screen behavior on error.
The callable object 'func' is then passed the main window 'stdscr'
as its first argument, followed by any other arguments passed to
wrapper().
"""
res = None
try:
# Initialize curses
stdscr=curses.initscr()
# Turn off echoing of keys, and enter cbreak mode,
# where no buffering is performed on keyboard input
curses.noecho()
curses.cbreak()
# In keypad mode, escape sequences for special keys
# (like the cursor keys) will be interpreted and
# a special value like curses.KEY_LEFT will be returned
stdscr.keypad(1)
# Start color, too. Harmless if the terminal doesn't have
# color; user can test with has_colors() later on. The try/except
# works around a minor bit of over-conscientiousness in the curses
# module -- the error return from C start_color() is ignorable.
try:
curses.start_color()
except:
pass
return func(stdscr, *args, **kwds)
finally:
# Set everything back to normal
stdscr.keypad(0)
curses.echo()
curses.nocbreak()
curses.endwin()
|
gpl-3.0
|
pbenner/adaptive-sampling
|
adaptive_sampling/policy.py
|
1
|
7380
|
#! /usr/bin/env python
# Copyright (C) 2012 Philipp Benner
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import Queue
import copy
import interface
import random
import statistics
import sys
import threading
# call the interface
################################################################################
def utility(counts_v, data, bin_options):
"""Call the binning library."""
events = len(counts_v)
counts = statistics.countStatistic(counts_v)
alpha = data['alpha']
beta = data['beta']
gamma = data['gamma']
return interface.utility(events, counts, alpha, beta, gamma, bin_options)
def utilityAt(i, counts_v, data, bin_options):
"""Call the binning library."""
events = len(counts_v)
counts = statistics.countStatistic(counts_v)
alpha = data['alpha']
beta = data['beta']
gamma = data['gamma']
return interface.utilityAt(i, events, counts, alpha, beta, gamma, bin_options)
# tools
################################################################################
def computeKey(position, counts):
return tuple([position]+map(tuple, counts))
# determine the value of a sampling path
################################################################################
def value(path, counts, data, bin_options, hashutil):
if len(path) == 0:
return 0.0
# if necessary compute the local utility for this count statistic
key = computeKey(path[0], counts)
if not hashutil.get(key):
hashutil[key] = utilityAt(path[0], counts, data, bin_options)
# get the local utility from the hashmap
(expectation, utility) = hashutil.get(key)
for i in range(data['K']):
counts[i][path[0]] += 1
utility += expectation[i]*value(path[1:], counts, data, bin_options, hashutil)
counts[i][path[0]] -= 1
return utility
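# Descriptive note (added comment): value() returns the expected cumulative utility
# of sampling the stimuli in 'path' in order. It looks up, or computes and caches in
# 'hashutil', the per-event expectations and local utility at the current position,
# then recurses over the remaining path once per possible event i in range(data['K']),
# weighting each branch by expectation[i].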
# optimize a sampling path similar to the policy iteration algorithm
################################################################################
def optimize_entry(i, path_value, path, counts, data, bin_options, hashutil):
changed = False
stimuli = range(len(counts[0]))
stimuli.remove(path[i])
path_prime = copy.deepcopy(path)
for x in stimuli:
path_prime[i] = x
path_value_prime = value(path_prime, counts, data, bin_options, hashutil)
if path_value_prime > path_value:
changed = True
path_value = path_value_prime
path[i] = x
return (path_value, path, changed)
def optimize(path, counts, data, bin_options, hashutil, full=False):
changed = True
path_value = value(path, counts, data, bin_options, hashutil)
decisions = range(len(path))
if not full:
decisions.remove(0)
while changed:
for i in decisions:
(path_value, path, changed) = optimize_entry(i, path_value, path, counts, data, bin_options, hashutil)
return (path_value, path)
def u_star(length, counts, data, bin_options):
if length <= 1:
return utility(counts, data, bin_options)[1]
stimuli = range(len(counts[0]))
path = [ random.choice(stimuli) for i in range(length) ]
utility = [ 0.0 for i in stimuli ]
hashutil = {}
for x in stimuli:
path[0] = x
(path_value, path) = optimize(path, counts, data, bin_options, hashutil)
utility[x] = path_value
return utility
# threaded optimization of sampling paths
################################################################################
class OptimizationThread(threading.Thread):
def __init__(self, length, counts, data, bin_options, queue_in, queue_out):
threading.Thread.__init__(self)
self.length = length
self.counts = copy.deepcopy(counts)
self.data = copy.deepcopy(data)
self.bin_options = bin_options
self.queue_in = queue_in
self.queue_out = queue_out
self.hashutil = {}
def run(self):
stimuli = range(len(self.counts[0]))
path = [ random.choice(stimuli) for i in range(self.length) ]
while True:
# get stimulus from queue
x = self.queue_in.get()
if self.bin_options['verbose']:
sys.stderr.write('Processing stimulus ' + str(x) + '.\n')
# set first element of the path to this stimulus
path[0] = x
# optimize all other elements of the path
(path_value, path) = optimize(path, self.counts, self.data, self.bin_options, self.hashutil)
# push result
self.queue_out.put((x, path_value))
self.queue_in.task_done()
def threaded_u_star(length, counts, data, bin_options):
if length <= 1:
return utility(counts, data, bin_options)[1]
utility_queue_in = Queue.Queue()
utility_queue_out = Queue.Queue()
utility_threads = []
stimuli = range(len(counts[0]))
utility_vector = [ 0.0 for i in stimuli ]
# launch daemon threads
for i in range(bin_options['threads']):
t = OptimizationThread(length, counts, data, bin_options,
utility_queue_in, utility_queue_out)
t.setDaemon(True)
t.start()
utility_threads += [t]
# fill queue and start computation
stimuli = range(len(counts[0]))
for x in stimuli:
utility_queue_in.put(x)
# wait for threads
utility_queue_in.join()
# process results
while not utility_queue_out.empty():
x, path_value = utility_queue_out.get()
utility_vector[x] = path_value
utility_queue_out.task_done()
return utility_vector
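# Descriptive note (added comment): threaded_u_star fans the per-stimulus path
# optimisation out to bin_options['threads'] daemon OptimizationThread workers
# through a pair of queues, waits for the input queue to drain, and then collects
# each stimulus' optimised path value into utility_vector.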
# test functions
################################################################################
def test1(counts, data, bin_options):
hashutil = {}
for i in range(data['L']):
print [i],
print ": ",
print value([i], counts, data, bin_options, hashutil)
for i in range(data['L']):
for j in range(data['L']):
print [i, j],
print ": ",
print value([i, j], counts, data, bin_options, hashutil)
for i in range(data['L']):
for j in range(data['L']):
for k in range(data['L']):
print [i, j, k],
print ": ",
print value([i, j, k], counts, data, bin_options, hashutil)
for i in range(data['L']):
for j in range(data['L']):
for k in range(data['L']):
for l in range(data['L']):
print [i, j, k, l],
print ": ",
print value([i, j, k, l], counts, data, bin_options, hashutil)
|
gpl-2.0
|
mglukhikh/intellij-community
|
python/lib/Lib/distutils/bcppcompiler.py
|
85
|
15086
|
"""distutils.bcppcompiler
Contains BCPPCompiler, an implementation of the abstract CCompiler class
for the Borland C++ compiler.
"""
# This implementation by Lyle Johnson, based on the original msvccompiler.py
# module and using the directions originally published by Gordon Williams.
# XXX looks like there's a LOT of overlap between these two classes:
# someone should sit down and factor out the common code as
# WindowsCCompiler! --GPW
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bcppcompiler.py 37828 2004-11-10 22:23:15Z loewis $"
import sys, os
from distutils.errors import \
DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError, UnknownFileError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils.file_util import write_file
from distutils.dep_util import newer
from distutils import log
class BCPPCompiler(CCompiler) :
"""Concrete class that implements an interface to the Borland C/C++
compiler, as defined by the CCompiler abstract class.
"""
compiler_type = 'bcpp'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = _c_extensions + _cpp_extensions
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
# These executables are assumed to all be in the path.
# Borland doesn't seem to use any special registry settings to
# indicate their installation locations.
self.cc = "bcc32.exe"
self.linker = "ilink32.exe"
self.lib = "tlib.exe"
self.preprocess_options = None
self.compile_options = ['/tWM', '/O2', '/q', '/g0']
self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_static = []
self.ldflags_exe = ['/Gn', '/q', '/x']
self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']
# -- Worker methods ------------------------------------------------
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
compile_opts = extra_preargs or []
compile_opts.append ('-c')
if debug:
compile_opts.extend (self.compile_options_debug)
else:
compile_opts.extend (self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
# XXX why do the normpath here?
src = os.path.normpath(src)
obj = os.path.normpath(obj)
# XXX _setup_compile() did a mkpath() too but before the normpath.
# Is it possible to skip the normpath?
self.mkpath(os.path.dirname(obj))
if ext == '.res':
# This is already a binary file -- skip it.
continue # the 'for' loop
if ext == '.rc':
# This needs to be compiled to a .res file -- do it now.
try:
self.spawn (["brcc32", "-fo", obj, src])
except DistutilsExecError, msg:
raise CompileError, msg
continue # the 'for' loop
# The next two are both for the real compiler.
if ext in self._c_extensions:
input_opt = ""
elif ext in self._cpp_extensions:
input_opt = "-P"
else:
# Unknown file type -- no extra options. The compiler
# will probably fail, but let it just in case this is a
# file the compiler recognizes even if we don't.
input_opt = ""
output_opt = "-o" + obj
# Compiler command line syntax is: "bcc32 [options] file(s)".
# Note that the source file names must appear at the end of
# the command line.
try:
self.spawn ([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs + [src])
except DistutilsExecError, msg:
raise CompileError, msg
return objects
# compile ()
def create_static_lib (self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
(objects, output_dir) = self._fix_object_args (objects, output_dir)
output_filename = \
self.library_filename (output_libname, output_dir=output_dir)
if self._need_link (objects, output_filename):
lib_args = [output_filename, '/u'] + objects
if debug:
pass # XXX what goes here?
try:
self.spawn ([self.lib] + lib_args)
except DistutilsExecError, msg:
raise LibError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# create_static_lib ()
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# XXX this ignores 'build_temp'! should follow the lead of
# msvccompiler.py
(objects, output_dir) = self._fix_object_args (objects, output_dir)
(libraries, library_dirs, runtime_library_dirs) = \
self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
if runtime_library_dirs:
log.warn("I don't know what to do with 'runtime_library_dirs': %s",
str(runtime_library_dirs))
if output_dir is not None:
output_filename = os.path.join (output_dir, output_filename)
if self._need_link (objects, output_filename):
# Figure out linker args based on type of target.
if target_desc == CCompiler.EXECUTABLE:
startup_obj = 'c0w32'
if debug:
ld_args = self.ldflags_exe_debug[:]
else:
ld_args = self.ldflags_exe[:]
else:
startup_obj = 'c0d32'
if debug:
ld_args = self.ldflags_shared_debug[:]
else:
ld_args = self.ldflags_shared[:]
# Create a temporary exports file for use by the linker
if export_symbols is None:
def_file = ''
else:
head, tail = os.path.split (output_filename)
modname, ext = os.path.splitext (tail)
temp_dir = os.path.dirname(objects[0]) # preserve tree structure
def_file = os.path.join (temp_dir, '%s.def' % modname)
contents = ['EXPORTS']
for sym in (export_symbols or []):
contents.append(' %s=_%s' % (sym, sym))
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# Borland C++ has problems with '/' in paths
objects2 = map(os.path.normpath, objects)
# split objects in .obj and .res files
# Borland C++ needs them at different positions in the command line
objects = [startup_obj]
resources = []
for file in objects2:
(base, ext) = os.path.splitext(os.path.normcase(file))
if ext == '.res':
resources.append(file)
else:
objects.append(file)
for l in library_dirs:
ld_args.append("/L%s" % os.path.normpath(l))
ld_args.append("/L.") # we sometimes use relative paths
# list of object files
ld_args.extend(objects)
# XXX the command-line syntax for Borland C++ is a bit wonky;
# certain filenames are jammed together in one big string, but
# comma-delimited. This doesn't mesh too well with the
# Unix-centric attitude (with a DOS/Windows quoting hack) of
# 'spawn()', so constructing the argument list is a bit
# awkward. Note that doing the obvious thing and jamming all
# the filenames and commas into one argument would be wrong,
# because 'spawn()' would quote any filenames with spaces in
# them. Arghghh!. Apparently it works fine as coded...
# name of dll/exe file
ld_args.extend([',',output_filename])
# no map file and start libraries
ld_args.append(',,')
for lib in libraries:
# see if we find it and if there is a bcpp specific lib
# (xxx_bcpp.lib)
libfile = self.find_library_file(library_dirs, lib, debug)
if libfile is None:
ld_args.append(lib)
# probably a BCPP internal library -- don't warn
else:
# full name which prefers bcpp_xxx.lib over xxx.lib
ld_args.append(libfile)
# some default libraries
ld_args.append ('import32')
ld_args.append ('cw32mt')
# def file for export symbols
ld_args.extend([',',def_file])
# add resource files
ld_args.append(',')
ld_args.extend(resources)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath (os.path.dirname (output_filename))
try:
self.spawn ([self.linker] + ld_args)
except DistutilsExecError, msg:
raise LinkError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# link ()
# -- Miscellaneous methods -----------------------------------------
def find_library_file (self, dirs, lib, debug=0):
# List of effective library names to try, in order of preference:
# xxx_bcpp.lib is better than xxx.lib
# and xxx_d.lib is better than xxx.lib if debug is set
#
# The "_bcpp" suffix is to handle a Python installation for people
# with multiple compilers (primarily Distutils hackers, I suspect
# ;-). The idea is they'd have one static library for each
# compiler they care about, since (almost?) every Windows compiler
# seems to have a different format for static libraries.
if debug:
dlib = (lib + "_d")
try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
else:
try_names = (lib + "_bcpp", lib)
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename(name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# overwrite the one from CCompiler to support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res':
# these can go unchanged
obj_names.append (os.path.join (output_dir, base + ext))
elif ext == '.rc':
# these need to be compiled to .res-files
obj_names.append (os.path.join (output_dir, base + '.res'))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def preprocess (self,
source,
output_file=None,
macros=None,
include_dirs=None,
extra_preargs=None,
extra_postargs=None):
(_, macros, include_dirs) = \
self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = ['cpp32.exe'] + pp_opts
if output_file is not None:
pp_args.append('-o' + output_file)
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or the
# source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError, msg:
print msg
raise CompileError, msg
# preprocess()
|
apache-2.0
|
nttks/edx-platform
|
lms/djangoapps/course_wiki/tests/test_tab.py
|
158
|
2454
|
"""
Tests for wiki views.
"""
from django.conf import settings
from django.test.client import RequestFactory
from courseware.tabs import get_course_tab_list
from student.tests.factories import AdminFactory, UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class WikiTabTestCase(ModuleStoreTestCase):
"""Test cases for Wiki Tab."""
def setUp(self):
super(WikiTabTestCase, self).setUp()
self.course = CourseFactory.create()
self.instructor = AdminFactory.create()
self.user = UserFactory()
def get_wiki_tab(self, user, course):
"""Returns true if the "Wiki" tab is shown."""
request = RequestFactory().request()
request.user = user
all_tabs = get_course_tab_list(request, course)
wiki_tabs = [tab for tab in all_tabs if tab.name == 'Wiki']
return wiki_tabs[0] if len(wiki_tabs) == 1 else None
def test_wiki_enabled_and_public(self):
"""
Test wiki tab when Enabled setting is True and the wiki is open to
the public.
"""
settings.WIKI_ENABLED = True
self.course.allow_public_wiki_access = True
self.assertIsNotNone(self.get_wiki_tab(self.user, self.course))
def test_wiki_enabled_and_not_public(self):
"""
Test wiki when it is enabled but not open to the public
"""
settings.WIKI_ENABLED = True
self.course.allow_public_wiki_access = False
self.assertIsNone(self.get_wiki_tab(self.user, self.course))
self.assertIsNotNone(self.get_wiki_tab(self.instructor, self.course))
def test_wiki_enabled_false(self):
"""Test wiki tab when Enabled setting is False"""
settings.WIKI_ENABLED = False
self.assertIsNone(self.get_wiki_tab(self.user, self.course))
self.assertIsNone(self.get_wiki_tab(self.instructor, self.course))
def test_wiki_visibility(self):
"""Test toggling of visibility of wiki tab"""
settings.WIKI_ENABLED = True
self.course.allow_public_wiki_access = True
wiki_tab = self.get_wiki_tab(self.user, self.course)
self.assertIsNotNone(wiki_tab)
self.assertTrue(wiki_tab.is_hideable)
wiki_tab.is_hidden = True
self.assertTrue(wiki_tab['is_hidden'])
wiki_tab['is_hidden'] = False
self.assertFalse(wiki_tab.is_hidden)
|
agpl-3.0
|
salguarnieri/intellij-community
|
plugins/hg4idea/testData/bin/mercurial/keepalive.py
|
91
|
25918
|
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.
# This file is part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
# Modified by Benoit Boissinot:
# - fix for digest auth (inspired from urllib2.py @ Python v2.4)
# Modified by Dirkjan Ochtman:
# - import md5 function from a local util module
# Modified by Martin Geisler:
# - moved md5 function from local util module to this module
# Modified by Augie Fackler:
# - add safesend method and use it to prevent broken pipe errors
# on large POST requests
"""An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
>>> import urllib2
>>> from keepalive import HTTPHandler
>>> keepalive_handler = HTTPHandler()
>>> opener = urllib2.build_opener(keepalive_handler)
>>> urllib2.install_opener(opener)
>>>
>>> fo = urllib2.urlopen('http://www.python.org')
If a connection to a given host is requested, and all of the existing
connections are still in use, another connection will be opened. If
the handler tries to use an existing connection but it fails in some
way, it will be closed and removed from the pool.
To remove the handler, simply re-run build_opener with no arguments, and
install that opener.
You can explicitly close connections by using the close_connection()
method of the returned file-like object (described below) or you can
use the handler methods:
close_connection(host)
close_all()
open_connections()
NOTE: using the close_connection and close_all methods of the handler
should be done with care when using multiple threads.
* there is nothing that prevents another thread from creating new
connections immediately after connections are closed
* no checks are done to prevent in-use connections from being closed
>>> keepalive_handler.close_all()
EXTRA ATTRIBUTES AND METHODS
Upon a status of 200, the object returned has a few additional
attributes and methods, which should not be used if you want to
remain consistent with the normal urllib2-returned objects:
close_connection() - close the connection to the host
readlines() - you know, readlines()
status - the return status (e.g. 404)
reason - English translation of status (e.g. 'File not found')
If you want the best of both worlds, use this inside an
AttributeError-catching try:
>>> try: status = fo.status
>>> except AttributeError: status = None
Unfortunately, these are ONLY there if status == 200, so it's not
easy to distinguish between non-200 responses. The reason is that
urllib2 tries to do clever things with error codes 301, 302, 401,
and 407, and it wraps the object upon return.
For python versions earlier than 2.4, you can avoid this fancy error
handling by setting the module-level global HANDLE_ERRORS to zero.
You see, prior to 2.4, it's the HTTP Handler's job to determine what
to handle specially, and what to just pass up. HANDLE_ERRORS == 0
means "pass everything up". In python 2.4, however, this job no
longer belongs to the HTTP Handler and is now done by a NEW handler,
HTTPErrorProcessor. Here's the bottom line:
python version < 2.4
HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
errors
HANDLE_ERRORS == 0 pass everything up, error processing is
left to the calling code
python version >= 2.4
HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
HANDLE_ERRORS == 0 (default) pass everything up, let the
other handlers (specifically,
HTTPErrorProcessor) decide what to do
In practice, setting the variable either way makes little difference
in python 2.4, so for the most consistent behavior across versions,
you probably just want to use the defaults, which will give you
exceptions on errors.
"""
# $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
import errno
import httplib
import socket
import thread
import urllib2
DEBUG = None
import sys
if sys.version_info < (2, 4):
HANDLE_ERRORS = 1
else: HANDLE_ERRORS = 0
class ConnectionManager(object):
"""
The connection manager must be able to:
* keep track of all existing connections
"""
def __init__(self):
self._lock = thread.allocate_lock()
self._hostmap = {} # map hosts to a list of connections
self._connmap = {} # map connections to host
self._readymap = {} # map connection to ready state
def add(self, host, connection, ready):
self._lock.acquire()
try:
if host not in self._hostmap:
self._hostmap[host] = []
self._hostmap[host].append(connection)
self._connmap[connection] = host
self._readymap[connection] = ready
finally:
self._lock.release()
def remove(self, connection):
self._lock.acquire()
try:
try:
host = self._connmap[connection]
except KeyError:
pass
else:
del self._connmap[connection]
del self._readymap[connection]
self._hostmap[host].remove(connection)
if not self._hostmap[host]: del self._hostmap[host]
finally:
self._lock.release()
def set_ready(self, connection, ready):
try:
self._readymap[connection] = ready
except KeyError:
pass
def get_ready_conn(self, host):
conn = None
self._lock.acquire()
try:
if host in self._hostmap:
for c in self._hostmap[host]:
if self._readymap[c]:
self._readymap[c] = 0
conn = c
break
finally:
self._lock.release()
return conn
def get_all(self, host=None):
if host:
return list(self._hostmap.get(host, []))
else:
return dict(self._hostmap)
class KeepAliveHandler(object):
def __init__(self):
self._cm = ConnectionManager()
#### Connection Management
def open_connections(self):
"""return a list of connected hosts and the number of connections
to each. [('foo.com:80', 2), ('bar.org', 1)]"""
return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
def close_connection(self, host):
"""close connection(s) to <host>
host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
no error occurs if there is no connection to that host."""
for h in self._cm.get_all(host):
self._cm.remove(h)
h.close()
def close_all(self):
"""close all open connections"""
for host, conns in self._cm.get_all().iteritems():
for h in conns:
self._cm.remove(h)
h.close()
def _request_closed(self, request, host, connection):
"""tells us that this request is now closed and that the
connection is ready for another request"""
self._cm.set_ready(connection, 1)
def _remove_connection(self, host, connection, close=0):
if close:
connection.close()
self._cm.remove(connection)
#### Transaction Execution
def http_open(self, req):
return self.do_open(HTTPConnection, req)
def do_open(self, http_class, req):
host = req.get_host()
if not host:
raise urllib2.URLError('no host given')
try:
h = self._cm.get_ready_conn(host)
while h:
r = self._reuse_connection(h, req, host)
# if this response is non-None, then it worked and we're
# done. Break out, skipping the else block.
if r:
break
# connection is bad - possibly closed by server
# discard it and ask for the next free connection
h.close()
self._cm.remove(h)
h = self._cm.get_ready_conn(host)
else:
# no (working) free connections were found. Create a new one.
h = http_class(host)
if DEBUG:
DEBUG.info("creating new connection to %s (%d)",
host, id(h))
self._cm.add(host, h, 0)
self._start_transaction(h, req)
r = h.getresponse()
except (socket.error, httplib.HTTPException), err:
raise urllib2.URLError(err)
# if not a persistent connection, don't try to reuse it
if r.will_close:
self._cm.remove(h)
if DEBUG:
DEBUG.info("STATUS: %s, %s", r.status, r.reason)
r._handler = self
r._host = host
r._url = req.get_full_url()
r._connection = h
r.code = r.status
r.headers = r.msg
r.msg = r.reason
if r.status == 200 or not HANDLE_ERRORS:
return r
else:
return self.parent.error('http', req, r,
r.status, r.msg, r.headers)
def _reuse_connection(self, h, req, host):
"""start the transaction with a re-used connection
return a response object (r) upon success or None on failure.
This DOES not close or remove bad connections in cases where
it returns. However, if an unexpected exception occurs, it
will close and remove the connection before re-raising.
"""
try:
self._start_transaction(h, req)
r = h.getresponse()
# note: just because we got something back doesn't mean it
# worked. We'll check the version below, too.
except (socket.error, httplib.HTTPException):
r = None
except: # re-raises
# adding this block just in case we've missed
# something we will still raise the exception, but
# lets try and close the connection and remove it
# first. We previously got into a nasty loop
# where an exception was uncaught, and so the
# connection stayed open. On the next try, the
# same exception was raised, etc. The trade-off is
# that it's now possible this call will raise
# a DIFFERENT exception
if DEBUG:
DEBUG.error("unexpected exception - closing "
"connection to %s (%d)", host, id(h))
self._cm.remove(h)
h.close()
raise
if r is None or r.version == 9:
# httplib falls back to assuming HTTP 0.9 if it gets a
# bad header back. This is most likely to happen if
# the socket has been closed by the server since we
# last used the connection.
if DEBUG:
DEBUG.info("failed to re-use connection to %s (%d)",
host, id(h))
r = None
else:
if DEBUG:
DEBUG.info("re-using connection to %s (%d)", host, id(h))
return r
def _start_transaction(self, h, req):
# What follows mostly reimplements HTTPConnection.request()
# except it adds self.parent.addheaders in the mix.
headers = req.headers.copy()
if sys.version_info >= (2, 4):
headers.update(req.unredirected_hdrs)
headers.update(self.parent.addheaders)
headers = dict((n.lower(), v) for n, v in headers.items())
skipheaders = {}
for n in ('host', 'accept-encoding'):
if n in headers:
skipheaders['skip_' + n.replace('-', '_')] = 1
try:
if req.has_data():
data = req.get_data()
h.putrequest('POST', req.get_selector(), **skipheaders)
if 'content-type' not in headers:
h.putheader('Content-type',
'application/x-www-form-urlencoded')
if 'content-length' not in headers:
h.putheader('Content-length', '%d' % len(data))
else:
h.putrequest('GET', req.get_selector(), **skipheaders)
except (socket.error), err:
raise urllib2.URLError(err)
for k, v in headers.items():
h.putheader(k, v)
h.endheaders()
if req.has_data():
h.send(data)
class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler):
pass
class HTTPResponse(httplib.HTTPResponse):
# we need to subclass HTTPResponse in order to
# 1) add readline() and readlines() methods
# 2) add close_connection() methods
# 3) add info() and geturl() methods
# in order to add readline(), read must be modified to deal with a
# buffer. example: readline must read a buffer and then spit back
# one line at a time. The only real alternative is to read one
# BYTE at a time (ick). Once something has been read, it can't be
# put back (ok, maybe it can, but that's even uglier than this),
# so if you THEN do a normal read, you must first take stuff from
# the buffer.
# the read method wraps the original to accommodate buffering,
# although read() never adds to the buffer.
# Both readline and readlines have been stolen with almost no
# modification from socket.py
def __init__(self, sock, debuglevel=0, strict=0, method=None):
httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
self.fileno = sock.fileno
self.code = None
self._rbuf = ''
self._rbufsize = 8096
self._handler = None # inserted by the handler later
self._host = None # (same)
self._url = None # (same)
self._connection = None # (same)
_raw_read = httplib.HTTPResponse.read
def close(self):
if self.fp:
self.fp.close()
self.fp = None
if self._handler:
self._handler._request_closed(self, self._host,
self._connection)
def close_connection(self):
self._handler._remove_connection(self._host, self._connection, close=1)
self.close()
def info(self):
return self.headers
def geturl(self):
return self._url
def read(self, amt=None):
# the _rbuf test is only in this first if for speed. It's not
# logically necessary
        if self._rbuf and amt is not None:
L = len(self._rbuf)
if amt > L:
amt -= L
else:
s = self._rbuf[:amt]
self._rbuf = self._rbuf[amt:]
return s
s = self._rbuf + self._raw_read(amt)
self._rbuf = ''
return s
# stolen from Python SVN #68532 to fix issue1088
def _read_chunked(self, amt):
chunk_left = self.chunk_left
value = ''
# XXX This accumulates chunks by repeated string concatenation,
# which is not efficient as the number or size of chunks gets big.
while True:
if chunk_left is None:
line = self.fp.readline()
i = line.find(';')
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
chunk_left = int(line, 16)
except ValueError:
# close the connection as protocol synchronization is
# probably lost
self.close()
raise httplib.IncompleteRead(value)
if chunk_left == 0:
break
if amt is None:
value += self._safe_read(chunk_left)
elif amt < chunk_left:
value += self._safe_read(amt)
self.chunk_left = chunk_left - amt
return value
elif amt == chunk_left:
value += self._safe_read(amt)
self._safe_read(2) # toss the CRLF at the end of the chunk
self.chunk_left = None
return value
else:
value += self._safe_read(chunk_left)
amt -= chunk_left
# we read the whole chunk, get another
self._safe_read(2) # toss the CRLF at the end of the chunk
chunk_left = None
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline()
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line == '\r\n':
break
# we read everything; close the "file"
self.close()
return value
def readline(self, limit=-1):
i = self._rbuf.find('\n')
while i < 0 and not (0 < limit <= len(self._rbuf)):
new = self._raw_read(self._rbufsize)
if not new:
break
i = new.find('\n')
if i >= 0:
i = i + len(self._rbuf)
self._rbuf = self._rbuf + new
if i < 0:
i = len(self._rbuf)
else:
i = i + 1
if 0 <= limit < len(self._rbuf):
i = limit
data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
return data
def readlines(self, sizehint = 0):
total = 0
list = []
while True:
line = self.readline()
if not line:
break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
def safesend(self, str):
"""Send `str' to the server.
Shamelessly ripped off from httplib to patch a bad behavior.
"""
# _broken_pipe_resp is an attribute we set in this function
# if the socket is closed while we're sending data but
# the server sent us a response before hanging up.
# In that case, we want to pretend to send the rest of the
# outgoing data, and then let the user use getresponse()
# (which we wrap) to get this last response before
# opening a new socket.
if getattr(self, '_broken_pipe_resp', None) is not None:
return
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise httplib.NotConnected
# send the data to the server. if we get a broken pipe, then close
# the socket. we want to reconnect when somebody tries to send again.
#
# NOTE: we DO propagate the error, though, because we cannot simply
# ignore the error... the caller will know if they can retry.
if self.debuglevel > 0:
print "send:", repr(str)
try:
blocksize = 8192
read = getattr(str, 'read', None)
if read is not None:
if self.debuglevel > 0:
print "sending a read()able"
data = read(blocksize)
while data:
self.sock.sendall(data)
data = read(blocksize)
else:
self.sock.sendall(str)
except socket.error, v:
reraise = True
if v[0] == errno.EPIPE: # Broken pipe
if self._HTTPConnection__state == httplib._CS_REQ_SENT:
self._broken_pipe_resp = None
self._broken_pipe_resp = self.getresponse()
reraise = False
self.close()
if reraise:
raise
def wrapgetresponse(cls):
"""Wraps getresponse in cls with a broken-pipe sane version.
"""
def safegetresponse(self):
# In safesend() we might set the _broken_pipe_resp
# attribute, in which case the socket has already
# been closed and we just need to give them the response
# back. Otherwise, we use the normal response path.
r = getattr(self, '_broken_pipe_resp', None)
if r is not None:
return r
return cls.getresponse(self)
safegetresponse.__doc__ = cls.getresponse.__doc__
return safegetresponse
class HTTPConnection(httplib.HTTPConnection):
# use the modified response class
response_class = HTTPResponse
send = safesend
getresponse = wrapgetresponse(httplib.HTTPConnection)
#########################################################################
##### TEST FUNCTIONS
#########################################################################
def error_handler(url):
global HANDLE_ERRORS
orig = HANDLE_ERRORS
keepalive_handler = HTTPHandler()
opener = urllib2.build_opener(keepalive_handler)
urllib2.install_opener(opener)
pos = {0: 'off', 1: 'on'}
for i in (0, 1):
print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
HANDLE_ERRORS = i
try:
fo = urllib2.urlopen(url)
fo.read()
fo.close()
try:
status, reason = fo.status, fo.reason
except AttributeError:
status, reason = None, None
except IOError, e:
print " EXCEPTION: %s" % e
raise
else:
print " status = %s, reason = %s" % (status, reason)
HANDLE_ERRORS = orig
hosts = keepalive_handler.open_connections()
print "open connections:", hosts
keepalive_handler.close_all()
def md5(s):
try:
from hashlib import md5 as _md5
except ImportError:
from md5 import md5 as _md5
global md5
md5 = _md5
return _md5(s)
def continuity(url):
format = '%25s: %s'
# first fetch the file with the normal http handler
opener = urllib2.build_opener()
urllib2.install_opener(opener)
fo = urllib2.urlopen(url)
foo = fo.read()
fo.close()
    m = md5(foo)
print format % ('normal urllib', m.hexdigest())
# now install the keepalive handler and try again
opener = urllib2.build_opener(HTTPHandler())
urllib2.install_opener(opener)
fo = urllib2.urlopen(url)
foo = fo.read()
fo.close()
    m = md5(foo)
print format % ('keepalive read', m.hexdigest())
fo = urllib2.urlopen(url)
foo = ''
while True:
f = fo.readline()
if f:
foo = foo + f
else: break
fo.close()
    m = md5(foo)
print format % ('keepalive readline', m.hexdigest())
def comp(N, url):
print ' making %i connections to:\n %s' % (N, url)
sys.stdout.write(' first using the normal urllib handlers')
# first use normal opener
opener = urllib2.build_opener()
urllib2.install_opener(opener)
t1 = fetch(N, url)
print ' TIME: %.3f s' % t1
sys.stdout.write(' now using the keepalive handler ')
# now install the keepalive handler and try again
opener = urllib2.build_opener(HTTPHandler())
urllib2.install_opener(opener)
t2 = fetch(N, url)
print ' TIME: %.3f s' % t2
print ' improvement factor: %.2f' % (t1 / t2)
def fetch(N, url, delay=0):
import time
lens = []
starttime = time.time()
for i in range(N):
if delay and i > 0:
time.sleep(delay)
fo = urllib2.urlopen(url)
foo = fo.read()
fo.close()
lens.append(len(foo))
diff = time.time() - starttime
j = 0
for i in lens[1:]:
j = j + 1
if not i == lens[0]:
print "WARNING: inconsistent length on read %i: %i" % (j, i)
return diff
def test_timeout(url):
global DEBUG
dbbackup = DEBUG
class FakeLogger(object):
def debug(self, msg, *args):
print msg % args
info = warning = error = debug
DEBUG = FakeLogger()
print " fetching the file to establish a connection"
fo = urllib2.urlopen(url)
data1 = fo.read()
fo.close()
i = 20
print " waiting %i seconds for the server to close the connection" % i
while i > 0:
sys.stdout.write('\r %2i' % i)
sys.stdout.flush()
time.sleep(1)
i -= 1
sys.stderr.write('\r')
print " fetching the file a second time"
fo = urllib2.urlopen(url)
data2 = fo.read()
fo.close()
if data1 == data2:
print ' data are identical'
else:
print ' ERROR: DATA DIFFER'
DEBUG = dbbackup
def test(url, N=10):
print "checking error handler (do this on a non-200)"
try: error_handler(url)
except IOError:
print "exiting - exception will prevent further tests"
sys.exit()
print
print "performing continuity test (making sure stuff isn't corrupted)"
continuity(url)
print
print "performing speed comparison"
comp(N, url)
print
print "performing dropped-connection check"
test_timeout(url)
if __name__ == '__main__':
import time
import sys
try:
N = int(sys.argv[1])
url = sys.argv[2]
except (IndexError, ValueError):
print "%s <integer> <url>" % sys.argv[0]
else:
test(url, N)
|
apache-2.0
|
sincerefly/getEastmoneyReport
|
guping_new/statistics/sta-3.py
|
1
|
2384
|
#!/bin/env python
#encoding:utf-8
from pymongo import MongoClient
import datetime
# Settings
mongopath = "localhost" # database host
startDate = "20150104" # start date for data retrieval
endDate = "20150529" # end date for data retrieval
#endDate = "20150227" # end date for data retrieval (three-month margin)
nowDate = datetime.datetime.now().strftime("%Y%m%d") # current date
# Functions
def isNotWorkDay():
    # %w formats the weekday as a string: "0" is Sunday ... "6" is Saturday
    today = datetime.datetime.now().strftime("%w")
    # return True on Saturday/Sunday so the caller can skip the run
    return today in ["6", "0"]
def clientMongo():
client = MongoClient(mongopath, 27017)
db = client.guping
return db if db else False
def getArticleInfo(db):
return db.dfcf_company.find({})
def startSta(art_list, db):
i = 0
author_dict = {}
for art in art_list:
company = art["company"].encode("utf-8")
author_list = art["author"]
#print author_list
for au in author_list:
au = au.encode("utf-8")
grow = art["grow"]
if author_dict.has_key(au):
author_dict[au]["count"] +=1
author_dict[au]["grow"].append(grow)
else:
author_dict[au] = {}
author_dict[au]["count"] = 1
author_dict[au]["grow"] = []
author_dict[au]["grow"].append(grow)
author_dict[au]["company"] = company
#print author_dict
for key in author_dict:
count = author_dict[key]["count"]
grow_list = author_dict[key]["grow"]
avgUp = round(sum(grow_list) / len(grow_list), 4)
company = author_dict[key]["company"]
print key + "\t" + str(count) + "\t" + str(avgUp) + "\t" + company
d = {
"author": key,
"count": count,
"avgUp": avgUp,
"company": company
}
#db.dfcf_author_f_test.insert(d)
        db.dfcf_author_f.update({'author': key}, {'$set': d}, upsert=True)
return 0
# main function
if __name__ == "__main__":
if isNotWorkDay():
exit(0)
db = clientMongo()
if db:
print "Client Mongo Success"
else:
print "Client Mongo failed"
exit(0)
article_list = getArticleInfo(db)
    # aggregate the stock gain statistics for the configured date range
startSta(article_list, db)
|
mit
|
AutomatedTester/selenium
|
py/test/selenium/test_prompts.py
|
65
|
2425
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium import selenium
import unittest
import time
class TestPrompts(unittest.TestCase):
def setUp(self):
self.selenium = selenium("localhost", \
4444, "*firefoxproxy", "http://www.w3schools.com")
self.selenium.start()
def test_alert(self):
sel = self.selenium
sel.open("/js/tryit.asp?filename=tryjs_alert")
sel.select_frame("view")
sel.click("css=input[value='Show alert box']")
self.assertEqual(sel.get_alert(), "Hello! I am an alert box!")
def test_confirm_accept(self):
sel = self.selenium
sel.open("/js/tryit.asp?filename=tryjs_confirm")
sel.select_frame("view")
sel.choose_ok_on_next_confirmation()
sel.click("css=input[value='Show a confirm box']")
self.assertEqual(sel.get_alert(), "You pressed OK!")
    def test_confirm_cancel(self):
        sel = self.selenium
        sel.open("/js/tryit.asp?filename=tryjs_confirm")
        sel.select_frame("view")
        # exercise the cancel path (the expected text assumes the demo page
        # reports "You pressed Cancel!" when the confirmation is dismissed)
        sel.choose_cancel_on_next_confirmation()
        sel.click("css=input[value='Show a confirm box']")
        self.assertEqual(sel.get_alert(), "You pressed Cancel!")
def test_prompt(self):
sel = self.selenium
sel.open("/js/tryit.asp?filename=tryjs_prompt")
sel.select_frame("view")
sel.answer_on_next_prompt('Flying Monkey')
sel.click("css=input[value='Show prompt box']")
self.assertEqual(sel.get_html_source(), '<head></head><body>Hello Flying Monkey! How are you today?</body>')
def tearDown(self):
self.selenium.stop()
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
sbalde/edx-platform
|
common/lib/xmodule/xmodule/modulestore/tests/factories.py
|
16
|
21208
|
"""
Factories for use in tests of XBlocks.
"""
import functools
import inspect
import pprint
import pymongo.message
import threading
import traceback
from collections import defaultdict
from decorator import contextmanager
from uuid import uuid4
from factory import Factory, Sequence, lazy_attribute_sequence, lazy_attribute
from factory.containers import CyclicDefinitionError
from mock import Mock, patch
from nose.tools import assert_less_equal, assert_greater_equal
import dogstats_wrapper as dog_stats_api
from opaque_keys.edx.locations import Location
from opaque_keys.edx.keys import UsageKey
from xblock.core import XBlock
from xmodule.modulestore import prefer_xmodules, ModuleStoreEnum
from xmodule.tabs import CourseTab
from xmodule.x_module import DEPRECATION_VSCOMPAT_EVENT
class Dummy(object):
pass
class XModuleFactoryLock(threading.local):
"""
This class exists to store whether XModuleFactory can be accessed in a safe
way (meaning, in a context where the data it creates will be cleaned up).
Users of XModuleFactory (or its subclasses) should only call XModuleFactoryLock.enable
after ensuring that a) the modulestore will be cleaned up, and b) that XModuleFactoryLock.disable
will be called.
"""
def __init__(self):
super(XModuleFactoryLock, self).__init__()
self._enabled = False
def enable(self):
"""
        Enable XModuleFactories. This should only be turned on in a context
where the modulestore will be reset at the end of the test (such
as inside ModuleStoreTestCase).
"""
self._enabled = True
def disable(self):
"""
Disable XModuleFactories. This should be called once the data
from the factory has been cleaned up.
"""
self._enabled = False
def is_enabled(self):
"""
Return whether XModuleFactories are enabled.
"""
return self._enabled
XMODULE_FACTORY_LOCK = XModuleFactoryLock()
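# A hypothetical sketch of the enable/disable pattern described in the
# XModuleFactoryLock docstring (the test-case class below is illustrative,
# not part of this module):
#
#     class MyModulestoreTestCase(TestCase):
#         def setUp(self):
#             super(MyModulestoreTestCase, self).setUp()
#             XMODULE_FACTORY_LOCK.enable()
#             # disable the factories again once the modulestore for this
#             # test has been cleaned up
#             self.addCleanup(XMODULE_FACTORY_LOCK.disable)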
class XModuleFactory(Factory):
"""
Factory for XModules
"""
# We have to give a Factory a FACTORY_FOR.
# However, the class that we create is actually determined by the category
# specified in the factory
FACTORY_FOR = Dummy
@lazy_attribute
def modulestore(self):
msg = "XMODULE_FACTORY_LOCK not enabled. Please use ModuleStoreTestCase as your test baseclass."
assert XMODULE_FACTORY_LOCK.is_enabled(), msg
from xmodule.modulestore.django import modulestore
return modulestore()
last_course = threading.local()
class CourseFactory(XModuleFactory):
"""
Factory for XModule courses.
"""
org = Sequence('org.{}'.format)
number = Sequence('course_{}'.format)
display_name = Sequence('Run {}'.format)
# pylint: disable=unused-argument
@classmethod
def _create(cls, target_class, **kwargs):
# All class attributes (from this class and base classes) are
# passed in via **kwargs. However, some of those aren't actual field values,
# so pop those off for use separately
org = kwargs.pop('org', None)
# because the factory provides a default 'number' arg, prefer the non-defaulted 'course' arg if any
number = kwargs.pop('course', kwargs.pop('number', None))
store = kwargs.pop('modulestore')
name = kwargs.get('name', kwargs.get('run', Location.clean(kwargs.get('display_name'))))
run = kwargs.pop('run', name)
user_id = kwargs.pop('user_id', ModuleStoreEnum.UserID.test)
# Pass the metadata just as field=value pairs
kwargs.update(kwargs.pop('metadata', {}))
default_store_override = kwargs.pop('default_store', None)
with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
if default_store_override is not None:
with store.default_store(default_store_override):
new_course = store.create_course(org, number, run, user_id, fields=kwargs)
else:
new_course = store.create_course(org, number, run, user_id, fields=kwargs)
last_course.loc = new_course.location
return new_course
class LibraryFactory(XModuleFactory):
"""
Factory for creating a content library
"""
org = Sequence('org{}'.format)
library = Sequence('lib{}'.format)
display_name = Sequence('Test Library {}'.format)
# pylint: disable=unused-argument
@classmethod
def _create(cls, target_class, **kwargs):
"""
Create a library with a unique name and key.
All class attributes (from this class and base classes) are automagically
passed in via **kwargs.
"""
        # some of the kwargs aren't actual field values, so pop those off for use separately:
org = kwargs.pop('org')
library = kwargs.pop('library')
store = kwargs.pop('modulestore')
user_id = kwargs.pop('user_id', ModuleStoreEnum.UserID.test)
# Pass the metadata just as field=value pairs
kwargs.update(kwargs.pop('metadata', {}))
default_store_override = kwargs.pop('default_store', ModuleStoreEnum.Type.split)
with store.default_store(default_store_override):
new_library = store.create_library(org, library, user_id, fields=kwargs)
return new_library
class ItemFactory(XModuleFactory):
"""
Factory for XModule items.
"""
category = 'chapter'
parent = None
@lazy_attribute_sequence
def display_name(self, n):
return "{} {}".format(self.category, n)
@lazy_attribute
def location(self):
if self.display_name is None:
dest_name = uuid4().hex
else:
dest_name = self.display_name.replace(" ", "_")
new_location = self.parent_location.course_key.make_usage_key(
self.category,
dest_name
)
return new_location
@lazy_attribute
def parent_location(self):
default_location = getattr(last_course, 'loc', None)
try:
parent = self.parent
# This error is raised if the caller hasn't provided either parent or parent_location
# In this case, we'll just return the default parent_location
except CyclicDefinitionError:
return default_location
if parent is None:
return default_location
return parent.location
@classmethod
def _create(cls, target_class, **kwargs):
"""
Uses ``**kwargs``:
:parent_location: (required): the location of the parent module
(e.g. the parent course or section)
:category: the category of the resulting item.
:data: (optional): the data for the item
(e.g. XML problem definition for a problem item)
:display_name: (optional): the display name of the item
:metadata: (optional): dictionary of metadata attributes
:boilerplate: (optional) the boilerplate for overriding field values
:publish_item: (optional) whether or not to publish the item (default is True)
:target_class: is ignored
"""
# All class attributes (from this class and base classes) are
# passed in via **kwargs. However, some of those aren't actual field values,
# so pop those off for use separately
# catch any old style users before they get into trouble
assert 'template' not in kwargs
parent_location = kwargs.pop('parent_location', None)
data = kwargs.pop('data', None)
category = kwargs.pop('category', None)
display_name = kwargs.pop('display_name', None)
metadata = kwargs.pop('metadata', {})
location = kwargs.pop('location')
user_id = kwargs.pop('user_id', ModuleStoreEnum.UserID.test)
publish_item = kwargs.pop('publish_item', True)
assert isinstance(location, UsageKey)
assert location != parent_location
store = kwargs.pop('modulestore')
# This code was based off that in cms/djangoapps/contentstore/views.py
parent = kwargs.pop('parent', None) or store.get_item(parent_location)
with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
if 'boilerplate' in kwargs:
template_id = kwargs.pop('boilerplate')
clz = XBlock.load_class(category, select=prefer_xmodules)
template = clz.get_template(template_id)
assert template is not None
metadata.update(template.get('metadata', {}))
if not isinstance(data, basestring):
data.update(template.get('data'))
# replace the display name with an optional parameter passed in from the caller
if display_name is not None:
metadata['display_name'] = display_name
module = store.create_child(
user_id,
parent.location,
location.block_type,
block_id=location.block_id,
metadata=metadata,
definition_data=data,
runtime=parent.runtime,
fields=kwargs,
)
# VS[compat] cdodge: This is a hack because static_tabs also have references from the course module, so
# if we add one then we need to also add it to the policy information (i.e. metadata)
# we should remove this once we can break this reference from the course to static tabs
if category == 'static_tab':
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=(
"location:itemfactory_create_static_tab",
u"block:{}".format(location.block_type),
)
)
course = store.get_course(location.course_key)
course.tabs.append(
CourseTab.load('static_tab', name='Static Tab', url_slug=location.name)
)
store.update_item(course, user_id)
# parent and publish the item, so it can be accessed
if 'detached' not in module._class_tags:
parent.children.append(location)
store.update_item(parent, user_id)
if publish_item:
published_parent = store.publish(parent.location, user_id)
# module is last child of parent
return published_parent.get_children()[-1]
else:
return store.get_item(location)
elif publish_item:
return store.publish(location, user_id)
else:
return module
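# A hypothetical usage sketch for the factories above (org/course/display
# names are illustrative; an enabled XMODULE_FACTORY_LOCK and a test
# modulestore are assumed):
#
#     course = CourseFactory.create(org='TestOrg', course='T101', run='2015')
#     chapter = ItemFactory.create(parent_location=course.location,
#                                  category='chapter', display_name='Week 1')
#     problem = ItemFactory.create(parent_location=chapter.location,
#                                  category='problem', display_name='Problem 1')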
@contextmanager
def check_exact_number_of_calls(object_with_method, method_name, num_calls):
"""
Instruments the given method on the given object to verify the number of calls to the
method is exactly equal to 'num_calls'.
"""
with check_number_of_calls(object_with_method, method_name, num_calls, num_calls):
yield
def check_number_of_calls(object_with_method, method_name, maximum_calls, minimum_calls=1):
"""
Instruments the given method on the given object to verify the number of calls to the method is
less than or equal to the expected maximum_calls and greater than or equal to the expected minimum_calls.
"""
return check_sum_of_calls(object_with_method, [method_name], maximum_calls, minimum_calls)
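# A hypothetical usage sketch for the call-counting helpers above (the object
# and method names are illustrative):
#
#     with check_exact_number_of_calls(store, 'get_item', 2):
#         ...  # code expected to call store.get_item exactly twice
#
#     with check_number_of_calls(store, 'get_course', maximum_calls=5):
#         ...  # code expected to call store.get_course between 1 and 5 times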
class StackTraceCounter(object):
"""
A class that counts unique stack traces underneath a particular stack frame.
"""
def __init__(self, stack_depth, include_arguments=True):
"""
Arguments:
stack_depth (int): The number of stack frames above this constructor to capture.
include_arguments (bool): Whether to store the arguments that are passed
when capturing a stack trace.
"""
self.include_arguments = include_arguments
self._top_of_stack = traceback.extract_stack(limit=stack_depth)[0]
if self.include_arguments:
self._stacks = defaultdict(lambda: defaultdict(int))
else:
self._stacks = defaultdict(int)
def capture_stack(self, args, kwargs):
"""
Record the stack frames starting at the caller of this method, and
ending at the top of the stack as defined by the ``stack_depth``.
Arguments:
args: The positional arguments to capture at this stack frame
kwargs: The keyword arguments to capture at this stack frame
"""
# pylint: disable=broad-except
stack = traceback.extract_stack()[:-2]
if self._top_of_stack in stack:
stack = stack[stack.index(self._top_of_stack):]
if self.include_arguments:
safe_args = []
for arg in args:
try:
safe_args.append(repr(arg))
except Exception as exc:
safe_args.append('<un-repr-able value: {}'.format(exc))
safe_kwargs = {}
for key, kwarg in kwargs.items():
try:
safe_kwargs[key] = repr(kwarg)
except Exception as exc:
safe_kwargs[key] = '<un-repr-able value: {}'.format(exc)
self._stacks[tuple(stack)][tuple(safe_args), tuple(safe_kwargs.items())] += 1
else:
self._stacks[tuple(stack)] += 1
@property
def total_calls(self):
"""
Return the total number of stacks recorded.
"""
return sum(self.stack_calls(stack) for stack in self._stacks)
def stack_calls(self, stack):
"""
Return the number of calls to the supplied ``stack``.
"""
if self.include_arguments:
return sum(self._stacks[stack].values())
else:
return self._stacks[stack]
def __iter__(self):
"""
Iterate over all unique captured stacks.
"""
return iter(sorted(self._stacks.keys(), key=lambda stack: (self.stack_calls(stack), stack), reverse=True))
def __getitem__(self, stack):
"""
Return the set of captured calls with the supplied stack.
"""
return self._stacks[stack]
@classmethod
def capture_call(cls, func, stack_depth, include_arguments=True):
"""
A decorator that wraps ``func``, and captures each call to ``func``,
recording the stack trace, and optionally the arguments that the function
is called with.
Arguments:
func: the function to wrap
stack_depth: how far up the stack to truncate the stored stack traces (
this is counted from the call to ``capture_call``, rather than calls
to the captured function).
"""
stacks = StackTraceCounter(stack_depth, include_arguments)
# pylint: disable=missing-docstring
@functools.wraps(func)
def capture(*args, **kwargs):
stacks.capture_stack(args, kwargs)
return func(*args, **kwargs)
capture.stack_counter = stacks
return capture
@contextmanager
def check_sum_of_calls(object_, methods, maximum_calls, minimum_calls=1, include_arguments=True):
"""
Instruments the given methods on the given object to verify that the total sum of calls made to the
    methods falls between minimum_calls and maximum_calls.
"""
mocks = {
method: StackTraceCounter.capture_call(
getattr(object_, method),
stack_depth=7,
include_arguments=include_arguments
)
for method in methods
}
with patch.multiple(object_, **mocks):
yield
call_count = sum(capture_fn.stack_counter.total_calls for capture_fn in mocks.values())
# Assertion errors don't handle multi-line values, so pretty-print to std-out instead
if not minimum_calls <= call_count <= maximum_calls:
messages = ["Expected between {} and {} calls, {} were made.\n\n".format(
minimum_calls,
maximum_calls,
call_count,
)]
for method_name, capture_fn in mocks.items():
stack_counter = capture_fn.stack_counter
messages.append("{!r} was called {} times:\n".format(
method_name,
stack_counter.total_calls
))
for stack in stack_counter:
messages.append(" called {} times:\n\n".format(stack_counter.stack_calls(stack)))
messages.append(" " + " ".join(traceback.format_list(stack)))
messages.append("\n\n")
if include_arguments:
for (args, kwargs), count in stack_counter[stack].items():
messages.append(" called {} times with:\n".format(count))
messages.append(" args: {}\n".format(args))
messages.append(" kwargs: {}\n\n".format(dict(kwargs)))
print "".join(messages)
# verify the counter actually worked by ensuring we have counted greater than (or equal to) the minimum calls
assert_greater_equal(call_count, minimum_calls)
# now verify the number of actual calls is less than (or equal to) the expected maximum
assert_less_equal(call_count, maximum_calls)
def mongo_uses_error_check(store):
"""
Does mongo use the error check as a separate message?
"""
if hasattr(store, 'mongo_wire_version'):
return store.mongo_wire_version() <= 1
if hasattr(store, 'modulestores'):
return any([mongo_uses_error_check(substore) for substore in store.modulestores])
return False
@contextmanager
def check_mongo_calls_range(max_finds=float("inf"), min_finds=0, max_sends=None, min_sends=None):
"""
Instruments the given store to count the number of calls to find (incl find_one) and the number
of calls to send_message which is for insert, update, and remove (if you provide num_sends). At the
end of the with statement, it compares the counts to the bounds provided in the arguments.
:param max_finds: the maximum number of find calls expected
:param min_finds: the minimum number of find calls expected
:param max_sends: If non-none, make sure number of send calls are <=max_sends
:param min_sends: If non-none, make sure number of send calls are >=min_sends
"""
with check_sum_of_calls(
pymongo.message,
['query', 'get_more'],
max_finds,
min_finds,
):
if max_sends is not None or min_sends is not None:
with check_sum_of_calls(
pymongo.message,
# mongo < 2.6 uses insert, update, delete and _do_batched_insert. >= 2.6 _do_batched_write
['insert', 'update', 'delete', '_do_batched_write_command', '_do_batched_insert', ],
max_sends if max_sends is not None else float("inf"),
min_sends if min_sends is not None else 0,
):
yield
else:
yield
@contextmanager
def check_mongo_calls(num_finds=0, num_sends=None):
"""
Instruments the given store to count the number of calls to find (incl find_one) and the number
of calls to send_message which is for insert, update, and remove (if you provide num_sends). At the
end of the with statement, it compares the counts to the num_finds and num_sends.
:param num_finds: the exact number of find calls expected
:param num_sends: If none, don't instrument the send calls. If non-none, count and compare to
the given int value.
"""
with check_mongo_calls_range(num_finds, num_finds, num_sends, num_sends):
yield
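# A hypothetical usage sketch for the mongo call-count helpers above (the
# counts and the code under test are illustrative):
#
#     with check_mongo_calls(num_finds=3, num_sends=2):
#         ...  # code expected to issue 3 queries and 2 inserts/updates/removes
#
#     with check_mongo_calls_range(max_finds=10, min_finds=1):
#         ...  # code whose query count may vary within known bounds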
# This dict represents the attribute keys for a course's 'about' info.
# Note: The 'video' attribute is intentionally excluded as it must be
# handled separately; its value maps to an alternate key name.
# Reference : cms/djangoapps/models/settings/course_details.py
ABOUT_ATTRIBUTES = {
'effort': "Testing effort",
}
class CourseAboutFactory(XModuleFactory):
"""
Factory for XModule course about.
"""
@classmethod
def _create(cls, target_class, **kwargs): # pylint: disable=unused-argument
"""
Uses **kwargs:
            effort: effort information
            video: video link
"""
user_id = kwargs.pop('user_id', None)
course_id, course_runtime = kwargs.pop("course_id"), kwargs.pop("course_runtime")
store = kwargs.pop('modulestore')
for about_key in ABOUT_ATTRIBUTES:
about_item = store.create_xblock(course_runtime, course_id, 'about', about_key)
about_item.data = ABOUT_ATTRIBUTES[about_key]
store.update_item(about_item, user_id, allow_not_found=True)
about_item = store.create_xblock(course_runtime, course_id, 'about', 'video')
about_item.data = "www.youtube.com/embed/testing-video-link"
store.update_item(about_item, user_id, allow_not_found=True)
|
agpl-3.0
|
XiaodunServerGroup/medicalmooc
|
common/lib/capa/capa/tests/__init__.py
|
8
|
1737
|
"""Tools for helping with testing capa."""
import gettext
import os
import os.path
import fs.osfs
from capa.capa_problem import LoncapaProblem, LoncapaSystem
from mock import Mock, MagicMock
import xml.sax.saxutils as saxutils
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
def tst_render_template(template, context):
"""
A test version of render to template. Renders to the repr of the context, completely ignoring
the template name. To make the output valid xml, quotes the content, and wraps it in a <div>
"""
return '<div>{0}</div>'.format(saxutils.escape(repr(context)))
def calledback_url(dispatch='score_update'):
return dispatch
xqueue_interface = MagicMock()
xqueue_interface.send_to_queue.return_value = (0, 'Success!')
def test_capa_system():
"""
Construct a mock LoncapaSystem instance.
"""
the_system = Mock(
spec=LoncapaSystem,
ajax_url='/dummy-ajax-url',
anonymous_student_id='student',
cache=None,
can_execute_unsafe_code=lambda: False,
DEBUG=True,
filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
i18n=gettext.NullTranslations(),
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
render_template=tst_render_template,
seed=0,
STATIC_URL='/dummy-static/',
xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10},
)
return the_system
def new_loncapa_problem(xml, capa_system=None):
"""Construct a `LoncapaProblem` suitable for unit tests."""
return LoncapaProblem(xml, id='1', seed=723, capa_system=capa_system or test_capa_system())
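# A hypothetical usage sketch for the helpers above (the XML snippet is
# illustrative):
#
#     xml = "<problem><p>What is 1 + 1?</p></problem>"
#     problem = new_loncapa_problem(xml)
#     html = problem.get_html()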
|
agpl-3.0
|
zsulocal/pycoin
|
pycoin/tx/script/check_signature.py
|
1
|
11210
|
# -*- coding: utf-8 -*-
"""
Parse, stream, create, sign and verify Bitcoin transactions as Tx structures.
The MIT License (MIT)
Copyright (c) 2015 by Richard Kiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from ... import ecdsa
from ...encoding import sec_to_public_pair, EncodingError
from ...intbytes import byte2int, indexbytes, iterbytes
from . import der
from . import ScriptError
from . import errno
from .flags import (
VERIFY_NULLDUMMY, VERIFY_NULLFAIL, VERIFY_STRICTENC, VERIFY_MINIMALDATA,
VERIFY_DERSIG, VERIFY_LOW_S, VERIFY_WITNESS_PUBKEYTYPE
)
from .microcode import VCH_TRUE, VCH_FALSE
from .tools import bin_script, delete_subscript, int_from_script_bytes
def _check_valid_signature_1(sig):
ls = len(sig)
if ls < 9 or ls > 73:
raise ScriptError("bad signature size", errno.SIG_DER)
if sig[0] != 0x30:
raise ScriptError("bad signature byte 0", errno.SIG_DER)
if sig[1] != ls - 3:
raise ScriptError("signature size wrong", errno.SIG_DER)
r_len = sig[3]
if 5 + r_len >= ls:
raise ScriptError("r length exceed signature size", errno.SIG_DER)
def _check_valid_signature_2(sig):
ls = len(sig)
r_len = sig[3]
s_len = sig[5 + r_len]
if r_len + s_len + 7 != ls:
raise ScriptError("r and s size exceed signature size", errno.SIG_DER)
if sig[2] != 2:
raise ScriptError("R value region does not start with 0x02", errno.SIG_DER)
if r_len == 0:
raise ScriptError("zero-length R value", errno.SIG_DER)
if sig[4] & 0x80:
raise ScriptError("sig R value not allowed to be negative", errno.SIG_DER)
if r_len > 1 and sig[4] == 0 and not (sig[5] & 0x80):
raise ScriptError(
"R value can't have leading 0 byte unless doing so would make it negative", errno.SIG_DER)
if sig[r_len + 4] != 2:
raise ScriptError("S value region does not start with 0x02", errno.SIG_DER)
if s_len == 0:
raise ScriptError("zero-length S value", errno.SIG_DER)
if sig[r_len + 6] & 0x80:
raise ScriptError("negative S values not allowed", errno.SIG_DER)
if s_len > 1 and sig[r_len + 6] == 0 and not (sig[r_len + 7] & 0x80):
raise ScriptError(
"S value can't have leading 0 byte unless doing so would make it negative", errno.SIG_DER)
def check_valid_signature(sig):
# ported from bitcoind src/script/interpreter.cpp IsValidSignatureEncoding
sig = [s for s in iterbytes(sig)]
_check_valid_signature_1(sig)
_check_valid_signature_2(sig)
def check_low_der_signature(sig_pair):
# IsLowDERSignature
r, s = sig_pair
hi_s = ecdsa.generator_secp256k1.curve().p() - s
if hi_s < s:
raise ScriptError("signature has high S value", errno.SIG_HIGH_S)
def check_defined_hashtype_signature(sig):
# IsDefinedHashtypeSignature
from pycoin.tx.Tx import SIGHASH_ALL, SIGHASH_SINGLE, SIGHASH_ANYONECANPAY
if len(sig) == 0:
raise ScriptError("signature is length 0")
hash_type = indexbytes(sig, -1) & (~SIGHASH_ANYONECANPAY)
if hash_type < SIGHASH_ALL or hash_type > SIGHASH_SINGLE:
raise ScriptError("bad hash type after signature", errno.SIG_HASHTYPE)
def parse_signature_blob(sig_blob, flags=0):
if len(sig_blob) == 0:
raise ValueError("empty sig_blob")
if flags & (VERIFY_DERSIG | VERIFY_LOW_S | VERIFY_STRICTENC):
check_valid_signature(sig_blob)
if flags & VERIFY_STRICTENC:
check_defined_hashtype_signature(sig_blob)
sig_pair = der.sigdecode_der(sig_blob[:-1], use_broken_open_ssl_mechanism=True)
signature_type = ord(sig_blob[-1:])
if flags & VERIFY_LOW_S:
check_low_der_signature(sig_pair)
return sig_pair, signature_type
def check_public_key_encoding(blob):
lb = len(blob)
if lb >= 33:
fb = byte2int(blob)
if fb == 4:
if lb == 65:
return
elif fb in (2, 3):
if lb == 33:
return
raise ScriptError("invalid public key blob", errno.PUBKEYTYPE)
def op_checksig(stack, signature_for_hash_type_f, expected_hash_type, tmp_script, flags):
try:
pair_blob = stack.pop()
sig_blob = stack.pop()
verify_strict = not not (flags & VERIFY_STRICTENC)
# if verify_strict flag is set, we fail the script immediately on bad encoding
if verify_strict:
check_public_key_encoding(pair_blob)
if flags & VERIFY_WITNESS_PUBKEYTYPE:
if byte2int(pair_blob) not in (2, 3) or len(pair_blob) != 33:
raise ScriptError("uncompressed key in witness", errno.WITNESS_PUBKEYTYPE)
sig_pair, signature_type = parse_signature_blob(sig_blob, flags)
public_pair = sec_to_public_pair(pair_blob, strict=verify_strict)
except (der.UnexpectedDER, ValueError, EncodingError):
stack.append(VCH_FALSE)
return
if expected_hash_type not in (None, signature_type):
raise ScriptError("wrong hash type")
# Drop the signature, since there's no way for a signature to sign itself
# see: Bitcoin Core/script/interpreter.cpp::EvalScript()
if not getattr(signature_for_hash_type_f, "skip_delete", False):
tmp_script = delete_subscript(tmp_script, bin_script([sig_blob]))
signature_hash = signature_for_hash_type_f(signature_type, script=tmp_script)
if ecdsa.verify(ecdsa.generator_secp256k1, public_pair, signature_hash, sig_pair):
stack.append(VCH_TRUE)
else:
if flags & VERIFY_NULLFAIL:
if len(sig_blob) > 0:
raise ScriptError("bad signature not NULL", errno.NULLFAIL)
stack.append(VCH_FALSE)
def sig_blob_matches(sig_blobs, public_pair_blobs, tmp_script, signature_for_hash_type_f,
flags, exit_early=False):
"""
sig_blobs: signature blobs
public_pair_blobs: a list of public pair blobs
tmp_script: the script as of the last code separator
signature_for_hash_type_f: signature_for_hash_type_f
flags: verification flags to apply
exit_early: if True, we may exit early if one of the sig_blobs is incorrect or misplaced. Used
for checking a supposedly validated transaction. A -1 indicates no match.
Returns a list of indices into public_pairs. If exit_early is True, it may return early.
If sig_blob_indices isn't long enough or contains a -1, the signature is not valid.
"""
strict_encoding = not not (flags & VERIFY_STRICTENC)
# Drop the signatures, since there's no way for a signature to sign itself
if not getattr(signature_for_hash_type_f, "skip_delete", False):
for sig_blob in sig_blobs:
tmp_script = delete_subscript(tmp_script, bin_script([sig_blob]))
sig_cache = {}
sig_blob_indices = []
ppb_idx = -1
while sig_blobs and len(sig_blobs) <= len(public_pair_blobs):
if exit_early and -1 in sig_blob_indices:
break
sig_blob, sig_blobs = sig_blobs[0], sig_blobs[1:]
try:
sig_pair, signature_type = parse_signature_blob(sig_blob, flags)
except (der.UnexpectedDER, ValueError):
sig_blob_indices.append(-1)
continue
if signature_type not in sig_cache:
sig_cache[signature_type] = signature_for_hash_type_f(signature_type, script=tmp_script)
try:
ppp = ecdsa.possible_public_pairs_for_signature(
ecdsa.generator_secp256k1, sig_cache[signature_type], sig_pair)
except ecdsa.NoSuchPointError:
ppp = []
while len(sig_blobs) < len(public_pair_blobs):
public_pair_blob, public_pair_blobs = public_pair_blobs[0], public_pair_blobs[1:]
ppb_idx += 1
if strict_encoding:
check_public_key_encoding(public_pair_blob)
if flags & VERIFY_WITNESS_PUBKEYTYPE:
if byte2int(public_pair_blob) not in (2, 3) or len(public_pair_blob) != 33:
raise ScriptError("uncompressed key in witness", errno.WITNESS_PUBKEYTYPE)
try:
public_pair = sec_to_public_pair(public_pair_blob, strict=strict_encoding)
except EncodingError:
public_pair = None
if public_pair in ppp:
sig_blob_indices.append(ppb_idx)
break
else:
sig_blob_indices.append(-1)
return sig_blob_indices
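# A worked illustration of the return value described above (values are
# hypothetical): with public keys [K0, K1, K2] and signatures [S0, S1],
# a result of [0, 2] means S0 matched K0 and S1 matched K2 (indices are
# strictly increasing, so the multisig ordering check passes), whereas
# [1, -1] means the second signature matched no remaining key and the
# overall check must fail.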
def op_checkmultisig(stack, signature_for_hash_type_f, expected_hash_type, tmp_script, flags):
require_minimal = flags & VERIFY_MINIMALDATA
key_count = int_from_script_bytes(stack.pop(), require_minimal=require_minimal)
if key_count < 0 or key_count > 20:
raise ScriptError("key_count not in range 0 to 20", errno.PUBKEY_COUNT)
public_pair_blobs = [stack.pop() for _ in range(key_count)]
signature_count = int_from_script_bytes(stack.pop(), require_minimal=require_minimal)
if signature_count < 0 or signature_count > key_count:
raise ScriptError(
"invalid number of signatures: %d for %d keys" % (signature_count, key_count), errno.SIG_COUNT)
sig_blobs = [stack.pop() for _ in range(signature_count)]
# check that we have the required hack 00 byte
hack_byte = stack.pop()
if flags & VERIFY_NULLDUMMY and hack_byte != b'':
raise ScriptError("bad dummy byte in checkmultisig", errno.SIG_NULLDUMMY)
sig_blob_indices = sig_blob_matches(
sig_blobs, public_pair_blobs, tmp_script, signature_for_hash_type_f, flags, exit_early=True)
sig_ok = VCH_FALSE
if -1 not in sig_blob_indices and len(sig_blob_indices) == len(sig_blobs):
# bitcoin requires the signatures to be in the same order as the public keys
# so let's make sure the indices are strictly increasing
for i in range(len(sig_blob_indices) - 1):
if sig_blob_indices[i] >= sig_blob_indices[i+1]:
break
else:
sig_ok = VCH_TRUE
if not sig_ok and flags & VERIFY_NULLFAIL:
for sig_blob in sig_blobs:
if len(sig_blob) > 0:
raise ScriptError("bad signature not NULL", errno.NULLFAIL)
stack.append(sig_ok)
return key_count
|
mit
|
mattilyra/scikit-learn
|
sklearn/cluster/affinity_propagation_.py
|
60
|
10688
|
"""Affinity Propagation clustering algorithm."""
# Author: Alexandre Gramfort [email protected]
# Gael Varoquaux [email protected]
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import as_float_array, check_array
from ..utils.validation import check_is_fitted
from ..metrics import euclidean_distances
from ..metrics import pairwise_distances_argmin
def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,
damping=0.5, copy=True, verbose=False,
return_n_iter=False):
"""Perform Affinity Propagation Clustering of data
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
S : array-like, shape (n_samples, n_samples)
Matrix of similarities between points
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
number of clusters). For a smaller amount of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
copy : boolean, optional, default: True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency
verbose : boolean, optional, default: False
The verbosity level
return_n_iter : bool, default False
Whether or not to return the number of iterations.
Returns
-------
cluster_centers_indices : array, shape (n_clusters,)
index of clusters centers
labels : array, shape (n_samples,)
cluster labels for each point
n_iter : int
number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
if preference is None:
preference = np.median(S)
if damping < 0.5 or damping >= 1:
raise ValueError('damping must be >= 0.5 and < 1')
random_state = np.random.RandomState(0)
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = preference
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
# Intermediate results
tmp = np.zeros((n_samples, n_samples))
# Remove degeneracies
S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
for it in range(max_iter):
# tmp = A + S; compute responsibilities
np.add(A, S, tmp)
I = np.argmax(tmp, axis=1)
Y = tmp[ind, I] # np.max(A + S, axis=1)
tmp[ind, I] = -np.inf
Y2 = np.max(tmp, axis=1)
# tmp = Rnew
np.subtract(S, Y[:, None], tmp)
tmp[ind, I] = S[ind, I] - Y2
# Damping
tmp *= 1 - damping
R *= damping
R += tmp
# tmp = Rp; compute availabilities
np.maximum(R, 0, tmp)
tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
# tmp = -Anew
tmp -= np.sum(tmp, axis=0)
dA = np.diag(tmp).copy()
tmp.clip(0, np.inf, tmp)
tmp.flat[::n_samples + 1] = dA
# Damping
tmp *= 1 - damping
A *= damping
A -= tmp
# Check for convergence
E = (np.diag(A) + np.diag(R)) > 0
e[:, it % convergence_iter] = E
K = np.sum(E, axis=0)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
if (not unconverged and (K > 0)) or (it == max_iter):
if verbose:
print("Converged after %d iterations." % it)
break
else:
if verbose:
print("Did not converge")
I = np.where(np.diag(A + R) > 0)[0]
K = I.size # Identify exemplars
if K > 0:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
labels = np.empty((n_samples, 1))
cluster_centers_indices = None
labels.fill(np.nan)
if return_n_iter:
return cluster_centers_indices, labels, it + 1
else:
return cluster_centers_indices, labels
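# A minimal usage sketch for the function above (data values are illustrative):
#
#     X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]])
#     S = -euclidean_distances(X, squared=True)
#     cluster_centers_indices, labels = affinity_propagation(S, preference=-50)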
###############################################################################
class AffinityPropagation(BaseEstimator, ClusterMixin):
"""Perform Affinity Propagation Clustering of data.
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations.
copy : boolean, optional, default: True
Make a copy of input data.
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number
of exemplars, ie of clusters, is influenced by the input
preferences value. If the preferences are not passed as arguments,
they will be set to the median of the input similarities.
affinity : string, optional, default=``euclidean``
Which affinity to use. At the moment ``precomputed`` and
``euclidean`` are supported. ``euclidean`` uses the
negative squared euclidean distance between points.
verbose : boolean, optional, default: False
Whether to be verbose.
Attributes
----------
cluster_centers_indices_ : array, shape (n_clusters,)
Indices of cluster centers
cluster_centers_ : array, shape (n_clusters, n_features)
Cluster centers (if affinity != ``precomputed``).
labels_ : array, shape (n_samples,)
Labels of each point
affinity_matrix_ : array, shape (n_samples, n_samples)
Stores the affinity matrix used in ``fit``.
n_iter_ : int
Number of iterations taken to converge.
Notes
-----
See examples/cluster/plot_affinity_propagation.py for an example.
The algorithmic complexity of affinity propagation is quadratic
in the number of points.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
def __init__(self, damping=.5, max_iter=200, convergence_iter=15,
copy=True, preference=None, affinity='euclidean',
verbose=False):
self.damping = damping
self.max_iter = max_iter
self.convergence_iter = convergence_iter
self.copy = copy
self.verbose = verbose
self.preference = preference
self.affinity = affinity
@property
def _pairwise(self):
return self.affinity == "precomputed"
def fit(self, X, y=None):
""" Create affinity matrix from negative euclidean distances, then
apply affinity propagation clustering.
Parameters
----------
X: array-like, shape (n_samples, n_features) or (n_samples, n_samples)
Data matrix or, if affinity is ``precomputed``, matrix of
similarities / affinities.
"""
X = check_array(X, accept_sparse='csr')
if self.affinity == "precomputed":
self.affinity_matrix_ = X
elif self.affinity == "euclidean":
self.affinity_matrix_ = -euclidean_distances(X, squared=True)
else:
raise ValueError("Affinity must be 'precomputed' or "
"'euclidean'. Got %s instead"
% str(self.affinity))
self.cluster_centers_indices_, self.labels_, self.n_iter_ = \
affinity_propagation(
self.affinity_matrix_, self.preference, max_iter=self.max_iter,
convergence_iter=self.convergence_iter, damping=self.damping,
copy=self.copy, verbose=self.verbose, return_n_iter=True)
if self.affinity != "precomputed":
self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : array, shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_indices_")
if not hasattr(self, "cluster_centers_"):
raise ValueError("Predict method is not supported when "
"affinity='precomputed'.")
return pairwise_distances_argmin(X, self.cluster_centers_)
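# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original scikit-learn source): a
# minimal example of driving the estimator defined above. The toy data, the
# ``preference=-50`` value and the ``_demo_affinity_propagation`` name are
# assumptions chosen purely for illustration.
def _demo_affinity_propagation():
    import numpy as np
    # Two well-separated groups of three points each.
    X = np.array([[1., 2.], [1., 4.], [1., 0.],
                  [4., 2.], [4., 4.], [4., 0.]])
    af = AffinityPropagation(preference=-50).fit(X)
    # Indices of the exemplar rows, one cluster label per sample, and the
    # assignment of previously unseen points to the learned exemplars.
    print(af.cluster_centers_indices_)
    print(af.labels_)
    print(af.predict(np.array([[0., 0.], [4., 4.]])))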
|
bsd-3-clause
|
badp/ganeti
|
test/py/cmdlib/testsupport/__init__.py
|
2
|
1758
|
#
#
# Copyright (C) 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Support classes and functions for testing the cmdlib module.
"""
from cmdlib.testsupport.cmdlib_testcase import CmdlibTestCase, \
withLockedLU
from cmdlib.testsupport.config_mock import ConfigMock
from cmdlib.testsupport.iallocator_mock import patchIAllocator
from cmdlib.testsupport.utils_mock import patchUtils
from cmdlib.testsupport.lock_manager_mock import LockManagerMock
from cmdlib.testsupport.netutils_mock import patchNetutils, HostnameMock
from cmdlib.testsupport.processor_mock import ProcessorMock
from cmdlib.testsupport.rpc_runner_mock import CreateRpcRunnerMock, \
RpcResultsBuilder
from cmdlib.testsupport.ssh_mock import patchSsh
__all__ = ["CmdlibTestCase",
"withLockedLU",
"ConfigMock",
"CreateRpcRunnerMock",
"HostnameMock",
"patchIAllocator",
"patchUtils",
"patchNetutils",
"patchSsh",
"LockManagerMock",
"ProcessorMock",
"RpcResultsBuilder",
]
|
gpl-2.0
|
OlexandrI/pyside
|
paste/lint.py
|
1
|
15002
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Also licenced under the Apache License, 2.0: http://opensource.org/licenses/apache2.0.php
# Licensed to PSF under a Contributor Agreement
"""
Middleware to check for obedience to the WSGI specification.
Some of the things this checks:
* Signature of the application and start_response (including that
keyword arguments are not used).
* Environment checks:
- Environment is a dictionary (and not a subclass).
- That all the required keys are in the environment: REQUEST_METHOD,
SERVER_NAME, SERVER_PORT, wsgi.version, wsgi.input, wsgi.errors,
wsgi.multithread, wsgi.multiprocess, wsgi.run_once
- That HTTP_CONTENT_TYPE and HTTP_CONTENT_LENGTH are not in the
environment (these headers should appear as CONTENT_LENGTH and
CONTENT_TYPE).
- Warns if QUERY_STRING is missing, as the cgi module acts
unpredictably in that case.
- That CGI-style variables (that don't contain a .) have
(non-unicode) string values
- That wsgi.version is a tuple
- That wsgi.url_scheme is 'http' or 'https' (@@: is this too
restrictive?)
- Warns if the REQUEST_METHOD is not known (@@: probably too
restrictive).
- That SCRIPT_NAME and PATH_INFO are empty or start with /
- That at least one of SCRIPT_NAME or PATH_INFO are set.
- That CONTENT_LENGTH is a positive integer.
- That SCRIPT_NAME is not '/' (it should be '', and PATH_INFO should
be '/').
- That wsgi.input has the methods read, readline, readlines, and
__iter__
- That wsgi.errors has the methods flush, write, writelines
* The status is a string, contains a space, starts with an integer,
and that integer is in range (> 100).
* That the headers is a list (not a subclass, not another kind of
sequence).
* That the items of the headers are tuples of strings.
* That there is no 'status' header (that is used in CGI, but not in
WSGI).
* That header names don't contain newlines or colons, don't end in '_' or
  '-', and that header values don't contain character codes below 037.
* That Content-Type is given if there is content (CGI often has a
default content type, but WSGI does not).
* That no Content-Type is given when there is no content (@@: is this
too restrictive?)
* That the exc_info argument to start_response is a tuple or None.
* That all calls to the writer are with strings, and no other methods
on the writer are accessed.
* That wsgi.input is used properly:
- .read() is called with zero or one argument
- That it returns a string
- That readline, readlines, and __iter__ return strings
- That .close() is not called
- No other methods are provided
* That wsgi.errors is used properly:
- .write() and .writelines() is called with a string
- That .close() is not called, and no other methods are provided.
* The response iterator:
- That it is not a string (it should be a list of a single string; a
string will work, but perform horribly).
- That .next() returns a string
- That the iterator is not iterated over until start_response has
been called (that can signal either a server or application
error).
- That .close() is called (doesn't raise exception, only prints to
sys.stderr, because we only know it isn't called when the object
is garbage collected).
"""
import re
import sys
from types import DictType, StringType, TupleType, ListType
import warnings
header_re = re.compile(r'^[a-zA-Z][a-zA-Z0-9\-_]*$')
bad_header_value_re = re.compile(r'[\000-\037]')
class WSGIWarning(Warning):
"""
Raised in response to WSGI-spec-related warnings
"""
def middleware(application, global_conf=None):
"""
When applied between a WSGI server and a WSGI application, this
middleware will check for WSGI compliancy on a number of levels.
This middleware does not modify the request or response in any
way, but will throw an AssertionError if anything seems off
(except for a failure to close the application iterator, which
will be printed to stderr -- there's no way to throw an exception
at that point).
"""
def lint_app(*args, **kw):
assert len(args) == 2, "Two arguments required"
assert not kw, "No keyword arguments allowed"
environ, start_response = args
check_environ(environ)
# We use this to check if the application returns without
# calling start_response:
start_response_started = []
def start_response_wrapper(*args, **kw):
assert len(args) == 2 or len(args) == 3, (
"Invalid number of arguments: %s" % args)
assert not kw, "No keyword arguments allowed"
status = args[0]
headers = args[1]
if len(args) == 3:
exc_info = args[2]
else:
exc_info = None
check_status(status)
check_headers(headers)
check_content_type(status, headers)
check_exc_info(exc_info)
start_response_started.append(None)
return WriteWrapper(start_response(*args))
environ['wsgi.input'] = InputWrapper(environ['wsgi.input'])
environ['wsgi.errors'] = ErrorWrapper(environ['wsgi.errors'])
iterator = application(environ, start_response_wrapper)
assert iterator is not None and iterator != False, (
"The application must return an iterator, if only an empty list")
check_iterator(iterator)
return IteratorWrapper(iterator, start_response_started)
return lint_app
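# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original paste source): wrapping a
# trivial WSGI application with the validator defined above. The
# ``_example_app`` / ``_example_validated_app`` names and the response body
# are assumptions chosen purely for illustration.
def _example_app(environ, start_response):
    body = "Hello, WSGI"
    start_response("200 OK", [("Content-Type", "text/plain"),
                              ("Content-Length", str(len(body)))])
    return [body]

# Requests passed through this callable are checked for the spec violations
# listed in the module docstring; any violation raises an AssertionError.
_example_validated_app = middleware(_example_app)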
class InputWrapper(object):
def __init__(self, wsgi_input):
self.input = wsgi_input
def read(self, *args):
assert len(args) <= 1
v = self.input.read(*args)
assert type(v) is type("")
return v
def readline(self, *args):
v = self.input.readline(*args)
assert type(v) is type("")
return v
def readlines(self, *args):
assert len(args) <= 1
lines = self.input.readlines(*args)
assert type(lines) is type([])
for line in lines:
assert type(line) is type("")
return lines
def __iter__(self):
while 1:
line = self.readline()
if not line:
return
yield line
def close(self):
assert 0, "input.close() must not be called"
class ErrorWrapper(object):
def __init__(self, wsgi_errors):
self.errors = wsgi_errors
def write(self, s):
assert type(s) is type("")
self.errors.write(s)
def flush(self):
self.errors.flush()
def writelines(self, seq):
for line in seq:
self.write(line)
def close(self):
assert 0, "errors.close() must not be called"
class WriteWrapper(object):
def __init__(self, wsgi_writer):
self.writer = wsgi_writer
def __call__(self, s):
assert type(s) is type("")
self.writer(s)
class PartialIteratorWrapper(object):
def __init__(self, wsgi_iterator):
self.iterator = wsgi_iterator
def __iter__(self):
# We want to make sure __iter__ is called
return IteratorWrapper(self.iterator)
class IteratorWrapper(object):
def __init__(self, wsgi_iterator, check_start_response):
self.original_iterator = wsgi_iterator
self.iterator = iter(wsgi_iterator)
self.closed = False
self.check_start_response = check_start_response
def __iter__(self):
return self
def __next__(self):
assert not self.closed, (
"Iterator read after closed")
v = next(self.iterator)
if self.check_start_response is not None:
assert self.check_start_response, (
"The application returns and we started iterating over its body, but start_response has not yet been called")
self.check_start_response = None
        return v
    next = __next__  # Python 2 compatibility: py2 iteration looks up .next()
def close(self):
self.closed = True
if hasattr(self.original_iterator, 'close'):
self.original_iterator.close()
def __del__(self):
if not self.closed:
sys.stderr.write(
"Iterator garbage collected without being closed")
assert self.closed, (
"Iterator garbage collected without being closed")
def check_environ(environ):
assert type(environ) is DictType, (
"Environment is not of the right type: %r (environment: %r)"
% (type(environ), environ))
for key in ['REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
'wsgi.version', 'wsgi.input', 'wsgi.errors',
'wsgi.multithread', 'wsgi.multiprocess',
'wsgi.run_once']:
assert key in environ, (
"Environment missing required key: %r" % key)
for key in ['HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH']:
assert key not in environ, (
"Environment should not have the key: %s "
"(use %s instead)" % (key, key[5:]))
if 'QUERY_STRING' not in environ:
warnings.warn(
'QUERY_STRING is not in the WSGI environment; the cgi '
'module will use sys.argv when this variable is missing, '
'so application errors are more likely',
WSGIWarning)
for key in list(environ.keys()):
if '.' in key:
# Extension, we don't care about its type
continue
assert type(environ[key]) is StringType, (
"Environmental variable %s is not a string: %r (value: %r)"
% (key, type(environ[key]), environ[key]))
assert type(environ['wsgi.version']) is TupleType, (
"wsgi.version should be a tuple (%r)" % environ['wsgi.version'])
assert environ['wsgi.url_scheme'] in ('http', 'https'), (
"wsgi.url_scheme unknown: %r" % environ['wsgi.url_scheme'])
check_input(environ['wsgi.input'])
check_errors(environ['wsgi.errors'])
# @@: these need filling out:
if environ['REQUEST_METHOD'] not in (
'GET', 'HEAD', 'POST', 'OPTIONS','PUT','DELETE','TRACE'):
warnings.warn(
"Unknown REQUEST_METHOD: %r" % environ['REQUEST_METHOD'],
WSGIWarning)
assert (not environ.get('SCRIPT_NAME')
or environ['SCRIPT_NAME'].startswith('/')), (
"SCRIPT_NAME doesn't start with /: %r" % environ['SCRIPT_NAME'])
assert (not environ.get('PATH_INFO')
or environ['PATH_INFO'].startswith('/')), (
"PATH_INFO doesn't start with /: %r" % environ['PATH_INFO'])
if environ.get('CONTENT_LENGTH'):
assert int(environ['CONTENT_LENGTH']) >= 0, (
"Invalid CONTENT_LENGTH: %r" % environ['CONTENT_LENGTH'])
if not environ.get('SCRIPT_NAME'):
assert 'PATH_INFO' in environ, (
"One of SCRIPT_NAME or PATH_INFO are required (PATH_INFO "
"should at least be '/' if SCRIPT_NAME is empty)")
assert environ.get('SCRIPT_NAME') != '/', (
"SCRIPT_NAME cannot be '/'; it should instead be '', and "
"PATH_INFO should be '/'")
def check_input(wsgi_input):
for attr in ['read', 'readline', 'readlines', '__iter__']:
assert hasattr(wsgi_input, attr), (
"wsgi.input (%r) doesn't have the attribute %s"
% (wsgi_input, attr))
def check_errors(wsgi_errors):
for attr in ['flush', 'write', 'writelines']:
assert hasattr(wsgi_errors, attr), (
"wsgi.errors (%r) doesn't have the attribute %s"
% (wsgi_errors, attr))
def check_status(status):
assert type(status) is StringType, (
"Status must be a string (not %r)" % status)
# Implicitly check that we can turn it into an integer:
status_code = status.split(None, 1)[0]
assert len(status_code) == 3, (
"Status codes must be three characters: %r" % status_code)
status_int = int(status_code)
assert status_int >= 100, "Status code is invalid: %r" % status_int
if len(status) < 4 or status[3] != ' ':
warnings.warn(
"The status string (%r) should be a three-digit integer "
"followed by a single space and a status explanation"
% status, WSGIWarning)
def check_headers(headers):
assert type(headers) is ListType, (
"Headers (%r) must be of type list: %r"
% (headers, type(headers)))
header_names = {}
for item in headers:
assert type(item) is TupleType, (
"Individual headers (%r) must be of type tuple: %r"
% (item, type(item)))
assert len(item) == 2
name, value = item
assert name.lower() != 'status', (
"The Status header cannot be used; it conflicts with CGI "
"script, and HTTP status is not given through headers "
"(value: %r)." % value)
header_names[name.lower()] = None
assert '\n' not in name and ':' not in name, (
"Header names may not contain ':' or '\\n': %r" % name)
assert header_re.search(name), "Bad header name: %r" % name
assert not name.endswith('-') and not name.endswith('_'), (
"Names may not end in '-' or '_': %r" % name)
assert not bad_header_value_re.search(value), (
"Bad header value: %r (bad char: %r)"
% (value, bad_header_value_re.search(value).group(0)))
def check_content_type(status, headers):
code = int(status.split(None, 1)[0])
# @@: need one more person to verify this interpretation of RFC 2616
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
NO_MESSAGE_BODY = (204, 304)
NO_MESSAGE_TYPE = (204, 304)
for name, value in headers:
if name.lower() == 'content-type':
if code not in NO_MESSAGE_TYPE:
return
assert 0, (("Content-Type header found in a %s response, "
"which must not return content.") % code)
if code not in NO_MESSAGE_BODY:
assert 0, "No Content-Type header found in headers (%s)" % headers
def check_exc_info(exc_info):
assert exc_info is None or type(exc_info) is type(()), (
"exc_info (%r) is not a tuple: %r" % (exc_info, type(exc_info)))
# More exc_info checks?
def check_iterator(iterator):
# Technically a string is legal, which is why it's a really bad
# idea, because it may cause the response to be returned
# character-by-character
assert not isinstance(iterator, str), (
"You should not return a string as your application iterator, "
"instead return a single-item list containing that string.")
def make_middleware(application, global_conf):
# @@: global_conf should be taken out of the middleware function,
# and isolated here
return middleware(application)
make_middleware.__doc__ = __doc__
__all__ = ['middleware', 'make_middleware']
|
lgpl-3.0
|
sadaf2605/django
|
tests/middleware/tests.py
|
2
|
36708
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
import random
import re
from io import BytesIO
from unittest import skipIf
from django.conf import settings
from django.core import mail
from django.core.exceptions import PermissionDenied
from django.http import (
FileResponse, HttpRequest, HttpResponse, HttpResponseNotFound,
HttpResponsePermanentRedirect, HttpResponseRedirect, StreamingHttpResponse,
)
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.middleware.common import (
BrokenLinkEmailsMiddleware, CommonMiddleware,
)
from django.middleware.gzip import GZipMiddleware
from django.middleware.http import ConditionalGetMiddleware
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.utils import six
from django.utils.encoding import force_str
from django.utils.six.moves import range
from django.utils.six.moves.urllib.parse import quote
@override_settings(ROOT_URLCONF='middleware.urls')
class CommonMiddlewareTest(SimpleTestCase):
rf = RequestFactory()
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash(self):
"""
URLs with slashes should go unmolested.
"""
request = self.rf.get('/slash/')
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource(self):
"""
Matches to explicit slashless URLs should go unmolested.
"""
request = self.rf.get('/noslash')
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponse("Here's the text of the Web page.")
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown(self):
"""
APPEND_SLASH should not redirect to unknown resources.
"""
request = self.rf.get('/unknown')
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect(self):
"""
APPEND_SLASH should redirect slashless URLs to a valid pattern.
"""
request = self.rf.get('/slash')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect_querystring(self):
"""
APPEND_SLASH should preserve querystrings when redirecting.
"""
request = self.rf.get('/slash?test=1')
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.url, '/slash/?test=1')
@override_settings(APPEND_SLASH=True, DEBUG=True)
def test_append_slash_no_redirect_on_POST_in_DEBUG(self):
"""
Tests that while in debug mode, an exception is raised with a warning
        when a failed attempt is made to POST, PUT, or PATCH to a URL which
would normally be redirected to a slashed version.
"""
msg = "maintaining %s data. Change your form to point to testserver/slash/"
request = self.rf.get('/slash')
request.method = 'POST'
response = HttpResponseNotFound()
with self.assertRaisesMessage(RuntimeError, msg % request.method):
CommonMiddleware().process_response(request, response)
request = self.rf.get('/slash')
request.method = 'PUT'
with self.assertRaisesMessage(RuntimeError, msg % request.method):
CommonMiddleware().process_response(request, response)
request = self.rf.get('/slash')
request.method = 'PATCH'
with self.assertRaisesMessage(RuntimeError, msg % request.method):
CommonMiddleware().process_response(request, response)
@override_settings(APPEND_SLASH=False)
def test_append_slash_disabled(self):
"""
Disabling append slash functionality should leave slashless URLs alone.
"""
request = self.rf.get('/slash')
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_quoted(self):
"""
URLs which require quoting should be redirected to their slash version.
"""
request = self.rf.get(quote('/needsquoting#'))
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, '/needsquoting%23/')
@override_settings(APPEND_SLASH=False, PREPEND_WWW=True)
def test_prepend_www(self):
request = self.rf.get('/path/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://www.testserver/path/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_have_slash(self):
request = self.rf.get('/slash/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://www.testserver/slash/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_slashless(self):
request = self.rf.get('/slash')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://www.testserver/slash/')
# The following tests examine expected behavior given a custom URLconf that
# overrides the default one through the request object.
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash_custom_urlconf(self):
"""
URLs with slashes should go unmolested.
"""
request = self.rf.get('/customurlconf/slash/')
request.urlconf = 'middleware.extra_urls'
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource_custom_urlconf(self):
"""
Matches to explicit slashless URLs should go unmolested.
"""
request = self.rf.get('/customurlconf/noslash')
request.urlconf = 'middleware.extra_urls'
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponse("Here's the text of the Web page.")
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown_custom_urlconf(self):
"""
APPEND_SLASH should not redirect to unknown resources.
"""
request = self.rf.get('/customurlconf/unknown')
request.urlconf = 'middleware.extra_urls'
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect_custom_urlconf(self):
"""
APPEND_SLASH should redirect slashless URLs to a valid pattern.
"""
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertIsNotNone(r, "CommonMiddleware failed to return APPEND_SLASH redirect using request.urlconf")
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, '/customurlconf/slash/')
@override_settings(APPEND_SLASH=True, DEBUG=True)
def test_append_slash_no_redirect_on_POST_in_DEBUG_custom_urlconf(self):
"""
Tests that while in debug mode, an exception is raised with a warning
        when a failed attempt is made to POST to a URL which would normally be
redirected to a slashed version.
"""
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
request.method = 'POST'
response = HttpResponseNotFound()
with self.assertRaisesMessage(RuntimeError, 'end in a slash'):
CommonMiddleware().process_response(request, response)
@override_settings(APPEND_SLASH=False)
def test_append_slash_disabled_custom_urlconf(self):
"""
Disabling append slash functionality should leave slashless URLs alone.
"""
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_quoted_custom_urlconf(self):
"""
URLs which require quoting should be redirected to their slash version.
"""
request = self.rf.get(quote('/customurlconf/needsquoting#'))
request.urlconf = 'middleware.extra_urls'
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertIsNotNone(r, "CommonMiddleware failed to return APPEND_SLASH redirect using request.urlconf")
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, '/customurlconf/needsquoting%23/')
@override_settings(APPEND_SLASH=False, PREPEND_WWW=True)
def test_prepend_www_custom_urlconf(self):
request = self.rf.get('/customurlconf/path/')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://www.testserver/customurlconf/path/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_have_slash_custom_urlconf(self):
request = self.rf.get('/customurlconf/slash/')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://www.testserver/customurlconf/slash/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_slashless_custom_urlconf(self):
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://www.testserver/customurlconf/slash/')
# ETag + If-Not-Modified support tests
@override_settings(USE_ETAGS=True)
def test_etag(self):
req = HttpRequest()
res = HttpResponse('content')
self.assertTrue(CommonMiddleware().process_response(req, res).has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_etag_streaming_response(self):
req = HttpRequest()
res = StreamingHttpResponse(['content'])
res['ETag'] = 'tomatoes'
self.assertEqual(CommonMiddleware().process_response(req, res).get('ETag'), 'tomatoes')
@override_settings(USE_ETAGS=True)
def test_no_etag_streaming_response(self):
req = HttpRequest()
res = StreamingHttpResponse(['content'])
self.assertFalse(CommonMiddleware().process_response(req, res).has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_if_none_match(self):
first_req = HttpRequest()
first_res = CommonMiddleware().process_response(first_req, HttpResponse('content'))
second_req = HttpRequest()
second_req.method = 'GET'
second_req.META['HTTP_IF_NONE_MATCH'] = first_res['ETag']
second_res = CommonMiddleware().process_response(second_req, HttpResponse('content'))
self.assertEqual(second_res.status_code, 304)
# Tests for the Content-Length header
def test_content_length_header_added(self):
response = HttpResponse('content')
self.assertNotIn('Content-Length', response)
response = CommonMiddleware().process_response(HttpRequest(), response)
self.assertEqual(int(response['Content-Length']), len(response.content))
def test_content_length_header_not_added_for_streaming_response(self):
response = StreamingHttpResponse('content')
self.assertNotIn('Content-Length', response)
response = CommonMiddleware().process_response(HttpRequest(), response)
self.assertNotIn('Content-Length', response)
def test_content_length_header_not_changed(self):
response = HttpResponse()
bad_content_length = len(response.content) + 10
response['Content-Length'] = bad_content_length
response = CommonMiddleware().process_response(HttpRequest(), response)
self.assertEqual(int(response['Content-Length']), bad_content_length)
# Other tests
@override_settings(DISALLOWED_USER_AGENTS=[re.compile(r'foo')])
def test_disallowed_user_agents(self):
request = self.rf.get('/slash')
request.META['HTTP_USER_AGENT'] = 'foo'
with self.assertRaisesMessage(PermissionDenied, 'Forbidden user agent'):
CommonMiddleware().process_request(request)
def test_non_ascii_query_string_does_not_crash(self):
"""Regression test for #15152"""
request = self.rf.get('/slash')
request.META['QUERY_STRING'] = force_str('drink=café')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
def test_response_redirect_class(self):
request = self.rf.get('/slash')
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, '/slash/')
self.assertIsInstance(r, HttpResponsePermanentRedirect)
def test_response_redirect_class_subclass(self):
class MyCommonMiddleware(CommonMiddleware):
response_redirect_class = HttpResponseRedirect
request = self.rf.get('/slash')
response = HttpResponseNotFound()
r = MyCommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.url, '/slash/')
self.assertIsInstance(r, HttpResponseRedirect)
@override_settings(
IGNORABLE_404_URLS=[re.compile(r'foo')],
MANAGERS=['[email protected]'],
)
class BrokenLinkEmailsMiddlewareTest(SimpleTestCase):
rf = RequestFactory()
def setUp(self):
self.req = self.rf.get('/regular_url/that/does/not/exist')
self.resp = self.client.get(self.req.path)
def test_404_error_reporting(self):
self.req.META['HTTP_REFERER'] = '/another/url/'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Broken', mail.outbox[0].subject)
def test_404_error_reporting_no_referer(self):
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
def test_404_error_reporting_ignored_url(self):
self.req.path = self.req.path_info = 'foo_url/that/does/not/exist'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
@skipIf(six.PY3, "HTTP_REFERER is str type on Python 3")
def test_404_error_nonascii_referrer(self):
        # Such referer strings should not happen, but if they do,
        # let's not crash.
self.req.META['HTTP_REFERER'] = b'http://testserver/c/\xd0\xbb\xd0\xb8/'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
@skipIf(six.PY3, "HTTP_USER_AGENT is str type on Python 3")
def test_404_error_nonascii_user_agent(self):
        # Such user agent strings should not happen, but if they do,
        # let's not crash.
self.req.META['HTTP_REFERER'] = '/another/url/'
self.req.META['HTTP_USER_AGENT'] = b'\xd0\xbb\xd0\xb8\xff\xff'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('User agent: \u043b\u0438\ufffd\ufffd\n', mail.outbox[0].body)
def test_custom_request_checker(self):
class SubclassedMiddleware(BrokenLinkEmailsMiddleware):
ignored_user_agent_patterns = (re.compile(r'Spider.*'), re.compile(r'Robot.*'))
def is_ignorable_request(self, request, uri, domain, referer):
'''Check user-agent in addition to normal checks.'''
if super(SubclassedMiddleware, self).is_ignorable_request(request, uri, domain, referer):
return True
user_agent = request.META['HTTP_USER_AGENT']
return any(pattern.search(user_agent) for pattern in self.ignored_user_agent_patterns)
self.req.META['HTTP_REFERER'] = '/another/url/'
self.req.META['HTTP_USER_AGENT'] = 'Spider machine 3.4'
SubclassedMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
self.req.META['HTTP_USER_AGENT'] = 'My user agent'
SubclassedMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
def test_referer_equal_to_requested_url(self):
"""
Some bots set the referer to the current URL to avoid being blocked by
        a referer check (#25302).
"""
self.req.META['HTTP_REFERER'] = self.req.path
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
# URL with scheme and domain should also be ignored
self.req.META['HTTP_REFERER'] = 'http://testserver%s' % self.req.path
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
# URL with a different scheme should be ignored as well because bots
# tend to use http:// in referers even when browsing HTTPS websites.
self.req.META['HTTP_X_PROTO'] = 'https'
self.req.META['SERVER_PORT'] = 443
with self.settings(SECURE_PROXY_SSL_HEADER=('HTTP_X_PROTO', 'https')):
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
def test_referer_equal_to_requested_url_on_another_domain(self):
self.req.META['HTTP_REFERER'] = 'http://anotherserver%s' % self.req.path
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
@override_settings(APPEND_SLASH=True)
def test_referer_equal_to_requested_url_without_trailing_slash_when_append_slash_is_set(self):
self.req.path = self.req.path_info = '/regular_url/that/does/not/exist/'
self.req.META['HTTP_REFERER'] = self.req.path_info[:-1]
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
@override_settings(APPEND_SLASH=False)
def test_referer_equal_to_requested_url_without_trailing_slash_when_append_slash_is_unset(self):
self.req.path = self.req.path_info = '/regular_url/that/does/not/exist/'
self.req.META['HTTP_REFERER'] = self.req.path_info[:-1]
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
@override_settings(ROOT_URLCONF='middleware.cond_get_urls')
class ConditionalGetMiddlewareTest(SimpleTestCase):
def setUp(self):
self.req = RequestFactory().get('/')
self.resp = self.client.get(self.req.path_info)
# Tests for the Date header
def test_date_header_added(self):
self.assertNotIn('Date', self.resp)
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertIn('Date', self.resp)
# Tests for the Content-Length header
def test_content_length_header_added(self):
content_length = len(self.resp.content)
# Already set by CommonMiddleware, remove it to check that
        # ConditionalGetMiddleware re-adds it.
del self.resp['Content-Length']
self.assertNotIn('Content-Length', self.resp)
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertIn('Content-Length', self.resp)
self.assertEqual(int(self.resp['Content-Length']), content_length)
def test_content_length_header_not_added(self):
resp = StreamingHttpResponse('content')
self.assertNotIn('Content-Length', resp)
resp = ConditionalGetMiddleware().process_response(self.req, resp)
self.assertNotIn('Content-Length', resp)
def test_content_length_header_not_changed(self):
bad_content_length = len(self.resp.content) + 10
self.resp['Content-Length'] = bad_content_length
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(int(self.resp['Content-Length']), bad_content_length)
# Tests for the ETag header
def test_if_none_match_and_no_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = 'spam'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_no_if_none_match_and_etag(self):
self.resp['ETag'] = 'eggs'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_none_match_and_same_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = 'spam'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_none_match_and_same_etag_with_quotes(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = '"spam"'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_none_match_and_different_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = 'spam'
self.resp['ETag'] = 'eggs'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_none_match_and_redirect(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = 'spam'
self.resp['Location'] = '/'
self.resp.status_code = 301
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 301)
def test_if_none_match_and_client_error(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = 'spam'
self.resp.status_code = 400
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 400)
# Tests for the Last-Modified header
def test_if_modified_since_and_no_last_modified(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_no_if_modified_since_and_last_modified(self):
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_modified_since_and_same_last_modified(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_modified_since_and_last_modified_in_the_past(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_modified_since_and_last_modified_in_the_future(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:41:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_modified_since_and_redirect(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT'
self.resp['Location'] = '/'
self.resp.status_code = 301
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 301)
def test_if_modified_since_and_client_error(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT'
self.resp.status_code = 400
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 400)
class XFrameOptionsMiddlewareTest(SimpleTestCase):
"""
Tests for the X-Frame-Options clickjacking prevention middleware.
"""
def test_same_origin(self):
"""
The X_FRAME_OPTIONS setting can be set to SAMEORIGIN to have the
middleware use that value for the HTTP header.
"""
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(), HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
with override_settings(X_FRAME_OPTIONS='sameorigin'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(), HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_deny(self):
"""
The X_FRAME_OPTIONS setting can be set to DENY to have the middleware
use that value for the HTTP header.
"""
with override_settings(X_FRAME_OPTIONS='DENY'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(), HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
with override_settings(X_FRAME_OPTIONS='deny'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(), HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_defaults_sameorigin(self):
"""
If the X_FRAME_OPTIONS setting is not set then it defaults to
SAMEORIGIN.
"""
with override_settings(X_FRAME_OPTIONS=None):
del settings.X_FRAME_OPTIONS # restored by override_settings
r = XFrameOptionsMiddleware().process_response(HttpRequest(), HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_dont_set_if_set(self):
"""
If the X-Frame-Options header is already set then the middleware does
not attempt to override it.
"""
with override_settings(X_FRAME_OPTIONS='DENY'):
response = HttpResponse()
response['X-Frame-Options'] = 'SAMEORIGIN'
r = XFrameOptionsMiddleware().process_response(HttpRequest(), response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
response = HttpResponse()
response['X-Frame-Options'] = 'DENY'
r = XFrameOptionsMiddleware().process_response(HttpRequest(), response)
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_response_exempt(self):
"""
If the response has an xframe_options_exempt attribute set to False
then it still sets the header, but if it's set to True then it doesn't.
"""
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
response = HttpResponse()
response.xframe_options_exempt = False
r = XFrameOptionsMiddleware().process_response(HttpRequest(), response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
response = HttpResponse()
response.xframe_options_exempt = True
r = XFrameOptionsMiddleware().process_response(HttpRequest(), response)
self.assertIsNone(r.get('X-Frame-Options'))
def test_is_extendable(self):
"""
The XFrameOptionsMiddleware method that determines the X-Frame-Options
header value can be overridden based on something in the request or
response.
"""
class OtherXFrameOptionsMiddleware(XFrameOptionsMiddleware):
# This is just an example for testing purposes...
def get_xframe_options_value(self, request, response):
if getattr(request, 'sameorigin', False):
return 'SAMEORIGIN'
if getattr(response, 'sameorigin', False):
return 'SAMEORIGIN'
return 'DENY'
with override_settings(X_FRAME_OPTIONS='DENY'):
response = HttpResponse()
response.sameorigin = True
r = OtherXFrameOptionsMiddleware().process_response(HttpRequest(), response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
request = HttpRequest()
request.sameorigin = True
r = OtherXFrameOptionsMiddleware().process_response(request, HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
r = OtherXFrameOptionsMiddleware().process_response(HttpRequest(), HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
class GZipMiddlewareTest(SimpleTestCase):
"""
Tests the GZipMiddleware.
"""
short_string = b"This string is too short to be worth compressing."
compressible_string = b'a' * 500
incompressible_string = b''.join(six.int2byte(random.randint(0, 255)) for _ in range(500))
sequence = [b'a' * 500, b'b' * 200, b'a' * 300]
sequence_unicode = ['a' * 500, 'é' * 200, 'a' * 300]
def setUp(self):
self.req = RequestFactory().get('/')
self.req.META['HTTP_ACCEPT_ENCODING'] = 'gzip, deflate'
self.req.META['HTTP_USER_AGENT'] = 'Mozilla/5.0 (Windows NT 5.1; rv:9.0.1) Gecko/20100101 Firefox/9.0.1'
self.resp = HttpResponse()
self.resp.status_code = 200
self.resp.content = self.compressible_string
self.resp['Content-Type'] = 'text/html; charset=UTF-8'
self.stream_resp = StreamingHttpResponse(self.sequence)
self.stream_resp['Content-Type'] = 'text/html; charset=UTF-8'
self.stream_resp_unicode = StreamingHttpResponse(self.sequence_unicode)
self.stream_resp_unicode['Content-Type'] = 'text/html; charset=UTF-8'
@staticmethod
def decompress(gzipped_string):
with gzip.GzipFile(mode='rb', fileobj=BytesIO(gzipped_string)) as f:
return f.read()
def test_compress_response(self):
"""
Compression is performed on responses with compressible content.
"""
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.decompress(r.content), self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertEqual(r.get('Content-Length'), str(len(r.content)))
def test_compress_streaming_response(self):
"""
Compression is performed on responses with streaming content.
"""
r = GZipMiddleware().process_response(self.req, self.stream_resp)
self.assertEqual(self.decompress(b''.join(r)), b''.join(self.sequence))
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertFalse(r.has_header('Content-Length'))
def test_compress_streaming_response_unicode(self):
"""
Compression is performed on responses with streaming Unicode content.
"""
r = GZipMiddleware().process_response(self.req, self.stream_resp_unicode)
self.assertEqual(
self.decompress(b''.join(r)),
b''.join(x.encode('utf-8') for x in self.sequence_unicode)
)
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertFalse(r.has_header('Content-Length'))
def test_compress_file_response(self):
"""
Compression is performed on FileResponse.
"""
with open(__file__, 'rb') as file1:
file_resp = FileResponse(file1)
file_resp['Content-Type'] = 'text/html; charset=UTF-8'
r = GZipMiddleware().process_response(self.req, file_resp)
with open(__file__, 'rb') as file2:
self.assertEqual(self.decompress(b''.join(r)), file2.read())
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertIsNot(r.file_to_stream, file1)
def test_compress_non_200_response(self):
"""
Compression is performed on responses with a status other than 200
(#10762).
"""
self.resp.status_code = 404
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.decompress(r.content), self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), 'gzip')
def test_no_compress_short_response(self):
"""
Compression isn't performed on responses with short content.
"""
self.resp.content = self.short_string
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.short_string)
self.assertIsNone(r.get('Content-Encoding'))
def test_no_compress_compressed_response(self):
"""
Compression isn't performed on responses that are already compressed.
"""
self.resp['Content-Encoding'] = 'deflate'
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), 'deflate')
def test_no_compress_incompressible_response(self):
"""
Compression isn't performed on responses with incompressible content.
"""
self.resp.content = self.incompressible_string
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.incompressible_string)
self.assertIsNone(r.get('Content-Encoding'))
@override_settings(USE_ETAGS=True)
class ETagGZipMiddlewareTest(SimpleTestCase):
"""
Tests if the ETagMiddleware behaves correctly with GZipMiddleware.
"""
rf = RequestFactory()
compressible_string = b'a' * 500
def test_compress_response(self):
"""
ETag is changed after gzip compression is performed.
"""
request = self.rf.get('/', HTTP_ACCEPT_ENCODING='gzip, deflate')
response = GZipMiddleware().process_response(
request,
CommonMiddleware().process_response(request, HttpResponse(self.compressible_string))
)
gzip_etag = response.get('ETag')
request = self.rf.get('/', HTTP_ACCEPT_ENCODING='')
response = GZipMiddleware().process_response(
request,
CommonMiddleware().process_response(request, HttpResponse(self.compressible_string))
)
nogzip_etag = response.get('ETag')
self.assertNotEqual(gzip_etag, nogzip_etag)
|
bsd-3-clause
|
4eek/edx-platform
|
lms/djangoapps/shoppingcart/migrations/0013_auto__add_field_invoice_is_valid.py
|
114
|
13795
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Invoice.is_valid'
db.add_column('shoppingcart_invoice', 'is_valid',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Invoice.is_valid'
db.delete_column('shoppingcart_invoice', 'is_valid')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.coupon': {
'Meta': {'object_name': 'Coupon'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 8, 20, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'percentage_discount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shoppingcart.couponredemption': {
'Meta': {'object_name': 'CouponRedemption'},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Coupon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.courseregistrationcode': {
'Meta': {'object_name': 'CourseRegistrationCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 8, 20, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']", 'null': 'True'})
},
'shoppingcart.invoice': {
'Meta': {'object_name': 'Invoice'},
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'company_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'tax_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'total_amount': ('django.db.models.fields.FloatField', [], {})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '30', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.registrationcoderedemption': {
'Meta': {'object_name': 'RegistrationCodeRedemption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 8, 20, 0, 0)', 'null': 'True'}),
'redeemed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'registration_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCode']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
|
agpl-3.0
|
abegong/textbadger
|
textbadger/tb_app/models.py
|
1
|
17389
|
#from django.db.models import Model, TextField
#from djangotoolbox.fields import ListField, EmbeddedModelField, DictField
from django.contrib.auth.models import User
from django.db import connections
from bson.objectid import ObjectId
from pymongo.errors import InvalidId
import csv, re, json, datetime, random
from collections import defaultdict
import tb_app.kripp as kripp
def uses_mongo(function):
def _inner(*args, **kwargs):
mongo = connections["default"]
return function(mongo, *args, **kwargs)
return _inner
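# Note: the @uses_mongo decorator injects the default database connection as the
# first positional argument of the wrapped function, so callers invoke the
# decorated functions below without passing a connection themselves.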
class MongoEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, ObjectId):
return str(obj)
if hasattr(obj, 'isoformat'):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
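# Usage sketch (values here are purely illustrative): MongoEncoder lets
# json.dumps serialize documents that come straight from pymongo and contain
# ObjectId or datetime values, e.g.
#   json.dumps({"_id": ObjectId(), "created_at": datetime.datetime.now()},
#              cls=MongoEncoder)
# The commented-out debug dumps further down use it the same way.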
##############################################################################
#This is one way new collections are created
def convert_document_csv_to_bson(csv_text):
C = csv.reader(csv.StringIO(csv_text))
#Parse the header row
H = C.next()
#Capture the url/content column index
url_index, content_index = None, None
if 'url' in H:
url_index = H.index('url')
if 'content' in H:
content_index = H.index('content')
if url_index==None and content_index==None:
raise Exception('You must specify either a "url" column or a "content" column in the .csv header.')
#Identify metadata_fields
meta_fields = {}
for h in H:
if re.match('META_', h):
name = re.sub('^META_', '', h)
index = H.index(h)
if name in meta_fields:
raise Exception('Duplicate META_ name : '+name)
meta_fields[name] = index
# print json.dumps(meta_fields, indent=2)
documents_json = []
#http://lethain.com/handling-very-large-csv-and-xml-files-in-python/
#print csv.field_size_limit()
csv.field_size_limit(1000000)
#For each row in the collection
for row in C:
j = {}
#Grab the content or url
#If both are present, url gets precedence
if url_index != None:
j['url'] = row[url_index]
elif content_index != None:
j['content'] = row[content_index]
#Grab metadata fields
m = {}
for f in meta_fields:
#Don't include missing values
#! Maybe include other missing values here
if meta_fields[f] != '':
m[f] = row[meta_fields[f]]
#Don't include empty metadata objects
if m != {}:
j["metadata"] = m
documents_json.append(j)
# print json.dumps(documents_json, indent=2)
return documents_json
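# Illustrative input (a hypothetical file, not shipped with the app): the parser
# expects a header containing "url" and/or "content", plus optional META_
# columns, e.g.
#   url,META_source,META_year
#   http://example.com/article-1,example-news,2013
# which becomes {"url": "...", "metadata": {"source": "...", "year": "..."}}.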
def get_new_collection_json(name, description, documents):
""" Create a new collection, given the name, description, and documents """
J = {
'profile' : {
'name' : name,
'description' : description,
'created_at' : datetime.datetime.now(),
'size' : len(documents),
},
'documents' : documents,
}
return J
@uses_mongo
def create_collection_json(mongo, name, description, collections):
""" Create a new collection using documents from other collections
collections is an array with the form:
[{tb_app_collection.$id : docs to retrieve from this collection}]
"""
coll = mongo.get_collection("tb_app_collection")
documents = []
for id_ in collections:
collection = coll.find_one({"_id": ObjectId(id_)})
doc_count = collections[id_]
doc_list = collection["documents"]
random.shuffle( doc_list )
for doc in doc_list[:doc_count]:
doc["metadata"]["source_id"] = id_
doc["metadata"]["source_name"] = collection["profile"]["name"]
documents += doc_list[:doc_count]
random.shuffle(documents)
return get_new_collection_json(name, description, documents)
def get_default_codebook_questions():
return [
{
"question_type": "Static text",
"var_name": "default_question",
"params": {
"header_text": "<h2> New codebook </h2><p><strong>Use the controls at right to add questions.</strong></p>",
}
},
{
"question_type": "Multiple choice",
"var_name": "mchoice",
"params": {
"header_text": "Here is an example of a multiple choice question. Which answer do you like best?",
"answer_array": ["This one", "No, this one", "A third option"],
}
},
{
"question_type": "Short essay",
"var_name": "essay",
"params": {
"header_text": "Here's a short essay question.",
}
}
]
def create_new_variable_json(question_index, subquestion_index, variable_name, question_header, subquestion_label, variable_type):
return {
'question_index': question_index,
'subquestion_index': subquestion_index,
'variable_name': variable_name,
'question_header': question_header,
'subquestion_label': subquestion_label,
'variable_type': variable_type
}
#! As the code is written, this method is never invoked.
#! Using the variables field would help clean up the code in a bunch of places
#! * reliability checking / csv export / table generation on the batch page
def get_codebook_variables_from_questions(questions):
variables = []
for i,q in enumerate(questions):
if q["var_name"]:
var_name = "_"+q["var_name"]
else:
var_name = ''
short_text = q["params"]["header_text"]
#variable_type = q["params"]["variable_type"]
if q["question_type"] == 'Static text':
variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+var_name, short_text, "", "none") )
if q["question_type"] in ['Multiple choice', 'Two-way scale']:
variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+var_name, short_text, "", "ordinal") )
if q["question_type"] == 'Check all that apply':
for j,a in enumerate(q["params"]["answer_array"]):
variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, "", "nominal") )
if q["question_type"] in ['Text box', 'Short essay']:
variables.append( create_new_variable_json(i+1, None, "Q"+str(i+1)+var_name, short_text, "", "text") )
elif q["question_type"] == 'Radio matrix':
for j,p in enumerate(q["params"]["question_array"]):
variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, p, "interval") )
elif q["question_type"] == 'Checkbox matrix':
for j,p in enumerate(q["params"]["question_array"]):
for k,r in enumerate(q["params"]["answer_array"]):
variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+"_"+str(k+1)+var_name, short_text, p, "nominal") )
elif q["question_type"] == 'Two-way matrix':
for j,p in enumerate(q["params"]["left_statements"]):
variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, p+"/"+q["params"]["right_statements"][j], "ordinal") )
elif q["question_type"] == 'Text matrix':
for j,p in enumerate(q["params"]["answer_array"]):
variables.append( create_new_variable_json(i+1, j+1, "Q"+str(i+1)+"_"+str(j+1)+var_name, short_text, p, "text") )
return variables
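# The generated variable names follow the pattern
# Q<question>[_<subquestion>[_<answer>]][_<var_name>], e.g. a "Checkbox matrix"
# at position 3 whose var_name is "topics" (a hypothetical name) would yield
# Q3_1_1_topics, Q3_1_2_topics, and so on.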
def get_new_codebook_json(name, description):
questions = get_default_codebook_questions()
variables = get_codebook_variables_from_questions(questions)
#Construct object
return {
'profile' : {
'name' : name,
'description' : description,
'created_at' : datetime.datetime.now(),
'version' : 1,
'children' : [],
'batches' : [],
'parent' : None,
},
'questions' : questions,
'variables' : variables,
}
def get_revised_codebook_json(parent_codebook, question_json):
#print parent_codebook
J = {
'profile' : {
'description' : parent_codebook['profile']["description"],
'created_at' : datetime.datetime.now(),
'version' : parent_codebook['profile']["version"] + 1,
'children' : [],
'batches' : [],
'parent' : parent_codebook['_id'],#ObjectId(parent_id),
},
'questions' : question_json,
'variables' : get_codebook_variables_from_questions(question_json),
}
if parent_codebook['profile']["children"]:
J['profile']['name'] = parent_codebook['profile']["name"] + " (branch)"
else:
J['profile']['name'] = parent_codebook['profile']["name"]
return J
def gen_codebook_column_names(codebook):
"""codebook should be in json format, hot off a mongodb query"""
col_names = ['created_at']
for i,q in enumerate(codebook["questions"]):
if q["var_name"]:
var_name = "_"+q["var_name"]
else:
var_name = ''
if q["question_type"] in ['Static text', 'Multiple choice', 'Check all that apply', 'Two-way scale', 'Text box', 'Short essay']:
col_names.append("Q"+str(i+1)+var_name)
elif q["question_type"] in ['Radio matrix', 'Checkbox matrix']:
for j,p in enumerate(q["params"]["question_array"]):
col_names.append("Q"+str(i+1)+"_"+str(j+1)+var_name)
elif q["question_type"] == 'Two-way matrix':
for j,p in enumerate(q["params"]["left_statements"]):
col_names.append("Q"+str(i+1)+"_"+str(j+1)+var_name)
elif q["question_type"] == 'Text matrix':
for j,p in enumerate(q["params"]["answer_array"]):
col_names.append("Q"+str(i+1)+"_"+str(j+1)+var_name)
return col_names
def gen_col_index_from_col_names(col_names):
return dict([(v,k) for (k,v) in enumerate(col_names)])
def gen_csv_column_from_batch_labels(labels, col_index):
csv_col = [None for i in range(len(col_index))]
print labels
for q in labels:
if type(labels[q]) == unicode:
csv_col[col_index[q]] = str(labels[q].encode("utf-8"))
else:
csv_col[col_index[q]] = labels[q]
return csv_col
### Batches ###################################################################
def get_batch_documents_json(coders, pct_overlap, shuffle, collection):
k = len(collection["documents"])
overlap = int((k * pct_overlap) / 100)
import random
doc_ids = range(k)
if shuffle:
# ? This can stay here until we do our DB refactor.
random.shuffle(doc_ids)
shared = doc_ids[:overlap]
unique = doc_ids[overlap:]
#Construct documents object
documents = []
empty_labels = dict([(x, []) for x in coders])
for i in shared:
documents.append({
'index': i,
# 'content': collection["documents"][i]["content"],
'labels': empty_labels
})
for i in unique:
documents.append({
'index': i,
# 'content': collection["documents"][i]["content"],
'labels': { coders[i%len(coders)] : [] }
#Populate the list with a random smattering of fake labels
#'labels': {coders[i % len(coders)]: random.choice([None for x in range(2)] + range(20))}
})
if shuffle:
random.shuffle(documents)
return documents
def get_new_batch_json(count, coders, pct_overlap, shuffle, codebook, collection):
#Construct profile object
profile = {
'name': 'Batch ' + str(count + 1),
'description': collection["profile"]["name"][:20] + " * " + codebook["profile"]["name"][:20] + " (" + str(codebook["profile"]["version"]) + ")",
'index': count + 1,
'codebook_id': codebook['_id'],
'collection_id': collection['_id'],
'coders': coders,
'pct_overlap': pct_overlap,
'shuffle': shuffle,
'created_at': datetime.datetime.now(),
}
documents = get_batch_documents_json(coders, pct_overlap, shuffle, collection)
#Construct batch object
batch = {
'profile' : profile,
'documents': documents,
'reports': {
'progress': {},
'reliability': {},
},
}
return batch
def get_most_recent_answer_set(answer_set_list):
#Get the most recent answer set for this coder (important if the coder used did an "undo")
most_recent_answer_set = {}
most_recent_date = None
for answer_set in answer_set_list:
if not most_recent_date or answer_set["created_at"] > most_recent_date:
most_recent_answer_set = answer_set
most_recent_date = answer_set["created_at"]
return most_recent_answer_set
@uses_mongo
def update_batch_progress(mongo, id_):
#Connect to the DB
coll = mongo.get_collection("tb_app_batch")
#Retrieve the batch
batch = coll.find_one({"_id": ObjectId(id_)})
# print json.dumps(batch, indent=2, cls=MongoEncoder)
#Scaffold the progress object
coders = batch["profile"]["coders"]
progress = {
"coders": dict([(c, {"assigned":0, "complete":0}) for c in coders]),
"summary": {}
}
#Count total and complete document codes
assigned, complete = 0, 0
for doc in batch["documents"]:
for coder in doc["labels"]:
assigned += 1
progress["coders"][coder]["assigned"] += 1
if doc["labels"][coder] != []:
complete += 1
progress["coders"][coder]["complete"] += 1
#Calculate percentages
for coder in progress["coders"]:
c = progress["coders"][coder]
c["percent"] = round(float(100 * c["complete"]) / c["assigned"], 1)
progress["summary"] = {
"assigned": assigned,
"complete": complete,
"percent": round(float(100 * complete) / assigned, 1),
}
batch["reports"]["progress"] = progress
coll.update({"_id": ObjectId(id_)}, batch)
def convert_batch_to_2d_arrays(batch, var_names, missing_val=None):
#2-D arrays wrapped in a dictionary : [question][document][coder]
coder_index = dict([(c,i) for i,c in enumerate(batch["profile"]["coders"])])
#Create empty arrays
#! The "None" here should be zero for CATA variables.
#! But I don't have a good way to detect CATA variables.
#! This code needs a refactor, but now is not the time.
code_arrays = dict([ (n, [[None for c in coder_index] for d in batch["documents"]]) for n in var_names])
for i, doc in enumerate(batch["documents"]):
for coder in doc["labels"]:
answer_set = get_most_recent_answer_set(doc["labels"][coder])
#print answer_set
for question in answer_set:
if question in code_arrays.keys():
try:
#print '\t'.join([str(x) for x in [question, i, coder, answer_set[question]]])
code_arrays[question][i][coder_index[coder]] = float(answer_set[question])
except ValueError:
code_arrays[question][i][coder_index[coder]] = missing_val
return code_arrays
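# The returned structure is a dict keyed by variable name; each value is a
# documents-by-coders matrix of floats (missing_val where a coder gave no
# answer or a non-numeric one), which is the layout kripp.alpha expects.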
@uses_mongo
def update_batch_reliability(mongo, batch_id):
batch = mongo.get_collection("tb_app_batch").find_one({"_id": ObjectId(batch_id)})
codebook = mongo.get_collection("tb_app_codebook").find_one({"_id": ObjectId(batch["profile"]["codebook_id"])})
variables = codebook["variables"]
var_names = [v["variable_name"] for v in variables]
data_arrays = convert_batch_to_2d_arrays(batch, var_names)
summary = {}
for i, v in enumerate(variables):
# print v
v_name = v["variable_name"]
# print q, '\t', kripp.alpha(data_arrays[q], kripp.interval)
#print v_name, '\t', v["variable_type"]
#Get variable metric
        v_type = v["variable_type"]
        if v_type == "nominal":
            metric = kripp.nominal
        elif v_type in ["interval", "ordinal"]:
            metric = kripp.interval
        elif v_type == "ratio":
            metric = kripp.ratio
        else:
            # text/static variables have no agreement metric; reset it so a
            # value left over from the previous variable is not reused
            metric = None
        if metric:
alpha = kripp.alpha(data_arrays[v_name], metric)
try:
alpha_100 = 100*alpha
except TypeError:
alpha_100 = None
summary[v_name] = dict(v.items() + {
'alpha': alpha,
'alpha_100': alpha_100,
}.items())
#Build the reliability object
reliability = {
"updated_at" : datetime.datetime.now(),
#"docs": {},
#"coders": dict([(c, {}) for c in coders]),
"summary": summary,
}
#batch["reports"]["reliability"] = reliability
#print json.dumps(reliability, indent=2, cls=MongoEncoder)
mongo.get_collection("tb_app_batch").update(
{ "_id": ObjectId(batch_id) },
{ "$set": { 'reports.reliability' : reliability}}
)
|
mit
|
bitmazk/django-multilingual-survey
|
runtests.py
|
1
|
1047
|
#!/usr/bin/env python
"""
This script is used to run tests, create a coverage report and output the
statistics at the end of the tox run.
To run this script just execute ``tox``
"""
import re
from fabric.api import local, warn
from fabric.colors import green, red
if __name__ == '__main__':
local('flake8 --ignore=E126 --ignore=W391 --statistics'
' --exclude=submodules,migrations,south_migrations,build .')
local('coverage run --source="multilingual_survey" manage.py test -v 2'
' --traceback --failfast'
' --settings=multilingual_survey.tests.settings'
' --pattern="*_tests.py"')
local('coverage html -d coverage --omit="*__init__*,*/settings/*'
',*/migrations/*,*/south_migrations/*,*/tests/*,*admin*"')
total_line = local('grep -n pc_cov coverage/index.html', capture=True)
percentage = float(re.findall(r'(\d+)%', total_line)[-1])
if percentage < 100:
warn(red('Coverage is {0}%'.format(percentage)))
print(green('Coverage is {0}%'.format(percentage)))
|
mit
|
alfa-addon/addon
|
plugin.video.alfa/channels/blogdepelis.py
|
1
|
6801
|
# -*- coding: utf-8 -*-
# -*- Channel Blog de Pelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
from builtins import range
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
from bs4 import BeautifulSoup
host = 'https://www.blogdepelis.to/'
list_language = list()
list_quality = []
list_servers = ['directo']
def create_soup(url, referer=None, unescape=False):
logger.info()
if referer:
data = httptools.downloadpage(url, headers={'Referer': referer}).data
else:
data = httptools.downloadpage(url).data
if unescape:
data = scrapertools.unescape(data)
soup = BeautifulSoup(data, "html5lib", from_encoding="utf-8")
return soup
def add_menu_items(item):
logger.info()
itemlist = list()
soup = create_soup(host)
matches = soup.find_all("li", class_="menu-item")
for elem in matches:
url = elem.a["href"]
title = elem.a.text.capitalize()
itemlist.append(Item(channel=item.channel, url=url, title=title, action="list_all"))
return itemlist
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host,
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="add_menu_items",
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + "?s=",
thumbnail=get_thumb('search', auto=True), page=1))
autoplay.show_option(item.channel, itemlist)
return itemlist
def list_all(item):
logger.info()
itemlist = list()
soup = create_soup(item.url)
matches = soup.find_all("article", class_="latestPost")
for elem in matches:
url = elem.a["href"]
thumb = elem.img["src"]
year = scrapertools.find_single_match(elem.a["title"], r"\((\d{4})\)")
title = re.sub(r" \(%s\)" % year, "", elem.a["title"]).capitalize()
action = "findvideos"
if "online" in title.lower() or "películas de" in title.lower():
title = re.sub(r" \(online\)", "", title.lower()).capitalize()
action = "get_from_list"
itemlist.append(Item(channel=item.channel, title=title, url=url, contentTitle=title, action=action,
thumbnail=thumb, infoLabels={'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, True)
try:
next_page = soup.find("a", class_="next page-numbers")["href"]
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=next_page, action='list_all'))
except:
pass
return itemlist
def get_from_list(item):
logger.info()
itemlist = list()
soup = create_soup(item.url)
matches = soup.find_all("div", class_="MsoNormal")
for elem in matches:
if not elem.find("a"):
continue
url = elem.a["href"]
        year = scrapertools.find_single_match(elem.text, r"\d{4}")
title = elem.a.text.capitalize()
itemlist.append(Item(channel=item.channel, title=title, url=url, contentTitle=title, action="findvideos",
infoLabels={'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, True)
return itemlist
def genres(item):
logger.info()
itemlist = list()
soup = create_soup(host+'peliculas')
action = 'list_all'
matches = soup.find("div", id="panel_genres_filter").find_all("a")
for elem in matches:
title = elem.text
url = "%sresults/?cat=%s&genre=%s&p=" % (host, item.cat, title)
itemlist.append(Item(channel=item.channel, title=title, url=url, action=action, section=item.section, page=0))
return itemlist
def findvideos(item):
logger.info()
itemlist = list()
url = create_soup(item.url).find("iframe")["src"]
itemlist.append(Item(channel=item.channel, title='%s', url=url, action="play", server="directo",
language="LAT", infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if item.contentType == 'movie':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library", extra="findvideos",
contentTitle=item.contentTitle))
return itemlist
def play(item):
logger.info()
item.url = item.url.replace('&f=frame', '') # Necesario para ProxyWeb
data = httptools.downloadpage(item.url, headers={"referer": host}).data
url = scrapertools.find_single_match(data, '"file":"([^"]+)","label":".*?"')
item = item.clone(url=url + "|referer=%s" % item.url)
return [item]
def search(item, texto):
logger.info()
try:
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return list_all(item)
else:
return []
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
item = Item()
try:
if categoria == 'peliculas':
item.url = host
elif categoria == 'infantiles':
item.url = host + "category/disney-channel"
item.page=1
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
|
gpl-3.0
|
sudosurootdev/kernel_lge_lgl24
|
tools/perf/scripts/python/syscall-counts-by-pid.py
|
11180
|
1927
|
# system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
|
gpl-2.0
|
amb/blender-texture-tools
|
__init__.py
|
1
|
55839
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Copyright 2019-2021: Tommi Hyppänen
bl_info = {
"name": "Image Edit Operations",
"category": "Paint",
"description": "Various image processing filters and operations",
"author": "Tommi Hyppänen (ambi)",
"location": "Image Editor > Side Panel > Image",
"documentation": "https://blenderartists.org/t/seamless-texture-patching-and-filtering-addon",
"version": (0, 2, 0),
"blender": (2, 93, 0),
}
import bpy # noqa
import functools
import numpy as np
import random
from . import pycl as cl
rnd = random.random
from . import image_ops
import importlib
# from .oklab import linear_to_srgb, srgb_to_linear
importlib.reload(image_ops)
import json
from .cl_abstraction import CLDev
from . import toml_loader
importlib.reload(toml_loader)
cl_load = toml_loader.load
cl_builder = CLDev(0)
cl_nodes = cl_load(cl_builder)
def grayscale(ssp):
out = cl_builder.new_image(ssp.shape[1], ssp.shape[0])
cl_nodes["grayscale"].run([], [cl_builder.new_image_from_ndarray(ssp)], [out])
return out.to_numpy()
def rgb_to_luminance(c):
r = c[..., 0]
g = c[..., 1]
b = c[..., 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
@functools.lru_cache(maxsize=128)
def gauss_curve(x):
# gaussian with 0.01831 at last
res = np.array([np.exp(-((i * (2 / x)) ** 2)) for i in range(-x, x + 1)], dtype=np.float32)
res /= np.sum(res)
return res
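# gauss_curve(x) samples exp(-(i*2/x)**2) for i in -x..x and normalizes the
# kernel to sum to 1; before normalization the endpoints evaluate to
# exp(-4) ~= 0.0183, which is what the "0.01831 at last" note above refers to.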
def gaussian_repeat_cl(img, out, s):
# TODO: store local pass & barrier(CLK_LOCAL_MEM_FENCE);
cl_nodes["gaussian_h"].run([s], [img], [out])
cl_nodes["gaussian_v"].run([s], [out], [img])
return (img, out)
def gaussian_repeat(pix, s):
"Separated gaussian for image. Over borders = wraparound"
assert pix.dtype == np.float32
img = cl_builder.new_image_from_ndarray(pix)
out = cl_builder.new_image(img.width, img.height)
gaussian_repeat_cl(img, out, s)
return img.to_numpy()
def bilateral_cl(pix, radius, preserve):
"Bilateral filter, OpenCL implementation"
img = cl_builder.new_image_from_ndarray(pix)
out = cl_builder.new_image(img.width, img.height)
cl_nodes["bilateral"].run([radius, preserve], [img], [out])
return out.to_numpy()
def image_gradient_cl(img, out):
src = """
#define READP(x,y) read_imagef(input, sampler, (int2)(x, y))
kernel void image_flow(
__read_only image2d_t input,
__write_only image2d_t output
)
{
int x = get_global_id(0);
int y = get_global_id(1);
const sampler_t sampler = \
CLK_NORMALIZED_COORDS_FALSE |
CLK_ADDRESS_CLAMP_TO_EDGE |
CLK_FILTER_NEAREST;
float4 pix = read_imagef(input, sampler, (int2)(x, y));
float x_comp = READP(x-1, y).x
+READP(x-1, y+1).x
+READP(x-1, y-1).x
- READP(x+1, y).x
- READP(x+1, y+1).x
- READP(x+1, y-1).x;
float y_comp = READP(x, y-1).x
+ READP(x+1, y-1).x
+ READP(x-1, y-1).x
- READP(x, y+1).x
- READP(x+1, y+1).x
- READP(x-1, y+1).x;
float2 grad = (float2)(x_comp, y_comp);
float l = length(grad);
//grad = l > 0.0f ? grad/l : (float2)(0.0f, 0.0f);
// from pythagoras
float height;
height = l < 1.0f ? sqrt(1.0f - l*l) : 0.0f;
float4 out = (float4)(x_comp, y_comp, height, l);
write_imagef(output, (int2)(x,y), out);
}
"""
blr = cl_builder.build("image_flow", src, (cl.cl_image, cl.cl_image))
(out, img) = grayscale_cl(img, out)
cl_builder.run(blr, [], (out,), img)
return (img, out)
def directional_blur_cl(pix, radius, preserve):
"Directional bilateral filter, OpenCL implementation"
original = np.copy(pix)
img = cl_builder.new_image_from_ndarray(pix)
out = cl_builder.new_image(img.width, img.height)
(grad, l0) = image_gradient_cl(img, out)
(grad, l0) = gaussian_repeat_cl(grad, l0, 2)
src = """
#define POW2(a) ((a) * (a))
#define F4_ABS(v) ((float4)(fabs(v.x), fabs(v.y), fabs(v.z), 1.0f))
kernel void guided_bilateral(
const float radius,
const float preserve,
__read_only image2d_t gradient,
__read_only image2d_t input,
__write_only image2d_t output
)
{
int gidx = get_global_id(0);
int gidy = get_global_id(1);
float2 gvec = (float2)(gidx, gidy);
const sampler_t sampler = \
CLK_NORMALIZED_COORDS_FALSE |
CLK_ADDRESS_CLAMP_TO_EDGE |
CLK_FILTER_NEAREST;
const sampler_t sampler_f = \
CLK_NORMALIZED_COORDS_FALSE |
CLK_ADDRESS_CLAMP_TO_EDGE |
CLK_FILTER_LINEAR;
int n_radius = ceil(radius);
float4 center_pix = read_imagef(input, sampler, (int2)(gidx, gidy));
float4 grad = read_imagef(gradient, sampler, (int2)(gidx, gidy));
float4 acc_A = 0.0f;
float4 acc_B = 0.0f;
float4 tempf = 0.0f;
float count = 0.0f;
float diff_map, gaussian_weight, weight;
float dx = grad.x;
float dy = grad.y;
// along tangent flow
float2 v_vec = (float2)(-dy, dx);
// against tangent flow
float2 u_vec = (float2)(dx, dy);
weight = 1.0f;
for (float v = -n_radius; v <= n_radius; v=v+1.0f) {
float2 loc = gvec + (v_vec * v) + (float2)(0.5f, 0.5f);
tempf = read_imagef(input, sampler_f, loc);
diff_map = exp (
- ( POW2(center_pix.x - tempf.x)
+ POW2(center_pix.y - tempf.y)
+ POW2(center_pix.z - tempf.z))
* preserve);
gaussian_weight = exp(-0.5f * (POW2(v)) / radius);
weight = diff_map * gaussian_weight;
// weight = gaussian_weight;
// weight = 1.0;
acc_A += tempf * weight;
count += weight;
}
float4 res = acc_A/fabs(count);
res.w = 1.0f;
write_imagef(output, (int2)(gidx,gidy), res);
//write_imagef(output, (int2)(gidx,gidy), F4_ABS(res));
}
"""
blr = cl_builder.build(
"guided_bilateral", src, (cl.cl_float, cl.cl_float, cl.cl_image, cl.cl_image, cl.cl_image)
)
l1 = cl_builder.new_image_from_ndarray(original)
cl_builder.run(blr, [radius, preserve], (grad, l1), l0)
for _ in range(8):
cl_builder.run(blr, [radius, preserve], (grad, l0), l1)
cl_builder.run(blr, [radius, preserve], (grad, l1), l0)
return l0.to_numpy()
def median_filter(pix, radius):
src = f"""
#define RADIUS {radius}
#define READP(x,y) read_imagef(input, sampler, (int2)(x, y))
kernel void wirth_median_{radius}(
const int width,
const int height,
__read_only image2d_t input,
__write_only image2d_t output)
{{
const int x = get_global_id(0);
const int y = get_global_id(1);
const sampler_t sampler = \
CLK_NORMALIZED_COORDS_FALSE |
CLK_ADDRESS_CLAMP_TO_EDGE |
CLK_FILTER_NEAREST;
float rcol[4] = {{0.0, 0.0, 0.0, 1.0}};
float a[RADIUS][RADIUS*RADIUS];
for (int m = 0; m < RADIUS; m++) {{
for (int n = 0; n < RADIUS; n++) {{
float4 ta = READP(x + n - (RADIUS / 2), y + m - (RADIUS / 2));
a[0][n+RADIUS*m] = ta.x;
a[1][n+RADIUS*m] = ta.y;
a[2][n+RADIUS*m] = ta.z;
}}
}}
// Wirth median
for (int z=0; z<RADIUS; z++) {{
int k = (RADIUS*RADIUS)/2;
int n = (RADIUS*RADIUS);
int i,j,l,m;
float val;
l=0;
m=n-1;
while (l < m) {{
val = a[z][k];
i=l;
j=m;
do {{
while (a[z][i] < val) i++;
while (val < a[z][j]) j--;
if (i<=j) {{
float tmp = a[z][i];
a[z][i] = a[z][j];
a[z][j] = tmp;
i++; j--;
}}
}} while (i <= j);
if (j < k) l=i;
if (k < i) m=j;
}}
rcol[z] = a[z][k];
}}
write_imagef(output, (int2)(x, y), (float4)(rcol[0], rcol[1], rcol[2], 1.0f));
}}"""
k = cl_builder.build(
"wirth_median_" + repr(radius), src, (cl.cl_int, cl.cl_int, cl.cl_image, cl.cl_image)
)
img = cl_builder.new_image_from_ndarray(pix)
out = cl_builder.new_image(img.width, img.height)
cl_builder.run(k, [], [img.data], [out.data], shape=(img.height, img.width))
return out.to_numpy()
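# The kernel above gathers a RADIUS x RADIUS neighbourhood per channel and uses
# Wirth's selection algorithm (a quickselect variant) to pick the median element
# without fully sorting the window, keeping the per-pixel cost modest for larger
# radii.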
def vectors_to_nmap(vectors):
nmap = np.empty((vectors.shape[0], vectors.shape[1], 4), dtype=np.float32)
vectors *= 0.5
nmap[:, :, 0] = vectors[:, :, 0] + 0.5
nmap[:, :, 1] = vectors[:, :, 1] + 0.5
nmap[:, :, 2] = vectors[:, :, 2] + 0.5
nmap[..., 3] = 1.0
return nmap
def nmap_to_vectors(nmap):
vectors = np.empty((nmap.shape[0], nmap.shape[1], 4), dtype=np.float32)
vectors[..., 0] = nmap[..., 0] - 0.5
vectors[..., 1] = nmap[..., 1] - 0.5
vectors[..., 2] = nmap[..., 2] - 0.5
vectors *= 2.0
vectors[..., 3] = 1.0
return vectors
def normalize(pix, save_alpha=False):
# TODO: HSL or Lab lightness normalization, maintain chroma
if save_alpha:
A = pix[..., 3]
t = pix - np.min(pix)
t = t / np.max(t)
if save_alpha:
t[..., 3] = A
return t
def sharpen(pix, width, intensity):
A = pix[..., 3]
gas = gaussian_repeat(pix, width)
pix += (pix - gas) * intensity
pix[..., 3] = A
return pix
def hi_pass(pix, s):
bg = pix.copy()
pix = (bg - gaussian_repeat(pix, s)) * 0.5 + 0.5
pix[:, :, 3] = bg[:, :, 3]
return pix
def hist_match(source, template):
"""
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image
Arguments:
-----------
source: np.ndarray
Image to transform; the histogram is computed over the flattened
array
template: np.ndarray
Template image; can have different dimensions to source
Returns:
-----------
matched: np.ndarray
The transformed output image
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# get the set of unique pixel values and their corresponding indices and
# counts
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True, return_counts=True)
t_values, t_counts = np.unique(template, return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
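# Minimal usage sketch (the arrays below are hypothetical, for illustration only):
#   src = np.random.rand(64, 64).astype(np.float32)
#   ref = np.random.rand(64, 64).astype(np.float32) ** 2
#   matched = hist_match(src, ref)  # keeps src's layout, adopts ref's histogram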
def gaussianize(source, NG=1000):
"Make histogram into gaussian, save transform"
oldshape = source.shape
output = source.copy()
transforms = []
t_values = np.arange(NG * 8 + 1) / (NG * 8)
t_counts = gauss_curve(NG * 4)
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_max = 0.0
for i in range(3):
# s_values, bin_idx, s_counts = np.lib.arraysetops.unique(
s_values, bin_idx, s_counts = np.unique(
source[..., i].ravel(), return_inverse=True, return_counts=True
)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
s_max = s_quantiles[-1]
if s_max > t_max:
t_max = s_max
transforms.append([s_values, s_quantiles, s_max])
tv = np.interp(s_quantiles, t_quantiles, t_values)[bin_idx]
output[..., i] = tv.reshape(oldshape[:2])
return output, transforms
def degaussianize(source, transforms):
"Make a Gaussianized histogram back to the original using the transform"
oldshape = source.shape
output = source.copy()
for i in range(3):
s_values, bin_idx, s_counts = np.unique(
output[..., i].ravel(), return_inverse=True, return_counts=True
)
t_values, t_quantiles, _ = transforms[i]
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
tv = np.interp(s_quantiles, t_quantiles, t_values)[bin_idx]
output[..., i] = tv.reshape(oldshape[:2])
return output
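# gaussianize/degaussianize appear intended as a round trip: reshape each
# channel's histogram into a Gaussian, process the image in that space, then map
# the result back through the saved per-channel transforms.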
def hi_pass_balance(pix, s, zoom, scalers, into_lch=True):
from .oklab import LCh_to_srgb, srgb_to_LCh
    if scalers is None or scalers == []:
scalers = [1.0, 1.0, 1.0]
assert len(scalers) == 3
assert type(scalers[0]) == float
# separate hue, saturation, value
if into_lch:
pix = srgb_to_LCh(pix)
# save original
bg = pix.copy()
# limit middle sampler max dimensions to the image max dimensions
yzm = pix.shape[0] // 2
xzm = pix.shape[1] // 2
yzoom = zoom if zoom < yzm else yzm
xzoom = zoom if zoom < xzm else xzm
# middle value = (low + high) / 2
pixmin = np.min(pix)
pixmax = np.max(pix)
med = (pixmin + pixmax) / 2
# high pass
# TODO: np.mean
gas = gaussian_repeat(pix - med, s) + med
pix = (pix - gas) * 0.5 + 0.5
# use the middle sampler to normalize histogram
for c in range(3):
pix[..., c] = hist_match(
pix[..., c], bg[yzm - yzoom : yzm + yzoom, xzm - xzoom : xzm + xzoom, c]
)
# apply scalers
for c in range(3):
assert scalers[c] >= 0.0 and scalers[c] <= 1.0
pix[..., c] = pix[..., c] * scalers[c] + bg[..., c] * (1.0 - scalers[c])
pix[..., 3] = bg[..., 3]
if into_lch:
pix = LCh_to_srgb(pix)
return pix
def hgram_equalize(pix, intensity, atest):
old = pix.copy()
# aw = np.argwhere(pix[..., 3] > atest)
aw = (pix[..., 3] > atest).nonzero()
aws = (aw[0], aw[1])
# aws = (aw[:, 0], aw[:, 1])
for c in range(3):
t = pix[..., c][aws]
pix[..., c][aws] = np.sort(t).searchsorted(t)
# pix[..., c][aws] = np.argsort(t)
pix[..., :3] /= np.max(pix[..., :3])
return old * (1.0 - intensity) + pix * intensity
def normals_simple(pix, source):
pix = grayscale(pix)
pix = normalize(pix)
steepness = 1.0
# TODO: better vector calc, not just side pixels
src = """
#define READP(x,y) read_imagef(input, sampler, (int2)(x, y))
kernel void height_to_normals(
const int width,
const int height,
const float steepness,
__read_only image2d_t input,
__write_only image2d_t output
)
{
int x = get_global_id(0);
int y = get_global_id(1);
const sampler_t sampler = \
CLK_NORMALIZED_COORDS_FALSE |
CLK_ADDRESS_CLAMP_TO_EDGE |
CLK_FILTER_NEAREST;
float4 pix = read_imagef(input, sampler, (int2)(x, y));
// sobel operator
float x_comp = READP(x-1, y).x
+READP(x-1, y+1).x
+READP(x-1, y-1).x
- READP(x+1, y).x
- READP(x+1, y+1).x
- READP(x+1, y-1).x;
float y_comp = READP(x, y-1).x
+ READP(x+1, y-1).x
+ READP(x-1, y-1).x
- READP(x, y+1).x
- READP(x+1, y+1).x
- READP(x-1, y+1).x;
float2 grad = (float2)(x_comp, y_comp);
float l = length(grad);
grad /= l;
// from pythagoras
float hg;
hg = l < 1.0f ? sqrt(1.0f - l*l) : 0.0f;
float4 out = (float4)(x_comp*0.5 + 0.5, y_comp*0.5 + 0.5, hg*0.5 + 0.5, 1.0f);
write_imagef(output, (int2)(x,y), out);
}
"""
blr = cl_builder.build(
"height_to_normals", src, (cl.cl_int, cl.cl_int, cl.cl_float, cl.cl_image, cl.cl_image)
)
img = cl_builder.new_image_from_ndarray(pix)
out = cl_builder.new_image(img.width, img.height)
assert steepness != 0.0
cl_builder.run(blr, [steepness], [img.data], [out.data], shape=img.shape)
return out.to_numpy()
def normals_to_curvature(pix):
intensity = 1.0
curve = np.zeros((pix.shape[0], pix.shape[1]), dtype=np.float32)
vectors = nmap_to_vectors(pix)
# y_vec = np.array([1, 0, 0], dtype=np.float32)
# x_vec = np.array([0, 1, 0], dtype=np.float32)
# yd = vectors.dot(x_vec)
# xd = vectors.dot(y_vec)
xd = vectors[:, :, 0]
yd = vectors[:, :, 1]
# curve[0,0] = yd[1,0]
curve[:-1, :] += yd[1:, :]
curve[-1, :] += yd[0, :]
# curve[0,0] = yd[-1,0]
curve[1:, :] -= yd[:-1, :]
curve[0, :] -= yd[-1, :]
# curve[0,0] = xd[1,0]
curve[:, :-1] += xd[:, 1:]
curve[:, -1] += xd[:, 0]
# curve[0,0] = xd[-1,0]
curve[:, 1:] -= xd[:, :-1]
curve[:, 0] -= xd[:, -1]
# normalize
dv = max(abs(np.min(curve)), abs(np.max(curve)))
curve /= dv
# 0 = 0.5 grey
curve = curve * intensity + 0.5
pix[..., 0] = curve
pix[..., 1] = curve
pix[..., 2] = curve
return pix
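# Curvature is approximated here as a wrap-around divergence of the normal map's
# X/Y components (neighbouring tangent values are differenced via the shifted
# adds/subtracts above), then rescaled so that zero curvature maps to 0.5 grey.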
def gauss_seidel_cl(w, h, h2, target, inp, outp):
# TODO: fix name
src = """
__kernel void curvature_to_height(
const int i_width,
const int i_height,
const float step,
__global const float *input,
__global const float *target,
__global float *output
)
{
int x = get_global_id(0);
int y = get_global_id(1);
int loc = x + y * i_width;
float t = 0.0f;
t += x > 0 ? input[loc-1] : input[loc+(i_width-1)];
t += y > 0 ? input[loc-i_width] : input[loc+(i_height-1)*i_width];
t += x < i_width-1 ? input[loc+1] : input[loc-(i_width-1)];
t += y < i_height-1 ? input[loc+i_width] : input[loc-(i_height-1)*i_width];
t *= 0.25;
t -= step * target[loc];
output[loc] = t;
}
"""
cth = cl_builder.build(
"curvature_to_height",
src,
(cl.cl_int, cl.cl_int, cl.cl_float, cl.cl_mem, cl.cl_mem, cl.cl_mem),
)
assert w % 8 == 0, "Image width must be divisible by 8"
assert h % 8 == 0, "Image height must be divisible by 8"
# cl_builder.run_buffer(cth, [w, h, h2, inp, target], outp, shape=(h, w))
# kernel, params, inputs, outputs
cl_builder.run(cth, [h2], [inp, target], [outp], shape=(h, w))
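# Each call above performs one Jacobi-style relaxation sweep (the "Gauss-Seidel"
# name is the TODO noted above): every pixel becomes the average of its four
# wrap-around neighbours minus step * target, i.e. one iteration towards a
# Poisson-type equation whose source term is the target buffer; callers
# ping-pong the two buffers to iterate.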
def curvature_to_height(image, h2, iterations=2000):
target = image[..., 0]
# TODO: from grayscale, not just 1 component
w, h = target.shape[1], target.shape[0]
f = cl_builder.to_buffer(target)
ping = cl_builder.to_buffer(np.ones_like(target) * 0.5)
pong = cl_builder.to_buffer(np.zeros_like(target))
for ic in range(iterations):
gauss_seidel_cl(w, h, h2, f, ping, pong)
gauss_seidel_cl(w, h, h2, f, pong, ping)
res_v, evt = cl.buffer_to_ndarray(cl_builder.queue, ping, like=image[..., 0])
evt.wait()
u = res_v
u = -u
u -= np.min(u)
u /= np.max(u)
return np.dstack([u, u, u, image[..., 3]])
def normals_to_height(image, iterations=2000, intensity=1.0, step=1.0):
vectors = nmap_to_vectors(image)
vectors *= intensity
target = np.roll(vectors[..., 0], 1, axis=1)
target -= np.roll(vectors[..., 0], -1, axis=1)
target += np.roll(vectors[..., 1], 1, axis=0)
target -= np.roll(vectors[..., 1], -1, axis=0)
target *= 0.125
w, h = target.shape[1], target.shape[0]
f = cl_builder.to_buffer(target)
ping = cl_builder.to_buffer(np.ones_like(target) * 0.5)
pong = cl_builder.to_buffer(np.zeros_like(target))
for ic in range(iterations):
gauss_seidel_cl(w, h, step, f, ping, pong)
gauss_seidel_cl(w, h, step, f, pong, ping)
res_v, evt = cl.buffer_to_ndarray(cl_builder.queue, ping, like=image[..., 0])
evt.wait()
u = res_v
u -= np.min(u)
u /= np.max(u)
return np.dstack([u, u, u, image[..., 3]])
def fill_alpha(image, style="black"):
if style == "black":
for c in range(3):
image[..., c] *= image[..., 3]
image[..., 3] = 1.0
return image
else:
cols = [0.5, 0.5, 1.0]
A = image[..., 3]
for c in range(3):
image[..., c] = cols[c] * (1 - A) + image[..., c] * A
image[..., 3] = 1.0
return image
def dog(pix, a, b, threshold):
"Difference of Gaussians with a threshold"
size = max(a, b)
gpix = grayscale(pix)
res = (gaussian_repeat(gpix, a) - gaussian_repeat(gpix, b))[..., :3]
tt = threshold / size
# Xdog Winnemöller et al
pix[..., :3] = np.where(tt >= res, 1.0, 1.0 + np.tanh(40.0 * (tt - res)))
return pix
def gimpify(image):
pixels = np.copy(image)
xs, ys = image.shape[1], image.shape[0]
image = np.roll(image, xs * 2 + xs * 4 * (ys // 2))
sxs = xs // 2
sys = ys // 2
# generate the mask
mask_pix = []
for y in range(0, sys):
zy0 = y / sys + 0.001
zy1 = 1 - y / sys + 0.001
for x in range(0, sxs):
xp = x / sxs
p = 1.0 - zy0 / (1.0 - xp + 0.001)
t = 1.0 - xp / zy1
mask_pix.append(t if t > p else p)
# imask[y, x] = max(, imask[y, x])
tmask = np.array(mask_pix, dtype=np.float32)
tmask = tmask.reshape((sys, sxs))
imask = np.zeros((pixels.shape[0], pixels.shape[1]), dtype=np.float32)
imask[:sys, :sxs] = tmask
imask[imask < 0] = 0
# copy the data into the three remaining corners
imask[0 : sys + 1, sxs:xs] = np.fliplr(imask[0 : sys + 1, 0:sxs])
imask[-sys:ys, 0:sxs] = np.flipud(imask[0:sys, 0:sxs])
imask[-sys:ys, sxs:xs] = np.flipud(imask[0:sys, sxs:xs])
imask[sys, :] = imask[sys - 1, :] # center line
# apply mask
amask = np.empty(pixels.shape, dtype=float)
amask[:, :, 0] = imask
amask[:, :, 1] = imask
amask[:, :, 2] = imask
amask[:, :, 3] = imask
return amask * image + (1.0 - amask) * pixels
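# This mirrors GIMP's classic "Make Seamless" approach: the image is offset by
# half its width and height, and the pyramid-shaped mask built above blends the
# offset copy back over the original so the former borders end up in the
# smoothly blended interior.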
def inpaint_tangents(pixels, threshold):
# invalid = pixels[:, :, 2] < 0.5 + (self.tolerance * 0.5)
invalid = pixels[:, :, 2] < threshold
# n2 = (
# ((pixels[:, :, 0] - 0.5) * 2) ** 2
# + ((pixels[:, :, 1] - 0.5) * 2) ** 2
# + ((pixels[:, :, 2] - 0.5) * 2) ** 2
# )
# invalid |= (n2 < 0.9) | (n2 > 1.1)
# grow selection
for _ in range(2):
invalid[0, :] = False
invalid[-1, :] = False
invalid[:, 0] = False
invalid[:, -1] = False
invalid = (
np.roll(invalid, 1, axis=0)
| np.roll(invalid, -1, axis=0)
| np.roll(invalid, 1, axis=1)
| np.roll(invalid, -1, axis=1)
)
pixels[invalid] = np.array([0.5, 0.5, 1.0, 1.0])
invalid[0, :] = False
invalid[-1, :] = False
invalid[:, 0] = False
invalid[:, -1] = False
# fill
front = np.copy(invalid)
locs = [(0, -1, 1), (0, 1, -1), (1, -1, 1), (1, 1, -1)]
for i in range(4):
print("fill step:", i)
for l in locs:
r = np.roll(front, l[1], axis=l[0])
a = (r != front) & front
pixels[a] = pixels[np.roll(a, l[2], axis=l[0])]
front[a] = False
cl = np.roll(invalid, -1, axis=0)
cr = np.roll(invalid, 1, axis=0)
uc = np.roll(invalid, -1, axis=1)
bc = np.roll(invalid, 1, axis=1)
# smooth
for i in range(4):
print("smooth step:", i)
pixels[invalid] = (pixels[invalid] + pixels[cl] + pixels[cr] + pixels[uc] + pixels[bc]) / 5
return pixels
def normalize_tangents(image):
vectors = nmap_to_vectors(image)[..., :3]
vectors = (vectors.T / np.linalg.norm(vectors, axis=2).T).T
retarr = vectors_to_nmap(vectors)
return retarr
def texture_to_normals(image, high, mid, low):
# imgg = gaussian_repeat(image, 4)
g = grayscale(image)
b = curvature_to_height(g, 0.5, iterations=100)
c = curvature_to_height(g, 0.5, iterations=1000)
d = normals_simple(g * high + b * mid + c * low, "Luminance")
d = normals_to_height(d, iterations=500, step=0.5)
d = normals_simple(d, "Luminance")
# d = srgb_to_linear(d)
return d
def knife_seamless(image, v_margin, h_margin, step, m_constraint, smooth, weights):
def diffblocks(a, b, constrain_middle, wg):
l = len(a)
if constrain_middle >= 0.0 and constrain_middle <= 15.0:
penalty = np.abs(((np.arange(l) - (l - 1) * 0.5) * 2.0 / (l - 1))) ** (
constrain_middle + 1.0
)
else:
penalty = 0.0
# assert np.all(penalty) >= 0.0
# assert np.all(penalty) <= 1.0
diff = np.abs(a - b)
# diff = a
# normalize
# diff += np.min(diff)
# diffm = np.max(diff)
# if diffm > 0.0:
# diff /= diffm
return (
diff[..., 0] * weights[0]
+ diff[..., 1] * weights[1]
+ diff[..., 2] * weights[2]
+ penalty
)
def findmin(ar, loc, step):
minloc = loc
lar = len(ar)
for x in range(-step, step + 1):
if loc + x >= 0 and loc + x < lar and ar[loc + x] < ar[minloc]:
minloc = loc + x
return minloc
def copy_to_v(image, img_orig, sr, rv, y):
# sr = stripe width / 2, y = stripe location, rv = cut location
w = image.shape[1]
hw = w // 2
L2 = 8
L = L2 * 2
image[y, hw - sr : hw - sr + rv, :] = img_orig[y, -2 * sr : -2 * sr + rv, :]
image[y, hw - sr + rv : hw + sr, :] = img_orig[y, rv : sr * 2, :]
la = hw - sr + rv
lb = rv
# blending between the two border images
for i in range(L):
l = i - L2
d = i / (L - 1)
lval = img_orig[y, -2 * sr + rv + l, :]
rval = img_orig[y, lb + l, :]
# blend more of the selection which has higher lightness
d = d + (rval[..., 0] * d - lval[..., 0] * (1.0 - d)) * 2.0
if d < 0.0:
d = 0.0
if d > 1.0:
d = 1.0
image[y, la + l, :] = lval * (1.0 - d) + rval * d
def copy_to_h(image, img_orig, sr, rv, x):
h = image.shape[0]
hh = h // 2
image[hh - sr : hh - sr + rv, x, :] = img_orig[h - 2 * sr : h - 2 * sr + rv, x, :]
r2 = sr * 2 - rv
image[hh + sr - r2 : hh + sr, x, :] = img_orig[sr * 2 - r2 : sr * 2, x, :]
h, w = image.shape[0], image.shape[1]
# if self.square:
# max_space = min(h, w)
# h_margin += w - max_space
# v_margin += h - max_space
# new_width = w
# new_height = h
# Make sure result is divisible by 8
v_margin += -((h + v_margin) % 16)
h_margin += -((w + h_margin) % 16)
v_margin //= 2
h_margin //= 2
# -- vertical cut
if smooth > 0:
smoothed = gaussian_repeat(image, smooth)
else:
smoothed = image.copy()
img_orig = image.copy()
hw = w // 2
# right on left
image[:, : hw + h_margin, :] = img_orig[:, hw - h_margin :, :]
# left on right
image[:, hw - h_margin :, :] = img_orig[:, : hw + h_margin, :]
abr = diffblocks(
smoothed[0, -(2 * h_margin) :, :], smoothed[0, : h_margin * 2, :], m_constraint, weights
)
rv = np.argmin(abr)
for y in range(h):
abr = diffblocks(
smoothed[y, -(2 * h_margin) :, :], smoothed[y, : h_margin * 2, :], m_constraint, weights
)
rv = findmin(abr, rv, step)
copy_to_v(image, img_orig, h_margin, rv, y)
# -- horizontal cut
if smooth > 0:
smoothed = gaussian_repeat(image, smooth)
else:
smoothed = image.copy()
img_orig = image.copy()
hw = h // 2
image[: hw + v_margin, ...] = img_orig[hw - v_margin :, ...]
image[hw - v_margin :, ...] = img_orig[: hw + v_margin, ...]
abr = diffblocks(
smoothed[-(2 * v_margin) :, 0, :], smoothed[: v_margin * 2, 0, :], m_constraint, weights
)
rv = np.argmin(abr)
for x in range(w):
abr = diffblocks(
smoothed[-(2 * v_margin) :, x, :], smoothed[: v_margin * 2, x, :], m_constraint, weights
)
rv = findmin(abr, rv, step)
copy_to_h(image, img_orig, v_margin, rv, x)
print(image.shape, img_orig.shape, v_margin, h_margin)
return image[v_margin:-v_margin, h_margin:-h_margin]
def crop_to_square(image):
h, w = image.shape[0], image.shape[1]
offx = w // 2
offy = h // 2
if h > w:
h = w
if w > h:
w = h
# make compatible with CL calcs
w = w - (w % 8)
h = h - (h % 8)
xt = w // 2
yt = w // 2
# crop to center
image = image[offy - yt : offy + yt, offx - xt : offx + xt]
return image
class Grayscale_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "grayscale"
self.info = "Grayscale from RGB"
self.category = "Basic"
self.payload = lambda self, image, context: grayscale(image)
class Random_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "random"
self.info = "Random RGB pixels"
self.category = "Basic"
def _pl(self, image, context):
t = np.random.random(image.shape)
t[..., 3] = 1.0
return t
self.payload = _pl
class Swizzle_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["order_a"] = bpy.props.StringProperty(name="Order A", default="RGBA")
self.props["order_b"] = bpy.props.StringProperty(name="Order B", default="RBGa")
self.props["direction"] = bpy.props.EnumProperty(
name="Direction", items=[("ATOB", "A to B", "", 1), ("BTOA", "B to A", "", 2)]
)
self.prefix = "swizzle"
self.info = "Channel swizzle"
self.category = "Basic"
def _pl(self, image, context):
test_a = self.order_a.upper()
test_b = self.order_b.upper()
if len(test_a) != 4 or len(test_b) != 4:
self.report({"INFO"}, "Swizzle channel count must be 4")
return image
if set(test_a) != set(test_b):
self.report({"INFO"}, "Swizzle channels must have same names")
return image
first = self.order_a
second = self.order_b
if self.direction == "BTOA":
first, second = second, first
temp = image.copy()
for i in range(4):
fl = first[i].upper()
t = second.upper().index(fl)
if second[t] != first[i]:
temp[..., t] = 1.0 - image[..., i]
else:
temp[..., t] = image[..., i]
return temp
self.payload = _pl
class Normalize_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "normalize"
self.info = "Normalize"
self.category = "Basic"
def _pl(self, image, context):
tmp = image[..., 3]
res = normalize(image)
res[..., 3] = tmp
return res
self.payload = _pl
class CropToP2_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "crop_to_power"
self.info = "Crops the middle of the image to power of twos"
self.category = "Dimensions"
def _pl(self, image, context):
h, w = image.shape[0], image.shape[1]
offx = 0
offy = 0
wpow = int(np.log2(w))
hpow = int(np.log2(h))
offx = (w - 2 ** wpow) // 2
offy = (h - 2 ** hpow) // 2
if w > 2 ** wpow:
w = 2 ** wpow
if h > 2 ** hpow:
h = 2 ** hpow
# crop to center
image = image[offy : offy + h, offx : offx + w]
return image
self.payload = _pl
class CropToSquare_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "crop_to_square"
self.info = "Crop the middle to square with two divisible height and width"
self.category = "Dimensions"
def _pl(self, image, context):
return crop_to_square(image)
self.payload = _pl
class Sharpen_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["width"] = bpy.props.IntProperty(name="Width", min=2, default=5)
self.props["intensity"] = bpy.props.FloatProperty(name="Intensity", min=0.0, default=1.0)
self.prefix = "sharpen"
self.info = "Simple sharpen"
self.category = "Filter"
self.payload = lambda self, image, context: sharpen(image, self.width, self.intensity)
class DoG_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["width_a"] = bpy.props.IntProperty(name="Width A", min=2, default=5)
self.props["width_b"] = bpy.props.IntProperty(name="Width B", min=2, default=4)
self.props["threshold"] = bpy.props.FloatProperty(
name="Threshold", min=0.0, max=1.0, default=0.01
)
self.props["preserve"] = bpy.props.BoolProperty(name="Preserve", default=True)
# self.props["intensity"] = bpy.props.FloatProperty(name="Intensity", min=0.0, default=1.0)
self.prefix = "dog"
self.info = "DoG"
self.category = "Advanced"
def _pl(self, image, context):
t = image.copy()
d = dog(image, self.width_a, self.width_b, self.threshold)
if self.preserve:
return t * d
else:
return d
self.payload = _pl
class TextureToNormals_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["high_freq"] = bpy.props.FloatProperty(
name="High frequency", min=0.0, max=1.0, default=0.1
)
self.props["mid_freq"] = bpy.props.FloatProperty(
name="Mid frequency", min=0.0, max=1.0, default=0.2
)
self.props["low_freq"] = bpy.props.FloatProperty(
name="Low frequency", min=0.0, max=1.0, default=0.7
)
self.prefix = "texture_to_normals"
self.info = "Texture to Normals"
self.category = "Advanced"
def _pl(self, image, context):
# # imgg = gaussian_repeat(image, 4)
# g = grayscale(image)
# b = curvature_to_height(g, 0.5, iterations=100)
# c = curvature_to_height(g, 0.5, iterations=1000)
# d = normals_simple(
# g * self.high_freq + b * self.mid_freq + c * self.low_freq, "Luminance"
# )
# d = normals_to_height(d, iterations=500, step=0.5)
# d = normals_simple(d, "Luminance")
return texture_to_normals(image, self.high_freq, self.mid_freq, self.low_freq)
self.payload = _pl
class FillAlpha_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["style"] = bpy.props.EnumProperty(
name="Style",
items=[("black", "Black color", "", 1), ("tangent", "Neutral tangent", "", 2)],
)
self.prefix = "fill_alpha"
self.info = "Fill alpha with color or normal"
self.category = "Basic"
self.payload = lambda self, image, context: fill_alpha(image, style=self.style)
class GaussianBlur_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["width"] = bpy.props.IntProperty(name="Width", min=1, default=20)
# self.props["intensity"] = bpy.props.FloatProperty(name="Intensity", min=0.0, default=1.0)
self.prefix = "gaussian_blur"
self.info = "Does a Gaussian blur"
self.category = "Filter"
self.payload = lambda self, image, context: gaussian_repeat(image, self.width)
class Median_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
# self.props["width"] = bpy.props.IntProperty(name="Width", min=3, max=9, default=3)
self.props["width"] = bpy.props.EnumProperty(
name="Width",
items=[
("3", "3", "", 3),
("5", "5", "", 5),
("9", "9", "", 9),
("15", "15 (crash your computer)", "", 15),
],
default="5",
)
self.prefix = "median_filter"
self.info = "Median filter"
self.category = "Filter"
self.payload = lambda self, image, context: median_filter(image, int(self.width))
class Bilateral_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["radius"] = bpy.props.FloatProperty(
name="Radius", min=0.01, max=100.0, default=10.0
)
self.props["preserve"] = bpy.props.FloatProperty(
name="Preserve", min=0.01, max=100.0, default=20.0
)
self.prefix = "bilateral"
self.info = "Bilateral"
self.category = "Filter"
self.payload = lambda self, image, context: bilateral_cl(image, self.radius, self.preserve)
class DirectionalBilateral_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["radius"] = bpy.props.FloatProperty(
name="Radius", min=0.01, max=100.0, default=10.0
)
self.props["preserve"] = bpy.props.FloatProperty(
name="Preserve", min=0.01, max=100.0, default=20.0
)
self.prefix = "directional_blur"
self.info = "Directional bilateral"
self.category = "Advanced"
self.payload = lambda self, image, context: directional_blur_cl(
image, self.radius, self.preserve
)
class HiPass_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["width"] = bpy.props.IntProperty(name="Width", min=1, default=20)
# self.props["intensity"] = bpy.props.FloatProperty(name="Intensity", min=0.0, default=1.0)
self.prefix = "high_pass"
self.info = "High pass"
self.category = "Filter"
self.payload = lambda self, image, context: hi_pass(image, self.width)
class HiPassBalance_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["width"] = bpy.props.IntProperty(name="Width", min=1, default=50)
self.props["zoom"] = bpy.props.IntProperty(name="Center slice", min=5, default=200)
self.props["hue"] = bpy.props.BoolProperty(name="Preserve hue", default=True)
self.props["sat"] = bpy.props.BoolProperty(name="Preserve chroma", default=False)
# self.props["A"] = bpy.props.FloatProperty(name="C1", default=1.0, min=0.0, max=1.0)
# self.props["B"] = bpy.props.FloatProperty(name="C2", default=1.0, min=0.0, max=1.0)
# self.props["C"] = bpy.props.FloatProperty(name="C3", default=1.0, min=0.0, max=1.0)
self.prefix = "hipass_balance"
self.info = "Remove low frequencies from the image"
self.category = "Balance"
self.payload = lambda self, image, context: hi_pass_balance(
# image, self.width, self.zoom, [self.A, self.B, self.C], into_lch=self.lch
image,
self.width,
self.zoom,
[1.0, 1.0 - 1.0 * self.sat, 1.0 - 1.0 * self.hue],
into_lch=True,
)
class ContrastBalance_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "contrast_balance"
self.info = "Balance contrast"
self.category = "Balance"
self.props["gA"] = bpy.props.IntProperty(name="Range", min=1, max=256, default=20)
self.props["gB"] = bpy.props.IntProperty(name="Error", min=1, max=256, default=40)
self.props["strength"] = bpy.props.FloatProperty(name="Strength", min=0.0, default=1.0)
def _pl(self, image, context):
tmp = image.copy()
# squared error
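# The blurred error map (inverted and normalised below) damps detail in
# already high-contrast regions and restores it in flat ones; "strength"
# scales the effect around 1.0.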
gcr = gaussian_repeat(tmp, self.gA)
error = (tmp - gcr) ** 2
mask = -gaussian_repeat(error, self.gB)
mask -= np.min(mask)
mask /= np.max(mask)
mask = (mask - 0.5) * self.strength + 1.0
res = gcr + mask * (tmp - gcr)
res[..., 3] = tmp[..., 3]
return res
self.payload = _pl
class HistogramEQ_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["intensity"] = bpy.props.FloatProperty(
name="Intensity", min=0.0, max=1.0, default=1.0
)
self.prefix = "histogram_eq"
self.info = "Histogram equalization"
self.category = "Advanced"
self.payload = lambda self, image, context: hgram_equalize(image, self.intensity, 0.5)
class Gaussianize_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["count"] = bpy.props.IntProperty(name="Count", min=10, max=100000, default=1000)
self.prefix = "gaussianize"
self.info = "Gaussianize histogram"
self.category = "Advanced"
self.payload = lambda self, image, context: gaussianize(image, NG=self.count)[0]
class GimpSeamless_IOP(image_ops.ImageOperatorGenerator):
"""Image seamless generator operator"""
# TODO: the smoothing is not complete, it goes only one way
def generate(self):
self.prefix = "gimp_seamless"
self.info = "Gimp style seamless image operation"
self.category = "Seamless"
self.payload = lambda self, image, context: gimpify(image)
class KnifeSeamless_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "knife_seamless"
self.info = "Optimal knife cut into seamless"
self.category = "Seamless"
self.props["step"] = bpy.props.IntProperty(name="Step", min=1, max=16, default=3)
self.props["margin"] = bpy.props.IntProperty(name="Margin", min=4, max=256, default=40)
self.props["smooth"] = bpy.props.IntProperty(
name="Cut smoothing", min=0, max=64, default=16
)
self.props["constrain"] = bpy.props.FloatProperty(
name="Middle constraint", min=0.0, max=15.0, default=2.0
)
# self.props["square"] = bpy.props.BoolProperty(name="To square", default=False)
# def diffblocks(a, b, constrain_middle):
# l = len(a)
# if constrain_middle >= 0.0 and constrain_middle <= 15.0:
# penalty = np.abs(((np.arange(l) - (l - 1) * 0.5) * 2.0 / (l - 1))) ** (
# constrain_middle + 1.0
# )
# else:
# penalty = 0.0
# # assert np.all(penalty) >= 0.0
# # assert np.all(penalty) <= 1.0
# # TODO: adding power might be better
# # return rgb_to_luminance(np.abs(a - b)) ** 2.0 + penalty
# return rgb_to_luminance(np.abs(a - b)) + penalty
# def findmin(ar, loc, step):
# minloc = loc
# lar = len(ar)
# for x in range(-step, step + 1):
# if loc + x >= 0 and loc + x < lar and ar[loc + x] < ar[minloc]:
# minloc = loc + x
# return minloc
# def copy_to_v(image, img_orig, sr, rv, y):
# w = image.shape[1]
# hw = w // 2
# image[y, hw - sr : hw - sr + rv, :] = img_orig[y, w - 2 * sr : w - 2 * sr + rv, :]
# r2 = sr * 2 - rv
# image[y, hw + sr - r2 : hw + sr, :] = img_orig[y, sr * 2 - r2 : sr * 2, :]
# def copy_to_h(image, img_orig, sr, rv, y):
# w = image.shape[0]
# hw = w // 2
# image[hw - sr : hw - sr + rv, y, :] = img_orig[w - 2 * sr : w - 2 * sr + rv, y, :]
# r2 = sr * 2 - rv
# image[hw + sr - r2 : hw + sr, y, :] = img_orig[sr * 2 - r2 : sr * 2, y, :]
def _pl(self, image, context):
h, w = image.shape[0], image.shape[1]
print(w, h)
v_margin = self.margin
h_margin = self.margin
step = self.step
m_constraint = 16.0 - self.constrain
# if self.square:
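# Force a square result by folding the excess width/height into the seam margins.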
max_space = min(h, w)
h_margin += w - max_space
v_margin += h - max_space
print(h_margin, v_margin)
from .oklab import srgb_to_LCh, LCh_to_srgb
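# Work in LCh space; the [1.0, 0.0, 0.0] weights presumably make the seam cost depend on lightness only.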
kr = knife_seamless(
srgb_to_LCh(image),
v_margin,
h_margin,
step,
m_constraint,
self.smooth,
[1.0, 0.0, 0.0],
)
return LCh_to_srgb(kr)
# h, w = image.shape[0], image.shape[1]
# # new_width = w
# # new_height = h
# # -- vertical cut
# if self.smooth > 0:
# smoothed = gaussian_repeat(image, self.smooth)
# else:
# smoothed = image.copy()
# img_orig = image.copy()
# hw = w // 2
# # right on left
# image[:, : hw + h_margin, :] = img_orig[:, hw - h_margin :, :]
# # left on right
# image[:, hw - h_margin :, :] = img_orig[:, : hw + h_margin, :]
# abr = diffblocks(
# smoothed[0, -(2 * h_margin) :, :], smoothed[0, : h_margin * 2, :], m_constraint
# )
# rv = np.argmin(abr)
# for y in range(h):
# abr = diffblocks(
# smoothed[y, -(2 * h_margin) :, :], smoothed[y, : h_margin * 2, :],
# m_constraint
# )
# rv = findmin(abr, rv, step)
# copy_to_v(image, img_orig, h_margin, rv, y)
# # -- horizontal cut
# if self.smooth > 0:
# smoothed = gaussian_repeat(image, self.smooth)
# else:
# smoothed = image.copy()
# img_orig = image.copy()
# hw = h // 2
# image[: hw + v_margin, ...] = img_orig[hw - v_margin :, ...]
# image[hw - v_margin :, ...] = img_orig[: hw + v_margin, ...]
# abr = diffblocks(
# smoothed[-(2 * v_margin) :, 0, :], smoothed[: v_margin * 2, 0, :], m_constraint
# )
# rv = np.argmin(abr)
# for x in range(w):
# abr = diffblocks(
# smoothed[-(2 * v_margin) :, x, :], smoothed[: v_margin * 2, x, :],
# m_constraint
# )
# rv = findmin(abr, rv, step)
# copy_to_h(image, img_orig, v_margin, rv, x)
# return image[v_margin:-v_margin, h_margin:-h_margin]
self.payload = _pl
class HistogramSeamless_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "histogram_seamless"
self.info = "Seamless histogram blending"
self.category = "Seamless"
def _pl(self, image, context):
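# Gaussianize the histogram, blend the seams Gimp-style, then restore the original distribution.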
gimg, transforms = gaussianize(image)
blended = gimpify(gimg)
return degaussianize(blended, transforms)
self.payload = _pl
class Normals_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
# self.props["intensity"] = bpy.props.FloatProperty(name="Intensity", min=0.0, default=1.0)
self.prefix = "height_to_normals"
self.info = "(Very rough estimate) normal map from RGB"
self.category = "Normals"
self.payload = lambda self, image, context: normals_simple(
# image, self.width, self.intensity, "Luminance"
image,
"Luminance",
)
class NormalsToCurvature_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
# self.props["width"] = bpy.props.IntProperty(name="Width", min=0, default=2)
# self.props["intensity"] = bpy.props.FloatProperty(name="Intensity", min=0.0, default=1.0)
self.prefix = "normals_to_curvature"
self.info = "Curvature map from tangent normal map"
self.category = "Normals"
self.payload = lambda self, image, context: normals_to_curvature(image)
class CurveToHeight_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.props["step"] = bpy.props.FloatProperty(name="Step", min=0.00001, default=0.1)
self.props["iterations"] = bpy.props.IntProperty(name="Iterations", min=10, default=400)
self.prefix = "curvature_to_height"
self.info = "Height from curvature"
self.category = "Normals"
self.payload = lambda self, image, context: curvature_to_height(
image, self.step, iterations=self.iterations
)
class NormalsToHeight_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
# self.props["grid"] = bpy.props.IntProperty(name="Grid subd", min=1, default=4)
self.props["iterations"] = bpy.props.IntProperty(name="Iterations", min=10, default=200)
self.prefix = "normals_to_height"
self.info = "Normals to height"
self.category = "Normals"
self.payload = lambda self, image, context: normals_to_height(
image, iterations=self.iterations
)
class InpaintTangents_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
# self.props["flip"] = bpy.props.BoolProperty(name="Flip direction", default=False)
# self.props["iterations"] = bpy.props.IntProperty(name="Iterations", min=10, default=200)
self.props["threshold"] = bpy.props.FloatProperty(
name="Threshold", min=0.1, max=0.9, default=0.5
)
self.prefix = "inpaint_invalid"
self.info = "Inpaint invalid tangents"
self.category = "Normals"
self.payload = lambda self, image, context: inpaint_tangents(image, self.threshold)
class NormalizeTangents_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "normalize_tangents"
self.info = "Make all tangents length 1"
self.category = "Normals"
self.payload = lambda self, image, context: normalize_tangents(image)
class ImageToMaterial_IOP(image_ops.ImageOperatorGenerator):
def generate(self):
self.prefix = "image_to_material"
self.info = "Image to material"
self.category = "Materials"
self.props["mat_name"] = bpy.props.StringProperty(
name="Name", description="Material name", default="Test"
)
def _pl(self, image, context):
from . import json_material
# json_out = json_material.read_material_nodes_to_json(bpy.data.materials[0])
# print(json_out)
# print(bpy.utils.resource_path('LOCAL'))
# print(os.getcwd())
# print(bpy.utils.user_resource('SCRIPTS', "addons"))
# print(directory)
# with open('mat.json', 'w') as out_file:
# json.dump(json_out, out_file)
import os
with open(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "default_material.json"),
"r",
) as in_file:
json_in = json.load(in_file)
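# Reuse an existing material with this name if present, otherwise create one, then rebuild its nodes from the bundled template.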
mat = bpy.data.materials.get(self.mat_name) or bpy.data.materials.new(self.mat_name)
json_material.overwrite_material_from_json(mat, json_in)
# import pprint
# pp = pprint.PrettyPrinter(indent=4)
# pp.pprint(d_nodes)
base_image = image_ops.get_area_image(context)
base_data = image_ops.image_to_ndarray(base_image)
# ----- Crop to square
print("Crop image to square")
base_data = crop_to_square(base_data)
h, w = base_data.shape[:2]
print(f"({w}, {h}) after crop")
min_dim = min(h, w)
# ----- Make seamless image
print("Make seamless diffuse")
# TODO: check this is optimal
# data_d = hi_pass_balance(base_data, min_dim, min_dim // 2)
data_d = hi_pass_balance(base_data, min_dim, min_dim, [1.0, 0.0, 0.0])
knife_result = knife_seamless(
data_d, h // 3 // 2, w // 3 // 2, 4, 12.0, 8, [1.0, 1.0, 1.0]
)
# Save new width and height after seamless knife cut
h, w = knife_result.shape[:2]
print(f"({w}, {h}) after seamless cut")
img_d = image_ops.image_create_overwrite(base_image.name + "_d", w, h, "sRGB")
image_ops.ndarray_to_image(img_d, knife_result)
mat.node_tree.nodes["Diffuse Texture"].image = img_d
# ----- Create normal map image
print("Make normal map")
img_n = image_ops.image_create_overwrite(base_image.name + "_n", w, h, "Non-Color")
# image_ops.ndarray_to_image(img_n, texture_to_normals(knife_result, 0.1, 0.2, 0.7))
image_ops.ndarray_to_image(img_n, texture_to_normals(knife_result, 0.05, 0.3, 0.6))
mat.node_tree.nodes["Normal Texture"].image = img_n
# ----- Create height map
print("Make height map for roughness")
img_h = image_ops.image_create_overwrite(base_image.name + "_h", w, h, "Non-Color")
image_ops.ndarray_to_image(
img_h, curvature_to_height(knife_result, 0.5, iterations=500)
)
mat.node_tree.nodes["Roughness Texture"].image = img_h
mat.node_tree.nodes["Invert"].inputs["Fac"].default_value = 1.0
mat.node_tree.nodes["Gamma"].inputs["Gamma"].default_value = 0.5
mat.node_tree.nodes["Normal Map"].inputs["Strength"].default_value = 4.0
return image
self.payload = _pl
# class StoreMaterialTemplate_IOP(image_ops.ImageOperatorGenerator):
# def generate(self):
# self.prefix = "store_material_template"
# self.info = "Store material template"
# self.category = "Materials"
# self.props["mat_name"] = bpy.props.StringProperty(
# name="Name", description="Material name", default="Test"
# )
# def _pl(self, image, context):
# from . import json_material
# with open("default_material.json", "w") as out_file:
# json.dump(
# json_material.read_material_nodes_to_json(bpy.data.materials[self.mat_name]),
# out_file,
# )
# return image
# self.payload = _pl
# additional_classes = [BTT_InstallLibraries, BTT_AddonPreferences]
additional_classes = []
register, unregister = image_ops.create(locals(), additional_classes)
|
gpl-2.0
|
MacGyverNL/alot
|
setup.py
|
1
|
1650
|
#!/usr/bin/env python3
from setuptools import setup, find_packages
import alot
setup(
name='alot',
version=alot.__version__,
description=alot.__description__,
author=alot.__author__,
author_email=alot.__author_email__,
url=alot.__url__,
license=alot.__copyright__,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console :: Curses',
'Framework :: AsyncIO',
'Intended Audience :: End Users/Desktop',
(
'License :: OSI Approved'
':: GNU General Public License v3 or later (GPLv3+)'),
'Operating System :: POSIX',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Communications :: Email :: Email Clients (MUA)',
'Topic :: Database :: Front-Ends',
],
packages=find_packages(exclude=['tests*']),
package_data={
'alot': [
'defaults/alot.rc.spec',
'defaults/notmuch.rc.spec',
'defaults/abook_contacts.spec',
'defaults/default.theme',
'defaults/default.bindings',
'defaults/config.stub',
'defaults/theme.spec',
]
},
entry_points={
'console_scripts':
['alot = alot.__main__:main'],
},
install_requires=[
'notmuch>=0.27',
'urwid>=1.3.0',
'urwidtrees>=1.0',
'twisted>=10.2.0',
'python-magic',
'configobj>=4.7.0',
'gpg'
],
provides=['alot'],
test_suite="tests",
python_requires=">=3.6",
)
|
gpl-3.0
|
plowman/python-mcparseface
|
models/syntaxnet/tensorflow/tensorflow/contrib/ffmpeg/encode_audio_op_test.py
|
6
|
1742
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for third_party.tensorflow.contrib.ffmpeg.encode_audio_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
from tensorflow.contrib import ffmpeg
from tensorflow.python.platform import resource_loader
class EncodeAudioOpTest(tf.test.TestCase):
def testRoundTrip(self):
"""Fabricates some audio, creates a wav file, reverses it, and compares."""
with self.test_session():
path = os.path.join(
resource_loader.get_data_files_path(), 'testdata/mono_10khz.wav')
with open(path, 'rb') as f:
original_contents = f.read()
audio_op = ffmpeg.decode_audio(
original_contents, file_format='wav', samples_per_second=10000,
channel_count=1)
encode_op = ffmpeg.encode_audio(
audio_op, file_format='wav', samples_per_second=10000)
encoded_contents = encode_op.eval()
self.assertEqual(original_contents, encoded_contents)
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
|
jalanb/dotjab
|
src/python/y.py
|
2
|
1435
|
"""y not?"""
import os
import argv
argv.add_options([
('delete', 'delete python compiled files as well', False),
('wipe', 'remove known garbage', False),
('stat', 'run svn stat', False),
('tags', 'refresh the tags file', True),
('verbose', 'run ptags verbosely', False),
])
from ls import ly
from repositories import svn
def remove_globs(globs):
for glob in globs:
#print glob, [f for f in argv.first_directory.files(glob)]
for p in argv.first_directory.listdir(glob):
if p.islink():
p.unlink()
elif p.isfile():
p.remove()
elif p.isdir():
p.rmdir()
else:
raise ValueError('Do not know how to remove %s' % p)
def wipe():
remove_globs([
'*~', '.*~', '*.orig', 'fred*', 'mary',
'*.tmp', '*.bak', 'one', 'two'])
_ = [f.rm() for f in argv.first_directory.files('*.fail') if not f.size]
def delete():
remove_globs(['*.pyc', '*.pyo'])
def tags():
import ptags
ptags.read_write_dir(argv.first_directory)
def main():
if argv.first_directory != os.getcwd():
print 'cd', argv.first_directory
argv.first_directory.cd()
for method in argv.methods:
method()
ly.show()
if argv.options.stat:
svn.show_stat(argv.first_directory)
if __name__ == '__main__':
ly.prepare_argv()
argv.main(main)
|
mit
|
ArnossArnossi/django
|
tests/template_tests/test_context.py
|
166
|
5389
|
# -*- coding: utf-8 -*-
from django.http import HttpRequest
from django.template import (
Context, Engine, RequestContext, Template, Variable, VariableDoesNotExist,
)
from django.template.context import RenderContext
from django.test import RequestFactory, SimpleTestCase
class ContextTests(SimpleTestCase):
def test_context(self):
c = Context({"a": 1, "b": "xyzzy"})
self.assertEqual(c["a"], 1)
self.assertEqual(c.push(), {})
c["a"] = 2
self.assertEqual(c["a"], 2)
self.assertEqual(c.get("a"), 2)
self.assertEqual(c.pop(), {"a": 2})
self.assertEqual(c["a"], 1)
self.assertEqual(c.get("foo", 42), 42)
def test_push_context_manager(self):
c = Context({"a": 1})
with c.push():
c['a'] = 2
self.assertEqual(c['a'], 2)
self.assertEqual(c['a'], 1)
with c.push(a=3):
self.assertEqual(c['a'], 3)
self.assertEqual(c['a'], 1)
def test_update_context_manager(self):
c = Context({"a": 1})
with c.update({}):
c['a'] = 2
self.assertEqual(c['a'], 2)
self.assertEqual(c['a'], 1)
with c.update({'a': 3}):
self.assertEqual(c['a'], 3)
self.assertEqual(c['a'], 1)
def test_setdefault(self):
c = Context()
x = c.setdefault('x', 42)
self.assertEqual(x, 42)
self.assertEqual(c['x'], 42)
x = c.setdefault('x', 100)
self.assertEqual(x, 42)
self.assertEqual(c['x'], 42)
def test_resolve_on_context_method(self):
"""
#17778 -- Variable shouldn't resolve RequestContext methods
"""
empty_context = Context()
with self.assertRaises(VariableDoesNotExist):
Variable('no_such_variable').resolve(empty_context)
with self.assertRaises(VariableDoesNotExist):
Variable('new').resolve(empty_context)
self.assertEqual(
Variable('new').resolve(Context({'new': 'foo'})),
'foo',
)
def test_render_context(self):
test_context = RenderContext({'fruit': 'papaya'})
# Test that push() limits access to the topmost dict
test_context.push()
test_context['vegetable'] = 'artichoke'
self.assertEqual(list(test_context), ['vegetable'])
self.assertNotIn('fruit', test_context)
with self.assertRaises(KeyError):
test_context['fruit']
self.assertIsNone(test_context.get('fruit'))
def test_flatten_context(self):
a = Context()
a.update({'a': 2})
a.update({'b': 4})
a.update({'c': 8})
self.assertEqual(a.flatten(), {
'False': False, 'None': None, 'True': True,
'a': 2, 'b': 4, 'c': 8
})
def test_context_comparable(self):
"""
#21765 -- equality comparison should work
"""
test_data = {'x': 'y', 'v': 'z', 'd': {'o': object, 'a': 'b'}}
self.assertEqual(Context(test_data), Context(test_data))
a = Context()
b = Context()
self.assertEqual(a, b)
# update only a
a.update({'a': 1})
self.assertNotEqual(a, b)
# update both to check regression
a.update({'c': 3})
b.update({'c': 3})
self.assertNotEqual(a, b)
# make contexts equals again
b.update({'a': 1})
self.assertEqual(a, b)
def test_copy_request_context_twice(self):
"""
#24273 -- Copy twice shouldn't raise an exception
"""
RequestContext(HttpRequest()).new().new()
class RequestContextTests(SimpleTestCase):
def test_include_only(self):
"""
#15721 -- ``{% include %}`` and ``RequestContext`` should work
together.
"""
engine = Engine(loaders=[
('django.template.loaders.locmem.Loader', {
'child': '{{ var|default:"none" }}',
}),
])
request = RequestFactory().get('/')
ctx = RequestContext(request, {'var': 'parent'})
self.assertEqual(engine.from_string('{% include "child" %}').render(ctx), 'parent')
self.assertEqual(engine.from_string('{% include "child" only %}').render(ctx), 'none')
def test_stack_size(self):
"""
#7116 -- Optimize RequestContext construction
"""
request = RequestFactory().get('/')
ctx = RequestContext(request, {})
# The stack should now contain 4 items:
# [builtins, supplied context, context processor, empty dict]
self.assertEqual(len(ctx.dicts), 4)
def test_context_comparable(self):
# Create an engine without any context processors.
test_data = {'x': 'y', 'v': 'z', 'd': {'o': object, 'a': 'b'}}
# test comparing RequestContext to prevent problems if somebody
# adds __eq__ in the future
request = RequestFactory().get('/')
self.assertEqual(
RequestContext(request, dict_=test_data),
RequestContext(request, dict_=test_data),
)
def test_modify_context_and_render(self):
template = Template('{{ foo }}')
request = RequestFactory().get('/')
context = RequestContext(request, {})
context['foo'] = 'foo'
self.assertEqual(template.render(context), 'foo')
|
bsd-3-clause
|
waseem18/oh-mainline
|
mysite/project/tasks/__init__.py
|
15
|
1736
|
# This file is part of OpenHatch.
# Copyright (C) 2010 Parker Phinney
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.mail import send_mail
def send_email_to_all_because_project_icon_was_marked_as_wrong(project__pk, project__name, project_icon_url):
# links you to the project page
# links you to the secret, wrong project icon
# TODO: figure out if we should be worried about project icons getting deleted.
# I think that we don't need to; provide a justification here.
project_page_url = 'https://openhatch.org/projects/' + project__name
# FIXME: this url
hidden_project_icon_url = 'https://openhatch.org/static/images/icons/projects/'
subject = '[OH]- ' + project__name + ' icon was marked as incorrect'
body = ''
body += 'project name: ' + project__name + '\n'
body += 'project url: ' + project_page_url + '\n'
body += 'project icon url (currently not displayed): ' + \
project_icon_url + '\n'
body += 'thanks'
return send_mail(subject, body, '[email protected]', ['[email protected]'], fail_silently=False)
|
agpl-3.0
|
kei-yamazaki/jenkins-job-builder
|
jenkins_jobs/modules/wrappers.py
|
1
|
54570
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Wrappers can alter the way the build is run as well as the build output.
**Component**: wrappers
:Macro: wrapper
:Entry Point: jenkins_jobs.wrappers
"""
import logging
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.modules.builders import create_builders
from jenkins_jobs.modules.helpers import config_file_provider_builder
def ci_skip(parser, xml_parent, data):
"""yaml: ci-skip
Skip making a build for certain pushes.
Just add [ci skip] to your commit message to let Jenkins know
that you do not want to perform a build for the next push.
Requires the Jenkins :jenkins-wiki:`Ci Skip Plugin <Ci+Skip+Plugin>`.
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/ci-skip001.yaml
"""
rpobj = XML.SubElement(xml_parent, 'ruby-proxy-object')
robj = XML.SubElement(rpobj, 'ruby-object', attrib={
'pluginid': 'ci-skip',
'ruby-class': 'Jenkins::Tasks::BuildWrapperProxy'
})
pluginid = XML.SubElement(robj, 'pluginid', {
'pluginid': 'ci-skip', 'ruby-class': 'String'
})
pluginid.text = 'ci-skip'
obj = XML.SubElement(robj, 'object', {
'ruby-class': 'CiSkipWrapper', 'pluginid': 'ci-skip'
})
XML.SubElement(obj, 'ci__skip', {
'pluginid': 'ci-skip', 'ruby-class': 'NilClass'
})
def config_file_provider(parser, xml_parent, data):
"""yaml: config-file-provider
Provide configuration files (i.e., settings.xml for maven etc.)
which will be copied to the job's workspace.
Requires the Jenkins :jenkins-wiki:`Config File Provider Plugin
<Config+File+Provider+Plugin>`.
:arg list files: List of managed config files made up of three
parameters
:files: * **file-id** (`str`) -- The identifier for the managed config
file
* **target** (`str`) -- Define where the file should be created
(optional)
* **variable** (`str`) -- Define an environment variable to be
used (optional)
Example:
.. literalinclude:: \
/../../tests/wrappers/fixtures/config-file-provider003.yaml
:language: yaml
"""
cfp = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.configfiles.'
'buildwrapper.ConfigFileBuildWrapper')
cfp.set('plugin', 'config-file-provider')
config_file_provider_builder(cfp, data)
def logfilesize(parser, xml_parent, data):
"""yaml: logfilesize
Abort the build if its logfile becomes too big.
Requires the Jenkins :jenkins-wiki:`Logfilesizechecker Plugin
<Logfilesizechecker+Plugin>`.
:arg bool set-own: Use job specific maximum log size instead of global
config value (default false).
:arg bool fail: Make builds aborted by this wrapper be marked as "failed"
(default false).
:arg int size: Abort the build if logfile size is bigger than this
value (in MiB, default 128). Only applies if set-own is true.
Minimum config example:
.. literalinclude:: /../../tests/wrappers/fixtures/logfilesize002.yaml
Full config example:
.. literalinclude:: /../../tests/wrappers/fixtures/logfilesize001.yaml
"""
lfswrapper = XML.SubElement(xml_parent,
'hudson.plugins.logfilesizechecker.'
'LogfilesizecheckerWrapper')
lfswrapper.set("plugin", "logfilesizechecker")
XML.SubElement(lfswrapper, 'setOwn').text = str(
data.get('set-own', 'false')).lower()
XML.SubElement(lfswrapper, 'maxLogSize').text = str(
data.get('size', '128')).lower()
XML.SubElement(lfswrapper, 'failBuild').text = str(
data.get('fail', 'false')).lower()
def timeout(parser, xml_parent, data):
"""yaml: timeout
Abort the build if it runs too long.
Requires the Jenkins :jenkins-wiki:`Build Timeout Plugin
<Build-timeout+Plugin>`.
:arg bool fail: Mark the build as failed (default false)
:arg bool write-description: Write a message in the description
(default false)
:arg int timeout: Abort the build after this number of minutes (default 3)
:arg str timeout-var: Export an environment variable to reference the
timeout value (optional)
:arg str type: Timeout type to use (default absolute)
:arg int elastic-percentage: Percentage of the average duration of the three
    most recent builds to use as the elastic timeout (default 0)
:arg int elastic-default-timeout: Timeout to use if there were no previous
builds (default 3)
:type values:
* **likely-stuck**
* **elastic**
* **absolute**
* **no-activity**
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/timeout001.yaml
.. literalinclude:: /../../tests/wrappers/fixtures/timeout002.yaml
.. literalinclude:: /../../tests/wrappers/fixtures/timeout003.yaml
.. literalinclude:: /../../tests/wrappers/fixtures/timeout004.yaml
"""
twrapper = XML.SubElement(xml_parent,
'hudson.plugins.build__timeout.'
'BuildTimeoutWrapper')
XML.SubElement(twrapper, 'timeoutMinutes').text = str(
data.get('timeout', 3))
timeout_env_var = data.get('timeout-var')
if timeout_env_var:
XML.SubElement(twrapper, 'timeoutEnvVar').text = str(timeout_env_var)
XML.SubElement(twrapper, 'failBuild').text = str(
data.get('fail', 'false')).lower()
XML.SubElement(twrapper, 'writingDescription').text = str(
data.get('write-description', 'false')).lower()
XML.SubElement(twrapper, 'timeoutPercentage').text = str(
data.get('elastic-percentage', 0))
XML.SubElement(twrapper, 'timeoutMinutesElasticDefault').text = str(
data.get('elastic-default-timeout', 3))
tout_type = str(data.get('type', 'absolute')).lower()
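# Map the YAML type names onto the plugin's expected values; 'no-activity'
# also needs a strategy element carrying its own timeout.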
if tout_type == 'likely-stuck':
tout_type = 'likelyStuck'
elif tout_type == 'no-activity':
noactivity = XML.SubElement(twrapper,
'strategy',
{'class': 'hudson.plugins.build_timeout.impl.NoActivityTimeOutStrategy'})
XML.SubElement(noactivity,
'timeout').text = str(
data.get('no-activity-timeout', 180000))
XML.SubElement(twrapper, 'timeoutType').text = tout_type
def timestamps(parser, xml_parent, data):
"""yaml: timestamps
Add timestamps to the console log.
Requires the Jenkins :jenkins-wiki:`Timestamper Plugin <Timestamper>`.
Example::
wrappers:
- timestamps
"""
XML.SubElement(xml_parent,
'hudson.plugins.timestamper.TimestamperBuildWrapper')
def ansicolor(parser, xml_parent, data):
"""yaml: ansicolor
Translate ANSI color codes to HTML in the console log.
Requires the Jenkins :jenkins-wiki:`Ansi Color Plugin <AnsiColor+Plugin>`.
:arg string colormap: (optional) color mapping to use
Examples::
wrappers:
- ansicolor
# Explicitly setting the colormap
wrappers:
- ansicolor:
colormap: vga
"""
cwrapper = XML.SubElement(
xml_parent,
'hudson.plugins.ansicolor.AnsiColorBuildWrapper')
# Optional colormap
colormap = data.get('colormap')
if colormap:
XML.SubElement(cwrapper, 'colorMapName').text = colormap
def mask_passwords(parser, xml_parent, data):
"""yaml: mask-passwords
Hide passwords in the console log.
Requires the Jenkins :jenkins-wiki:`Mask Passwords Plugin
<Mask+Passwords+Plugin>`.
Example::
wrappers:
- mask-passwords
"""
XML.SubElement(xml_parent,
'com.michelin.cio.hudson.plugins.maskpasswords.'
'MaskPasswordsBuildWrapper')
def workspace_cleanup(parser, xml_parent, data):
"""yaml: workspace-cleanup (pre-build)
Requires the Jenkins :jenkins-wiki:`Workspace Cleanup Plugin
<Workspace+Cleanup+Plugin>`.
The post-build workspace-cleanup is available as a publisher.
:arg list include: list of files to be included
:arg list exclude: list of files to be excluded
:arg bool dirmatch: Apply pattern to directories too
Example::
wrappers:
- workspace-cleanup:
include:
- "*.zip"
"""
p = XML.SubElement(xml_parent,
'hudson.plugins.ws__cleanup.PreBuildCleanup')
p.set("plugin", "[email protected]")
if "include" in data or "exclude" in data:
patterns = XML.SubElement(p, 'patterns')
for inc in data.get("include", []):
ptrn = XML.SubElement(patterns, 'hudson.plugins.ws__cleanup.Pattern')
XML.SubElement(ptrn, 'pattern').text = inc
XML.SubElement(ptrn, 'type').text = "INCLUDE"
for exc in data.get("exclude", []):
ptrn = XML.SubElement(patterns, 'hudson.plugins.ws__cleanup.Pattern')
XML.SubElement(ptrn, 'pattern').text = exc
XML.SubElement(ptrn, 'type').text = "EXCLUDE"
deldirs = XML.SubElement(p, 'deleteDirs')
deldirs.text = str(data.get("dirmatch", False)).lower()
def m2_repository_cleanup(parser, xml_parent, data):
"""yaml: m2-repository-cleanup
Configure M2 Repository Cleanup
Requires the Jenkins :jenkins-wiki:`M2 Repository Cleanup
<M2+Repository+Cleanup+Plugin>`.
:arg list patterns: List of patterns for artifacts to cleanup before
building. (optional)
This plugin allows you to configure a maven2 job to clean some or all of
the artifacts from the repository before it runs.
Example:
.. literalinclude:: \
../../tests/wrappers/fixtures/m2-repository-cleanup001.yaml
"""
m2repo = XML.SubElement(
xml_parent,
'hudson.plugins.m2__repo__reaper.M2RepoReaperWrapper')
m2repo.set("plugin", "m2-repo-reaper")
patterns = data.get("patterns", [])
XML.SubElement(m2repo, 'artifactPatterns').text = ",".join(patterns)
p = XML.SubElement(m2repo, 'patterns')
for pattern in patterns:
XML.SubElement(p, 'string').text = pattern
def rvm_env(parser, xml_parent, data):
"""yaml: rvm-env
Set the RVM implementation
Requires the Jenkins :jenkins-wiki:`Rvm Plugin <RVM+Plugin>`.
:arg str implementation: Type of implementation. Syntax is RUBY[@GEMSET],
such as '1.9.3' or 'jruby@foo'.
Example::
wrappers:
- rvm-env:
implementation: 1.9.3
"""
rpo = XML.SubElement(xml_parent,
'ruby-proxy-object')
ro_class = "Jenkins::Plugin::Proxies::BuildWrapper"
ro = XML.SubElement(rpo,
'ruby-object',
{'ruby-class': ro_class,
'pluginid': 'rvm'})
o = XML.SubElement(ro,
'object',
{'ruby-class': 'RvmWrapper',
'pluginid': 'rvm'})
XML.SubElement(o,
'impl',
{'pluginid': 'rvm',
'ruby-class': 'String'}).text = data['implementation']
XML.SubElement(ro,
'pluginid',
{'pluginid': 'rvm',
'ruby-class': 'String'}).text = "rvm"
def rbenv(parser, xml_parent, data):
"""yaml: rbenv
Set the rbenv implementation.
Requires the Jenkins :jenkins-wiki:`rbenv plugin <rbenv+plugin>`.
All parameters are optional.
:arg str ruby-version: Version of Ruby to use (default: 1.9.3-p484)
:arg bool ignore-local-version: If true, ignore local Ruby
version (defined in the ".ruby-version" file in workspace) even if it
has been defined (default: false)
:arg str preinstall-gem-list: List of gems to install
(default: 'bundler,rake')
:arg str rbenv-root: RBENV_ROOT (default: $HOME/.rbenv)
:arg str rbenv-repo: Which repo to clone rbenv from
(default: https://github.com/sstephenson/rbenv.git)
:arg str rbenv-branch: Which branch to clone rbenv from (default: master)
:arg str ruby-build-repo: Which repo to clone ruby-build from
(default: https://github.com/sstephenson/ruby-build.git)
:arg str ruby-build-branch: Which branch to clone ruby-build from
(default: master)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/rbenv003.yaml
"""
mapping = [
# option, xml name, default value (text), attributes (hard coded)
("preinstall-gem-list", 'gem__list', 'bundler,rake'),
("rbenv-root", 'rbenv__root', '$HOME/.rbenv'),
("rbenv-repo", 'rbenv__repository',
'https://github.com/sstephenson/rbenv.git'),
("rbenv-branch", 'rbenv__revision', 'master'),
("ruby-build-repo", 'ruby__build__repository',
'https://github.com/sstephenson/ruby-build.git'),
("ruby-build-branch", 'ruby__build__revision', 'master'),
("ruby-version", 'version', '1.9.3-p484'),
]
rpo = XML.SubElement(xml_parent,
'ruby-proxy-object')
ro_class = "Jenkins::Tasks::BuildWrapperProxy"
ro = XML.SubElement(rpo,
'ruby-object',
{'ruby-class': ro_class,
'pluginid': 'rbenv'})
XML.SubElement(ro,
'pluginid',
{'pluginid': "rbenv",
'ruby-class': "String"}).text = "rbenv"
o = XML.SubElement(ro,
'object',
{'ruby-class': 'RbenvWrapper',
'pluginid': 'rbenv'})
for elem in mapping:
(optname, xmlname, val) = elem[:3]
xe = XML.SubElement(o,
xmlname,
{'ruby-class': "String",
'pluginid': "rbenv"})
if optname and optname in data:
val = data[optname]
if type(val) == bool:
xe.text = str(val).lower()
else:
xe.text = val
ignore_local_class = 'FalseClass'
if 'ignore-local-version' in data:
ignore_local_string = str(data['ignore-local-version']).lower()
if ignore_local_string == 'true':
ignore_local_class = 'TrueClass'
XML.SubElement(o,
'ignore__local__version',
{'ruby-class': ignore_local_class,
'pluginid': 'rbenv'})
def build_name(parser, xml_parent, data):
"""yaml: build-name
Set the name of the build
Requires the Jenkins :jenkins-wiki:`Build Name Setter Plugin
<Build+Name+Setter+Plugin>`.
:arg str name: Name for the build. Typically you would use a variable
from Jenkins in the name. The syntax would be ${FOO} for
the FOO variable.
Example::
wrappers:
- build-name:
name: Build-${FOO}
"""
bsetter = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.buildnamesetter.'
'BuildNameSetter')
XML.SubElement(bsetter, 'template').text = data['name']
def port_allocator(parser, xml_parent, data):
"""yaml: port-allocator
Assign unique TCP port numbers
Requires the Jenkins :jenkins-wiki:`Port Allocator Plugin
<Port+Allocator+Plugin>`.
:arg str name: Deprecated, use names instead
:arg list names: Variable list of names of the port or list of
specific port numbers
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/port-allocator002.yaml
"""
pa = XML.SubElement(xml_parent,
'org.jvnet.hudson.plugins.port__allocator.'
'PortAllocator')
ports = XML.SubElement(pa, 'ports')
names = data.get('names')
if not names:
logger = logging.getLogger(__name__)
logger.warn('port_allocator name is deprecated, use a names list '
'instead')
names = [data['name']]
for name in names:
dpt = XML.SubElement(ports,
'org.jvnet.hudson.plugins.port__allocator.'
'DefaultPortType')
XML.SubElement(dpt, 'name').text = name
def locks(parser, xml_parent, data):
"""yaml: locks
Control parallel execution of jobs.
Requires the Jenkins :jenkins-wiki:`Locks and Latches Plugin
<Locks+and+Latches+plugin>`.
:arg: list of locks to use
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/locks002.yaml
:language: yaml
"""
locks = data
if locks:
lw = XML.SubElement(xml_parent,
'hudson.plugins.locksandlatches.LockWrapper')
locktop = XML.SubElement(lw, 'locks')
for lock in locks:
lockwrapper = XML.SubElement(locktop,
'hudson.plugins.locksandlatches.'
'LockWrapper_-LockWaitConfig')
XML.SubElement(lockwrapper, 'name').text = lock
def copy_to_slave(parser, xml_parent, data):
"""yaml: copy-to-slave
Copy files to slave before build
Requires the Jenkins :jenkins-wiki:`Copy To Slave Plugin
<Copy+To+Slave+Plugin>`.
:arg list includes: list of file patterns to copy
:arg list excludes: list of file patterns to exclude
:arg bool flatten: flatten directory structure
:arg str relative-to: base location of includes/excludes,
must be userContent ($JENKINS_HOME/userContent)
home ($JENKINS_HOME) or workspace
:arg bool include-ant-excludes: exclude ant's default excludes
Example::
wrappers:
- copy-to-slave:
includes:
- file1
- file2*.txt
excludes:
- file2bad.txt
"""
p = 'com.michelin.cio.hudson.plugins.copytoslave.CopyToSlaveBuildWrapper'
cs = XML.SubElement(xml_parent, p)
XML.SubElement(cs, 'includes').text = ','.join(data.get('includes', ['']))
XML.SubElement(cs, 'excludes').text = ','.join(data.get('excludes', ['']))
XML.SubElement(cs, 'flatten').text = \
str(data.get('flatten', False)).lower()
XML.SubElement(cs, 'includeAntExcludes').text = \
str(data.get('include-ant-excludes', False)).lower()
rel = str(data.get('relative-to', 'userContent'))
opt = ('userContent', 'home', 'workspace')
if rel not in opt:
raise ValueError('relative-to must be one of %r' % opt)
XML.SubElement(cs, 'relativeTo').text = rel
# seems to always be false, can't find it in source code
XML.SubElement(cs, 'hudsonHomeRelative').text = 'false'
def inject(parser, xml_parent, data):
"""yaml: inject
Add or override environment variables to the whole build process
Requires the Jenkins :jenkins-wiki:`EnvInject Plugin <EnvInject+Plugin>`.
:arg str properties-file: path to the properties file (default '')
:arg str properties-content: key value pair of properties (default '')
:arg str script-file: path to the script file (default '')
:arg str script-content: contents of a script (default '')
:arg str groovy-script-content: contents of a groovy script (default '')
Example::
wrappers:
- inject:
properties-file: /usr/local/foo
properties-content: PATH=/foo/bar
script-file: /usr/local/foo.sh
script-content: echo $PATH
"""
eib = XML.SubElement(xml_parent, 'EnvInjectBuildWrapper')
info = XML.SubElement(eib, 'info')
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'propertiesFilePath', data.get('properties-file'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'propertiesContent', data.get('properties-content'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'scriptFilePath', data.get('script-file'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'scriptContent', data.get('script-content'))
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
info, 'groovyScriptContent', data.get('groovy-script-content'))
XML.SubElement(info, 'loadFilesFromMaster').text = 'false'
def inject_ownership_variables(parser, xml_parent, data):
"""yaml: inject-ownership-variables
Inject ownership variables to the build as environment variables.
Requires the Jenkins :jenkins-wiki:`EnvInject Plugin <EnvInject+Plugin>`
and Jenkins :jenkins-wiki:`Ownership plugin <Ownership+Plugin>`.
:arg bool job-variables: inject job ownership variables to the job
(default false)
:arg bool node-variables: inject node ownership variables to the job
(default false)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/ownership001.yaml
"""
ownership = XML.SubElement(xml_parent, 'com.synopsys.arc.jenkins.plugins.'
'ownership.wrappers.OwnershipBuildWrapper')
XML.SubElement(ownership, 'injectNodeOwnership').text = \
str(data.get('node-variables', False)).lower()
XML.SubElement(ownership, 'injectJobOwnership').text = \
str(data.get('job-variables', False)).lower()
def inject_passwords(parser, xml_parent, data):
"""yaml: inject-passwords
Inject passwords to the build as environment variables.
Requires the Jenkins :jenkins-wiki:`EnvInject Plugin <EnvInject+Plugin>`.
:arg bool global: inject global passwords to the job
:arg bool mask-password-params: mask password parameters
:arg list job-passwords: key value pair of job passwords
:Parameter: * **name** (`str`) Name of password
* **password** (`str`) Encrypted password
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/passwords001.yaml
"""
eib = XML.SubElement(xml_parent, 'EnvInjectPasswordWrapper')
XML.SubElement(eib, 'injectGlobalPasswords').text = \
str(data.get('global', False)).lower()
XML.SubElement(eib, 'maskPasswordParameters').text = \
str(data.get('mask-password-params', False)).lower()
entries = XML.SubElement(eib, 'passwordEntries')
passwords = data.get('job-passwords', [])
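# Each job password becomes an EnvInjectPasswordEntry; the value is the encrypted password from the config.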
if passwords:
for password in passwords:
entry = XML.SubElement(entries, 'EnvInjectPasswordEntry')
XML.SubElement(entry, 'name').text = password['name']
XML.SubElement(entry, 'value').text = password['password']
def env_file(parser, xml_parent, data):
"""yaml: env-file
Add or override environment variables to the whole build process
Requires the Jenkins :jenkins-wiki:`Environment File Plugin
<Envfile+Plugin>`.
:arg str properties-file: path to the properties file (default '')
Example::
wrappers:
- env-file:
properties-file: ${WORKSPACE}/foo
"""
eib = XML.SubElement(xml_parent,
'hudson.plugins.envfile.EnvFileBuildWrapper')
jenkins_jobs.modules.base.add_nonblank_xml_subelement(
eib, 'filePath', data.get('properties-file'))
def env_script(parser, xml_parent, data):
"""yaml: env-script
Add or override environment variables to the whole build process.
Requires the Jenkins :jenkins-wiki:`Environment Script Plugin
<Environment+Script+Plugin>`.
:arg script-content: The script to run (default: '')
:arg only-run-on-parent: Only applicable for Matrix Jobs. If true, run only
on the matrix parent job (default: false)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/env-script001.yaml
"""
el = XML.SubElement(xml_parent, 'com.lookout.jenkins.EnvironmentScript')
XML.SubElement(el, 'script').text = data.get('script-content', '')
only_on_parent = str(data.get('only-run-on-parent', False)).lower()
XML.SubElement(el, 'onlyRunOnParent').text = only_on_parent
def jclouds(parser, xml_parent, data):
"""yaml: jclouds
Uses JClouds to provide slave launching on most of the currently
usable Cloud infrastructures.
Requires the Jenkins :jenkins-wiki:`JClouds Plugin <JClouds+Plugin>`.
:arg bool single-use: Whether or not to terminate the slave after use
(default: False).
:arg list instances: The name of the jclouds template to create an
instance from, and its parameters.
:arg str cloud-name: The name of the jclouds profile containing the
specified template.
:arg int count: How many instances to create (default: 1).
:arg bool stop-on-terminate: Whether or not to suspend instead of terminate
the instance (default: False).
Example::
wrappers:
- jclouds:
single-use: True
instances:
- jenkins-dev-slave:
cloud-name: mycloud1
count: 1
stop-on-terminate: True
- jenkins-test-slave:
cloud-name: mycloud2
count: 2
stop-on-terminate: False
"""
if 'instances' in data:
buildWrapper = XML.SubElement(xml_parent,
'jenkins.plugins.jclouds.compute.'
'JCloudsBuildWrapper')
instances = XML.SubElement(buildWrapper, 'instancesToRun')
for foo in data['instances']:
for template, params in foo.items():
instance = XML.SubElement(instances,
'jenkins.plugins.jclouds.compute.'
'InstancesToRun')
XML.SubElement(instance, 'templateName').text = template
XML.SubElement(instance, 'cloudName').text = \
params.get('cloud-name', '')
XML.SubElement(instance, 'count').text = \
str(params.get('count', 1))
XML.SubElement(instance, 'suspendOrTerminate').text = \
str(params.get('stop-on-terminate', False)).lower()
if data.get('single-use'):
XML.SubElement(xml_parent,
'jenkins.plugins.jclouds.compute.'
'JCloudsOneOffSlave')
def build_user_vars(parser, xml_parent, data):
"""yaml: build-user-vars
Set environment variables to the value of the user that started the build.
Requires the Jenkins :jenkins-wiki:`Build User Vars Plugin
<Build+User+Vars+Plugin>`.
Example::
wrappers:
- build-user-vars
"""
XML.SubElement(xml_parent, 'org.jenkinsci.plugins.builduser.BuildUser')
def release(parser, xml_parent, data):
"""yaml: release
Add release build configuration
Requires the Jenkins :jenkins-wiki:`Release Plugin <Release+Plugin>`.
:arg bool keep-forever: Keep build forever (default true)
:arg bool override-build-parameters: Enable build-parameter override
(default false)
:arg string version-template: Release version template (default '')
:arg list parameters: Release parameters (see the :ref:`Parameters` module)
:arg list pre-build: Pre-build steps (see the :ref:`Builders` module)
:arg list post-build: Post-build steps (see :ref:`Builders`)
:arg list post-success: Post successful-build steps (see :ref:`Builders`)
:arg list post-fail: Post failed-build steps (see :ref:`Builders`)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/release001.yaml
"""
relwrap = XML.SubElement(xml_parent,
'hudson.plugins.release.ReleaseWrapper')
# For 'keep-forever', the sense of the XML flag is the opposite of
# the YAML flag.
no_keep_forever = 'false'
if str(data.get('keep-forever', True)).lower() == 'false':
no_keep_forever = 'true'
XML.SubElement(relwrap, 'doNotKeepLog').text = no_keep_forever
XML.SubElement(relwrap, 'overrideBuildParameters').text = str(
data.get('override-build-parameters', False)).lower()
XML.SubElement(relwrap, 'releaseVersionTemplate').text = data.get(
'version-template', '')
parameters = data.get('parameters', [])
if parameters:
pdef = XML.SubElement(relwrap, 'parameterDefinitions')
for param in parameters:
parser.registry.dispatch('parameter', parser, pdef, param)
builder_steps = {
'pre-build': 'preBuildSteps',
'post-build': 'postBuildSteps',
'post-success': 'postSuccessfulBuildSteps',
'post-fail': 'postFailedBuildSteps',
}
for step in builder_steps.keys():
for builder in data.get(step, []):
parser.registry.dispatch('builder', parser,
XML.SubElement(relwrap,
builder_steps[step]),
builder)
def sauce_ondemand(parser, xml_parent, data):
"""yaml: sauce-ondemand
Allows you to integrate Sauce OnDemand with Jenkins. You can
automate the setup and tear down of Sauce Connect and integrate
the Sauce OnDemand results videos per test. Requires the Jenkins
:jenkins-wiki:`Sauce OnDemand Plugin <Sauce+OnDemand+Plugin>`.
:arg bool enable-sauce-connect: launches an SSH tunnel from their cloud
to your private network (default false)
:arg str sauce-host: The name of the selenium host to be used. For
tests run using Sauce Connect, this should be localhost.
ondemand.saucelabs.com can also be used to connect directly to
Sauce OnDemand, The value of the host will be stored in the
SAUCE_ONDEMAND_HOST environment variable. (default '')
:arg str sauce-port: The name of the Selenium Port to be used. For
tests run using Sauce Connect, this should be 4445. If using
ondemand.saucelabs.com for the Selenium Host, then use 4444.
The value of the port will be stored in the SAUCE_ONDEMAND_PORT
environment variable. (default '')
:arg str override-username: If set then api-access-key must be set.
Overrides the username from the global config. (default '')
:arg str override-api-access-key: If set then username must be set.
Overrides the api-access-key set in the global config. (default '')
:arg str starting-url: The value set here will be stored in the
SELENIUM_STARTING_URL environment variable. Only used when type
is selenium. (default '')
:arg str type: Type of test to run (default selenium)
:type values:
* **selenium**
* **webdriver**
:arg list platforms: The platforms to run the tests on. Platforms
supported are dynamically retrieved from sauce labs. The format of
the values has only the first letter capitalized, no spaces, underscore
between os and version, underscore in internet_explorer, everything
else is run together. If there are not multiple version of the browser
then just the first version number is used.
Examples: Mac_10.8iphone5.1 or Windows_2003firefox10
or Windows_2012internet_explorer10 (default '')
:arg bool launch-sauce-connect-on-slave: Whether to launch sauce connect
on the slave. (default false)
:arg str https-protocol: The https protocol to use (default '')
:arg str sauce-connect-options: Options to pass to sauce connect
(default '')
Example::
wrappers:
- sauce-ondemand:
enable-sauce-connect: true
sauce-host: foo
sauce-port: 8080
override-username: foo
override-api-access-key: 123lkj123kh123l;k12323
type: webdriver
platforms:
- Linuxandroid4
- Linuxfirefox10
- Linuxfirefox11
launch-sauce-connect-on-slave: true
"""
sauce = XML.SubElement(xml_parent, 'hudson.plugins.sauce__ondemand.'
'SauceOnDemandBuildWrapper')
XML.SubElement(sauce, 'enableSauceConnect').text = str(data.get(
'enable-sauce-connect', False)).lower()
host = data.get('sauce-host', '')
XML.SubElement(sauce, 'seleniumHost').text = host
port = data.get('sauce-port', '')
XML.SubElement(sauce, 'seleniumPort').text = port
# Optional override global authentication
username = data.get('override-username')
key = data.get('override-api-access-key')
if username and key:
cred = XML.SubElement(sauce, 'credentials')
XML.SubElement(cred, 'username').text = username
XML.SubElement(cred, 'apiKey').text = key
atype = data.get('type', 'selenium')
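# Browsers are written under a type-specific element and mirrored at the
# top level via an XML reference.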
info = XML.SubElement(sauce, 'seleniumInformation')
if atype == 'selenium':
url = data.get('starting-url', '')
XML.SubElement(info, 'startingURL').text = url
browsers = XML.SubElement(info, 'seleniumBrowsers')
for platform in data['platforms']:
XML.SubElement(browsers, 'string').text = platform
XML.SubElement(info, 'isWebDriver').text = 'false'
XML.SubElement(sauce, 'seleniumBrowsers',
{'reference': '../seleniumInformation/'
'seleniumBrowsers'})
if atype == 'webdriver':
browsers = XML.SubElement(info, 'webDriverBrowsers')
for platform in data['platforms']:
XML.SubElement(browsers, 'string').text = platform
XML.SubElement(info, 'isWebDriver').text = 'true'
XML.SubElement(sauce, 'webDriverBrowsers',
{'reference': '../seleniumInformation/'
'webDriverBrowsers'})
XML.SubElement(sauce, 'launchSauceConnectOnSlave').text = str(data.get(
'launch-sauce-connect-on-slave', False)).lower()
protocol = data.get('https-protocol', '')
XML.SubElement(sauce, 'httpsProtocol').text = protocol
options = data.get('sauce-connect-options', '')
XML.SubElement(sauce, 'options').text = options
def pathignore(parser, xml_parent, data):
"""yaml: pathignore
This plugin allows SCM-triggered jobs to ignore
build requests if only certain paths have changed.
Requires the Jenkins :jenkins-wiki:`Pathignore Plugin <Pathignore+Plugin>`.
:arg str ignored: A set of patterns to define ignored changes
Example::
wrappers:
- pathignore:
ignored: "docs, tests"
"""
ruby = XML.SubElement(xml_parent, 'ruby-proxy-object')
robj = XML.SubElement(ruby, 'ruby-object', attrib={
'pluginid': 'pathignore',
'ruby-class': 'Jenkins::Plugin::Proxies::BuildWrapper'
})
pluginid = XML.SubElement(robj, 'pluginid', {
'pluginid': 'pathignore', 'ruby-class': 'String'
})
pluginid.text = 'pathignore'
obj = XML.SubElement(robj, 'object', {
'ruby-class': 'PathignoreWrapper', 'pluginid': 'pathignore'
})
ignored = XML.SubElement(obj, 'ignored__paths', {
'pluginid': 'pathignore', 'ruby-class': 'String'
})
ignored.text = data.get('ignored', '')
XML.SubElement(obj, 'invert__ignore', {
'ruby-class': 'FalseClass', 'pluginid': 'pathignore'
})
def pre_scm_buildstep(parser, xml_parent, data):
"""yaml: pre-scm-buildstep
Execute a Build Step before running the SCM
Requires the Jenkins :jenkins-wiki:`pre-scm-buildstep <pre-scm-buildstep>`.
:arg list buildsteps: List of build steps to execute
:Buildstep: Any acceptable builder, as seen in the example
Example::
wrappers:
- pre-scm-buildstep:
- shell: |
#!/bin/bash
echo "Doing somethiung cool"
- shell: |
#!/bin/zsh
echo "Doing somethin cool with zsh"
- ant: "target1 target2"
ant-name: "Standard Ant"
- inject:
properties-file: example.prop
properties-content: EXAMPLE=foo-bar
"""
bsp = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.preSCMbuildstep.'
'PreSCMBuildStepsWrapper')
bs = XML.SubElement(bsp, 'buildSteps')
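# Each entry is rendered with the regular builder machinery and appended under <buildSteps>.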
for step in data:
for edited_node in create_builders(parser, step):
bs.append(edited_node)
def logstash(parser, xml_parent, data):
"""yaml: logstash build wrapper
Dump the Jenkins console output to Logstash
Requires the Jenkins :jenkins-wiki:`logstash plugin <Logstash+Plugin>`.
:arg use-redis: Boolean to use Redis. (default: true)
:arg redis: Redis config params
:Parameter: * **host** (`str`) Redis hostname\
(default 'localhost')
:Parameter: * **port** (`int`) Redis port number (default 6379)
:Parameter: * **database-number** (`int`)\
Redis database number (default 0)
:Parameter: * **database-password** (`str`)\
Redis database password (default '')
:Parameter: * **data-type** (`str`)\
Redis database type (default 'list')
:Parameter: * **key** (`str`) Redis key (default 'logstash')
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/logstash001.yaml
"""
logstash = XML.SubElement(xml_parent,
'jenkins.plugins.logstash.'
'LogstashBuildWrapper')
logstash.set('plugin', '[email protected]')
redis_bool = XML.SubElement(logstash, 'useRedis')
redis_bool.text = str(data.get('use-redis', True)).lower()
if data.get('use-redis', True):
redis_config = data.get('redis', {})
redis_sub_element = XML.SubElement(logstash, 'redis')
host_sub_element = XML.SubElement(redis_sub_element, 'host')
host_sub_element.text = str(
redis_config.get('host', 'localhost'))
port_sub_element = XML.SubElement(redis_sub_element, 'port')
port_sub_element.text = str(redis_config.get('port', '6379'))
database_numb_sub_element = XML.SubElement(redis_sub_element, 'numb')
database_numb_sub_element.text = \
str(redis_config.get('database-number', '0'))
database_pass_sub_element = XML.SubElement(redis_sub_element, 'pass')
database_pass_sub_element.text = \
str(redis_config.get('database-password', ''))
data_type_sub_element = XML.SubElement(redis_sub_element, 'dataType')
data_type_sub_element.text = \
str(redis_config.get('data-type', 'list'))
key_sub_element = XML.SubElement(redis_sub_element, 'key')
key_sub_element.text = str(redis_config.get('key', 'logstash'))
def mongo_db(parser, xml_parent, data):
"""yaml: mongo-db build wrapper
Initializes a MongoDB database while running the build.
Requires the Jenkins :jenkins-wiki:`MongoDB plugin <MongoDB+Plugin>`.
:arg str name: The name of the MongoDB install to use
:arg str data-directory: Data directory for the server (optional)
:arg int port: Port for the server (optional)
:arg str startup-params: Startup parameters for the server (optional)
:arg int start-timeout: How long to wait for the server to start in
milliseconds. 0 means no timeout. (default '0')
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/mongo-db001.yaml
"""
mongodb = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.mongodb.'
'MongoBuildWrapper')
mongodb.set('plugin', 'mongodb')
if not str(data.get('name', '')):
raise JenkinsJobsException('The mongo install name must be specified.')
XML.SubElement(mongodb, 'mongodbName').text = str(data.get('name', ''))
XML.SubElement(mongodb, 'port').text = str(data.get('port', ''))
XML.SubElement(mongodb, 'dbpath').text = str(data.get(
'data-directory', ''))
XML.SubElement(mongodb, 'parameters').text = str(data.get(
'startup-params', ''))
XML.SubElement(mongodb, 'startTimeout').text = str(data.get(
'start-timeout', '0'))
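# Illustrative sketch (hypothetical install name and port): ``mongo_db``
# requires a ``name`` and fills the remaining optional settings with defaults.
def _example_mongo_db_xml():
    root = XML.Element('project')
    mongo_db(None, root, {'name': 'mongodb-default', 'port': 27017})
    return XML.tostring(root)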
def delivery_pipeline(parser, xml_parent, data):
"""yaml: delivery-pipeline
If enabled, the job will create a version based on the template.
The version will be set to the environment variable PIPELINE_VERSION and
will also be set in the downstream jobs.
Requires the Jenkins :jenkins-wiki:`Delivery Pipeline Plugin
<Delivery+Pipeline+Plugin>`.
:arg str version-template: Template for generated version e.g
1.0.${BUILD_NUMBER} (default: '')
:arg bool set-display-name: Set the generated version as the display name
for the build (default: false)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/delivery-pipeline1.yaml
"""
pvc = XML.SubElement(xml_parent,
'se.diabol.jenkins.pipeline.'
'PipelineVersionContributor')
XML.SubElement(pvc, 'versionTemplate').text = data.get(
'version-template', '')
XML.SubElement(pvc, 'updateDisplayName').text = str(data.get(
'set-display-name', False)).lower()
def matrix_tie_parent(parser, xml_parent, data):
"""yaml: matrix-tie-parent
Tie parent to a node.
Requires the Jenkins :jenkins-wiki:`Matrix Tie Parent Plugin
<Matrix+Tie+Parent+Plugin>`.
Note that from Jenkins version 1.532 this plugin's functionality is
available under the "advanced" option of the matrix project configuration.
You can use the top level ``node`` parameter to control where the parent
job is tied in Jenkins 1.532 and higher.
:arg str node: Name of the node.
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/matrix-tie-parent.yaml
"""
mtp = XML.SubElement(xml_parent, 'matrixtieparent.BuildWrapperMtp')
XML.SubElement(mtp, 'labelName').text = data['node']
def exclusion(parser, xml_parent, data):
"""yaml: exclusion
Add a resource to use for critical sections to establish a mutex on. If
another job specifies the same resource, the second job will wait for the
blocked resource to become available.
Requires the Jenkins :jenkins-wiki:`Exclusion Plugin <Exclusion-Plugin>`.
:arg list resources: List of resources to add for exclusion
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/exclusion002.yaml
"""
exl = XML.SubElement(xml_parent,
'org.jvnet.hudson.plugins.exclusion.IdAllocator')
exl.set('plugin', 'Exclusion')
ids = XML.SubElement(exl, 'ids')
resources = data.get('resources', [])
for resource in resources:
dit = \
XML.SubElement(ids,
'org.jvnet.hudson.plugins.exclusion.DefaultIdType')
XML.SubElement(dit, 'name').text = str(resource).upper()
def ssh_agent_credentials(parser, xml_parent, data):
"""yaml: ssh-agent-credentials
Sets up the user for the ssh agent plugin for jenkins.
Requires the Jenkins :jenkins-wiki:`SSH-Agent Plugin <SSH+Agent+Plugin>`.
:arg list users: A list of Jenkins users credential IDs (required)
:arg str user: The user id of the jenkins user credentials (deprecated)
Example:
.. literalinclude::
/../../tests/wrappers/fixtures/ssh-agent-credentials002.yaml
If both **users** and **user** parameters are specified, **users** will be
preferred and **user** will be ignored.
Example:
.. literalinclude::
/../../tests/wrappers/fixtures/ssh-agent-credentials003.yaml
A **users** list with a single value is equivalent to **user**. In this
case the old-style XML will be generated. Use this format if you use
SSH-Agent plugin < 1.5.
Example:
.. literalinclude::
/../../tests/wrappers/fixtures/ssh-agent-credentials004.yaml
equals to:
.. literalinclude::
/../../tests/wrappers/fixtures/ssh-agent-credentials001.yaml
"""
logger = logging.getLogger(__name__)
entry_xml = XML.SubElement(
xml_parent,
'com.cloudbees.jenkins.plugins.sshagent.SSHAgentBuildWrapper')
xml_key = 'user'
user_list = list()
if 'users' in data:
user_list += data['users']
if len(user_list) > 1:
entry_xml = XML.SubElement(entry_xml, 'credentialIds')
xml_key = 'string'
if 'user' in data:
logger.warn("Both 'users' and 'user' parameters specified for "
"ssh-agent-credentials. 'users' is used, 'user' is "
"ignored.")
elif 'user' in data:
logger.warn("The 'user' param has been deprecated, "
"use the 'users' param instead.")
user_list.append(data['user'])
else:
raise JenkinsJobsException("Missing 'user' or 'users' parameter "
"for ssh-agent-credentials")
for user in user_list:
XML.SubElement(entry_xml, xml_key).text = user
def credentials_binding(parser, xml_parent, data):
"""yaml: credentials-binding
Binds credentials to environment variables using the credentials binding
plugin for jenkins.
Requires the Jenkins :jenkins-wiki:`Credentials Binding Plugin
<Credentials+Binding+Plugin>` version 1.1 or greater.
:arg list binding-type: List of bindings to create. Bindings may be\
of type `zip-file`, `file`, `username-password`,\
or `text`
:Parameters: * **credential-id** (`str`) UUID of the credential being\
referenced
* **variable** (`str`) Environment variable where the\
credential will be stored
Example:
.. literalinclude::
/../../tests/wrappers/fixtures/credentials_binding.yaml
:language: yaml
"""
entry_xml = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.credentialsbinding.impl.SecretBuildWrapper')
bindings_xml = XML.SubElement(entry_xml, 'bindings')
binding_types = {
'zip-file': 'org.jenkinsci.plugins.credentialsbinding.impl.'
'ZipFileBinding',
'file': 'org.jenkinsci.plugins.credentialsbinding.impl.FileBinding',
'username-password': 'org.jenkinsci.plugins.credentialsbinding.impl.'
'UsernamePasswordBinding',
'text': 'org.jenkinsci.plugins.credentialsbinding.impl.StringBinding'
}
if not data:
raise JenkinsJobsException('At least one binding-type must be '
'specified for the credentials-binding '
'element')
for binding in data:
for binding_type, params in binding.items():
if binding_type not in binding_types.keys():
raise JenkinsJobsException('binding-type must be one of %r' %
binding_types.keys())
binding_xml = XML.SubElement(bindings_xml,
binding_types[binding_type])
variable_xml = XML.SubElement(binding_xml, 'variable')
variable_xml.text = params.get('variable')
credential_xml = XML.SubElement(binding_xml, 'credentialsId')
credential_xml.text = params.get('credential-id')
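# Illustrative sketch (hypothetical credential UUIDs and variable names): each
# list entry maps a binding type to the credential id and the environment
# variable that will receive it.
def _example_credentials_binding_xml():
    root = XML.Element('project')
    credentials_binding(None, root, [
        {'username-password': {'credential-id': '00000000-1111-2222-3333-444444444444',
                               'variable': 'DEPLOY_CREDS'}},
        {'file': {'credential-id': '55555555-6666-7777-8888-999999999999',
                  'variable': 'SECRET_FILE'}},
    ])
    return XML.tostring(root)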
def custom_tools(parser, xml_parent, data):
"""yaml: custom-tools
Requires the Jenkins :jenkins-wiki:`Custom Tools Plugin
<Custom+Tools+Plugin>`.
:arg list tools: List of custom tools to add
(optional)
:arg bool skip-master-install: skips the install in top level matrix job
(default 'false')
:arg bool convert-homes-to-upper: Converts the home env vars to uppercase
(default 'false')
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/custom-tools001.yaml
"""
base = 'com.cloudbees.jenkins.plugins.customtools'
wrapper = XML.SubElement(xml_parent,
base + ".CustomToolInstallWrapper")
wrapper_tools = XML.SubElement(wrapper, 'selectedTools')
tools = data.get('tools', [])
tool_node = base + '.CustomToolInstallWrapper_-SelectedTool'
for tool in tools:
tool_wrapper = XML.SubElement(wrapper_tools, tool_node)
XML.SubElement(tool_wrapper, 'name').text = str(tool)
opts = XML.SubElement(wrapper,
'multiconfigOptions')
skip_install = str(data.get('skip-master-install', 'false'))
XML.SubElement(opts,
'skipMasterInstallation').text = skip_install
convert_home = str(data.get('convert-homes-to-upper', 'false'))
XML.SubElement(wrapper,
'convertHomesToUppercase').text = convert_home
def xvnc(parser, xml_parent, data):
"""yaml: xvnc
Enable xvnc during the build.
Requires the Jenkins :jenkins-wiki:`xvnc plugin <Xvnc+Plugin>`.
:arg bool screenshot: Take screenshot upon build completion
(default: false)
:arg bool xauthority: Create a dedicated Xauthority file per build
(default: true)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/xvnc001.yaml
"""
xwrapper = XML.SubElement(xml_parent,
'hudson.plugins.xvnc.Xvnc')
XML.SubElement(xwrapper, 'takeScreenshot').text = str(
data.get('screenshot', False)).lower()
XML.SubElement(xwrapper, 'useXauthority').text = str(
data.get('xauthority', True)).lower()
def job_log_logger(parser, xml_parent, data):
"""yaml: job-log-logger
Enable writing the job log to the underlying logging system.
Requires the Jenkins :jenkins-wiki:`Job Log Logger plugin
<Job+Log+Logger+Plugin>`.
:arg bool suppress-empty: Suppress empty log messages
(default: true)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/job-log-logger001.yaml
"""
top = XML.SubElement(xml_parent,
'org.jenkins.ci.plugins.jobloglogger.'
'JobLogLoggerBuildWrapper')
XML.SubElement(top, 'suppressEmpty').text = str(
data.get('suppress-empty', True)).lower()
def xvfb(parser, xml_parent, data):
"""yaml: xvfb
Enable xvfb during the build.
Requires the Jenkins :jenkins-wiki:`Xvfb Plugin <Xvfb+Plugin>`.
:arg str installation-name: The name of the Xvfb tool installation
(default: default)
:arg bool auto-display-name: Uses the -displayfd option of Xvfb by which it
chooses its own display name
(default: false)
:arg str display-name: Ordinal of the display Xvfb will be running on, if
left empty it is chosen based on the current build executor
number (optional)
:arg str assigned-labels: If you want to start Xvfb only on specific nodes
specify its name or label (optional)
:arg bool parallel-build: When running multiple Jenkins nodes on the same
machine this setting influences the display
number generation (default: false)
:arg int timeout: A timeout of given seconds to wait before returning
control to the job (default: 0)
:arg str screen: Resolution and color depth. (default: 1024x768x24)
:arg str display-name-offset: Offset for display names. (default: 1)
:arg str additional-options: Additional options to be added with the
options above to the Xvfb command line
(optional)
:arg bool debug: If Xvfb output should appear in console log of this job
(default: false)
:arg bool shutdown-with-build: Should the display be kept until the whole
job ends (default: false)
Example:
.. literalinclude:: /../../tests/wrappers/fixtures/xvfb001.yaml
"""
xwrapper = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.xvfb.XvfbBuildWrapper')
XML.SubElement(xwrapper, 'installationName').text = str(data.get(
'installation-name', 'default'))
XML.SubElement(xwrapper, 'autoDisplayName').text = str(data.get(
'auto-display-name', False)).lower()
if 'display-name' in data:
XML.SubElement(xwrapper, 'displayName').text = str(data.get(
'display-name', ''))
XML.SubElement(xwrapper, 'assignedLabels').text = str(data.get(
'assigned-labels', ''))
XML.SubElement(xwrapper, 'parallelBuild').text = str(data.get(
'parallel-build', False)).lower()
XML.SubElement(xwrapper, 'timeout').text = str(data.get('timeout', '0'))
XML.SubElement(xwrapper, 'screen').text = str(data.get(
'screen', '1024x768x24'))
XML.SubElement(xwrapper, 'displayNameOffset').text = str(data.get(
'display-name-offset', '1'))
XML.SubElement(xwrapper, 'additionalOptions').text = str(data.get(
'additional-options', ''))
XML.SubElement(xwrapper, 'debug').text = str(data.get(
'debug', False)).lower()
XML.SubElement(xwrapper, 'shutdownWithBuild').text = str(data.get(
'shutdown-with-build', False)).lower()
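# Illustrative sketch (hypothetical values): only a few of the documented xvfb
# options are overridden here; everything else falls back to its default.
def _example_xvfb_xml():
    root = XML.Element('project')
    xvfb(None, root, {'screen': '1280x1024x24', 'timeout': 10, 'debug': True})
    return XML.tostring(root)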
class Wrappers(jenkins_jobs.modules.base.Base):
sequence = 80
component_type = 'wrapper'
component_list_type = 'wrappers'
def gen_xml(self, parser, xml_parent, data):
wrappers = XML.SubElement(xml_parent, 'buildWrappers')
for wrap in data.get('wrappers', []):
self.registry.dispatch('wrapper', parser, wrappers, wrap)
|
apache-2.0
|
xubenben/scikit-learn
|
sklearn/linear_model/ridge.py
|
25
|
39394
|
"""
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
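# Illustrative sketch (not part of the library): _solve_cholesky solves the
# normal equations (X^T X + alpha*I) w = X^T y per target, so on a tiny dense
# problem its output should match an explicit solve.
def _example_solve_cholesky_check():
    rng = np.random.RandomState(0)
    X = rng.randn(8, 3)
    y = rng.randn(8, 1)
    alpha = np.array([1.0])  # a single shared penalty
    w = _solve_cholesky(X, y, alpha)
    w_ref = np.linalg.solve(X.T.dot(X) + alpha[0] * np.eye(3), X.T.dot(y)).T
    return np.allclose(w, w_ref)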
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
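# Illustrative sketch (not part of the library): on a small well-conditioned
# problem the SVD and Cholesky routes return the same ridge coefficients,
# here with one penalty per target.
def _example_solve_svd_check():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 4)
    y = rng.randn(10, 2)
    alpha = np.array([0.5, 2.0])
    return np.allclose(_solve_svd(X, y, alpha), _solve_cholesky(X, y, alpha))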
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is set, then
the solver will automatically be set to 'cholesky'
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure, but may not be available in old scipy versions.
All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional information
depending on the solver used.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in
# any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
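# Illustrative usage sketch (not part of the library): solve a small ridge
# problem through the functional interface; the data below are arbitrary.
def _example_ridge_regression_usage():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.randn(20)
    coef_chol = ridge_regression(X, y, alpha=1.0, solver='cholesky')
    coef_svd = ridge_regression(X, y, alpha=1.0, solver='svd')
    # The two direct solvers agree on a well-conditioned dense problem.
    return np.allclose(coef_chol, coef_svd)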
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure, but may not be available in old scipy versions.
All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'svd' will use a Singular value decomposition to obtain
the solution, 'cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
scipy.sparse.linalg.cg while 'auto' will choose the most
appropriate depending on the matrix X. 'lsqr' uses
the dedicated regularized least-squares routine provided by scipy.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them.
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
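# Illustrative sketch (not part of the library): the closed-form leave-one-out
# errors produced by _RidgeGCV (store_cv_values=True, default squared-error
# scoring) should agree with brute-force leave-one-out refits of Ridge for the
# same alpha. fit_intercept=False keeps the comparison free of centering.
def _example_loo_shortcut_check():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = rng.randn(20)
    alpha = 1.0
    gcv = _RidgeGCV(alphas=[alpha], fit_intercept=False,
                    store_cv_values=True).fit(X, y)
    brute = np.empty(X.shape[0])
    for i in range(X.shape[0]):
        mask = np.arange(X.shape[0]) != i
        model = Ridge(alpha=alpha, fit_intercept=False).fit(X[mask], y[mask])
        brute[i] = (y[i] - model.predict(X[i:i + 1])[0]) ** 2
    return np.allclose(gcv.cv_values_[:, 0], brute)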
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
If an integer is passed, it is the number of folds for KFold cross
validation. Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
gcv_mode : {None, 'auto', 'svd', eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use svd if n_samples > n_features or when X is a sparse
matrix, otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
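# Illustrative usage sketch (not part of the library): select alpha by the
# efficient leave-one-out procedure; the data below are arbitrary.
def _example_ridge_cv_usage():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = X.dot(rng.randn(4)) + 0.1 * rng.randn(30)
    reg = RidgeCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)
    return reg.alpha_, reg.coef_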
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
|
bsd-3-clause
|
clobrano/personfinder
|
app/unidecode/x070.py
|
252
|
4693
|
data = (
'You ', # 0x00
'Yang ', # 0x01
'Lu ', # 0x02
'Si ', # 0x03
'Jie ', # 0x04
'Ying ', # 0x05
'Du ', # 0x06
'Wang ', # 0x07
'Hui ', # 0x08
'Xie ', # 0x09
'Pan ', # 0x0a
'Shen ', # 0x0b
'Biao ', # 0x0c
'Chan ', # 0x0d
'Mo ', # 0x0e
'Liu ', # 0x0f
'Jian ', # 0x10
'Pu ', # 0x11
'Se ', # 0x12
'Cheng ', # 0x13
'Gu ', # 0x14
'Bin ', # 0x15
'Huo ', # 0x16
'Xian ', # 0x17
'Lu ', # 0x18
'Qin ', # 0x19
'Han ', # 0x1a
'Ying ', # 0x1b
'Yong ', # 0x1c
'Li ', # 0x1d
'Jing ', # 0x1e
'Xiao ', # 0x1f
'Ying ', # 0x20
'Sui ', # 0x21
'Wei ', # 0x22
'Xie ', # 0x23
'Huai ', # 0x24
'Hao ', # 0x25
'Zhu ', # 0x26
'Long ', # 0x27
'Lai ', # 0x28
'Dui ', # 0x29
'Fan ', # 0x2a
'Hu ', # 0x2b
'Lai ', # 0x2c
'[?] ', # 0x2d
'[?] ', # 0x2e
'Ying ', # 0x2f
'Mi ', # 0x30
'Ji ', # 0x31
'Lian ', # 0x32
'Jian ', # 0x33
'Ying ', # 0x34
'Fen ', # 0x35
'Lin ', # 0x36
'Yi ', # 0x37
'Jian ', # 0x38
'Yue ', # 0x39
'Chan ', # 0x3a
'Dai ', # 0x3b
'Rang ', # 0x3c
'Jian ', # 0x3d
'Lan ', # 0x3e
'Fan ', # 0x3f
'Shuang ', # 0x40
'Yuan ', # 0x41
'Zhuo ', # 0x42
'Feng ', # 0x43
'She ', # 0x44
'Lei ', # 0x45
'Lan ', # 0x46
'Cong ', # 0x47
'Qu ', # 0x48
'Yong ', # 0x49
'Qian ', # 0x4a
'Fa ', # 0x4b
'Guan ', # 0x4c
'Que ', # 0x4d
'Yan ', # 0x4e
'Hao ', # 0x4f
'Hyeng ', # 0x50
'Sa ', # 0x51
'Zan ', # 0x52
'Luan ', # 0x53
'Yan ', # 0x54
'Li ', # 0x55
'Mi ', # 0x56
'Shan ', # 0x57
'Tan ', # 0x58
'Dang ', # 0x59
'Jiao ', # 0x5a
'Chan ', # 0x5b
'[?] ', # 0x5c
'Hao ', # 0x5d
'Ba ', # 0x5e
'Zhu ', # 0x5f
'Lan ', # 0x60
'Lan ', # 0x61
'Nang ', # 0x62
'Wan ', # 0x63
'Luan ', # 0x64
'Xun ', # 0x65
'Xian ', # 0x66
'Yan ', # 0x67
'Gan ', # 0x68
'Yan ', # 0x69
'Yu ', # 0x6a
'Huo ', # 0x6b
'Si ', # 0x6c
'Mie ', # 0x6d
'Guang ', # 0x6e
'Deng ', # 0x6f
'Hui ', # 0x70
'Xiao ', # 0x71
'Xiao ', # 0x72
'Hu ', # 0x73
'Hong ', # 0x74
'Ling ', # 0x75
'Zao ', # 0x76
'Zhuan ', # 0x77
'Jiu ', # 0x78
'Zha ', # 0x79
'Xie ', # 0x7a
'Chi ', # 0x7b
'Zhuo ', # 0x7c
'Zai ', # 0x7d
'Zai ', # 0x7e
'Can ', # 0x7f
'Yang ', # 0x80
'Qi ', # 0x81
'Zhong ', # 0x82
'Fen ', # 0x83
'Niu ', # 0x84
'Jiong ', # 0x85
'Wen ', # 0x86
'Po ', # 0x87
'Yi ', # 0x88
'Lu ', # 0x89
'Chui ', # 0x8a
'Pi ', # 0x8b
'Kai ', # 0x8c
'Pan ', # 0x8d
'Yan ', # 0x8e
'Kai ', # 0x8f
'Pang ', # 0x90
'Mu ', # 0x91
'Chao ', # 0x92
'Liao ', # 0x93
'Gui ', # 0x94
'Kang ', # 0x95
'Tun ', # 0x96
'Guang ', # 0x97
'Xin ', # 0x98
'Zhi ', # 0x99
'Guang ', # 0x9a
'Guang ', # 0x9b
'Wei ', # 0x9c
'Qiang ', # 0x9d
'[?] ', # 0x9e
'Da ', # 0x9f
'Xia ', # 0xa0
'Zheng ', # 0xa1
'Zhu ', # 0xa2
'Ke ', # 0xa3
'Zhao ', # 0xa4
'Fu ', # 0xa5
'Ba ', # 0xa6
'Duo ', # 0xa7
'Duo ', # 0xa8
'Ling ', # 0xa9
'Zhuo ', # 0xaa
'Xuan ', # 0xab
'Ju ', # 0xac
'Tan ', # 0xad
'Pao ', # 0xae
'Jiong ', # 0xaf
'Pao ', # 0xb0
'Tai ', # 0xb1
'Tai ', # 0xb2
'Bing ', # 0xb3
'Yang ', # 0xb4
'Tong ', # 0xb5
'Han ', # 0xb6
'Zhu ', # 0xb7
'Zha ', # 0xb8
'Dian ', # 0xb9
'Wei ', # 0xba
'Shi ', # 0xbb
'Lian ', # 0xbc
'Chi ', # 0xbd
'Huang ', # 0xbe
'[?] ', # 0xbf
'Hu ', # 0xc0
'Shuo ', # 0xc1
'Lan ', # 0xc2
'Jing ', # 0xc3
'Jiao ', # 0xc4
'Xu ', # 0xc5
'Xing ', # 0xc6
'Quan ', # 0xc7
'Lie ', # 0xc8
'Huan ', # 0xc9
'Yang ', # 0xca
'Xiao ', # 0xcb
'Xiu ', # 0xcc
'Xian ', # 0xcd
'Yin ', # 0xce
'Wu ', # 0xcf
'Zhou ', # 0xd0
'Yao ', # 0xd1
'Shi ', # 0xd2
'Wei ', # 0xd3
'Tong ', # 0xd4
'Xue ', # 0xd5
'Zai ', # 0xd6
'Kai ', # 0xd7
'Hong ', # 0xd8
'Luo ', # 0xd9
'Xia ', # 0xda
'Zhu ', # 0xdb
'Xuan ', # 0xdc
'Zheng ', # 0xdd
'Po ', # 0xde
'Yan ', # 0xdf
'Hui ', # 0xe0
'Guang ', # 0xe1
'Zhe ', # 0xe2
'Hui ', # 0xe3
'Kao ', # 0xe4
'[?] ', # 0xe5
'Fan ', # 0xe6
'Shao ', # 0xe7
'Ye ', # 0xe8
'Hui ', # 0xe9
'[?] ', # 0xea
'Tang ', # 0xeb
'Jin ', # 0xec
'Re ', # 0xed
'[?] ', # 0xee
'Xi ', # 0xef
'Fu ', # 0xf0
'Jiong ', # 0xf1
'Che ', # 0xf2
'Pu ', # 0xf3
'Jing ', # 0xf4
'Zhuo ', # 0xf5
'Ting ', # 0xf6
'Wan ', # 0xf7
'Hai ', # 0xf8
'Peng ', # 0xf9
'Lang ', # 0xfa
'Shan ', # 0xfb
'Hu ', # 0xfc
'Feng ', # 0xfd
'Chi ', # 0xfe
'Rong ', # 0xff
)
|
apache-2.0
|
Elbagoury/odoo
|
addons/event/event.py
|
56
|
19486
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import timedelta
import pytz
from openerp import models, fields, api, _
from openerp.exceptions import AccessError, Warning
class event_type(models.Model):
""" Event Type """
_name = 'event.type'
_description = 'Event Type'
name = fields.Char(string='Event Type', required=True)
default_reply_to = fields.Char(string='Default Reply-To',
help="The email address of the organizer which is put in the 'Reply-To' of all emails sent automatically at event or registrations confirmation. You can also put your email address of your mail gateway if you use one.")
default_email_event = fields.Many2one('email.template', string='Event Confirmation Email',
help="It will select this default confirmation event mail value when you choose this event")
default_email_registration = fields.Many2one('email.template', string='Registration Confirmation Email',
help="It will select this default confirmation registration mail value when you choose this event")
default_registration_min = fields.Integer(string='Default Minimum Registration', default=0,
help="It will select this default minimum value when you choose this event")
default_registration_max = fields.Integer(string='Default Maximum Registration', default=0,
help="It will select this default maximum value when you choose this event")
class event_event(models.Model):
"""Event"""
_name = 'event.event'
_description = 'Event'
_inherit = ['mail.thread', 'ir.needaction_mixin']
_order = 'date_begin'
name = fields.Char(string='Event Name', translate=True, required=True,
readonly=False, states={'done': [('readonly', True)]})
user_id = fields.Many2one('res.users', string='Responsible User',
default=lambda self: self.env.user,
readonly=False, states={'done': [('readonly', True)]})
type = fields.Many2one('event.type', string='Type of Event',
readonly=False, states={'done': [('readonly', True)]})
seats_max = fields.Integer(string='Maximum Available Seats', oldname='register_max',
readonly=True, states={'draft': [('readonly', False)]},
help="You can for each event define a maximum registration level. If you have too much registrations you are not able to confirm your event. (put 0 to ignore this rule )")
seats_min = fields.Integer(string='Minimum Reserved Seats', oldname='register_min',
readonly=True, states={'draft': [('readonly', False)]},
help="You can for each event define a minimum registration level. If you do not enough registrations you are not able to confirm your event. (put 0 to ignore this rule )")
seats_reserved = fields.Integer(oldname='register_current', string='Reserved Seats',
store=True, readonly=True, compute='_compute_seats')
seats_available = fields.Integer(oldname='register_avail', string='Available Seats',
store=True, readonly=True, compute='_compute_seats')
seats_unconfirmed = fields.Integer(oldname='register_prospect', string='Unconfirmed Seat Reservations',
store=True, readonly=True, compute='_compute_seats')
seats_used = fields.Integer(oldname='register_attended', string='Number of Participations',
store=True, readonly=True, compute='_compute_seats')
@api.multi
@api.depends('seats_max', 'registration_ids.state', 'registration_ids.nb_register')
def _compute_seats(self):
""" Determine reserved, available, reserved but unconfirmed and used seats. """
# initialize fields to 0
for event in self:
event.seats_unconfirmed = event.seats_reserved = event.seats_used = 0
# aggregate registrations by event and by state
if self.ids:
state_field = {
'draft': 'seats_unconfirmed',
'open':'seats_reserved',
'done': 'seats_used',
}
query = """ SELECT event_id, state, sum(nb_register)
FROM event_registration
WHERE event_id IN %s AND state IN ('draft', 'open', 'done')
GROUP BY event_id, state
"""
self._cr.execute(query, (tuple(self.ids),))
for event_id, state, num in self._cr.fetchall():
event = self.browse(event_id)
event[state_field[state]] += num
# compute seats_available
for event in self:
event.seats_available = \
event.seats_max - (event.seats_reserved + event.seats_used) \
if event.seats_max > 0 else 0
registration_ids = fields.One2many('event.registration', 'event_id', string='Registrations',
readonly=False, states={'done': [('readonly', True)]})
count_registrations = fields.Integer(string='Registrations',
compute='_count_registrations')
date_begin = fields.Datetime(string='Start Date', required=True,
readonly=True, states={'draft': [('readonly', False)]})
date_end = fields.Datetime(string='End Date', required=True,
readonly=True, states={'draft': [('readonly', False)]})
@api.model
def _tz_get(self):
return [(x, x) for x in pytz.all_timezones]
date_tz = fields.Selection('_tz_get', string='Timezone',
default=lambda self: self._context.get('tz', 'UTC'))
@api.one
@api.depends('date_tz', 'date_begin')
def _compute_date_begin_tz(self):
if self.date_begin:
self_in_tz = self.with_context(tz=(self.date_tz or 'UTC'))
date_begin = fields.Datetime.from_string(self.date_begin)
self.date_begin_located = fields.Datetime.to_string(fields.Datetime.context_timestamp(self_in_tz, date_begin))
else:
self.date_begin_located = False
@api.one
@api.depends('date_tz', 'date_end')
def _compute_date_end_tz(self):
if self.date_end:
self_in_tz = self.with_context(tz=(self.date_tz or 'UTC'))
date_end = fields.Datetime.from_string(self.date_end)
self.date_end_located = fields.Datetime.to_string(fields.Datetime.context_timestamp(self_in_tz, date_end))
else:
self.date_end_located = False
@api.one
@api.depends('address_id')
def _compute_country(self):
self.country_id = self.address_id.country_id
date_begin_located = fields.Datetime(string='Start Date Located', compute='_compute_date_begin_tz')
date_end_located = fields.Datetime(string='End Date Located', compute='_compute_date_end_tz')
state = fields.Selection([
('draft', 'Unconfirmed'),
('cancel', 'Cancelled'),
('confirm', 'Confirmed'),
('done', 'Done')
], string='Status', default='draft', readonly=True, required=True, copy=False,
help="If event is created, the status is 'Draft'. If event is confirmed for the particular dates the status is set to 'Confirmed'. If the event is over, the status is set to 'Done'. If event is cancelled the status is set to 'Cancelled'.")
email_registration_id = fields.Many2one(
'email.template', string='Registration Confirmation Email',
domain=[('model', '=', 'event.registration')],
help='This field contains the template of the mail that will be automatically sent each time a registration for this event is confirmed.')
email_confirmation_id = fields.Many2one(
'email.template', string='Event Confirmation Email',
domain=[('model', '=', 'event.registration')],
help="If you set an email template, each participant will receive this email announcing the confirmation of the event.")
reply_to = fields.Char(string='Reply-To Email',
readonly=False, states={'done': [('readonly', True)]},
help="The email address of the organizer is likely to be put here, with the effect to be in the 'Reply-To' of the mails sent automatically at event or registrations confirmation. You can also put the email address of your mail gateway if you use one.")
address_id = fields.Many2one('res.partner', string='Location',
default=lambda self: self.env.user.company_id.partner_id,
readonly=False, states={'done': [('readonly', True)]})
country_id = fields.Many2one('res.country', string='Country',
store=True, compute='_compute_country')
description = fields.Html(string='Description', oldname='note', translate=True,
readonly=False, states={'done': [('readonly', True)]})
company_id = fields.Many2one('res.company', string='Company', change_default=True,
default=lambda self: self.env['res.company']._company_default_get('event.event'),
required=False, readonly=False, states={'done': [('readonly', True)]})
organizer_id = fields.Many2one('res.partner', string='Organizer',
default=lambda self: self.env.user.company_id.partner_id)
is_subscribed = fields.Boolean(string='Subscribed',
compute='_compute_subscribe')
@api.one
@api.depends('registration_ids')
def _count_registrations(self):
self.count_registrations = len(self.registration_ids)
@api.one
@api.depends('registration_ids.user_id', 'registration_ids.state')
def _compute_subscribe(self):
""" Determine whether the current user is already subscribed to any event in `self` """
user = self.env.user
self.is_subscribed = any(
reg.user_id == user and reg.state in ('open', 'done')
for reg in self.registration_ids
)
@api.multi
@api.depends('name', 'date_begin', 'date_end')
def name_get(self):
result = []
for event in self:
date_begin = fields.Datetime.from_string(event.date_begin)
date_end = fields.Datetime.from_string(event.date_end)
dates = [fields.Date.to_string(fields.Datetime.context_timestamp(event, dt)) for dt in [date_begin, date_end] if dt]
dates = sorted(set(dates))
result.append((event.id, '%s (%s)' % (event.name, ' - '.join(dates))))
return result
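    # For example (illustrative, not from the original module): an event named
    # "Conference" running from 2015-06-01 to 2015-06-03 would get the display
    # name "Conference (2015-06-01 - 2015-06-03)" from name_get() above.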
@api.one
@api.constrains('seats_max', 'seats_available')
def _check_seats_limit(self):
if self.seats_max and self.seats_available < 0:
raise Warning(_('No more available seats.'))
@api.one
@api.constrains('date_begin', 'date_end')
def _check_closing_date(self):
if self.date_end < self.date_begin:
raise Warning(_('Closing Date cannot be set before Beginning Date.'))
@api.one
def button_draft(self):
self.state = 'draft'
@api.one
def button_cancel(self):
for event_reg in self.registration_ids:
if event_reg.state == 'done':
raise Warning(_("You have already set a registration for this event as 'Attended'. Please reset it to draft if you want to cancel this event."))
self.registration_ids.write({'state': 'cancel'})
self.state = 'cancel'
@api.one
def button_done(self):
self.state = 'done'
@api.one
def confirm_event(self):
if self.email_confirmation_id:
# send reminder that will confirm the event for all the people that were already confirmed
regs = self.registration_ids.filtered(lambda reg: reg.state not in ('draft', 'cancel'))
regs.mail_user_confirm()
self.state = 'confirm'
@api.one
def button_confirm(self):
""" Confirm Event and send confirmation email to all register peoples """
self.confirm_event()
@api.one
def subscribe_to_event(self):
""" Subscribe the current user to a given event """
user = self.env.user
num_of_seats = int(self._context.get('ticket', 1))
regs = self.registration_ids.filtered(lambda reg: reg.user_id == user)
# the subscription is done as SUPERUSER_ID because in case we share the
# kanban view, we want anyone to be able to subscribe
if not regs:
regs = regs.sudo().create({
'event_id': self.id,
'email': user.email,
                'name': user.name,
'user_id': user.id,
'nb_register': num_of_seats,
})
else:
regs.write({'nb_register': num_of_seats})
regs.sudo().confirm_registration()
@api.one
def unsubscribe_to_event(self):
""" Unsubscribe the current user from a given event """
# the unsubscription is done as SUPERUSER_ID because in case we share
# the kanban view, we want anyone to be able to unsubscribe
user = self.env.user
regs = self.sudo().registration_ids.filtered(lambda reg: reg.user_id == user)
regs.button_reg_cancel()
@api.onchange('type')
def _onchange_type(self):
if self.type:
self.reply_to = self.type.default_reply_to
self.email_registration_id = self.type.default_email_registration
self.email_confirmation_id = self.type.default_email_event
self.seats_min = self.type.default_registration_min
self.seats_max = self.type.default_registration_max
@api.onchange('date_begin')
def _onchange_date_begin(self):
if self.date_begin and not self.date_end:
date_begin = fields.Datetime.from_string(self.date_begin)
self.date_end = fields.Datetime.to_string(date_begin + timedelta(hours=1))
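# Illustrative usage sketch (not part of the original module): subscribe_to_event()
# reads the number of seats from the 'ticket' context key, so another model or a
# server action could subscribe the current user with something like:
#
#     event = self.env['event.event'].browse(event_id)   # hypothetical event_id
#     event.with_context(ticket=2).subscribe_to_event()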
class event_registration(models.Model):
_name = 'event.registration'
_description = 'Event Registration'
_inherit = ['mail.thread', 'ir.needaction_mixin']
_order = 'name, create_date desc'
origin = fields.Char(string='Source Document', readonly=True,
help="Reference of the sales order which created the registration")
nb_register = fields.Integer(string='Number of Participants', required=True, default=1,
readonly=True, states={'draft': [('readonly', False)]})
event_id = fields.Many2one('event.event', string='Event', required=True,
readonly=True, states={'draft': [('readonly', False)]})
partner_id = fields.Many2one('res.partner', string='Partner',
states={'done': [('readonly', True)]})
date_open = fields.Datetime(string='Registration Date', readonly=True)
date_closed = fields.Datetime(string='Attended Date', readonly=True)
reply_to = fields.Char(string='Reply-to Email', related='event_id.reply_to',
readonly=True)
log_ids = fields.One2many('mail.message', 'res_id', string='Logs',
domain=[('model', '=', _name)])
event_begin_date = fields.Datetime(string="Event Start Date", related='event_id.date_begin',
readonly=True)
event_end_date = fields.Datetime(string="Event End Date", related='event_id.date_end',
readonly=True)
user_id = fields.Many2one('res.users', string='User', states={'done': [('readonly', True)]})
company_id = fields.Many2one('res.company', string='Company', related='event_id.company_id',
store=True, readonly=True, states={'draft':[('readonly', False)]})
state = fields.Selection([
('draft', 'Unconfirmed'),
('cancel', 'Cancelled'),
('open', 'Confirmed'),
('done', 'Attended'),
], string='Status', default='draft', readonly=True, copy=False)
email = fields.Char(string='Email')
phone = fields.Char(string='Phone')
name = fields.Char(string='Name', select=True)
@api.one
@api.constrains('event_id', 'state', 'nb_register')
def _check_seats_limit(self):
if self.event_id.seats_max and \
self.event_id.seats_available < (self.nb_register if self.state == 'draft' else 0):
raise Warning(_('No more available seats.'))
@api.one
def do_draft(self):
self.state = 'draft'
@api.one
def confirm_registration(self):
self.event_id.message_post(
body=_('New registration confirmed: %s.') % (self.name or ''),
subtype="event.mt_event_registration")
self.message_post(body=_('Event Registration confirmed.'))
self.state = 'open'
@api.one
def registration_open(self):
""" Open Registration """
self.confirm_registration()
self.mail_user()
@api.one
def button_reg_close(self):
""" Close Registration """
today = fields.Datetime.now()
if self.event_id.date_begin <= today:
self.write({'state': 'done', 'date_closed': today})
else:
raise Warning(_("You must wait for the starting day of the event to do this action."))
@api.one
def button_reg_cancel(self):
self.state = 'cancel'
@api.one
def mail_user(self):
"""Send email to user with email_template when registration is done """
if self.event_id.state == 'confirm' and self.event_id.email_confirmation_id:
self.mail_user_confirm()
else:
template = self.event_id.email_registration_id
if template:
mail_message = template.send_mail(self.id)
@api.one
def mail_user_confirm(self):
"""Send email to user when the event is confirmed """
template = self.event_id.email_confirmation_id
if template:
mail_message = template.send_mail(self.id)
@api.multi
def message_get_suggested_recipients(self):
recipients = super(event_registration, self).message_get_suggested_recipients()
try:
for registration in self:
if registration.partner_id:
self._message_add_suggested_recipient(recipients, registration, partner=registration.partner_id, reason=_('Registrant'))
elif registration.email:
self._message_add_suggested_recipient(recipients, registration, email=registration.email, reason=_('Registrant Email'))
except AccessError: # no read access rights -> ignore suggested recipients
pass
return recipients
@api.onchange('partner_id')
def _onchange_partner(self):
if self.partner_id:
contact_id = self.partner_id.address_get().get('default', False)
if contact_id:
contact = self.env['res.partner'].browse(contact_id)
self.name = contact.name
self.email = contact.email
self.phone = contact.phone
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
sternshus/Arelle
|
setup.py
|
1
|
17326
|
"""
Created on Jan 30, 2011
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
"""
import sys
import os
import datetime
from distutils.command.build_py import build_py as _build_py
def get_version():
"""
Utility function to return the current version of the library, as defined
by the version string in the arelle's _pkg_meta.py file. The format follows
the standard Major.Minor.Fix notation.
:return: The version string in the standard Major.Minor.Fix notation.
:rtype: str
"""
import imp
source_dir = 'arelle'
with open('{}/_pkg_meta.py'.format(source_dir), 'rb') as fp:
mod = imp.load_source('_pkg_meta', source_dir, fp)
return mod.version
setup_requires = ['lxml']
# install_requires specifies a list of package dependencies that are
# installed when 'python setup.py install' is run. On Linux/Mac systems
# this also allows installation directly from the github repository
# (using 'pip install -e git+git://github.com/rheimbuch/Arelle.git#egg=Arelle')
# and the install_requires packages are auto-installed as well.
install_requires = ['lxml']
options = {}
scripts = []
cxFreezeExecutables = []
cmdclass = {}
# Files that should not be passed through 3to2 conversion
# in python 2.7 builds
build_py27_unmodified = [
'arelle/webserver/bottle.py',
'arelle/PythonUtil.py'
]
# Files that should be excluded from python 2.7 builds
build_py27_excluded = [
'arelle/CntlrQuickBooks.py',
'arelle/CntlrWinMain.py',
'arelle/CntlrWinTooltip.py',
'arelle/Dialog*.py',
'arelle/UiUtil.py',
'arelle/ViewWin*.py',
'arelle/WatchRss.py'
]
def match_patterns(path, pattern_list=[]):
from fnmatch import fnmatch
for pattern in pattern_list:
if fnmatch(path, pattern):
return True
return False
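# For example (illustrative), with the lists defined above:
#     match_patterns('arelle/PythonUtil.py', build_py27_unmodified)   # -> True
#     match_patterns('arelle/DialogAbout.py', build_py27_excluded)    # -> True ('arelle/Dialog*.py')
#     match_patterns('arelle/SomeOther.py', build_py27_excluded)      # -> False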
# When building under python 2.7, run refactorings from lib3to2
class build_py27(_build_py):
def __init__(self, *args, **kwargs):
_build_py.__init__(self, *args, **kwargs)
import logging
from lib2to3 import refactor
import lib3to2.main
rt_logger = logging.getLogger("RefactoringTool")
rt_logger.addHandler(logging.StreamHandler())
fixers = refactor.get_fixers_from_package('lib3to2.fixes')
fixers.remove('lib3to2.fixes.fix_print')
self.rtool = lib3to2.main.StdoutRefactoringTool(
fixers,
None,
[],
False,
False
)
def copy_file(self, source, target, preserve_mode=True):
if match_patterns(source, build_py27_unmodified):
_build_py.copy_file(self, source, target, preserve_mode)
elif match_patterns(source, build_py27_excluded):
print("excluding: %s" % source)
elif source.endswith('.py'):
try:
print("3to2 converting: %s => %s" % (source, target))
with open(source, 'rt') as input:
# ensure file contents have trailing newline
source_content = input.read() + "\n"
nval = self.rtool.refactor_string(source_content, source)
if nval is not None:
with open(target, 'wt') as output:
output.write('from __future__ import print_function\n')
output.write(str(nval))
else:
raise(Exception("Failed to parse: %s" % source))
except Exception as e:
print("3to2 error (%s => %s): %s" % (source,target,e))
if sys.version_info[0] < 3:
setup_requires.append('3to2')
# cmdclass allows you to override the distutils commands that are
# run through 'python setup.py somecmd'. Under python 2.7 replace
# the 'build_py' with a custom subclass (build_py27) that invokes
# 3to2 refactoring on each python file as its copied to the build
# directory.
cmdclass['build_py'] = build_py27
# (Under python3 no commands are replaced, so the default command classes are used.)
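# Illustrative sketch (not used by this build): the same cmdclass mechanism can
# swap in any custom command subclass, for example:
#
#     from distutils.command.build_py import build_py as _build_py
#     class build_py_logged(_build_py):          # hypothetical subclass
#         def run(self):
#             print("copying sources to the build directory")
#             _build_py.run(self)
#     cmdclass['build_py'] = build_py_logged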
try:
# Under python2.7, run build before running build_sphinx
import sphinx.setup_command
class build_sphinx_py27(sphinx.setup_command.BuildDoc):
def run(self):
self.run_command('build_py')
# Ensure sphinx looks at the "built" arelle libs that
# have passed through the 3to2 refactorings
# in `build_py27`.
sys.path.insert(0, os.path.abspath("./build/lib"))
sphinx.setup_command.BuildDoc.run(self)
if sys.version_info[0] < 3:
setup_requires.append('3to2')
setup_requires.append('sphinx')
# do a similar override of the 'build_sphinx' command to ensure
# that the 3to2-enabled build command runs before calling back to
# the default build_sphinx superclass.
cmdclass['build_sphinx'] = build_sphinx_py27
# There is also a python 2.x conditional switch in 'apidocs/conf.py'
# that sets sphinx to look at the 3to2 converted build files instead
# of the original unconverted source.
except ImportError as e:
print("Documentation production by Sphinx is not available: %s" % e)
''' this section was for py2app which no longer works on Mavericks,
switch below to cx_Freeze
if sys.platform == 'darwin':
from setuptools import setup, find_packages
setup_requires.append('py2app')
# Cross-platform applications generally expect sys.argv to
# be used for opening files.
plist = dict(CFBundleIconFile='arelle.icns',
NSHumanReadableCopyright='(c) 2010-2013 Mark V Systems Limited')
# MacOS launches CntlrWinMain and uses "ARELLE_ARGS" to effect console (shell) mode
options = dict(py2app=dict(app=['arelle/CntlrWinMain.py'],
iconfile='arelle/images/arelle.icns',
plist=plist,
#
# rdflib & isodate egg files: rename .zip cpy lib & egg-info subdirectories to site-packages directory
#
includes=['lxml', 'lxml.etree',
'lxml._elementpath', 'pg8000',
'rdflib', 'rdflib.extras', 'rdflib.tools',
# more rdflib plugin modules may need to be added later
'rdflib.plugins', 'rdflib.plugins.memory',
'rdflib.plugins.parsers',
'rdflib.plugins.serializers', 'rdflib.plugins.serializers.rdfxml', 'rdflib.plugins.serializers.turtle', 'rdflib.plugins.serializers.xmlwriter',
'rdflib.plugins.sparql',
'rdflib.plugins.stores',
'isodate', 'regex', 'gzip', 'zlib']))
packages = find_packages('.')
dataFiles = [
#XXX: this breaks build on Lion/Py3.2 --mike
#'--iconfile',
('config',['arelle/config/' + f for f in os.listdir('arelle/config')]),
('doc',['arelle/doc/' + f for f in os.listdir('arelle/doc')]),
('examples',['arelle/examples/' + f for f in os.listdir('arelle/examples')]),
('images',['arelle/images/' + f for f in os.listdir('arelle/images')]),
('examples/plugin',['arelle/examples/plugin/' + f for f in os.listdir('arelle/examples/plugin')]),
('examples/plugin/locale/fr/LC_MESSAGES',['arelle/examples/plugin/locale/fr/LC_MESSAGES/' + f for f in os.listdir('arelle/examples/plugin/locale/fr/LC_MESSAGES')]),
('plugin',['arelle/plugin/' + f for f in os.listdir('arelle/plugin')]),
('scripts',['arelle/scripts/' + f for f in os.listdir('arelle/scripts-macOS')]),
]
for dir, subDirs, files in os.walk('arelle/locale'):
dir = dir.replace('\\','/')
dataFiles.append((dir[7:],
[dir + "/" + f for f in files]))
cx_FreezeExecutables = []
#End of py2app defunct section
'''
# works on ubuntu with hand-built cx_Freeze
if sys.platform in ('darwin', 'linux2', 'linux', 'sunos5'):
from setuptools import find_packages
try:
from cx_Freeze import setup, Executable
cx_FreezeExecutables = [
Executable(script="arelleGUI.pyw", targetName="arelle"),
Executable(script="arelleCmdLine.py")
]
except:
from setuptools import setup
cx_FreezeExecutables = []
packages = find_packages(
'.', # note that new setuptools finds plugin and lib unwanted stuff
exclude=['*.plugin.*', '*.lib.*']
)
dataFiles = []
includeFiles = [
('arelle/config','config'),
('arelle/doc','doc'),
('arelle/images','images'),
('arelle/locale','locale'),
('arelle/examples','examples'),
('arelle/examples/plugin','examples/plugin'),
(
'arelle/examples/plugin/locale/fr/LC_MESSAGES',
'examples/plugin/locale/fr/LC_MESSAGES'
),
('arelle/plugin','plugin')
]
if sys.platform == 'darwin':
includeFiles.append(('arelle/scripts-macOS','scripts'))
        # copy tcl and tk built as described: https://www.tcl.tk/doc/howto/compile.html#mac
includeFiles.append(('/Library/Frameworks/Tcl.framework/Versions/8.6/Resources/Scripts','tcl8.6'))
includeFiles.append(('/Library/Frameworks/Tk.framework/Versions/8.6/Resources/Scripts','tk8.6'))
else:
includeFiles.append(('arelle/scripts-unix','scripts'))
if os.path.exists("/etc/redhat-release"):
# extra libraries needed for red hat
includeFiles.append(('/usr/local/lib/libexslt.so', 'libexslt.so'))
includeFiles.append(('/usr/local/lib/libxml2.so', 'libxml2.so'))
# for some reason redhat needs libxml2.so.2 as well
includeFiles.append(('/usr/local/lib/libxml2.so.2', 'libxml2.so.2'))
includeFiles.append(('/usr/local/lib/libxslt.so', 'libxslt.so'))
includeFiles.append(('/usr/local/lib/libz.so', 'libz.so'))
if os.path.exists("version.txt"):
includeFiles.append(('version.txt', 'version.txt'))
includeLibs = [
'lxml', 'lxml.etree', 'lxml._elementpath', 'lxml.html',
'pg8000', 'pymysql', 'sqlite3', 'numpy',
# note cx_Oracle isn't here because it is version and machine specific,
# ubuntu not likely working
# more rdflib plugin modules may need to be added later
'rdflib',
'rdflib.extras',
'rdflib.tools',
'rdflib.plugins',
'rdflib.plugins.memory',
'rdflib.plugins.parsers',
'rdflib.plugins.serializers',
'rdflib.plugins.serializers.rdfxml',
'rdflib.plugins.serializers.turtle',
'rdflib.plugins.serializers.xmlwriter',
'rdflib.plugins.sparql',
'rdflib.plugins.stores',
'isodate', 'regex', 'gzip', 'zlib',
'openpyxl' # openpyxl's __init__.py must be hand edited, see https://bitbucket.org/openpyxl/openpyxl/pull-requests/80/__about__py/diff
]
# uncomment the next two files if cx_Freezing with EdgarRenderer
# note that openpyxl must be 2.1.4 at this time
if os.path.exists("arelle/plugin/EdgarRenderer"):
includeLibs += [
'cherrypy', 'cherrypy.wsgiserver.wsgiserver3',
'dateutil',
'dateutil.relativedelta',
'six',
'tornado',
'pyparsing',
'matplotlib'
]
import matplotlib
dataFiles += matplotlib.get_py2exe_datafiles()
if sys.platform != 'sunos5':
try:
import pyodbc # see if this is importable
includeLibs.append('pyodbc') # has C compiling errors on Sparc
except ImportError:
pass
options = dict(
build_exe={
"include_files": includeFiles,
#
# rdflib & isodate egg files: rename .zip cpy lib & egg-info
# subdirectories to site-packages directory
#
"includes": includeLibs,
"packages": packages,
}
)
if sys.platform == 'darwin':
options["bdist_mac"] = {
"iconfile": 'arelle/images/arelle.icns',
"bundle_name": 'Arelle',
}
elif sys.platform == 'win32':
from setuptools import find_packages
from cx_Freeze import setup, Executable
# py2exe is not ported to Python 3 yet
# setup_requires.append('py2exe')
# FIXME: this should use the entry_points mechanism
packages = find_packages('.')
print("packages={}".format(packages))
dataFiles = None
win32includeFiles = [
('arelle\\config','config'),
('arelle\\doc','doc'),
('arelle\\images','images'),
('arelle\\locale','locale'),
('arelle\\examples','examples'),
('arelle\\examples\\plugin','examples/plugin'),
(
'arelle\\examples\\plugin\\locale\\fr\\LC_MESSAGES',
'examples/plugin/locale/fr/LC_MESSAGES'
),
('arelle\\plugin','plugin'),
('arelle\\scripts-windows','scripts')
]
if 'arelle.webserver' in packages:
win32includeFiles.append('QuickBooks.qwc')
if os.path.exists("version.txt"):
win32includeFiles.append('version.txt')
includeLibs = [
'lxml', 'lxml.etree', 'lxml._elementpath', 'lxml.html',
'pg8000', 'pymysql', 'cx_Oracle', 'pyodbc', 'sqlite3', 'numpy',
# more rdflib plugin modules may need to be added later
'rdflib',
'rdflib.extras',
'rdflib.tools',
'rdflib.plugins',
'rdflib.plugins.memory',
'rdflib.plugins.parsers',
'rdflib.plugins.serializers',
'rdflib.plugins.serializers.rdfxml',
'rdflib.plugins.serializers.turtle',
'rdflib.plugins.serializers.xmlwriter',
'rdflib.plugins.sparql',
'rdflib.plugins.stores',
'isodate', 'regex', 'gzip', 'zlib',
'openpyxl' # openpyxl's __init__.py must be hand edited, see https://bitbucket.org/openpyxl/openpyxl/pull-requests/80/__about__py/diff
]
# uncomment the next line if cx_Freezing with EdgarRenderer
# note that openpyxl must be 2.1.4 at this time
# removed tornado
if os.path.exists("arelle/plugin/EdgarRenderer"):
includeLibs += [
'cherrypy', 'cherrypy.wsgiserver.wsgiserver3',
'dateutil', 'dateutil.relativedelta',
"six", "pyparsing", "matplotlib"
]
options = dict(
build_exe={
"include_files": win32includeFiles,
"include_msvcr": True, # include MSVCR100
# "icon": 'arelle\\images\\arelle16x16and32x32.ico',
"packages": packages,
#
# rdflib & isodate egg files: rename .zip cpy lib & egg-info
# subdirectories to site-packages directory
#
"includes": includeLibs
}
)
# windows uses arelleGUI.exe to launch in GUI mode, arelleCmdLine.exe in command line mode
cx_FreezeExecutables = [
Executable(
script="arelleGUI.pyw",
base="Win32GUI",
icon='arelle\\images\\arelle16x16and32x32.ico',
),
Executable(
script="arelleCmdLine.py",
)
]
else:
#print("Your platform {0} isn't supported".format(sys.platform))
#sys.exit(1)
from setuptools import os, setup, find_packages
packages = find_packages(
'.', # note that new setuptools finds plugin and lib unwanted stuff
exclude=['*.plugin.*', '*.lib.*']
)
dataFiles = [(
'config',
['arelle/config/' + f for f in os.listdir('arelle/config')]
)]
cx_FreezeExecutables = []
timestamp = datetime.datetime.utcnow()
setup(
name='Arelle',
version=get_version(),
description='An open source XBRL platform',
long_description=open('README.md').read(),
author='arelle.org',
author_email='[email protected]',
url='http://www.arelle.org',
download_url='http://www.arelle.org/download',
cmdclass=cmdclass,
include_package_data=True, # note: this uses MANIFEST.in
packages=packages,
data_files=dataFiles,
platforms=['OS Independent'],
license='Apache-2',
keywords=['xbrl'],
classifiers=[
'Development Status :: 1 - Active',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache-2 License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Operating System :: OS Independent',
'Topic :: XBRL Validation and Versioning',
],
scripts=scripts,
entry_points={
'console_scripts': [
'arelle=arelle.CntlrCmdLine:main',
'arelle-gui=arelle.CntlrWinMain:main',
]
},
setup_requires=setup_requires,
install_requires=install_requires,
options=options,
executables=cx_FreezeExecutables,
)
|
apache-2.0
|
jordanemedlock/psychtruths
|
temboo/core/Library/SendGrid/NewsletterAPI/Newsletter/EditNewsletter.py
|
5
|
5093
|
# -*- coding: utf-8 -*-
###############################################################################
#
# EditNewsletter
# Edit an existing newsletter.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class EditNewsletter(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the EditNewsletter Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(EditNewsletter, self).__init__(temboo_session, '/Library/SendGrid/NewsletterAPI/Newsletter/EditNewsletter')
def new_input_set(self):
return EditNewsletterInputSet()
def _make_result_set(self, result, path):
return EditNewsletterResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return EditNewsletterChoreographyExecution(session, exec_id, path)
class EditNewsletterInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the EditNewsletter
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key obtained from SendGrid.)
"""
super(EditNewsletterInputSet, self)._set_input('APIKey', value)
def set_APIUser(self, value):
"""
Set the value of the APIUser input for this Choreo. ((required, string) The username registered with SendGrid.)
"""
super(EditNewsletterInputSet, self)._set_input('APIUser', value)
def set_HTML(self, value):
"""
Set the value of the HTML input for this Choreo. ((required, string) The html portion of the newsletter.)
"""
super(EditNewsletterInputSet, self)._set_input('HTML', value)
def set_Identity(self, value):
"""
        Set the value of the Identity input for this Choreo. ((required, string) The new Identity for the newsletter that is being edited.)
"""
super(EditNewsletterInputSet, self)._set_input('Identity', value)
def set_Name(self, value):
"""
Set the value of the Name input for this Choreo. ((required, string) The name of the newsletter that is being edited.)
"""
super(EditNewsletterInputSet, self)._set_input('Name', value)
def set_NewName(self, value):
"""
Set the value of the NewName input for this Choreo. ((required, string) The new name of the newsletter that is being edited.)
"""
super(EditNewsletterInputSet, self)._set_input('NewName', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format of the response from SendGrid, in either json, or xml. Default is set to json.)
"""
super(EditNewsletterInputSet, self)._set_input('ResponseFormat', value)
def set_Subject(self, value):
"""
Set the value of the Subject input for this Choreo. ((required, string) The new subject for the edited newsletter.)
"""
super(EditNewsletterInputSet, self)._set_input('Subject', value)
def set_Text(self, value):
"""
Set the value of the Text input for this Choreo. ((required, string) The text portion of the newsletter.)
"""
super(EditNewsletterInputSet, self)._set_input('Text', value)
class EditNewsletterResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the EditNewsletter Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from SendGrid. The format corresponds to the ResponseFormat input. Default is json.)
"""
return self._output.get('Response', None)
class EditNewsletterChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return EditNewsletterResultSet(response, path)
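# Illustrative usage sketch (not part of the generated SDK file); assumes a valid
# TembooSession was created elsewhere and uses placeholder credentials:
#
#     choreo = EditNewsletter(session)
#     inputs = choreo.new_input_set()
#     inputs.set_APIUser("my-sendgrid-user")
#     inputs.set_APIKey("my-sendgrid-api-key")
#     inputs.set_Identity("my-identity")
#     inputs.set_Name("Old Newsletter")
#     inputs.set_NewName("New Newsletter")
#     inputs.set_Subject("Updated subject")
#     inputs.set_HTML("<p>Hello</p>")
#     inputs.set_Text("Hello")
#     results = choreo.execute_with_results(inputs)   # assumed base-class helper
#     print(results.get_Response())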
|
apache-2.0
|
xiaotdl/ansible
|
lib/ansible/template/template.py
|
267
|
1397
|
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import jinja2
__all__ = ['AnsibleJ2Template']
class AnsibleJ2Template(jinja2.environment.Template):
'''
A helper class, which prevents Jinja2 from running _jinja2_vars through dict().
Without this, {% include %} and similar will create new contexts unlike the special
one created in template_from_file. This ensures they are all alike, except for
potential locals.
'''
def new_context(self, vars=None, shared=False, locals=None):
return jinja2.runtime.Context(self.environment, vars.add_locals(locals), self.name, self.blocks)
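# Illustrative sketch (not part of this file): the class is intended to be
# installed as the template class of the Jinja2 environment used by Ansible's
# templar, roughly:
#
#     environment = jinja2.Environment()
#     environment.template_class = AnsibleJ2Template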
|
gpl-3.0
|
maelnor/nova
|
nova/api/openstack/compute/plugins/v3/floating_ip_pools.py
|
5
|
2145
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova import network
ALIAS = 'os-floating-ip-pools'
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def _translate_floating_ip_view(pool_name):
return {
'name': pool_name,
}
def _translate_floating_ip_pools_view(pools):
return {
'floating_ip_pools': [_translate_floating_ip_view(pool_name)
for pool_name in pools]
}
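# For example (illustrative), pools named ['nova', 'other'] are rendered as:
#     {'floating_ip_pools': [{'name': 'nova'}, {'name': 'other'}]}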
class FloatingIPPoolsController(object):
"""The Floating IP Pool API controller for the OpenStack API."""
def __init__(self):
self.network_api = network.API()
super(FloatingIPPoolsController, self).__init__()
@extensions.expected_errors(())
def index(self, req):
"""Return a list of pools."""
context = req.environ['nova.context']
authorize(context)
pools = self.network_api.get_floating_ip_pools(context)
return _translate_floating_ip_pools_view(pools)
class FloatingIpPools(extensions.V3APIExtensionBase):
"""Floating IPs support."""
name = "FloatingIpPools"
alias = ALIAS
version = 1
def get_resources(self):
resource = [extensions.ResourceExtension(ALIAS,
FloatingIPPoolsController())]
return resource
def get_controller_extensions(self):
"""It's an abstract function V3APIExtensionBase and the extension
will not be loaded without it.
"""
return []
|
apache-2.0
|
Maple1401/awesome-python-webapp
|
www/markdown2.py
|
27
|
92765
|
#!/usr/bin/env python
# Copyright (c) 2012 Trent Mick.
# Copyright (c) 2007-2008 ActiveState Corp.
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
from __future__ import generators
r"""A fast and complete Python implementation of Markdown.
[from http://daringfireball.net/projects/markdown/]
> Markdown is a text-to-HTML filter; it translates an easy-to-read /
> easy-to-write structured text format into HTML. Markdown's text
> format is most similar to that of plain text email, and supports
> features such as headers, *emphasis*, code blocks, blockquotes, and
> links.
>
> Markdown's syntax is designed not as a generic markup language, but
> specifically to serve as a front-end to (X)HTML. You can use span-level
> HTML tags anywhere in a Markdown document, and you can use block level
> HTML tags (like <div> and <table> as well).
Module usage:
>>> import markdown2
>>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)`
u'<p><em>boo!</em></p>\n'
>>> markdowner = Markdown()
>>> markdowner.convert("*boo!*")
u'<p><em>boo!</em></p>\n'
>>> markdowner.convert("**boom!**")
u'<p><strong>boom!</strong></p>\n'
This implementation of Markdown implements the full "core" syntax plus a
number of extras (e.g., code syntax coloring, footnotes) as described on
<https://github.com/trentm/python-markdown2/wiki/Extras>.
"""
cmdln_desc = """A fast and complete Python implementation of Markdown, a
text-to-HTML conversion tool for web writers.
Supported extra syntax options (see -x|--extras option below and
see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):
* code-friendly: Disable _ and __ for em and strong.
* cuddled-lists: Allow lists to be cuddled to the preceding paragraph.
* fenced-code-blocks: Allows a code block to not have to be indented
by fencing it with '```' on a line before and after. Based on
<http://github.github.com/github-flavored-markdown/> with support for
syntax highlighting.
* footnotes: Support footnotes as in use on daringfireball.net and
implemented in other Markdown processors (tho not in Markdown.pl v1.0.1).
* header-ids: Adds "id" attributes to headers. The id value is a slug of
the header text.
* html-classes: Takes a dict mapping html tag names (lowercase) to a
string to use for a "class" tag attribute. Currently only supports
"pre" and "code" tags. Add an issue if you require this for other tags.
* markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to
have markdown processing be done on its contents. Similar to
<http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with
some limitations.
* metadata: Extract metadata from a leading '---'-fenced block.
See <https://github.com/trentm/python-markdown2/issues/77> for details.
* nofollow: Add `rel="nofollow"` to add `<a>` tags with an href. See
<http://en.wikipedia.org/wiki/Nofollow>.
* pyshell: Treats unindented Python interactive shell sessions as <code>
blocks.
* link-patterns: Auto-link given regex patterns in text (e.g. bug number
references, revision number references).
* smarty-pants: Replaces ' and " with curly quotation marks or curly
apostrophes. Replaces --, ---, ..., and . . . with en dashes, em dashes,
and ellipses.
* toc: The returned HTML string gets a new "toc_html" attribute which is
a Table of Contents for the document. (experimental)
* xml: Passes one-liner processing instructions and namespaced XML tags.
* wiki-tables: Google Code Wiki-style tables. See
<http://code.google.com/p/support/wiki/WikiSyntax#Tables>.
"""
# Dev Notes:
# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
# not yet sure if there are implications with this. Compare 'pydoc sre'
# and 'perldoc perlre'.
__version_info__ = (2, 1, 0)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "Trent Mick"
import os
import sys
from pprint import pprint
import re
import logging
try:
from hashlib import md5
except ImportError:
from md5 import md5
import optparse
from random import random, randint
import codecs
#---- Python version compat
try:
from urllib.parse import quote # python3
except ImportError:
from urllib import quote # python2
if sys.version_info[:2] < (2,4):
from sets import Set as set
def reversed(sequence):
for i in sequence[::-1]:
yield i
# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3).
if sys.version_info[0] <= 2:
py3 = False
try:
bytes
except NameError:
bytes = str
base_string_type = basestring
elif sys.version_info[0] >= 3:
py3 = True
unicode = str
base_string_type = str
#---- globals
DEBUG = False
log = logging.getLogger("markdown")
DEFAULT_TAB_WIDTH = 4
SECRET_SALT = bytes(randint(0, 1000000))
def _hash_text(s):
return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest()
# Table of hash values for escaped characters:
g_escape_table = dict([(ch, _hash_text(ch))
for ch in '\\`*_{}[]()>#+-.!'])
#---- exceptions
class MarkdownError(Exception):
pass
#---- public api
def markdown_path(path, encoding="utf-8",
html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
use_file_vars=False):
fp = codecs.open(path, 'r', encoding)
text = fp.read()
fp.close()
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
use_file_vars=use_file_vars).convert(text)
def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
use_file_vars=False):
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
use_file_vars=use_file_vars).convert(text)
class Markdown(object):
# The dict of "extras" to enable in processing -- a mapping of
# extra name to argument for the extra. Most extras do not have an
# argument, in which case the value is None.
#
# This can be set via (a) subclassing and (b) the constructor
# "extras" argument.
extras = None
urls = None
titles = None
html_blocks = None
html_spans = None
html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py
# Used to track when we're inside an ordered or unordered list
# (see _ProcessListItems() for details):
list_level = 0
_ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
extras=None, link_patterns=None, use_file_vars=False):
if html4tags:
self.empty_element_suffix = ">"
else:
self.empty_element_suffix = " />"
self.tab_width = tab_width
# For compatibility with earlier markdown2.py and with
# markdown.py's safe_mode being a boolean,
# safe_mode == True -> "replace"
if safe_mode is True:
self.safe_mode = "replace"
else:
self.safe_mode = safe_mode
# Massaging and building the "extras" info.
if self.extras is None:
self.extras = {}
elif not isinstance(self.extras, dict):
self.extras = dict([(e, None) for e in self.extras])
if extras:
if not isinstance(extras, dict):
extras = dict([(e, None) for e in extras])
self.extras.update(extras)
assert isinstance(self.extras, dict)
if "toc" in self.extras and not "header-ids" in self.extras:
self.extras["header-ids"] = None # "toc" implies "header-ids"
self._instance_extras = self.extras.copy()
self.link_patterns = link_patterns
self.use_file_vars = use_file_vars
self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
self._escape_table = g_escape_table.copy()
if "smarty-pants" in self.extras:
self._escape_table['"'] = _hash_text('"')
self._escape_table["'"] = _hash_text("'")
def reset(self):
self.urls = {}
self.titles = {}
self.html_blocks = {}
self.html_spans = {}
self.list_level = 0
self.extras = self._instance_extras.copy()
if "footnotes" in self.extras:
self.footnotes = {}
self.footnote_ids = []
if "header-ids" in self.extras:
self._count_from_header_id = {} # no `defaultdict` in Python 2.4
if "metadata" in self.extras:
self.metadata = {}
# Per <https://developer.mozilla.org/en-US/docs/HTML/Element/a> "rel"
# should only be used in <a> tags with an "href" attribute.
_a_nofollow = re.compile(r"<(a)([^>]*href=)", re.IGNORECASE)
def convert(self, text):
"""Convert the given text."""
# Main function. The order in which other subs are called here is
# essential. Link and image substitutions need to happen before
# _EscapeSpecialChars(), so that any *'s or _'s in the <a>
# and <img> tags get encoded.
# Clear the global hashes. If we don't clear these, you get conflicts
# from other articles when generating a page which contains more than
# one article (e.g. an index page that shows the N most recent
# articles):
self.reset()
if not isinstance(text, unicode):
#TODO: perhaps shouldn't presume UTF-8 for string input?
text = unicode(text, 'utf-8')
if self.use_file_vars:
# Look for emacs-style file variable hints.
emacs_vars = self._get_emacs_vars(text)
if "markdown-extras" in emacs_vars:
splitter = re.compile("[ ,]+")
for e in splitter.split(emacs_vars["markdown-extras"]):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
self.extras[ename] = earg
# Standardize line endings:
text = re.sub("\r\n|\r", "\n", text)
# Make sure $text ends with a couple of newlines:
text += "\n\n"
# Convert all tabs to spaces.
text = self._detab(text)
# Strip any lines consisting only of spaces and tabs.
# This makes subsequent regexen easier to write, because we can
# match consecutive blank lines with /\n+/ instead of something
# contorted like /[ \t]*\n+/ .
text = self._ws_only_line_re.sub("", text)
# strip metadata from head and extract
if "metadata" in self.extras:
text = self._extract_metadata(text)
text = self.preprocess(text)
if self.safe_mode:
text = self._hash_html_spans(text)
# Turn block-level HTML blocks into hash entries
text = self._hash_html_blocks(text, raw=True)
# Strip link definitions, store in hashes.
if "footnotes" in self.extras:
# Must do footnotes first because an unlucky footnote defn
# looks like a link defn:
# [^4]: this "looks like a link defn"
text = self._strip_footnote_definitions(text)
text = self._strip_link_definitions(text)
text = self._run_block_gamut(text)
if "footnotes" in self.extras:
text = self._add_footnotes(text)
text = self.postprocess(text)
text = self._unescape_special_chars(text)
if self.safe_mode:
text = self._unhash_html_spans(text)
if "nofollow" in self.extras:
text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text)
text += "\n"
rv = UnicodeWithAttrs(text)
if "toc" in self.extras:
rv._toc = self._toc
if "metadata" in self.extras:
rv.metadata = self.metadata
return rv
def postprocess(self, text):
"""A hook for subclasses to do some postprocessing of the html, if
desired. This is called before unescaping of special chars and
unhashing of raw HTML spans.
"""
return text
def preprocess(self, text):
"""A hook for subclasses to do some preprocessing of the Markdown, if
desired. This is called after basic formatting of the text, but prior
to any extras, safe mode, etc. processing.
"""
return text
# Is metadata if the content starts with '---'-fenced `key: value`
# pairs. E.g. (indented for presentation):
# ---
# foo: bar
# another-var: blah blah
# ---
_metadata_pat = re.compile("""^---[ \t]*\n((?:[ \t]*[^ \t:]+[ \t]*:[^\n]*\n)+)---[ \t]*\n""")
def _extract_metadata(self, text):
# fast test
if not text.startswith("---"):
return text
match = self._metadata_pat.match(text)
if not match:
return text
tail = text[len(match.group(0)):]
metadata_str = match.group(1).strip()
for line in metadata_str.split('\n'):
key, value = line.split(':', 1)
self.metadata[key.strip()] = value.strip()
return tail
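    # For example (illustrative), a document beginning with
    #     ---
    #     foo: bar
    #     ---
    # leaves self.metadata == {'foo': 'bar'} and _extract_metadata() returns
    # only the text after the closing '---' fence.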
_emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
# This regular expression is intended to match blocks like this:
# PREFIX Local Variables: SUFFIX
# PREFIX mode: Tcl SUFFIX
# PREFIX End: SUFFIX
# Some notes:
# - "[ \t]" is used instead of "\s" to specifically exclude newlines
# - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
# not like anything other than Unix-style line terminators.
_emacs_local_vars_pat = re.compile(r"""^
(?P<prefix>(?:[^\r\n|\n|\r])*?)
[\ \t]*Local\ Variables:[\ \t]*
(?P<suffix>.*?)(?:\r\n|\n|\r)
(?P<content>.*?\1End:)
""", re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
def _get_emacs_vars(self, text):
"""Return a dictionary of emacs-style local variables.
Parsing is done loosely according to this spec (and according to
some in-practice deviations from this):
http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
"""
emacs_vars = {}
SIZE = pow(2, 13) # 8kB
# Search near the start for a '-*-'-style one-liner of variables.
head = text[:SIZE]
if "-*-" in head:
match = self._emacs_oneliner_vars_pat.search(head)
if match:
emacs_vars_str = match.group(1)
assert '\n' not in emacs_vars_str
emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
if s.strip()]
if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
# While not in the spec, this form is allowed by emacs:
# -*- Tcl -*-
# where the implied "variable" is "mode". This form
# is only allowed if there are no other variables.
emacs_vars["mode"] = emacs_var_strs[0].strip()
else:
for emacs_var_str in emacs_var_strs:
try:
variable, value = emacs_var_str.strip().split(':', 1)
except ValueError:
log.debug("emacs variables error: malformed -*- "
"line: %r", emacs_var_str)
continue
# Lowercase the variable name because Emacs allows "Mode"
# or "mode" or "MoDe", etc.
emacs_vars[variable.lower()] = value.strip()
tail = text[-SIZE:]
if "Local Variables" in tail:
match = self._emacs_local_vars_pat.search(tail)
if match:
prefix = match.group("prefix")
suffix = match.group("suffix")
lines = match.group("content").splitlines(0)
#print "prefix=%r, suffix=%r, content=%r, lines: %s"\
# % (prefix, suffix, match.group("content"), lines)
# Validate the Local Variables block: proper prefix and suffix
# usage.
for i, line in enumerate(lines):
if not line.startswith(prefix):
log.debug("emacs variables error: line '%s' "
"does not use proper prefix '%s'"
% (line, prefix))
return {}
# Don't validate suffix on last line. Emacs doesn't care,
# neither should we.
if i != len(lines)-1 and not line.endswith(suffix):
log.debug("emacs variables error: line '%s' "
"does not use proper suffix '%s'"
% (line, suffix))
return {}
# Parse out one emacs var per line.
continued_for = None
for line in lines[:-1]: # no var on the last line ("PREFIX End:")
if prefix: line = line[len(prefix):] # strip prefix
if suffix: line = line[:-len(suffix)] # strip suffix
line = line.strip()
if continued_for:
variable = continued_for
if line.endswith('\\'):
line = line[:-1].rstrip()
else:
continued_for = None
emacs_vars[variable] += ' ' + line
else:
try:
variable, value = line.split(':', 1)
except ValueError:
log.debug("local variables error: missing colon "
"in local variables entry: '%s'" % line)
continue
# Do NOT lowercase the variable name, because Emacs only
# allows "mode" (and not "Mode", "MoDe", etc.) in this block.
value = value.strip()
if value.endswith('\\'):
value = value[:-1].rstrip()
continued_for = variable
else:
continued_for = None
emacs_vars[variable] = value
# Unquote values.
for var, val in list(emacs_vars.items()):
if len(val) > 1 and (val.startswith('"') and val.endswith('"')
or val.startswith('"') and val.endswith('"')):
emacs_vars[var] = val[1:-1]
return emacs_vars
# Cribbed from a post by Bart Lateur:
# <http://www.nntp.perl.org/group/perl.macperl.anyperl/154>
_detab_re = re.compile(r'(.*?)\t', re.M)
def _detab_sub(self, match):
g1 = match.group(1)
return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width))
def _detab(self, text):
r"""Remove (leading?) tabs from a file.
>>> m = Markdown()
>>> m._detab("\tfoo")
' foo'
>>> m._detab(" \tfoo")
' foo'
>>> m._detab("\t foo")
' foo'
>>> m._detab(" foo")
' foo'
>>> m._detab(" foo\n\tbar\tblam")
' foo\n bar blam'
"""
if '\t' not in text:
return text
return self._detab_re.subn(self._detab_sub, text)[0]
# I broke out the html5 tags here and add them to _block_tags_a and
# _block_tags_b. This way html5 tags are easy to keep track of.
_html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption'
_block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
_block_tags_a += _html5tags
_strict_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_a,
re.X | re.M)
_block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
_block_tags_b += _html5tags
_liberal_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
.*</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_b,
re.X | re.M)
_html_markdown_attr_re = re.compile(
r'''\s+markdown=("1"|'1')''')
def _hash_html_block_sub(self, match, raw=False):
html = match.group(1)
if raw and self.safe_mode:
html = self._sanitize_html(html)
elif 'markdown-in-html' in self.extras and 'markdown=' in html:
first_line = html.split('\n', 1)[0]
m = self._html_markdown_attr_re.search(first_line)
if m:
lines = html.split('\n')
middle = '\n'.join(lines[1:-1])
last_line = lines[-1]
first_line = first_line[:m.start()] + first_line[m.end():]
f_key = _hash_text(first_line)
self.html_blocks[f_key] = first_line
l_key = _hash_text(last_line)
self.html_blocks[l_key] = last_line
return ''.join(["\n\n", f_key,
"\n\n", middle, "\n\n",
l_key, "\n\n"])
key = _hash_text(html)
self.html_blocks[key] = html
return "\n\n" + key + "\n\n"
def _hash_html_blocks(self, text, raw=False):
"""Hashify HTML blocks
We only want to do this for block-level HTML tags, such as headers,
lists, and tables. That's because we still want to wrap <p>s around
"paragraphs" that are wrapped in non-block-level tags, such as anchors,
phrase emphasis, and spans. The list of tags we're looking for is
hard-coded.
@param raw {boolean} indicates if these are raw HTML blocks in
the original source. It makes a difference in "safe" mode.
"""
if '<' not in text:
return text
# Pass `raw` value into our calls to self._hash_html_block_sub.
hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)
# First, look for nested blocks, e.g.:
# <div>
# <div>
# tags for inner block must be indented.
# </div>
# </div>
#
# The outermost tags must start at the left margin for this to match, and
# the inner nested divs must be indented.
# We need to do this before the next, more liberal match, because the next
# match will start at the first `<div>` and stop at the first `</div>`.
text = self._strict_tag_block_re.sub(hash_html_block_sub, text)
# Now match more liberally, simply from `\n<tag>` to `</tag>\n`
text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)
# Special case just for <hr />. It was easier to make a special
# case than to make the other regex more complicated.
if "<hr" in text:
_hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
text = _hr_tag_re.sub(hash_html_block_sub, text)
# Special case for standalone HTML comments:
if "<!--" in text:
start = 0
while True:
# Delimiters for next comment block.
try:
start_idx = text.index("<!--", start)
except ValueError:
break
try:
end_idx = text.index("-->", start_idx) + 3
except ValueError:
break
# Start position for next comment block search.
start = end_idx
# Validate whitespace before comment.
if start_idx:
# - Up to `tab_width - 1` spaces before start_idx.
for i in range(self.tab_width - 1):
if text[start_idx - 1] != ' ':
break
start_idx -= 1
if start_idx == 0:
break
# - Must be preceded by 2 newlines or hit the start of
# the document.
if start_idx == 0:
pass
elif start_idx == 1 and text[0] == '\n':
start_idx = 0 # to match minute detail of Markdown.pl regex
elif text[start_idx-2:start_idx] == '\n\n':
pass
else:
break
# Validate whitespace after comment.
# - Any number of spaces and tabs.
while end_idx < len(text):
if text[end_idx] not in ' \t':
break
end_idx += 1
                # - Must be followed by 2 newlines or hit end of text.
if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
continue
# Escape and hash (must match `_hash_html_block_sub`).
html = text[start_idx:end_idx]
if raw and self.safe_mode:
html = self._sanitize_html(html)
key = _hash_text(html)
self.html_blocks[key] = html
text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]
if "xml" in self.extras:
# Treat XML processing instructions and namespaced one-liner
# tags as if they were block HTML tags. E.g., if standalone
# (i.e. are their own paragraph), the following do not get
# wrapped in a <p> tag:
# <?foo bar?>
#
# <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
_xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
text = _xml_oneliner_re.sub(hash_html_block_sub, text)
return text
def _strip_link_definitions(self, text):
# Strips link definitions from text, stores the URLs and titles in
# hash references.
less_than_tab = self.tab_width - 1
# Link defs are in the form:
# [id]: url "optional title"
_link_def_re = re.compile(r"""
^[ ]{0,%d}\[(.+)\]: # id = \1
[ \t]*
\n? # maybe *one* newline
[ \t]*
<?(.+?)>? # url = \2
[ \t]*
(?:
\n? # maybe one newline
[ \t]*
(?<=\s) # lookbehind for whitespace
['"(]
([^\n]*) # title = \3
['")]
[ \t]*
)? # title is optional
(?:\n+|\Z)
""" % less_than_tab, re.X | re.M | re.U)
return _link_def_re.sub(self._extract_link_def_sub, text)
def _extract_link_def_sub(self, match):
id, url, title = match.groups()
key = id.lower() # Link IDs are case-insensitive
self.urls[key] = self._encode_amps_and_angles(url)
if title:
self.titles[key] = title
return ""
def _extract_footnote_def_sub(self, match):
id, text = match.groups()
text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
normed_id = re.sub(r'\W', '-', id)
# Ensure footnote text ends with a couple newlines (for some
# block gamut matches).
self.footnotes[normed_id] = text + "\n\n"
return ""
def _strip_footnote_definitions(self, text):
"""A footnote definition looks like this:
[^note-id]: Text of the note.
May include one or more indented paragraphs.
Where,
- The 'note-id' can be pretty much anything, though typically it
is the number of the footnote.
- The first paragraph may start on the next line, like so:
[^note-id]:
Text of the note.
"""
less_than_tab = self.tab_width - 1
footnote_def_re = re.compile(r'''
^[ ]{0,%d}\[\^(.+)\]: # id = \1
[ \t]*
( # footnote text = \2
# First line need not start with the spaces.
(?:\s*.*\n+)
(?:
(?:[ ]{%d} | \t) # Subsequent lines must be indented.
.*\n+
)*
)
# Lookahead for non-space at line-start, or end of doc.
(?:(?=^[ ]{0,%d}\S)|\Z)
''' % (less_than_tab, self.tab_width, self.tab_width),
re.X | re.M)
return footnote_def_re.sub(self._extract_footnote_def_sub, text)
_hr_data = [
('*', re.compile(r"^[ ]{0,3}\*(.*?)$", re.M)),
('-', re.compile(r"^[ ]{0,3}\-(.*?)$", re.M)),
('_', re.compile(r"^[ ]{0,3}\_(.*?)$", re.M)),
]
def _run_block_gamut(self, text):
# These are all the transformations that form block-level
# tags like paragraphs, headers, and list items.
if "fenced-code-blocks" in self.extras:
text = self._do_fenced_code_blocks(text)
text = self._do_headers(text)
# Do Horizontal Rules:
# On the number of spaces in horizontal rules: The spec is fuzzy: "If
# you wish, you may use spaces between the hyphens or asterisks."
# Markdown.pl 1.0.1's hr regexes limit the number of spaces between the
# hr chars to one or two. We'll reproduce that limit here.
hr = "\n<hr"+self.empty_element_suffix+"\n"
for ch, regex in self._hr_data:
if ch in text:
for m in reversed(list(regex.finditer(text))):
tail = m.group(1).rstrip()
if not tail.strip(ch + ' ') and tail.count(" ") == 0:
start, end = m.span()
text = text[:start] + hr + text[end:]
text = self._do_lists(text)
if "pyshell" in self.extras:
text = self._prepare_pyshell_blocks(text)
if "wiki-tables" in self.extras:
text = self._do_wiki_tables(text)
text = self._do_code_blocks(text)
text = self._do_block_quotes(text)
# We already ran _HashHTMLBlocks() before, in Markdown(), but that
# was to escape raw HTML in the original Markdown source. This time,
# we're escaping the markup we've just created, so that we don't wrap
# <p> tags around block-level tags.
text = self._hash_html_blocks(text)
text = self._form_paragraphs(text)
return text
def _pyshell_block_sub(self, match):
lines = match.group(0).splitlines(0)
_dedentlines(lines)
indent = ' ' * self.tab_width
s = ('\n' # separate from possible cuddled paragraph
+ indent + ('\n'+indent).join(lines)
+ '\n\n')
return s
def _prepare_pyshell_blocks(self, text):
"""Ensure that Python interactive shell sessions are put in
code blocks -- even if not properly indented.
"""
if ">>>" not in text:
return text
less_than_tab = self.tab_width - 1
_pyshell_block_re = re.compile(r"""
^([ ]{0,%d})>>>[ ].*\n # first line
^(\1.*\S+.*\n)* # any number of subsequent lines
^\n # ends with a blank line
""" % less_than_tab, re.M | re.X)
return _pyshell_block_re.sub(self._pyshell_block_sub, text)
def _wiki_table_sub(self, match):
ttext = match.group(0).strip()
#print 'wiki table: %r' % match.group(0)
rows = []
for line in ttext.splitlines(0):
line = line.strip()[2:-2].strip()
row = [c.strip() for c in re.split(r'(?<!\\)\|\|', line)]
rows.append(row)
#pprint(rows)
hlines = ['<table>', '<tbody>']
for row in rows:
hrow = ['<tr>']
for cell in row:
hrow.append('<td>')
hrow.append(self._run_span_gamut(cell))
hrow.append('</td>')
hrow.append('</tr>')
hlines.append(''.join(hrow))
hlines += ['</tbody>', '</table>']
return '\n'.join(hlines) + '\n'
def _do_wiki_tables(self, text):
# Optimization.
if "||" not in text:
return text
less_than_tab = self.tab_width - 1
wiki_table_re = re.compile(r'''
(?:(?<=\n\n)|\A\n?) # leading blank line
^([ ]{0,%d})\|\|.+?\|\|[ ]*\n # first line
(^\1\|\|.+?\|\|\n)* # any number of subsequent lines
''' % less_than_tab, re.M | re.X)
return wiki_table_re.sub(self._wiki_table_sub, text)
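    # For example (illustrative), with the "wiki-tables" extra the line
    # "|| a || b ||" is turned into a row <tr><td>a</td><td>b</td></tr>
    # wrapped in <table><tbody>...</tbody></table> by _wiki_table_sub() above.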
def _run_span_gamut(self, text):
# These are all the transformations that occur *within* block-level
# tags like paragraphs, headers, and list items.
text = self._do_code_spans(text)
text = self._escape_special_chars(text)
# Process anchor and image tags.
text = self._do_links(text)
# Make links out of things like `<http://example.com/>`
# Must come after _do_links(), because you can use < and >
# delimiters in inline links like [this](<url>).
text = self._do_auto_links(text)
if "link-patterns" in self.extras:
text = self._do_link_patterns(text)
text = self._encode_amps_and_angles(text)
text = self._do_italics_and_bold(text)
if "smarty-pants" in self.extras:
text = self._do_smart_punctuation(text)
# Do hard breaks:
text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text)
return text
# "Sorta" because auto-links are identified as "tag" tokens.
_sorta_html_tokenize_re = re.compile(r"""
(
# tag
</?
(?:\w+) # tag name
(?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))* # attributes
\s*/?>
|
# auto-link (e.g., <http://www.activestate.com/>)
<\w+[^>]*>
|
<!--.*?--> # comment
|
<\?.*?\?> # processing instruction
)
""", re.X)
def _escape_special_chars(self, text):
# Python markdown note: the HTML tokenization here differs from
# that in Markdown.pl, hence the behaviour for subtle cases can
# differ (I believe the tokenizer here does a better job because
# it isn't susceptible to unmatched '<' and '>' in HTML tags).
# Note, however, that '>' is not allowed in an auto-link URL
# here.
escaped = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup:
# Within tags/HTML-comments/auto-links, encode * and _
# so they don't conflict with their use in Markdown for
# italics and strong. We're replacing each such
# character with its corresponding MD5 checksum value;
# this is likely overkill, but it should prevent us from
# colliding with the escape values by accident.
escaped.append(token.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
escaped.append(self._encode_backslash_escapes(token))
is_html_markup = not is_html_markup
return ''.join(escaped)
def _hash_html_spans(self, text):
# Used for safe_mode.
def _is_auto_link(s):
if ':' in s and self._auto_link_re.match(s):
return True
elif '@' in s and self._auto_email_link_re.match(s):
return True
return False
tokens = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup and not _is_auto_link(token):
sanitized = self._sanitize_html(token)
key = _hash_text(sanitized)
self.html_spans[key] = sanitized
tokens.append(key)
else:
tokens.append(token)
is_html_markup = not is_html_markup
return ''.join(tokens)
def _unhash_html_spans(self, text):
for key, sanitized in list(self.html_spans.items()):
text = text.replace(key, sanitized)
return text
def _sanitize_html(self, s):
if self.safe_mode == "replace":
return self.html_removed_text
elif self.safe_mode == "escape":
replacements = [
('&', '&'),
('<', '<'),
('>', '>'),
]
for before, after in replacements:
s = s.replace(before, after)
return s
else:
raise MarkdownError("invalid value for 'safe_mode': %r (must be "
"'escape' or 'replace')" % self.safe_mode)
_tail_of_inline_link_re = re.compile(r'''
# Match tail of: [text](/url/) or [text](/url/ "title")
\( # literal paren
[ \t]*
(?P<url> # \1
<.*?>
|
.*?
)
[ \t]*
( # \2
(['"]) # quote char = \3
(?P<title>.*?)
\3 # matching quote
)? # title is optional
\)
''', re.X | re.S)
_tail_of_reference_link_re = re.compile(r'''
# Match tail of: [text][id]
[ ]? # one optional space
(?:\n[ ]*)? # one optional newline followed by spaces
\[
(?P<id>.*?)
\]
''', re.X | re.S)
def _do_links(self, text):
"""Turn Markdown link shortcuts into XHTML <a> and <img> tags.
This is a combination of Markdown.pl's _DoAnchors() and
_DoImages(). They are done together because that simplified the
approach. It was necessary to use a different approach than
Markdown.pl because of the lack of atomic matching support in
Python's regex engine used in $g_nested_brackets.
"""
MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24
# `anchor_allowed_pos` is used to support img links inside
# anchors, but not anchors inside anchors. An anchor's start
# pos must be `>= anchor_allowed_pos`.
anchor_allowed_pos = 0
curr_pos = 0
while True: # Handle the next link.
# The next '[' is the start of:
# - an inline anchor: [text](url "title")
# - a reference anchor: [text][id]
# - an inline img: 
# - a reference img: ![text][id]
# - a footnote ref: [^id]
# (Only if 'footnotes' extra enabled)
# - a footnote defn: [^id]: ...
# (Only if 'footnotes' extra enabled) These have already
# been stripped in _strip_footnote_definitions() so no
# need to watch for them.
# - a link definition: [id]: url "title"
# These have already been stripped in
# _strip_link_definitions() so no need to watch for them.
# - not markup: [...anything else...
try:
start_idx = text.index('[', curr_pos)
except ValueError:
break
text_length = len(text)
# Find the matching closing ']'.
# Markdown.pl allows *matching* brackets in link text so we
# will here too. Markdown.pl *doesn't* currently allow
# matching brackets in img alt text -- we'll differ in that
# regard.
bracket_depth = 0
for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,
text_length)):
ch = text[p]
if ch == ']':
bracket_depth -= 1
if bracket_depth < 0:
break
elif ch == '[':
bracket_depth += 1
else:
# Closing bracket not found within sentinel length.
# This isn't markup.
curr_pos = start_idx + 1
continue
link_text = text[start_idx+1:p]
# Possibly a footnote ref?
if "footnotes" in self.extras and link_text.startswith("^"):
normed_id = re.sub(r'\W', '-', link_text[1:])
if normed_id in self.footnotes:
self.footnote_ids.append(normed_id)
result = '<sup class="footnote-ref" id="fnref-%s">' \
'<a href="#fn-%s">%s</a></sup>' \
% (normed_id, normed_id, len(self.footnote_ids))
text = text[:start_idx] + result + text[p+1:]
else:
# This id isn't defined, leave the markup alone.
curr_pos = p+1
continue
# Now determine what this is by the remainder.
p += 1
if p == text_length:
return text
# Inline anchor or img?
if text[p] == '(': # attempt at perf improvement
match = self._tail_of_inline_link_re.match(text, p)
if match:
# Handle an inline anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
url, title = match.group("url"), match.group("title")
if url and url[0] == '<':
url = url[1:-1] # '<url>' -> 'url'
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
if title:
title_str = ' title="%s"' % (
_xml_escape_attr(title)
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
title_str = ''
if is_img:
result = '<img src="%s" alt="%s"%s%s' \
% (url.replace('"', '"'),
_xml_escape_attr(link_text),
title_str, self.empty_element_suffix)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
elif start_idx >= anchor_allowed_pos:
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
continue
# Reference anchor or img?
else:
match = self._tail_of_reference_link_re.match(text, p)
if match:
# Handle a reference-style anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
link_id = match.group("id").lower()
if not link_id:
link_id = link_text.lower() # for links like [this][]
if link_id in self.urls:
url = self.urls[link_id]
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title = self.titles.get(link_id)
if title:
title = _xml_escape_attr(title) \
.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title_str = ' title="%s"' % title
else:
title_str = ''
if is_img:
result = '<img src="%s" alt="%s"%s%s' \
% (url.replace('"', '"'),
link_text.replace('"', '"'),
title_str, self.empty_element_suffix)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
elif start_idx >= anchor_allowed_pos:
                            result_head = '<a href="%s"%s>' % (url, title_str)
                            result = '%s%s</a>' % (result_head, link_text)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
else:
# This id isn't defined, leave the markup alone.
curr_pos = match.end()
continue
# Otherwise, it isn't markup.
curr_pos = start_idx + 1
return text
def header_id_from_text(self, text, prefix, n):
"""Generate a header id attribute value from the given header
HTML content.
This is only called if the "header-ids" extra is enabled.
Subclasses may override this for different header ids.
@param text {str} The text of the header tag
@param prefix {str} The requested prefix for header ids. This is the
value of the "header-ids" extra key, if any. Otherwise, None.
@param n {int} The <hN> tag number, i.e. `1` for an <h1> tag.
@returns {str} The value for the header tag's "id" attribute. Return
None to not have an id attribute and to exclude this header from
the TOC (if the "toc" extra is specified).
"""
header_id = _slugify(text)
if prefix and isinstance(prefix, base_string_type):
header_id = prefix + '-' + header_id
if header_id in self._count_from_header_id:
self._count_from_header_id[header_id] += 1
header_id += '-%s' % self._count_from_header_id[header_id]
else:
self._count_from_header_id[header_id] = 1
return header_id
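    # Hedged example (added; exact ids depend on _slugify below): a header
    # "My Section!" with extras={"header-ids": "doc"} yields the id
    # "doc-my-section"; a second header with the same text would become
    # "doc-my-section-2" via the _count_from_header_id bookkeeping above.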
_toc = None
def _toc_add_entry(self, level, id, name):
if self._toc is None:
self._toc = []
self._toc.append((level, id, self._unescape_special_chars(name)))
_setext_h_re = re.compile(r'^(.+)[ \t]*\n(=+|-+)[ \t]*\n+', re.M)
def _setext_h_sub(self, match):
n = {"=": 1, "-": 2}[match.group(2)[0]]
demote_headers = self.extras.get("demote-headers")
if demote_headers:
n = min(n + demote_headers, 6)
header_id_attr = ""
if "header-ids" in self.extras:
header_id = self.header_id_from_text(match.group(1),
self.extras["header-ids"], n)
if header_id:
header_id_attr = ' id="%s"' % header_id
html = self._run_span_gamut(match.group(1))
if "toc" in self.extras and header_id:
self._toc_add_entry(n, header_id, html)
return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
_atx_h_re = re.compile(r'''
^(\#{1,6}) # \1 = string of #'s
[ \t]+
(.+?) # \2 = Header text
[ \t]*
(?<!\\) # ensure not an escaped trailing '#'
\#* # optional closing #'s (not counted)
\n+
''', re.X | re.M)
def _atx_h_sub(self, match):
n = len(match.group(1))
demote_headers = self.extras.get("demote-headers")
if demote_headers:
n = min(n + demote_headers, 6)
header_id_attr = ""
if "header-ids" in self.extras:
header_id = self.header_id_from_text(match.group(2),
self.extras["header-ids"], n)
if header_id:
header_id_attr = ' id="%s"' % header_id
html = self._run_span_gamut(match.group(2))
if "toc" in self.extras and header_id:
self._toc_add_entry(n, header_id, html)
return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
def _do_headers(self, text):
# Setext-style headers:
# Header 1
# ========
#
# Header 2
# --------
text = self._setext_h_re.sub(self._setext_h_sub, text)
# atx-style headers:
# # Header 1
# ## Header 2
# ## Header 2 with closing hashes ##
# ...
# ###### Header 6
text = self._atx_h_re.sub(self._atx_h_sub, text)
return text
_marker_ul_chars = '*+-'
_marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
_marker_ul = '(?:[%s])' % _marker_ul_chars
_marker_ol = r'(?:\d+\.)'
def _list_sub(self, match):
lst = match.group(1)
        lst_type = "ul" if match.group(3) in self._marker_ul_chars else "ol"
result = self._process_list_items(lst)
if self.list_level:
return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)
else:
return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type)
def _do_lists(self, text):
# Form HTML ordered (numbered) and unordered (bulleted) lists.
# Iterate over each *non-overlapping* list match.
pos = 0
while True:
# Find the *first* hit for either list style (ul or ol). We
# match ul and ol separately to avoid adjacent lists of different
# types running into each other (see issue #16).
hits = []
for marker_pat in (self._marker_ul, self._marker_ol):
less_than_tab = self.tab_width - 1
whole_list = r'''
( # \1 = whole list
( # \2
[ ]{0,%d}
(%s) # \3 = first list item marker
[ \t]+
(?!\ *\3\ ) # '- - - ...' isn't a list. See 'not_quite_a_list' test case.
)
(?:.+?)
( # \4
\Z
|
\n{2,}
(?=\S)
(?! # Negative lookahead for another list item marker
[ \t]*
%s[ \t]+
)
)
)
''' % (less_than_tab, marker_pat, marker_pat)
if self.list_level: # sub-list
list_re = re.compile("^"+whole_list, re.X | re.M | re.S)
else:
list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list,
re.X | re.M | re.S)
match = list_re.search(text, pos)
if match:
hits.append((match.start(), match))
if not hits:
break
hits.sort()
match = hits[0][1]
start, end = match.span()
text = text[:start] + self._list_sub(match) + text[end:]
pos = end
return text
_list_item_re = re.compile(r'''
(\n)? # leading line = \1
(^[ \t]*) # leading whitespace = \2
(?P<marker>%s) [ \t]+ # list marker = \3
((?:.+?) # list item text = \4
(\n{1,2})) # eols = \5
(?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+))
''' % (_marker_any, _marker_any),
re.M | re.X | re.S)
_last_li_endswith_two_eols = False
def _list_item_sub(self, match):
item = match.group(4)
leading_line = match.group(1)
leading_space = match.group(2)
if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
item = self._run_block_gamut(self._outdent(item))
else:
# Recursion for sub-lists:
item = self._do_lists(self._outdent(item))
if item.endswith('\n'):
item = item[:-1]
item = self._run_span_gamut(item)
self._last_li_endswith_two_eols = (len(match.group(5)) == 2)
return "<li>%s</li>\n" % item
def _process_list_items(self, list_str):
# Process the contents of a single ordered or unordered list,
# splitting it into individual list items.
# The $g_list_level global keeps track of when we're inside a list.
# Each time we enter a list, we increment it; when we leave a list,
# we decrement. If it's zero, we're not in a list anymore.
#
# We do this because when we're not inside a list, we want to treat
# something like this:
#
# I recommend upgrading to version
# 8. Oops, now this line is treated
# as a sub-list.
#
# As a single paragraph, despite the fact that the second line starts
# with a digit-period-space sequence.
#
# Whereas when we're inside a list (or sub-list), that line will be
# treated as the start of a sub-list. What a kludge, huh? This is
# an aspect of Markdown's syntax that's hard to parse perfectly
# without resorting to mind-reading. Perhaps the solution is to
# change the syntax rules such that sub-lists must start with a
# starting cardinal number; e.g. "1." or "a.".
self.list_level += 1
self._last_li_endswith_two_eols = False
list_str = list_str.rstrip('\n') + '\n'
list_str = self._list_item_re.sub(self._list_item_sub, list_str)
self.list_level -= 1
return list_str
def _get_pygments_lexer(self, lexer_name):
try:
from pygments import lexers, util
except ImportError:
return None
try:
return lexers.get_lexer_by_name(lexer_name)
except util.ClassNotFound:
return None
def _color_with_pygments(self, codeblock, lexer, **formatter_opts):
import pygments
import pygments.formatters
class HtmlCodeFormatter(pygments.formatters.HtmlFormatter):
def _wrap_code(self, inner):
"""A function for use in a Pygments Formatter which
wraps in <code> tags.
"""
yield 0, "<code>"
for tup in inner:
yield tup
yield 0, "</code>"
def wrap(self, source, outfile):
"""Return the source with a code, pre, and div."""
return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
formatter_opts.setdefault("cssclass", "codehilite")
formatter = HtmlCodeFormatter(**formatter_opts)
return pygments.highlight(codeblock, lexer, formatter)
def _code_block_sub(self, match, is_fenced_code_block=False):
lexer_name = None
if is_fenced_code_block:
lexer_name = match.group(1)
if lexer_name:
formatter_opts = self.extras['fenced-code-blocks'] or {}
codeblock = match.group(2)
codeblock = codeblock[:-1] # drop one trailing newline
else:
codeblock = match.group(1)
codeblock = self._outdent(codeblock)
codeblock = self._detab(codeblock)
codeblock = codeblock.lstrip('\n') # trim leading newlines
codeblock = codeblock.rstrip() # trim trailing whitespace
# Note: "code-color" extra is DEPRECATED.
if "code-color" in self.extras and codeblock.startswith(":::"):
lexer_name, rest = codeblock.split('\n', 1)
lexer_name = lexer_name[3:].strip()
codeblock = rest.lstrip("\n") # Remove lexer declaration line.
formatter_opts = self.extras['code-color'] or {}
if lexer_name:
lexer = self._get_pygments_lexer(lexer_name)
if lexer:
colored = self._color_with_pygments(codeblock, lexer,
**formatter_opts)
return "\n\n%s\n\n" % colored
codeblock = self._encode_code(codeblock)
pre_class_str = self._html_class_str_from_tag("pre")
code_class_str = self._html_class_str_from_tag("code")
return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % (
pre_class_str, code_class_str, codeblock)
def _html_class_str_from_tag(self, tag):
"""Get the appropriate ' class="..."' string (note the leading
space), if any, for the given tag.
"""
if "html-classes" not in self.extras:
return ""
try:
html_classes_from_tag = self.extras["html-classes"]
except TypeError:
return ""
else:
if tag in html_classes_from_tag:
return ' class="%s"' % html_classes_from_tag[tag]
return ""
def _do_code_blocks(self, text):
"""Process Markdown `<pre><code>` blocks."""
code_block_re = re.compile(r'''
(?:\n\n|\A\n?)
( # $1 = the code block -- one or more lines, starting with a space/tab
(?:
(?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces
.*\n+
)+
)
((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc
''' % (self.tab_width, self.tab_width),
re.M | re.X)
return code_block_re.sub(self._code_block_sub, text)
_fenced_code_block_re = re.compile(r'''
(?:\n\n|\A\n?)
^```([\w+-]+)?[ \t]*\n # opening fence, $1 = optional lang
(.*?) # $2 = code block content
^```[ \t]*\n # closing fence
''', re.M | re.X | re.S)
def _fenced_code_block_sub(self, match):
        return self._code_block_sub(match, is_fenced_code_block=True)
def _do_fenced_code_blocks(self, text):
"""Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)
# Rules for a code span:
# - backslash escapes are not interpreted in a code span
    # - to include one backtick or a run of backticks, the delimiters must
    #   be a longer run of backticks
# - cannot start or end a code span with a backtick; pad with a
# space and that space will be removed in the emitted HTML
# See `test/tm-cases/escapes.text` for a number of edge-case
# examples.
_code_span_re = re.compile(r'''
(?<!\\)
(`+) # \1 = Opening run of `
(?!`) # See Note A test/tm-cases/escapes.text
(.+?) # \2 = The code block
(?<!`)
\1 # Matching closer
(?!`)
''', re.X | re.S)
def _code_span_sub(self, match):
c = match.group(2).strip(" \t")
c = self._encode_code(c)
return "<code>%s</code>" % c
def _do_code_spans(self, text):
# * Backtick quotes are used for <code></code> spans.
#
# * You can use multiple backticks as the delimiters if you want to
# include literal backticks in the code span. So, this input:
#
# Just type ``foo `bar` baz`` at the prompt.
#
# Will translate to:
#
# <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
#
# There's no arbitrary limit to the number of backticks you
        # can use as delimiters. If you need three consecutive backticks
# in your code, use four for delimiters, etc.
#
# * You can use spaces to get literal backticks at the edges:
#
# ... type `` `bar` `` ...
#
# Turns to:
#
# ... type <code>`bar`</code> ...
return self._code_span_re.sub(self._code_span_sub, text)
def _encode_code(self, text):
"""Encode/escape certain characters inside Markdown code runs.
The point is that in code, these characters are literals,
and lose their special Markdown meanings.
"""
replacements = [
# Encode all ampersands; HTML entities are not
# entities within a Markdown code span.
('&', '&'),
# Do the angle bracket song and dance:
('<', '<'),
('>', '>'),
]
for before, after in replacements:
text = text.replace(before, after)
hashed = _hash_text(text)
self._escape_table[text] = hashed
return hashed
_strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
_em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
_code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
_code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
def _do_italics_and_bold(self, text):
# <strong> must go first:
if "code-friendly" in self.extras:
text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
else:
text = self._strong_re.sub(r"<strong>\2</strong>", text)
text = self._em_re.sub(r"<em>\2</em>", text)
return text
# "smarty-pants" extra: Very liberal in interpreting a single prime as an
# apostrophe; e.g. ignores the fact that "round", "bout", "twer", and
# "twixt" can be written without an initial apostrophe. This is fine because
# using scare quotes (single quotation marks) is rare.
_apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))")
_contractions = ["tis", "twas", "twer", "neath", "o", "n",
"round", "bout", "twixt", "nuff", "fraid", "sup"]
def _do_smart_contractions(self, text):
text = self._apostrophe_year_re.sub(r"’\1", text)
for c in self._contractions:
text = text.replace("'%s" % c, "’%s" % c)
text = text.replace("'%s" % c.capitalize(),
"’%s" % c.capitalize())
return text
# Substitute double-quotes before single-quotes.
_opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)")
_opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)')
_closing_single_quote_re = re.compile(r"(?<=\S)'")
_closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))')
def _do_smart_punctuation(self, text):
"""Fancifies 'single quotes', "double quotes", and apostrophes.
Converts --, ---, and ... into en dashes, em dashes, and ellipses.
Inspiration is: <http://daringfireball.net/projects/smartypants/>
See "test/tm-cases/smarty_pants.text" for a full discussion of the
support here and
<http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
discussion of some diversion from the original SmartyPants.
"""
if "'" in text: # guard for perf
text = self._do_smart_contractions(text)
text = self._opening_single_quote_re.sub("‘", text)
text = self._closing_single_quote_re.sub("’", text)
if '"' in text: # guard for perf
text = self._opening_double_quote_re.sub("“", text)
text = self._closing_double_quote_re.sub("”", text)
text = text.replace("---", "—")
text = text.replace("--", "–")
text = text.replace("...", "…")
text = text.replace(" . . . ", "…")
text = text.replace(". . .", "…")
return text
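    # Illustrative example (added; not in the original source). The input
    #
    #   "Hello" -- 'tis done...
    #
    # comes out of this pass as
    #
    #   “Hello” – ’tis done…
    #
    # and "---" would map to an em dash (—).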
_block_quote_re = re.compile(r'''
( # Wrap whole match in \1
(
^[ \t]*>[ \t]? # '>' at the start of a line
.+\n # rest of the first line
(.+\n)* # subsequent consecutive lines
\n* # blanks
)+
)
''', re.M | re.X)
    _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M)
_html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
def _dedent_two_spaces_sub(self, match):
return re.sub(r'(?m)^ ', '', match.group(1))
def _block_quote_sub(self, match):
bq = match.group(1)
bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting
bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines
bq = self._run_block_gamut(bq) # recurse
bq = re.sub('(?m)^', ' ', bq)
# These leading spaces screw with <pre> content, so we need to fix that:
bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
return "<blockquote>\n%s\n</blockquote>\n\n" % bq
def _do_block_quotes(self, text):
if '>' not in text:
return text
return self._block_quote_re.sub(self._block_quote_sub, text)
def _form_paragraphs(self, text):
# Strip leading and trailing lines:
text = text.strip('\n')
# Wrap <p> tags.
grafs = []
for i, graf in enumerate(re.split(r"\n{2,}", text)):
if graf in self.html_blocks:
# Unhashify HTML blocks
grafs.append(self.html_blocks[graf])
else:
cuddled_list = None
if "cuddled-lists" in self.extras:
# Need to put back trailing '\n' for `_list_item_re`
# match at the end of the paragraph.
li = self._list_item_re.search(graf + '\n')
# Two of the same list marker in this paragraph: a likely
# candidate for a list cuddled to preceding paragraph
# text (issue 33). Note the `[-1]` is a quick way to
# consider numeric bullets (e.g. "1." and "2.") to be
# equal.
if (li and len(li.group(2)) <= 3 and li.group("next_marker")
and li.group("marker")[-1] == li.group("next_marker")[-1]):
start = li.start()
cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>")
graf = graf[:start]
# Wrap <p> tags.
graf = self._run_span_gamut(graf)
grafs.append("<p>" + graf.lstrip(" \t") + "</p>")
if cuddled_list:
grafs.append(cuddled_list)
return "\n\n".join(grafs)
def _add_footnotes(self, text):
if self.footnotes:
footer = [
'<div class="footnotes">',
'<hr' + self.empty_element_suffix,
'<ol>',
]
for i, id in enumerate(self.footnote_ids):
if i != 0:
footer.append('')
footer.append('<li id="fn-%s">' % id)
footer.append(self._run_block_gamut(self.footnotes[id]))
backlink = ('<a href="#fnref-%s" '
'class="footnoteBackLink" '
'title="Jump back to footnote %d in the text.">'
'↩</a>' % (id, i+1))
if footer[-1].endswith("</p>"):
footer[-1] = footer[-1][:-len("</p>")] \
+ ' ' + backlink + "</p>"
else:
footer.append("\n<p>%s</p>" % backlink)
footer.append('</li>')
footer.append('</ol>')
footer.append('</div>')
return text + '\n\n' + '\n'.join(footer)
else:
return text
# Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
# http://bumppo.net/projects/amputator/
_ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
_naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
_naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I)
def _encode_amps_and_angles(self, text):
# Smart processing for ampersands and angle brackets that need
# to be encoded.
text = self._ampersand_re.sub('&', text)
# Encode naked <'s
text = self._naked_lt_re.sub('<', text)
# Encode naked >'s
# Note: Other markdown implementations (e.g. Markdown.pl, PHP
# Markdown) don't do this.
text = self._naked_gt_re.sub('>', text)
return text
def _encode_backslash_escapes(self, text):
for ch, escape in list(self._escape_table.items()):
text = text.replace("\\"+ch, escape)
return text
_auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
def _auto_link_sub(self, match):
g1 = match.group(1)
return '<a href="%s">%s</a>' % (g1, g1)
_auto_email_link_re = re.compile(r"""
<
(?:mailto:)?
(
[-.\w]+
\@
[-\w]+(\.[-\w]+)*\.[a-z]+
)
>
""", re.I | re.X | re.U)
def _auto_email_link_sub(self, match):
return self._encode_email_address(
self._unescape_special_chars(match.group(1)))
def _do_auto_links(self, text):
text = self._auto_link_re.sub(self._auto_link_sub, text)
text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
return text
def _encode_email_address(self, addr):
# Input: an email address, e.g. "[email protected]"
#
# Output: the email address as a mailto link, with each character
# of the address encoded as either a decimal or hex entity, in
# the hopes of foiling most address harvesting spam bots. E.g.:
#
# <a href="mailto:foo@e
# xample.com">foo
# @example.com</a>
#
# Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
# mailing list: <http://tinyurl.com/yu7ue>
chars = [_xml_encode_email_char_at_random(ch)
for ch in "mailto:" + addr]
# Strip the mailto: from the visible part.
addr = '<a href="%s">%s</a>' \
% (''.join(chars), ''.join(chars[7:]))
return addr
def _do_link_patterns(self, text):
"""Caveat emptor: there isn't much guarding against link
patterns being formed inside other standard Markdown links, e.g.
inside a [link def][like this].
Dev Notes: *Could* consider prefixing regexes with a negative
lookbehind assertion to attempt to guard against this.
"""
link_from_hash = {}
for regex, repl in self.link_patterns:
replacements = []
for match in regex.finditer(text):
if hasattr(repl, "__call__"):
href = repl(match)
else:
href = match.expand(repl)
replacements.append((match.span(), href))
for (start, end), href in reversed(replacements):
escaped_href = (
href.replace('"', '"') # b/c of attr quote
# To avoid markdown <em> and <strong>:
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
hash = _hash_text(link)
link_from_hash[hash] = link
text = text[:start] + hash + text[end:]
for hash, link in list(link_from_hash.items()):
text = text.replace(hash, link)
return text
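    # Hedged usage sketch (added; the URL below is hypothetical): the
    # "link-patterns" extra expects `link_patterns` to be a list of
    # (compiled_regex, replacement) pairs, e.g.
    #
    #   link_patterns = [(re.compile(r"\bissue #(\d+)\b", re.I),
    #                     r"https://example.com/issues/\1")]
    #   markdown("see issue #42", extras=["link-patterns"],
    #            link_patterns=link_patterns)
    #
    # which turns "issue #42" into a link to .../issues/42.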
def _unescape_special_chars(self, text):
# Swap back in all the special characters we've hidden.
for ch, hash in list(self._escape_table.items()):
text = text.replace(hash, ch)
return text
def _outdent(self, text):
# Remove one level of line-leading tabs or spaces
return self._outdent_re.sub('', text)
class MarkdownWithExtras(Markdown):
"""A markdowner class that enables most extras:
- footnotes
- code-color (only has effect if 'pygments' Python module on path)
These are not included:
- pyshell (specific to Python-related documenting)
- code-friendly (because it *disables* part of the syntax)
- link-patterns (because you need to specify some actual
link-patterns anyway)
"""
extras = ["footnotes", "code-color"]
#---- internal support functions
class UnicodeWithAttrs(unicode):
"""A subclass of unicode used for the return value of conversion to
possibly attach some attributes. E.g. the "toc_html" attribute when
the "toc" extra is used.
"""
metadata = None
_toc = None
def toc_html(self):
"""Return the HTML for the current TOC.
This expects the `_toc` attribute to have been set on this instance.
"""
if self._toc is None:
return None
def indent():
return ' ' * (len(h_stack) - 1)
lines = []
h_stack = [0] # stack of header-level numbers
for level, id, name in self._toc:
if level > h_stack[-1]:
lines.append("%s<ul>" % indent())
h_stack.append(level)
elif level == h_stack[-1]:
lines[-1] += "</li>"
else:
while level < h_stack[-1]:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul></li>" % indent())
lines.append('%s<li><a href="#%s">%s</a>' % (
indent(), id, name))
while len(h_stack) > 1:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul>" % indent())
return '\n'.join(lines) + '\n'
toc_html = property(toc_html)
## {{{ http://code.activestate.com/recipes/577257/ (r1)
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
def _slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
From Django's "django/template/defaultfilters.py".
"""
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode()
value = _slugify_strip_re.sub('', value).strip().lower()
return _slugify_hyphenate_re.sub('-', value)
## end of http://code.activestate.com/recipes/577257/ }}}
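# Doctest-style illustration (added; not part of the recipe above):
#
#   >>> _slugify("Hello, World!")
#   'hello-world'
#
# Punctuation is stripped, whitespace collapses to single hyphens, and the
# result is lower-cased.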
# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
def _curry(*args, **kwargs):
function, args = args[0], args[1:]
def result(*rest, **kwrest):
combined = kwargs.copy()
combined.update(kwrest)
return function(*args + rest, **combined)
return result
# Recipe: regex_from_encoded_pattern (1.0)
def _regex_from_encoded_pattern(s):
"""'foo' -> re.compile(re.escape('foo'))
'/foo/' -> re.compile('foo')
'/foo/i' -> re.compile('foo', re.I)
"""
if s.startswith('/') and s.rfind('/') != 0:
# Parse it: /PATTERN/FLAGS
idx = s.rfind('/')
pattern, flags_str = s[1:idx], s[idx+1:]
flag_from_char = {
"i": re.IGNORECASE,
"l": re.LOCALE,
"s": re.DOTALL,
"m": re.MULTILINE,
"u": re.UNICODE,
}
flags = 0
for char in flags_str:
try:
flags |= flag_from_char[char]
except KeyError:
raise ValueError("unsupported regex flag: '%s' in '%s' "
"(must be one of '%s')"
% (char, s, ''.join(list(flag_from_char.keys()))))
return re.compile(s[1:idx], flags)
else: # not an encoded regex
return re.compile(re.escape(s))
# Recipe: dedent (0.1.2)
def _dedentlines(lines, tabsize=8, skip_first_line=False):
"""_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
"lines" is a list of lines to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
Same as dedent() except operates on a sequence of lines. Note: the
lines list is modified **in-place**.
"""
DEBUG = False
if DEBUG:
print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
% (tabsize, skip_first_line))
indents = []
margin = None
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
indent = 0
for ch in line:
if ch == ' ':
indent += 1
elif ch == '\t':
indent += tabsize - (indent % tabsize)
elif ch in '\r\n':
continue # skip all-whitespace lines
else:
break
else:
continue # skip all-whitespace lines
if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if DEBUG: print("dedent: margin=%r" % margin)
if margin is not None and margin > 0:
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
removed = 0
for j, ch in enumerate(line):
if ch == ' ':
removed += 1
elif ch == '\t':
removed += tabsize - (removed % tabsize)
elif ch in '\r\n':
if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
lines[i] = lines[i][j:]
break
else:
raise ValueError("unexpected non-whitespace char %r in "
"line %r while removing %d-space margin"
% (ch, line, margin))
if DEBUG:
print("dedent: %r: %r -> removed %d/%d"\
% (line, ch, removed, margin))
if removed == margin:
lines[i] = lines[i][j+1:]
break
elif removed > margin:
lines[i] = ' '*(removed-margin) + lines[i][j+1:]
break
else:
if removed:
lines[i] = lines[i][removed:]
return lines
def _dedent(text, tabsize=8, skip_first_line=False):
"""_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
"text" is the text to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
textwrap.dedent(s), but don't expand tabs to spaces
"""
lines = text.splitlines(1)
_dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
return ''.join(lines)
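# Illustrative example (added; not part of the original recipe):
#
#   >>> _dedent("    a\n      b\n")
#   'a\n  b\n'
#
# The common four-space margin is removed while deeper indentation is kept;
# tabs are measured using `tabsize` rather than expanded.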
class _memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
http://wiki.python.org/moin/PythonDecoratorLibrary
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
self.cache[args] = value = self.func(*args)
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
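# Hedged usage sketch (added; the helper name below is hypothetical):
# _memoized is applied further down to cache the tab-width-specific regexes,
# in the same spirit as
#
#   @_memoized
#   def _regex_for(tab_width):
#       return re.compile(r"[ ]{%d}" % tab_width)
#
# Repeated calls with the same hashable arguments return the cached value;
# unhashable arguments (e.g. a list) fall through to a normal call.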
def _xml_oneliner_re_from_tab_width(tab_width):
"""Standalone XML processing instruction regex."""
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in $1
[ ]{0,%d}
(?:
<\?\w+\b\s+.*?\?> # XML processing instruction
|
<\w+:\w+\b\s+.*?/> # namespaced single tag
)
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
def _hr_tag_re_from_tab_width(tab_width):
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in \1
[ ]{0,%d}
<(hr) # start tag = \2
\b # word break
([^<>])*? #
/?> # the matching end tag
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
def _xml_escape_attr(attr, skip_single_quote=True):
"""Escape the given string for use in an HTML/XML tag attribute.
By default this doesn't bother with escaping `'` to `'`, presuming that
the tag attribute is surrounded by double quotes.
"""
escaped = (attr
.replace('&', '&')
.replace('"', '"')
.replace('<', '<')
.replace('>', '>'))
if not skip_single_quote:
escaped = escaped.replace("'", "'")
return escaped
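# Illustrative example (added):
#
#   _xml_escape_attr('say "hi" & <bye>')
#
# returns 'say "hi" & <bye>'; single quotes are left
# alone unless skip_single_quote=False is passed.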
def _xml_encode_email_char_at_random(ch):
r = random()
# Roughly 10% raw, 45% hex, 45% dec.
# '@' *must* be encoded. I [John Gruber] insist.
# Issue 26: '_' must be encoded.
if r > 0.9 and ch not in "@_":
return ch
elif r < 0.45:
# The [1:] is to drop leading '0': 0x63 -> x63
return '&#%s;' % hex(ord(ch))[1:]
else:
return '&#%s;' % ord(ch)
#---- mainline
class _NoReflowFormatter(optparse.IndentedHelpFormatter):
"""An optparse formatter that does NOT reflow the description."""
def format_description(self, description):
return description or ""
def _test():
import doctest
doctest.testmod()
def main(argv=None):
if argv is None:
argv = sys.argv
if not logging.root.handlers:
logging.basicConfig()
usage = "usage: %prog [PATHS...]"
version = "%prog "+__version__
parser = optparse.OptionParser(prog="markdown2", usage=usage,
version=version, description=cmdln_desc,
formatter=_NoReflowFormatter())
parser.add_option("-v", "--verbose", dest="log_level",
action="store_const", const=logging.DEBUG,
help="more verbose output")
parser.add_option("--encoding",
help="specify encoding of text content")
parser.add_option("--html4tags", action="store_true", default=False,
help="use HTML 4 style for empty element tags")
parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
help="sanitize literal HTML: 'escape' escapes "
"HTML meta chars, 'replace' replaces with an "
"[HTML_REMOVED] note")
parser.add_option("-x", "--extras", action="append",
help="Turn on specific extra features (not part of "
"the core Markdown spec). See above.")
parser.add_option("--use-file-vars",
help="Look for and use Emacs-style 'markdown-extras' "
"file var to turn on extras. See "
"<https://github.com/trentm/python-markdown2/wiki/Extras>")
parser.add_option("--link-patterns-file",
help="path to a link pattern file")
parser.add_option("--self-test", action="store_true",
help="run internal self-tests (some doctests)")
parser.add_option("--compare", action="store_true",
help="run against Markdown.pl as well (for testing)")
parser.set_defaults(log_level=logging.INFO, compare=False,
encoding="utf-8", safe_mode=None, use_file_vars=False)
opts, paths = parser.parse_args()
log.setLevel(opts.log_level)
if opts.self_test:
return _test()
if opts.extras:
extras = {}
for s in opts.extras:
splitter = re.compile("[,;: ]+")
for e in splitter.split(s):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
extras[ename] = earg
else:
extras = None
if opts.link_patterns_file:
link_patterns = []
f = open(opts.link_patterns_file)
try:
for i, line in enumerate(f.readlines()):
if not line.strip(): continue
if line.lstrip().startswith("#"): continue
try:
pat, href = line.rstrip().rsplit(None, 1)
except ValueError:
raise MarkdownError("%s:%d: invalid link pattern line: %r"
% (opts.link_patterns_file, i+1, line))
link_patterns.append(
(_regex_from_encoded_pattern(pat), href))
finally:
f.close()
else:
link_patterns = None
from os.path import join, dirname, abspath, exists
markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
"Markdown.pl")
if not paths:
paths = ['-']
for path in paths:
if path == '-':
text = sys.stdin.read()
else:
fp = codecs.open(path, 'r', opts.encoding)
text = fp.read()
fp.close()
if opts.compare:
from subprocess import Popen, PIPE
print("==== Markdown.pl ====")
p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
p.stdin.write(text.encode('utf-8'))
p.stdin.close()
perl_html = p.stdout.read().decode('utf-8')
if py3:
sys.stdout.write(perl_html)
else:
sys.stdout.write(perl_html.encode(
sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
print("==== markdown2.py ====")
html = markdown(text,
html4tags=opts.html4tags,
safe_mode=opts.safe_mode,
extras=extras, link_patterns=link_patterns,
use_file_vars=opts.use_file_vars)
if py3:
sys.stdout.write(html)
else:
sys.stdout.write(html.encode(
sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
if extras and "toc" in extras:
log.debug("toc_html: " +
html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
if opts.compare:
test_dir = join(dirname(dirname(abspath(__file__))), "test")
if exists(join(test_dir, "test_markdown2.py")):
sys.path.insert(0, test_dir)
from test_markdown2 import norm_html_from_html
norm_html = norm_html_from_html(html)
norm_perl_html = norm_html_from_html(perl_html)
else:
norm_html = html
norm_perl_html = perl_html
print("==== match? %r ====" % (norm_perl_html == norm_html))
if __name__ == "__main__":
sys.exit( main(sys.argv) )
|
gpl-2.0
|
BrianGladman/pthreads
|
build.vs/build_tests/_msvccompiler.py
|
1
|
22500
|
"""
----------------------------------------------------------------------------
Copyright © 2001-2020 Python Software Foundation; All Rights Reserved
This file is distributed under the terms of this license:
https://docs.python.org/3/license.html
----------------------------------------------------------------------------
distutils._msvccompiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for Microsoft Visual Studio 2015.
The module is compatible with VS 2015 and later. You can find legacy support
for older versions in distutils.msvc9compiler and distutils.msvccompiler.
Written by Perry Stoll
hacked by Robin Becker and Thomas Heller to do a better job of
finding DevStudio (through the registry)
ported to VS 2005 and VS 2008 by Christian Heimes
ported to VS 2015 by Steve Dower
----------------------------------------------------------------------------
"""
import os
import shutil
import stat
import subprocess
import winreg
from distutils.errors import DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import CCompiler, gen_lib_options
from distutils import log
from distutils.util import get_platform
from itertools import count
def _find_vc2015():
try:
key = winreg.OpenKeyEx(
winreg.HKEY_LOCAL_MACHINE,
r"Software\Microsoft\VisualStudio\SxS\VC7",
access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY
)
except OSError:
log.debug("Visual C++ is not registered")
return None, None
best_version = 0
best_dir = None
with key:
for i in count():
try:
v, vc_dir, vt = winreg.EnumValue(key, i)
except OSError:
break
if v and vt == winreg.REG_SZ and os.path.isdir(vc_dir):
try:
version = int(float(v))
except (ValueError, TypeError):
continue
if version >= 14 and version > best_version:
best_version, best_dir = version, vc_dir
return best_version, best_dir
def _find_vc2017():
"""Returns "15, path" based on the result of invoking vswhere.exe
If no install is found, returns "None, None"
The version is returned to avoid unnecessarily changing the function
result. It may be ignored when the path is not None.
If vswhere.exe is not available, by definition, VS 2017 is not
installed.
"""
root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles")
if not root:
return None, None
try:
path = subprocess.check_output([
os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
"-latest",
"-prerelease",
"-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
"-property", "installationPath",
"-products", "*",
], encoding="mbcs", errors="strict").strip()
except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
return None, None
path = os.path.join(path, "VC", "Auxiliary", "Build")
if os.path.isdir(path):
return 15, path
return None, None
PLAT_SPEC_TO_RUNTIME = {
'x86' : 'x86',
'x86_amd64' : 'x64',
'x86_arm' : 'arm',
'x86_arm64' : 'arm64'
}
def _find_vcvarsall(plat_spec):
_, best_dir = _find_vc2017()
vcruntime = None
if plat_spec in PLAT_SPEC_TO_RUNTIME:
vcruntime_plat = PLAT_SPEC_TO_RUNTIME[plat_spec]
else:
vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86'
if best_dir:
vcredist = os.path.join(best_dir, "..", "..", "redist", "MSVC", "**",
vcruntime_plat, "Microsoft.VC14*.CRT", "vcruntime140.dll")
try:
import glob
vcruntime = glob.glob(vcredist, recursive=True)[-1]
except (ImportError, OSError, LookupError):
vcruntime = None
if not best_dir:
best_version, best_dir = _find_vc2015()
if best_version:
vcruntime = os.path.join(best_dir, 'redist', vcruntime_plat,
"Microsoft.VC140.CRT", "vcruntime140.dll")
if not best_dir:
log.debug("No suitable Visual C++ version found")
return None, None
vcvarsall = os.path.join(best_dir, "vcvarsall.bat")
if not os.path.isfile(vcvarsall):
log.debug("%s cannot be found", vcvarsall)
return None, None
if not vcruntime or not os.path.isfile(vcruntime):
log.debug("%s cannot be found", vcruntime)
vcruntime = None
return vcvarsall, vcruntime
def _get_vc_env(plat_spec):
if os.getenv("DISTUTILS_USE_SDK"):
return {
key.lower(): value
for key, value in os.environ.items()
}
vcvarsall, vcruntime = _find_vcvarsall(plat_spec)
if not vcvarsall:
raise DistutilsPlatformError("Unable to find vcvarsall.bat")
try:
out = subprocess.check_output(
'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec),
stderr=subprocess.STDOUT,
).decode('utf-16le', errors='replace')
except subprocess.CalledProcessError as exc:
log.error(exc.output)
raise DistutilsPlatformError("Error executing {}"
.format(exc.cmd))
env = {
key.lower(): value
for key, _, value in
(line.partition('=') for line in out.splitlines())
if key and value
}
if vcruntime:
env['py_vcruntime_redist'] = vcruntime
return env
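# Hedged usage sketch (added): MSVCCompiler.initialize() below consumes this as
#
#   vc_env = _get_vc_env('x86_amd64')
#   paths = vc_env.get('path', '').split(os.pathsep)
#
# i.e. the environment produced by "vcvarsall.bat x86_amd64 && set" is returned
# as a dict with lower-cased keys ('path', 'include', 'lib', ...), plus
# 'py_vcruntime_redist' when a redistributable vcruntime140.dll was located.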
def _find_exe(exe, paths=None):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
if not paths:
paths = os.getenv('path').split(os.pathsep)
for p in paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
return exe
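# Hedged example (added; the path shown is hypothetical): given the PATH
# entries set up by vcvarsall,
#
#   _find_exe("cl.exe", paths)
#
# returns an absolute path such as r"C:\...\Hostx86\x86\cl.exe" when the
# compiler is found on one of the paths, and the bare name "cl.exe" otherwise.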
# A map keyed by get_platform() return values to values accepted by
# 'vcvarsall.bat'. Always cross-compile from x86 to work with the
# lighter-weight MSVC installs that do not include native 64-bit tools.
PLAT_TO_VCVARS = {
'win32' : 'x86',
'win-amd64' : 'x86_amd64',
'win-arm32' : 'x86_arm',
'win-arm64' : 'x86_arm64'
}
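# Illustrative note (added): a build where get_platform() == 'win-amd64' thus
# runs "vcvarsall.bat x86_amd64", and _find_vcvarsall() maps that spec to the
# 'x64' vcruntime via PLAT_SPEC_TO_RUNTIME above.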
# A set containing the DLLs that are guaranteed to be available for
# all micro versions of this Python version. Known extension
# dependencies that are not in this set will be copied to the output
# path.
_BUNDLED_DLLS = frozenset(['vcruntime140.dll'])
class MSVCCompiler(CCompiler):
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
# target platform (.plat_name is consistent with 'bdist')
self.plat_name = None
self.initialized = False
def initialize(self, plat_name=None):
# multi-init means we would need to check platform same each time...
assert not self.initialized, "don't init multiple times"
if plat_name is None:
plat_name = get_platform()
# sanity check for platforms to prevent obscure errors later.
if plat_name not in PLAT_TO_VCVARS:
raise DistutilsPlatformError("--plat-name must be one of {}"
.format(tuple(PLAT_TO_VCVARS)))
# Get the vcvarsall.bat spec for the requested platform.
plat_spec = PLAT_TO_VCVARS[plat_name]
vc_env = _get_vc_env(plat_spec)
if not vc_env:
raise DistutilsPlatformError("Unable to find a compatible "
"Visual Studio installation.")
self._paths = vc_env.get('path', '')
paths = self._paths.split(os.pathsep)
self.cc = _find_exe("cl.exe", paths)
self.linker = _find_exe("link.exe", paths)
self.lib = _find_exe("lib.exe", paths)
self.rc = _find_exe("rc.exe", paths) # resource compiler
self.mc = _find_exe("mc.exe", paths) # message compiler
        self.mt = _find_exe("mt.exe", paths) # manifest tool
self._vcruntime_redist = vc_env.get('py_vcruntime_redist', '')
for dir in vc_env.get('include', '').split(os.pathsep):
if dir:
self.add_include_dir(dir.rstrip(os.sep))
for dir in vc_env.get('lib', '').split(os.pathsep):
if dir:
self.add_library_dir(dir.rstrip(os.sep))
self.preprocess_options = None
# If vcruntime_redist is available, link against it dynamically. Otherwise,
# use /MT[d] to build statically, then switch from libucrt[d].lib to ucrt[d].lib
# later to dynamically link to ucrtbase but not vcruntime.
self.compile_options = [
'/nologo', '/Ox', '/W3', '/GL', '/DNDEBUG'
]
        self.compile_options.append('/MD' if self._vcruntime_redist else '/MT')
self.compile_options_debug = [
'/nologo', '/Od', '/MDd', '/Zi', '/W3', '/D_DEBUG'
]
ldflags = [
'/nologo', '/INCREMENTAL:NO', '/LTCG'
]
if not self._vcruntime_redist:
            ldflags.extend((
                '/nodefaultlib:libucrt.lib', '/nodefaultlib:msvcrt.lib',
                '/nodefaultlib:libcmtd.lib', '/nodefaultlib:msvcrtd.lib',
                'ucrt.lib'))
ldflags_debug = [
'/nologo', '/INCREMENTAL:NO', '/LTCG', '/DEBUG:FULL'
]
self.ldflags_exe = [*ldflags, '/MANIFEST:EMBED,ID=1']
self.ldflags_exe_debug = [*ldflags_debug, '/MANIFEST:EMBED,ID=1']
self.ldflags_shared = [*ldflags, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO']
self.ldflags_shared_debug = [*ldflags_debug, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO']
self.ldflags_static = [*ldflags]
self.ldflags_static_debug = [*ldflags_debug]
self._ldflags = {
(CCompiler.EXECUTABLE, None): self.ldflags_exe,
(CCompiler.EXECUTABLE, False): self.ldflags_exe,
(CCompiler.EXECUTABLE, True): self.ldflags_exe_debug,
(CCompiler.SHARED_OBJECT, None): self.ldflags_shared,
(CCompiler.SHARED_OBJECT, False): self.ldflags_shared,
(CCompiler.SHARED_OBJECT, True): self.ldflags_shared_debug,
(CCompiler.SHARED_LIBRARY, None): self.ldflags_static,
(CCompiler.SHARED_LIBRARY, False): self.ldflags_static,
(CCompiler.SHARED_LIBRARY, True): self.ldflags_static_debug,
}
self.initialized = True
# -- Worker methods ------------------------------------------------
def object_filenames(self,
source_filenames,
strip_dir=0,
output_dir=''):
ext_map = {
**{ext: self.obj_extension for ext in self.src_extensions},
**{ext: self.res_extension for ext in self._rc_extensions + self._mc_extensions},
}
output_dir = output_dir or ''
def make_out_path(p):
base, ext = os.path.splitext(p)
if strip_dir:
base = os.path.basename(base)
else:
_, base = os.path.splitdrive(base)
if base.startswith((os.path.sep, os.path.altsep)):
base = base[1:]
try:
# XXX: This may produce absurdly long paths. We should check
# the length of the result and trim base until we fit within
# 260 characters.
return os.path.join(output_dir, base + ext_map[ext])
except LookupError:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError("Don't know how to compile {}".format(p))
return list(map(make_out_path, source_filenames))
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None,
defines=None):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(output_dir, macros, include_dirs,
sources, depends, extra_postargs)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append('/c')
if defines is not None:
compile_opts.extend(defines)
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
add_cpp_opts = False
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
add_cpp_opts = True
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts + [output_opt, input_opt])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src])
base, _ = os.path.splitext(os.path.basename (src))
rc_file = os.path.join(rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc, "/fo" + obj, rc_file])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError("Don't know how to compile {} to {}"
.format(src, obj))
args = [self.cc] + compile_opts + pp_opts
if add_cpp_opts:
args.append('/EHsc')
args.append(input_opt)
args.append("/Fo" + obj)
args.extend(extra_postargs)
try:
self.spawn(args)
except DistutilsExecError as msg:
raise CompileError(msg)
return objects
def create_static_lib(self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized:
self.initialize()
objects, output_dir = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname,
output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
log.debug('Executing "%s" %s', self.lib, ' '.join(lib_args))
self.spawn([self.lib] + lib_args)
except DistutilsExecError as msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized:
self.initialize()
objects, output_dir = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
libraries, library_dirs, runtime_library_dirs = fixed_args
if runtime_library_dirs:
self.warn("I don't know what to do with 'runtime_library_dirs': "
+ str(runtime_library_dirs))
lib_opts = gen_lib_options(self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
ldflags = self._ldflags[target_desc, debug]
export_opts = ["/EXPORT:" + sym for sym in (export_symbols or [])]
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
build_temp = os.path.dirname(objects[0])
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
build_temp,
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
output_dir = os.path.dirname(os.path.abspath(output_filename))
self.mkpath(output_dir)
try:
log.debug('Executing "%s" %s', self.linker, ' '.join(ld_args))
self.spawn([self.linker] + ld_args)
self._copy_vcruntime(output_dir)
except DistutilsExecError as msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def _copy_vcruntime(self, output_dir):
vcruntime = self._vcruntime_redist
if not vcruntime or not os.path.isfile(vcruntime):
return
if os.path.basename(vcruntime).lower() in _BUNDLED_DLLS:
return
log.debug('Copying "%s"', vcruntime)
vcruntime = shutil.copy(vcruntime, output_dir)
os.chmod(vcruntime, stat.S_IWRITE)
def spawn(self, cmd):
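        # Temporarily swap in the tool search path prepared by initialize() so the
        # spawned MSVC tool (cl.exe, link.exe, lib.exe, ...) can be located, then
        # restore the caller's original PATH.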
old_path = os.getenv('path')
try:
os.environ['path'] = self._paths
return super().spawn(cmd)
finally:
os.environ['path'] = old_path
# -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options()' function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC")
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename(name))
if os.path.isfile(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
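# Illustrative example: with debug=1, find_library_file([r"C:\libs"], "zlib")
# looks for "zlib_d.lib" in each directory before falling back to "zlib.lib",
# and returns the first match (or None if nothing is found).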
|
apache-2.0
|
koobonil/Boss2D
|
Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_tensor_test.py
|
54
|
9345
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stochastic graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.bayesflow.python.ops import stochastic_gradient_estimators
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor_impl
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import normal
from tensorflow.python.platform import test
sge = stochastic_gradient_estimators
st = stochastic_tensor_impl
class StochasticTensorTest(test.TestCase):
def testConstructionAndValue(self):
with self.test_session() as sess:
mu = [0.0, 0.1, 0.2]
sigma = constant_op.constant([1.1, 1.2, 1.3])
sigma2 = constant_op.constant([0.1, 0.2, 0.3])
prior_default = st.StochasticTensor(
normal.Normal(loc=mu, scale=sigma))
self.assertTrue(isinstance(prior_default.value_type, st.SampleValue))
prior_0 = st.StochasticTensor(
normal.Normal(loc=mu, scale=sigma),
dist_value_type=st.SampleValue())
self.assertTrue(isinstance(prior_0.value_type, st.SampleValue))
with st.value_type(st.SampleValue()):
prior = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
self.assertTrue(isinstance(prior.value_type, st.SampleValue))
likelihood = st.StochasticTensor(
normal.Normal(loc=prior, scale=sigma2))
self.assertTrue(isinstance(likelihood.value_type, st.SampleValue))
coll = ops.get_collection(st.STOCHASTIC_TENSOR_COLLECTION)
self.assertEqual(coll, [prior_default, prior_0, prior, likelihood])
# Also works: tf.convert_to_tensor(prior)
prior_default = array_ops.identity(prior_default)
prior_0 = array_ops.identity(prior_0)
prior = array_ops.identity(prior)
likelihood = array_ops.identity(likelihood)
# Mostly a smoke test for now...
prior_0_val, prior_val, prior_default_val, _ = sess.run(
[prior_0, prior, prior_default, likelihood])
self.assertEqual(prior_0_val.shape, prior_val.shape)
self.assertEqual(prior_default_val.shape, prior_val.shape)
# These are different random samples from the same distribution,
# so the values should differ.
self.assertGreater(np.abs(prior_0_val - prior_val).sum(), 1e-6)
self.assertGreater(np.abs(prior_default_val - prior_val).sum(), 1e-6)
def testMeanValue(self):
with self.test_session() as sess:
mu = [0.0, -1.0, 1.0]
sigma = constant_op.constant([1.1, 1.2, 1.3])
with st.value_type(st.MeanValue()):
prior = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
self.assertTrue(isinstance(prior.value_type, st.MeanValue))
prior_mean = prior.mean()
prior_value = prior.value()
prior_mean_val, prior_value_val = sess.run([prior_mean, prior_value])
self.assertAllEqual(prior_mean_val, mu)
self.assertAllEqual(prior_mean_val, prior_value_val)
def testSampleValueScalar(self):
with self.test_session() as sess:
mu = [[0.0, -1.0, 1.0], [0.0, -1.0, 1.0]]
sigma = constant_op.constant([[1.1, 1.2, 1.3], [1.1, 1.2, 1.3]])
with st.value_type(st.SampleValue()):
prior_single = st.StochasticTensor(
normal.Normal(loc=mu, scale=sigma))
prior_single_value = prior_single.value()
self.assertEqual(prior_single_value.get_shape(), (2, 3))
prior_single_value_val = sess.run([prior_single_value])[0]
self.assertEqual(prior_single_value_val.shape, (2, 3))
with st.value_type(st.SampleValue(1)):
prior_single = st.StochasticTensor(
normal.Normal(loc=mu, scale=sigma))
self.assertTrue(isinstance(prior_single.value_type, st.SampleValue))
prior_single_value = prior_single.value()
self.assertEqual(prior_single_value.get_shape(), (1, 2, 3))
prior_single_value_val = sess.run([prior_single_value])[0]
self.assertEqual(prior_single_value_val.shape, (1, 2, 3))
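      # SampleValue(n) draws n samples and prepends a sample dimension of size n,
      # so the (2, 3) batch below becomes shape (2, 2, 3) for n = 2.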
with st.value_type(st.SampleValue(2)):
prior_double = st.StochasticTensor(
normal.Normal(loc=mu, scale=sigma))
prior_double_value = prior_double.value()
self.assertEqual(prior_double_value.get_shape(), (2, 2, 3))
prior_double_value_val = sess.run([prior_double_value])[0]
self.assertEqual(prior_double_value_val.shape, (2, 2, 3))
def testDistributionEntropy(self):
with self.test_session() as sess:
mu = [0.0, -1.0, 1.0]
sigma = constant_op.constant([1.1, 1.2, 1.3])
with st.value_type(st.MeanValue()):
prior = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
entropy = prior.entropy()
deep_entropy = prior.distribution.entropy()
expected_deep_entropy = normal.Normal(
loc=mu, scale=sigma).entropy()
entropies = sess.run([entropy, deep_entropy, expected_deep_entropy])
self.assertAllEqual(entropies[2], entropies[0])
self.assertAllEqual(entropies[1], entropies[0])
def testSurrogateLoss(self):
with self.test_session():
mu = [[3.0, -4.0, 5.0], [6.0, -7.0, 8.0]]
sigma = constant_op.constant(1.0)
# With default
with st.value_type(st.MeanValue(stop_gradient=True)):
dt = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
loss = dt.loss([constant_op.constant(2.0)])
self.assertTrue(loss is not None)
self.assertAllClose(
dt.distribution.log_prob(mu).eval() * 2.0, loss.eval())
# With passed-in loss_fn.
dt = st.StochasticTensor(
normal.Normal(loc=mu, scale=sigma),
dist_value_type=st.MeanValue(stop_gradient=True),
loss_fn=sge.get_score_function_with_constant_baseline(
baseline=constant_op.constant(8.0)))
loss = dt.loss([constant_op.constant(2.0)])
self.assertTrue(loss is not None)
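      # With a constant baseline of 8.0, the score-function surrogate loss is
      # log_prob(sample) * (loss - baseline); the MeanValue sample here is mu and
      # the loss is 2.0, hence the expected value log_prob(mu) * (2.0 - 8.0).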
self.assertAllClose((dt.distribution.log_prob(mu) * (2.0 - 8.0)).eval(),
loss.eval())
class ValueTypeTest(test.TestCase):
def testValueType(self):
type_mean = st.MeanValue()
type_reshape = st.SampleValue()
type_full = st.SampleValue()
with st.value_type(type_mean):
self.assertEqual(st.get_current_value_type(), type_mean)
with st.value_type(type_reshape):
self.assertEqual(st.get_current_value_type(), type_reshape)
with st.value_type(type_full):
self.assertEqual(st.get_current_value_type(), type_full)
self.assertEqual(st.get_current_value_type(), type_mean)
with self.assertRaisesRegexp(ValueError, "No value type currently set"):
st.get_current_value_type()
class ObservedStochasticTensorTest(test.TestCase):
def testConstructionAndValue(self):
with self.test_session() as sess:
mu = [0.0, 0.1, 0.2]
sigma = constant_op.constant([1.1, 1.2, 1.3])
obs = array_ops.zeros((2, 3))
z = st.ObservedStochasticTensor(
normal.Normal(loc=mu, scale=sigma), value=obs)
[obs_val, z_val] = sess.run([obs, z.value()])
self.assertAllEqual(obs_val, z_val)
coll = ops.get_collection(st.STOCHASTIC_TENSOR_COLLECTION)
self.assertEqual(coll, [z])
def testConstructionWithUnknownShapes(self):
mu = array_ops.placeholder(dtypes.float32)
sigma = array_ops.placeholder(dtypes.float32)
obs = array_ops.placeholder(dtypes.float32)
z = st.ObservedStochasticTensor(
normal.Normal(loc=mu, scale=sigma), value=obs)
mu2 = array_ops.placeholder(dtypes.float32, shape=[None])
sigma2 = array_ops.placeholder(dtypes.float32, shape=[None])
obs2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
z2 = st.ObservedStochasticTensor(
normal.Normal(loc=mu2, scale=sigma2), value=obs2)
coll = ops.get_collection(st.STOCHASTIC_TENSOR_COLLECTION)
self.assertEqual(coll, [z, z2])
def testConstructionErrors(self):
mu = [0., 0.]
sigma = [1., 1.]
self.assertRaises(
ValueError,
st.ObservedStochasticTensor,
normal.Normal(loc=mu, scale=sigma),
value=array_ops.zeros((3,)))
self.assertRaises(
ValueError,
st.ObservedStochasticTensor,
normal.Normal(loc=mu, scale=sigma),
value=array_ops.zeros((3, 1)))
self.assertRaises(
ValueError,
st.ObservedStochasticTensor,
normal.Normal(loc=mu, scale=sigma),
value=array_ops.zeros((1, 2), dtype=dtypes.int32))
if __name__ == "__main__":
test.main()
|
mit
|
Tokyo-Buffalo/tokyosouth
|
env/lib/python3.6/site-packages/pip/_vendor/requests/packages/urllib3/util/timeout.py
|
713
|
9596
|
from __future__ import absolute_import
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()
def current_time():
"""
Retrieve the current time. This function is mocked out in unit testing.
"""
return time.time()
class Timeout(object):
""" Timeout configuration.
Timeouts can be defined as a default for a pool::
timeout = Timeout(connect=2.0, read=7.0)
http = PoolManager(timeout=timeout)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
Timeouts can be disabled by setting all the parameters to ``None``::
no_timeout = Timeout(connect=None, read=None)
        response = http.request('GET', 'http://example.com/', timeout=no_timeout)
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
:param connect:
The maximum amount of time to wait for a connection attempt to a server
to succeed. Omitting the parameter will default the connect timeout to
the system default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time to wait between consecutive
read operations for a response from the server. Omitting
the parameter will default the read timeout to the system
default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response.
For example, Python's DNS resolver does not obey the timeout specified
on the socket. Other factors that can affect total request time include
high CPU load, high swap, the program running at a low priority level,
or other behaviors.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not trigger, even though the request will take
several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, 'connect')
self._read = self._validate_timeout(read, 'read')
self.total = self._validate_timeout(total, 'total')
self._start_connect = None
def __str__(self):
return '%s(connect=%r, read=%r, total=%r)' % (
type(self).__name__, self._connect, self._read, self.total)
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid.
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is
used to specify in error messages.
:return: The validated and casted version of the given value.
:raises ValueError: If the type is not an integer or a float, or if it
is a numeric value less than zero.
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
try:
float(value)
except (TypeError, ValueError):
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
try:
if value < 0:
raise ValueError("Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than 0." % (name, value))
except TypeError: # Python 3
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
        connect() and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value
passed to this function.
:param timeout: The legacy timeout value.
:type timeout: integer, float, sentinel default object, or None
:return: Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read,
total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: Elapsed time.
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError("Can't get connect duration for timer "
"that has not started.")
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: Connect timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: Value to use for the read timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (self.total is not None and
self.total is not self.DEFAULT_TIMEOUT and
self._read is not None and
self._read is not self.DEFAULT_TIMEOUT):
# In case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(),
self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
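# Illustrative sketch of how the two properties above interact (hypothetical values):
#   t = Timeout(total=10.0, connect=3.0)
#   t.connect_timeout  -> 3.0 (the smaller of connect and total)
#   t.start_connect()  ... two seconds later ...
#   t.read_timeout     -> 8.0 (total minus the elapsed connect duration, because
#                              no explicit read timeout was supplied)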
|
mit
|
duhzecca/cinder
|
cinder/volume/drivers/vmware/volumeops.py
|
7
|
68492
|
# Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Implements operations on volumes residing on VMware datastores.
"""
from oslo_log import log as logging
from oslo_utils import units
from oslo_vmware import exceptions
from oslo_vmware import pbm
from oslo_vmware import vim_util
import six
from six.moves import urllib
from cinder.i18n import _, _LE, _LI
from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions
LOG = logging.getLogger(__name__)
LINKED_CLONE_TYPE = 'linked'
FULL_CLONE_TYPE = 'full'
def split_datastore_path(datastore_path):
"""Split the datastore path to components.
return the datastore name, relative folder path and the file name
E.g. datastore_path = [datastore1] my_volume/my_volume.vmdk, returns
(datastore1, my_volume/, my_volume.vmdk)
:param datastore_path: Datastore path of a file
:return: Parsed datastore name, relative folder path and file name
"""
splits = datastore_path.split('[', 1)[1].split(']', 1)
datastore_name = None
folder_path = None
file_name = None
if len(splits) == 1:
datastore_name = splits[0]
else:
datastore_name, path = splits
# Path will be of form my_volume/my_volume.vmdk
        # we need to split it into my_volume/ and my_volume.vmdk
splits = path.split('/')
file_name = splits[len(splits) - 1]
folder_path = path[:-len(file_name)]
return (datastore_name.strip(), folder_path.strip(), file_name.strip())
class VirtualDiskPath(object):
"""Class representing paths of files comprising a virtual disk."""
def __init__(self, ds_name, folder_path, disk_name):
"""Creates path object for the given disk.
:param ds_name: name of the datastore where disk is stored
:param folder_path: absolute path of the folder containing the disk
:param disk_name: name of the virtual disk
"""
self._descriptor_file_path = "%s%s.vmdk" % (folder_path, disk_name)
self._descriptor_ds_file_path = self.get_datastore_file_path(
ds_name, self._descriptor_file_path)
def get_datastore_file_path(self, ds_name, file_path):
"""Get datastore path corresponding to the given file path.
:param ds_name: name of the datastore containing the file represented
by the given file path
:param file_path: absolute path of the file
:return: datastore file path
"""
return "[%s] %s" % (ds_name, file_path)
def get_descriptor_file_path(self):
"""Get absolute file path of the virtual disk descriptor."""
return self._descriptor_file_path
def get_descriptor_ds_file_path(self):
"""Get datastore file path of the virtual disk descriptor."""
return self._descriptor_ds_file_path
class FlatExtentVirtualDiskPath(VirtualDiskPath):
"""Paths of files in a non-monolithic disk with a single flat extent."""
def __init__(self, ds_name, folder_path, disk_name):
"""Creates path object for the given disk.
:param ds_name: name of the datastore where disk is stored
:param folder_path: absolute path of the folder containing the disk
:param disk_name: name of the virtual disk
"""
super(FlatExtentVirtualDiskPath, self).__init__(
ds_name, folder_path, disk_name)
self._flat_extent_file_path = "%s%s-flat.vmdk" % (folder_path,
disk_name)
self._flat_extent_ds_file_path = self.get_datastore_file_path(
ds_name, self._flat_extent_file_path)
def get_flat_extent_file_path(self):
"""Get absolute file path of the flat extent."""
return self._flat_extent_file_path
def get_flat_extent_ds_file_path(self):
"""Get datastore file path of the flat extent."""
return self._flat_extent_ds_file_path
class MonolithicSparseVirtualDiskPath(VirtualDiskPath):
"""Paths of file comprising a monolithic sparse disk."""
pass
class VirtualDiskType(object):
"""Supported virtual disk types."""
EAGER_ZEROED_THICK = "eagerZeroedThick"
PREALLOCATED = "preallocated"
THIN = "thin"
# thick in extra_spec means lazy-zeroed thick disk
EXTRA_SPEC_DISK_TYPE_DICT = {'eagerZeroedThick': EAGER_ZEROED_THICK,
'thick': PREALLOCATED,
'thin': THIN
}
@staticmethod
def is_valid(extra_spec_disk_type):
"""Check if the given disk type in extra_spec is valid.
:param extra_spec_disk_type: disk type in extra_spec
:return: True if valid
"""
return (extra_spec_disk_type in
VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT)
@staticmethod
def validate(extra_spec_disk_type):
"""Validate the given disk type in extra_spec.
This method throws an instance of InvalidDiskTypeException if the given
disk type is invalid.
:param extra_spec_disk_type: disk type in extra_spec
:raises: InvalidDiskTypeException
"""
if not VirtualDiskType.is_valid(extra_spec_disk_type):
raise vmdk_exceptions.InvalidDiskTypeException(
disk_type=extra_spec_disk_type)
@staticmethod
def get_virtual_disk_type(extra_spec_disk_type):
"""Return disk type corresponding to the extra_spec disk type.
:param extra_spec_disk_type: disk type in extra_spec
:return: virtual disk type
:raises: InvalidDiskTypeException
"""
VirtualDiskType.validate(extra_spec_disk_type)
return (VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT[
extra_spec_disk_type])
class VirtualDiskAdapterType(object):
"""Supported virtual disk adapter types."""
LSI_LOGIC = "lsiLogic"
BUS_LOGIC = "busLogic"
LSI_LOGIC_SAS = "lsiLogicsas"
IDE = "ide"
@staticmethod
def is_valid(adapter_type):
"""Check if the given adapter type is valid.
:param adapter_type: adapter type to check
:return: True if valid
"""
return adapter_type in [VirtualDiskAdapterType.LSI_LOGIC,
VirtualDiskAdapterType.BUS_LOGIC,
VirtualDiskAdapterType.LSI_LOGIC_SAS,
VirtualDiskAdapterType.IDE]
@staticmethod
def validate(extra_spec_adapter_type):
"""Validate the given adapter type in extra_spec.
This method throws an instance of InvalidAdapterTypeException if the
given adapter type is invalid.
:param extra_spec_adapter_type: adapter type in extra_spec
:raises: InvalidAdapterTypeException
"""
if not VirtualDiskAdapterType.is_valid(extra_spec_adapter_type):
raise vmdk_exceptions.InvalidAdapterTypeException(
invalid_type=extra_spec_adapter_type)
@staticmethod
def get_adapter_type(extra_spec_adapter_type):
"""Get the adapter type to be used in VirtualDiskSpec.
:param extra_spec_adapter_type: adapter type in the extra_spec
:return: adapter type to be used in VirtualDiskSpec
"""
VirtualDiskAdapterType.validate(extra_spec_adapter_type)
# We set the adapter type as lsiLogic for lsiLogicsas since it is not
# supported by VirtualDiskManager APIs. This won't be a problem because
# we attach the virtual disk to the correct controller type and the
# disk adapter type is always resolved using its controller key.
if extra_spec_adapter_type == VirtualDiskAdapterType.LSI_LOGIC_SAS:
return VirtualDiskAdapterType.LSI_LOGIC
return extra_spec_adapter_type
class ControllerType(object):
"""Encapsulate various controller types."""
LSI_LOGIC = 'VirtualLsiLogicController'
BUS_LOGIC = 'VirtualBusLogicController'
LSI_LOGIC_SAS = 'VirtualLsiLogicSASController'
IDE = 'VirtualIDEController'
CONTROLLER_TYPE_DICT = {
VirtualDiskAdapterType.LSI_LOGIC: LSI_LOGIC,
VirtualDiskAdapterType.BUS_LOGIC: BUS_LOGIC,
VirtualDiskAdapterType.LSI_LOGIC_SAS: LSI_LOGIC_SAS,
VirtualDiskAdapterType.IDE: IDE}
@staticmethod
def get_controller_type(adapter_type):
"""Get the disk controller type based on the given adapter type.
:param adapter_type: disk adapter type
:return: controller type corresponding to the given adapter type
:raises: InvalidAdapterTypeException
"""
if adapter_type in ControllerType.CONTROLLER_TYPE_DICT:
return ControllerType.CONTROLLER_TYPE_DICT[adapter_type]
raise vmdk_exceptions.InvalidAdapterTypeException(
invalid_type=adapter_type)
@staticmethod
def is_scsi_controller(controller_type):
"""Check if the given controller is a SCSI controller.
:param controller_type: controller type
:return: True if the controller is a SCSI controller
"""
return controller_type in [ControllerType.LSI_LOGIC,
ControllerType.BUS_LOGIC,
ControllerType.LSI_LOGIC_SAS]
class VMwareVolumeOps(object):
"""Manages volume operations."""
def __init__(self, session, max_objects):
self._session = session
self._max_objects = max_objects
self._folder_cache = {}
def get_backing(self, name):
"""Get the backing based on name.
:param name: Name of the backing
:return: Managed object reference to the backing
"""
retrieve_result = self._session.invoke_api(vim_util, 'get_objects',
self._session.vim,
'VirtualMachine',
self._max_objects)
while retrieve_result:
vms = retrieve_result.objects
for vm in vms:
if vm.propSet[0].val == name:
# We got the result, so cancel further retrieval.
self.cancel_retrieval(retrieve_result)
return vm.obj
# Result not obtained, continue retrieving results.
retrieve_result = self.continue_retrieval(retrieve_result)
LOG.debug("Did not find any backing with name: %s", name)
def delete_backing(self, backing):
"""Delete the backing.
:param backing: Managed object reference to the backing
"""
LOG.debug("Deleting the VM backing: %s.", backing)
task = self._session.invoke_api(self._session.vim, 'Destroy_Task',
backing)
LOG.debug("Initiated deletion of VM backing: %s.", backing)
self._session.wait_for_task(task)
LOG.info(_LI("Deleted the VM backing: %s."), backing)
# TODO(kartikaditya) Keep the methods not specific to volume in
# a different file
def get_host(self, instance):
"""Get host under which instance is present.
:param instance: Managed object reference of the instance VM
:return: Host managing the instance VM
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, instance,
'runtime.host')
def is_host_usable(self, host):
"""Check if the given ESX host is usable.
A host is usable if it is connected to vCenter server and not in
maintenance mode.
:param host: Managed object reference to the ESX host
:return: True if host is usable, False otherwise
"""
runtime_info = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
host,
'runtime')
return (runtime_info.connectionState == 'connected' and
not runtime_info.inMaintenanceMode)
def get_hosts(self):
"""Get all host from the inventory.
:return: All the hosts from the inventory
"""
return self._session.invoke_api(vim_util, 'get_objects',
self._session.vim,
'HostSystem', self._max_objects)
def continue_retrieval(self, retrieve_result):
"""Continue retrieval of results if necessary.
:param retrieve_result: Result from RetrievePropertiesEx
"""
return self._session.invoke_api(vim_util, 'continue_retrieval',
self._session.vim, retrieve_result)
def cancel_retrieval(self, retrieve_result):
"""Cancel retrieval of results if necessary.
:param retrieve_result: Result from RetrievePropertiesEx
"""
self._session.invoke_api(vim_util, 'cancel_retrieval',
self._session.vim, retrieve_result)
def _is_usable(self, mount_info):
"""Check if a datastore is usable as per the given mount info.
The datastore is considered to be usable for a host only if it is
writable, mounted and accessible.
:param mount_info: Host mount information
:return: True if datastore is usable
"""
writable = mount_info.accessMode == 'readWrite'
# If mounted attribute is not set, then default is True
mounted = getattr(mount_info, 'mounted', True)
# If accessible attribute is not set, then default is False
accessible = getattr(mount_info, 'accessible', False)
return writable and mounted and accessible
def get_connected_hosts(self, datastore):
"""Get all the hosts to which the datastore is connected and usable.
The datastore is considered to be usable for a host only if it is
writable, mounted and accessible.
:param datastore: Reference to the datastore entity
:return: List of managed object references of all connected
hosts
"""
summary = self.get_summary(datastore)
if not summary.accessible:
return []
host_mounts = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datastore,
'host')
if not hasattr(host_mounts, 'DatastoreHostMount'):
return []
connected_hosts = []
for host_mount in host_mounts.DatastoreHostMount:
if self._is_usable(host_mount.mountInfo):
connected_hosts.append(host_mount.key.value)
return connected_hosts
def is_datastore_accessible(self, datastore, host):
"""Check if the datastore is accessible to the given host.
:param datastore: datastore reference
:return: True if the datastore is accessible
"""
hosts = self.get_connected_hosts(datastore)
return host.value in hosts
def _in_maintenance(self, summary):
"""Check if a datastore is entering maintenance or in maintenance.
:param summary: Summary information about the datastore
:return: True if the datastore is entering maintenance or in
maintenance
"""
if hasattr(summary, 'maintenanceMode'):
return summary.maintenanceMode in ['enteringMaintenance',
'inMaintenance']
return False
def _is_valid(self, datastore, host):
"""Check if the datastore is valid for the given host.
A datastore is considered valid for a host only if the datastore is
writable, mounted and accessible. Also, the datastore should not be
in maintenance mode.
:param datastore: Reference to the datastore entity
:param host: Reference to the host entity
:return: True if datastore can be used for volume creation
"""
summary = self.get_summary(datastore)
in_maintenance = self._in_maintenance(summary)
if not summary.accessible or in_maintenance:
return False
host_mounts = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datastore,
'host')
for host_mount in host_mounts.DatastoreHostMount:
if host_mount.key.value == host.value:
return self._is_usable(host_mount.mountInfo)
return False
def get_dss_rp(self, host):
"""Get accessible datastores and resource pool of the host.
:param host: Managed object reference of the host
:return: Datastores accessible to the host and resource pool to which
                 the host belongs
"""
props = self._session.invoke_api(vim_util, 'get_object_properties',
self._session.vim, host,
['datastore', 'parent'])
# Get datastores and compute resource or cluster compute resource
datastores = []
compute_resource = None
for elem in props:
for prop in elem.propSet:
if prop.name == 'datastore' and prop.val:
# Consider only if datastores are present under host
datastores = prop.val.ManagedObjectReference
elif prop.name == 'parent':
compute_resource = prop.val
LOG.debug("Datastores attached to host %(host)s are: %(ds)s.",
{'host': host, 'ds': datastores})
# Filter datastores based on if it is accessible, mounted and writable
valid_dss = []
for datastore in datastores:
if self._is_valid(datastore, host):
valid_dss.append(datastore)
# Get resource pool from compute resource or cluster compute resource
resource_pool = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
compute_resource,
'resourcePool')
if not valid_dss:
msg = _("There are no valid datastores attached to %s.") % host
LOG.error(msg)
raise exceptions.VimException(msg)
else:
LOG.debug("Valid datastores are: %s", valid_dss)
return (valid_dss, resource_pool)
def _get_parent(self, child, parent_type):
"""Get immediate parent of given type via 'parent' property.
:param child: Child entity reference
:param parent_type: Entity type of the parent
:return: Immediate parent of specific type up the hierarchy via
'parent' property
"""
if not child:
return None
if child._type == parent_type:
return child
parent = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, child, 'parent')
return self._get_parent(parent, parent_type)
def get_dc(self, child):
"""Get parent datacenter up the hierarchy via 'parent' property.
:param child: Reference of the child entity
:return: Parent Datacenter of the param child entity
"""
return self._get_parent(child, 'Datacenter')
def get_vmfolder(self, datacenter):
"""Get the vmFolder.
:param datacenter: Reference to the datacenter entity
:return: vmFolder property of the datacenter
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datacenter,
'vmFolder')
def _get_child_folder(self, parent_folder, child_folder_name):
# Get list of child entities for the parent folder
prop_val = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, parent_folder,
'childEntity')
if prop_val and hasattr(prop_val, 'ManagedObjectReference'):
child_entities = prop_val.ManagedObjectReference
# Return if the child folder with input name is already present
for child_entity in child_entities:
if child_entity._type != 'Folder':
continue
child_entity_name = self.get_entity_name(child_entity)
if (child_entity_name
and (urllib.parse.unquote(child_entity_name)
== child_folder_name)):
LOG.debug("Child folder: %s exists.", child_folder_name)
return child_entity
def create_folder(self, parent_folder, child_folder_name):
"""Creates child folder with given name under the given parent folder.
        The method first checks if a child folder already exists; if it does,
        then it returns a moref for the folder, else it creates one and then
        returns the moref.
:param parent_folder: Reference to the folder entity
:param child_folder_name: Name of the child folder
:return: Reference to the child folder with input name if it already
exists, else create one and return the reference
"""
LOG.debug("Creating folder: %(child_folder_name)s under parent "
"folder: %(parent_folder)s.",
{'child_folder_name': child_folder_name,
'parent_folder': parent_folder})
child_folder = self._get_child_folder(parent_folder, child_folder_name)
if not child_folder:
# Need to create the child folder.
try:
child_folder = self._session.invoke_api(self._session.vim,
'CreateFolder',
parent_folder,
name=child_folder_name)
LOG.debug("Created child folder: %s.", child_folder)
except exceptions.DuplicateName:
# Another thread is trying to create the same folder, ignore
# the exception.
child_folder = self._get_child_folder(parent_folder,
child_folder_name)
return child_folder
def create_vm_inventory_folder(self, datacenter, path_comp):
"""Create and return a VM inventory folder.
This method caches references to inventory folders returned.
:param datacenter: Reference to datacenter
:param path_comp: Path components as a list
"""
LOG.debug("Creating inventory folder: %(path_comp)s under VM folder "
"of datacenter: %(datacenter)s.",
{'path_comp': path_comp,
'datacenter': datacenter})
path = "/" + datacenter.value
parent = self._folder_cache.get(path)
if not parent:
parent = self.get_vmfolder(datacenter)
self._folder_cache[path] = parent
folder = None
for folder_name in path_comp:
path = "/".join([path, folder_name])
folder = self._folder_cache.get(path)
if not folder:
folder = self.create_folder(parent, folder_name)
self._folder_cache[path] = folder
parent = folder
LOG.debug("Inventory folder for path: %(path)s is %(folder)s.",
{'path': path,
'folder': folder})
return folder
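    # Illustrative sketch (hypothetical path components): a call such as
    #   create_vm_inventory_folder(dc_ref, ['OpenStack', 'Project (abc)', 'Volumes'])
    # creates (or reuses) the nested folders under the datacenter's vmFolder and
    # caches each one under a key like "/<dc value>/OpenStack/Project (abc)/Volumes",
    # so repeated calls with the same components avoid extra CreateFolder round trips.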
def extend_virtual_disk(self, requested_size_in_gb, name, dc_ref,
eager_zero=False):
"""Extend the virtual disk to the requested size.
:param requested_size_in_gb: Size of the volume in GB
:param name: Name of the backing
:param dc_ref: Reference to datacenter
:param eager_zero: Boolean determining if the free space
is zeroed out
"""
LOG.debug("Extending the volume %(name)s to %(size)s GB.",
{'name': name, 'size': requested_size_in_gb})
diskMgr = self._session.vim.service_content.virtualDiskManager
# VMWare API needs the capacity unit to be in KB, so convert the
# capacity unit from GB to KB.
size_in_kb = requested_size_in_gb * units.Mi
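        # e.g. a 2 GB volume becomes 2 * 1048576 = 2097152 KB (units.Mi == 1024 * 1024).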
task = self._session.invoke_api(self._session.vim,
"ExtendVirtualDisk_Task",
diskMgr,
name=name,
datacenter=dc_ref,
newCapacityKb=size_in_kb,
eagerZero=eager_zero)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully extended the volume %(name)s to "
"%(size)s GB."),
{'name': name, 'size': requested_size_in_gb})
def _create_controller_config_spec(self, adapter_type):
"""Returns config spec for adding a disk controller."""
cf = self._session.vim.client.factory
controller_type = ControllerType.get_controller_type(adapter_type)
controller_device = cf.create('ns0:%s' % controller_type)
controller_device.key = -100
controller_device.busNumber = 0
if ControllerType.is_scsi_controller(controller_type):
controller_device.sharedBus = 'noSharing'
controller_spec = cf.create('ns0:VirtualDeviceConfigSpec')
controller_spec.operation = 'add'
controller_spec.device = controller_device
return controller_spec
def _create_disk_backing(self, disk_type, vmdk_ds_file_path):
"""Creates file backing for virtual disk."""
cf = self._session.vim.client.factory
disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo')
if disk_type == VirtualDiskType.EAGER_ZEROED_THICK:
disk_device_bkng.eagerlyScrub = True
elif disk_type == VirtualDiskType.THIN:
disk_device_bkng.thinProvisioned = True
disk_device_bkng.fileName = vmdk_ds_file_path or ''
disk_device_bkng.diskMode = 'persistent'
return disk_device_bkng
def _create_virtual_disk_config_spec(self, size_kb, disk_type,
controller_key, vmdk_ds_file_path):
"""Returns config spec for adding a virtual disk."""
cf = self._session.vim.client.factory
disk_device = cf.create('ns0:VirtualDisk')
# disk size should be at least 1024KB
disk_device.capacityInKB = max(units.Ki, int(size_kb))
if controller_key < 0:
disk_device.key = controller_key - 1
else:
disk_device.key = -101
disk_device.unitNumber = 0
disk_device.controllerKey = controller_key
disk_device.backing = self._create_disk_backing(disk_type,
vmdk_ds_file_path)
disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
disk_spec.operation = 'add'
if vmdk_ds_file_path is None:
disk_spec.fileOperation = 'create'
disk_spec.device = disk_device
return disk_spec
def _create_specs_for_disk_add(self, size_kb, disk_type, adapter_type,
vmdk_ds_file_path=None):
"""Create controller and disk config specs for adding a new disk.
:param size_kb: disk size in KB
:param disk_type: disk provisioning type
:param adapter_type: disk adapter type
:param vmdk_ds_file_path: Optional datastore file path of an existing
virtual disk. If specified, file backing is
not created for the virtual disk.
:return: list containing controller and disk config specs
"""
controller_spec = None
if adapter_type == 'ide':
# For IDE disks, use one of the default IDE controllers (with keys
# 200 and 201) created as part of backing VM creation.
controller_key = 200
else:
controller_spec = self._create_controller_config_spec(adapter_type)
controller_key = controller_spec.device.key
disk_spec = self._create_virtual_disk_config_spec(size_kb,
disk_type,
controller_key,
vmdk_ds_file_path)
specs = [disk_spec]
if controller_spec is not None:
specs.append(controller_spec)
return specs
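    # Illustrative example: for adapter_type='lsiLogic' the list holds the disk spec
    # (device key -101) followed by a new SCSI controller spec (device key -100);
    # for adapter_type='ide' only the disk spec is returned and the disk attaches to
    # the default IDE controller with key 200.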
def _get_extra_config_option_values(self, extra_config):
cf = self._session.vim.client.factory
option_values = []
for key, value in six.iteritems(extra_config):
opt = cf.create('ns0:OptionValue')
opt.key = key
opt.value = value
option_values.append(opt)
return option_values
def _get_create_spec_disk_less(self, name, ds_name, profileId=None,
extra_config=None):
"""Return spec for creating disk-less backing.
:param name: Name of the backing
:param ds_name: Datastore name where the disk is to be provisioned
:param profileId: Storage profile ID for the backing
:param extra_config: Key-value pairs to be written to backing's
extra-config
:return: Spec for creation
"""
cf = self._session.vim.client.factory
vm_file_info = cf.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = '[%s]' % ds_name
create_spec = cf.create('ns0:VirtualMachineConfigSpec')
create_spec.name = name
create_spec.guestId = 'otherGuest'
create_spec.numCPUs = 1
create_spec.memoryMB = 128
create_spec.files = vm_file_info
# Set the hardware version to a compatible version supported by
# vSphere 5.0. This will ensure that the backing VM can be migrated
# without any incompatibility issues in a mixed cluster of ESX hosts
# with versions 5.0 or above.
create_spec.version = "vmx-08"
if profileId:
vmProfile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
vmProfile.profileId = profileId
create_spec.vmProfile = [vmProfile]
if extra_config:
create_spec.extraConfig = self._get_extra_config_option_values(
extra_config)
return create_spec
def get_create_spec(self, name, size_kb, disk_type, ds_name,
profileId=None, adapter_type='lsiLogic',
extra_config=None):
"""Return spec for creating backing with a single disk.
:param name: name of the backing
:param size_kb: disk size in KB
:param disk_type: disk provisioning type
:param ds_name: datastore name where the disk is to be provisioned
:param profileId: storage profile ID for the backing
:param adapter_type: disk adapter type
:param extra_config: key-value pairs to be written to backing's
extra-config
:return: spec for creation
"""
create_spec = self._get_create_spec_disk_less(
name, ds_name, profileId=profileId, extra_config=extra_config)
create_spec.deviceChange = self._create_specs_for_disk_add(
size_kb, disk_type, adapter_type)
return create_spec
def _create_backing_int(self, folder, resource_pool, host, create_spec):
"""Helper for create backing methods."""
LOG.debug("Creating volume backing with spec: %s.", create_spec)
task = self._session.invoke_api(self._session.vim, 'CreateVM_Task',
folder, config=create_spec,
pool=resource_pool, host=host)
task_info = self._session.wait_for_task(task)
backing = task_info.result
LOG.info(_LI("Successfully created volume backing: %s."), backing)
return backing
def create_backing(self, name, size_kb, disk_type, folder, resource_pool,
host, ds_name, profileId=None, adapter_type='lsiLogic',
extra_config=None):
"""Create backing for the volume.
Creates a VM with one VMDK based on the given inputs.
:param name: Name of the backing
:param size_kb: Size in KB of the backing
:param disk_type: VMDK type for the disk
:param folder: Folder, where to create the backing under
:param resource_pool: Resource pool reference
:param host: Host reference
:param ds_name: Datastore name where the disk is to be provisioned
:param profileId: Storage profile ID to be associated with backing
:param adapter_type: Disk adapter type
:param extra_config: Key-value pairs to be written to backing's
extra-config
:return: Reference to the created backing entity
"""
LOG.debug("Creating volume backing with name: %(name)s "
"disk_type: %(disk_type)s size_kb: %(size_kb)s "
"adapter_type: %(adapter_type)s profileId: %(profile)s at "
"folder: %(folder)s resource_pool: %(resource_pool)s "
"host: %(host)s datastore_name: %(ds_name)s.",
{'name': name, 'disk_type': disk_type, 'size_kb': size_kb,
'folder': folder, 'resource_pool': resource_pool,
'ds_name': ds_name, 'profile': profileId, 'host': host,
'adapter_type': adapter_type})
create_spec = self.get_create_spec(
name, size_kb, disk_type, ds_name, profileId=profileId,
adapter_type=adapter_type, extra_config=extra_config)
return self._create_backing_int(folder, resource_pool, host,
create_spec)
def create_backing_disk_less(self, name, folder, resource_pool,
host, ds_name, profileId=None,
extra_config=None):
"""Create disk-less volume backing.
This type of backing is useful for creating volume from image. The
downloaded image from the image service can be copied to a virtual
disk of desired provisioning type and added to the backing VM.
:param name: Name of the backing
:param folder: Folder where the backing is created
:param resource_pool: Resource pool reference
:param host: Host reference
:param ds_name: Name of the datastore used for VM storage
:param profileId: Storage profile ID to be associated with backing
:param extra_config: Key-value pairs to be written to backing's
extra-config
:return: Reference to the created backing entity
"""
LOG.debug("Creating disk-less volume backing with name: %(name)s "
"profileId: %(profile)s at folder: %(folder)s "
"resource pool: %(resource_pool)s host: %(host)s "
"datastore_name: %(ds_name)s.",
{'name': name, 'profile': profileId, 'folder': folder,
'resource_pool': resource_pool, 'host': host,
'ds_name': ds_name})
create_spec = self._get_create_spec_disk_less(
name, ds_name, profileId=profileId, extra_config=extra_config)
return self._create_backing_int(folder, resource_pool, host,
create_spec)
def get_datastore(self, backing):
"""Get datastore where the backing resides.
:param backing: Reference to the backing
:return: Datastore reference to which the backing belongs
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'datastore').ManagedObjectReference[0]
def get_summary(self, datastore):
"""Get datastore summary.
:param datastore: Reference to the datastore
:return: 'summary' property of the datastore
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, datastore,
'summary')
def _create_relocate_spec_disk_locator(self, datastore, disk_type,
disk_device):
"""Creates spec for disk type conversion during relocate."""
cf = self._session.vim.client.factory
disk_locator = cf.create("ns0:VirtualMachineRelocateSpecDiskLocator")
disk_locator.datastore = datastore
disk_locator.diskId = disk_device.key
disk_locator.diskBackingInfo = self._create_disk_backing(disk_type,
None)
return disk_locator
def _get_relocate_spec(self, datastore, resource_pool, host,
disk_move_type, disk_type=None, disk_device=None):
"""Return spec for relocating volume backing.
:param datastore: Reference to the datastore
:param resource_pool: Reference to the resource pool
:param host: Reference to the host
:param disk_move_type: Disk move type option
:param disk_type: Destination disk type
:param disk_device: Virtual device corresponding to the disk
:return: Spec for relocation
"""
cf = self._session.vim.client.factory
relocate_spec = cf.create('ns0:VirtualMachineRelocateSpec')
relocate_spec.datastore = datastore
relocate_spec.pool = resource_pool
relocate_spec.host = host
relocate_spec.diskMoveType = disk_move_type
if disk_type is not None and disk_device is not None:
disk_locator = self._create_relocate_spec_disk_locator(datastore,
disk_type,
disk_device)
relocate_spec.disk = [disk_locator]
LOG.debug("Spec for relocating the backing: %s.", relocate_spec)
return relocate_spec
def relocate_backing(
self, backing, datastore, resource_pool, host, disk_type=None):
"""Relocates backing to the input datastore and resource pool.
The implementation uses moveAllDiskBackingsAndAllowSharing disk move
type.
:param backing: Reference to the backing
:param datastore: Reference to the datastore
:param resource_pool: Reference to the resource pool
:param host: Reference to the host
:param disk_type: destination disk type
"""
LOG.debug("Relocating backing: %(backing)s to datastore: %(ds)s "
"and resource pool: %(rp)s with destination disk type: "
"%(disk_type)s.",
{'backing': backing,
'ds': datastore,
'rp': resource_pool,
'disk_type': disk_type})
# Relocate the volume backing
disk_move_type = 'moveAllDiskBackingsAndAllowSharing'
disk_device = None
if disk_type is not None:
disk_device = self._get_disk_device(backing)
relocate_spec = self._get_relocate_spec(datastore, resource_pool, host,
disk_move_type, disk_type,
disk_device)
task = self._session.invoke_api(self._session.vim, 'RelocateVM_Task',
backing, spec=relocate_spec)
LOG.debug("Initiated relocation of volume backing: %s.", backing)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully relocated volume backing: %(backing)s "
"to datastore: %(ds)s and resource pool: %(rp)s."),
{'backing': backing, 'ds': datastore, 'rp': resource_pool})
def move_backing_to_folder(self, backing, folder):
"""Move the volume backing to the folder.
:param backing: Reference to the backing
:param folder: Reference to the folder
"""
LOG.debug("Moving backing: %(backing)s to folder: %(fol)s.",
{'backing': backing, 'fol': folder})
task = self._session.invoke_api(self._session.vim,
'MoveIntoFolder_Task', folder,
list=[backing])
LOG.debug("Initiated move of volume backing: %(backing)s into the "
"folder: %(fol)s.", {'backing': backing, 'fol': folder})
self._session.wait_for_task(task)
LOG.info(_LI("Successfully moved volume "
"backing: %(backing)s into the "
"folder: %(fol)s."), {'backing': backing, 'fol': folder})
def create_snapshot(self, backing, name, description, quiesce=False):
"""Create snapshot of the backing with given name and description.
:param backing: Reference to the backing entity
:param name: Snapshot name
:param description: Snapshot description
:param quiesce: Whether to quiesce the backing when taking snapshot
:return: Created snapshot entity reference
"""
LOG.debug("Snapshoting backing: %(backing)s with name: %(name)s.",
{'backing': backing, 'name': name})
task = self._session.invoke_api(self._session.vim,
'CreateSnapshot_Task',
backing, name=name,
description=description,
memory=False, quiesce=quiesce)
LOG.debug("Initiated snapshot of volume backing: %(backing)s "
"named: %(name)s.", {'backing': backing, 'name': name})
task_info = self._session.wait_for_task(task)
snapshot = task_info.result
LOG.info(_LI("Successfully created snapshot: %(snap)s for volume "
"backing: %(backing)s."),
{'snap': snapshot, 'backing': backing})
return snapshot
@staticmethod
def _get_snapshot_from_tree(name, root):
"""Get snapshot by name from the snapshot tree root.
:param name: Snapshot name
:param root: Current root node in the snapshot tree
        :return: Snapshot in the tree with the given name, or None if not found
"""
if not root:
return None
if root.name == name:
return root.snapshot
if (not hasattr(root, 'childSnapshotList') or
not root.childSnapshotList):
# When root does not have children, the childSnapshotList attr
            # is sometimes missing, so add an explicit check.
return None
for node in root.childSnapshotList:
snapshot = VMwareVolumeOps._get_snapshot_from_tree(name, node)
if snapshot:
return snapshot
def get_snapshot(self, backing, name):
"""Get snapshot of the backing with given name.
:param backing: Reference to the backing entity
:param name: Snapshot name
:return: Snapshot entity of the backing with given name
"""
snapshot = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'snapshot')
if not snapshot or not snapshot.rootSnapshotList:
return None
for root in snapshot.rootSnapshotList:
return VMwareVolumeOps._get_snapshot_from_tree(name, root)
def snapshot_exists(self, backing):
"""Check if the given backing contains snapshots."""
snapshot = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'snapshot')
if snapshot is None or snapshot.rootSnapshotList is None:
return False
return len(snapshot.rootSnapshotList) != 0
def delete_snapshot(self, backing, name):
"""Delete a given snapshot from volume backing.
:param backing: Reference to the backing entity
:param name: Snapshot name
"""
LOG.debug("Deleting the snapshot: %(name)s from backing: "
"%(backing)s.",
{'name': name, 'backing': backing})
snapshot = self.get_snapshot(backing, name)
if not snapshot:
LOG.info(_LI("Did not find the snapshot: %(name)s for backing: "
"%(backing)s. Need not delete anything."),
{'name': name, 'backing': backing})
return
task = self._session.invoke_api(self._session.vim,
'RemoveSnapshot_Task',
snapshot, removeChildren=False)
LOG.debug("Initiated snapshot: %(name)s deletion for backing: "
"%(backing)s.",
{'name': name, 'backing': backing})
self._session.wait_for_task(task)
LOG.info(_LI("Successfully deleted snapshot: %(name)s of backing: "
"%(backing)s."), {'backing': backing, 'name': name})
def _get_folder(self, backing):
"""Get parent folder of the backing.
:param backing: Reference to the backing entity
:return: Reference to parent folder of the backing entity
"""
return self._get_parent(backing, 'Folder')
def _get_clone_spec(self, datastore, disk_move_type, snapshot, backing,
disk_type, host=None, resource_pool=None,
extra_config=None):
"""Get the clone spec.
:param datastore: Reference to datastore
:param disk_move_type: Disk move type
:param snapshot: Reference to snapshot
:param backing: Source backing VM
:param disk_type: Disk type of clone
:param host: Target host
:param resource_pool: Target resource pool
:param extra_config: Key-value pairs to be written to backing's
extra-config
:return: Clone spec
"""
if disk_type is not None:
disk_device = self._get_disk_device(backing)
else:
disk_device = None
relocate_spec = self._get_relocate_spec(datastore, resource_pool, host,
disk_move_type, disk_type,
disk_device)
cf = self._session.vim.client.factory
clone_spec = cf.create('ns0:VirtualMachineCloneSpec')
clone_spec.location = relocate_spec
clone_spec.powerOn = False
clone_spec.template = False
clone_spec.snapshot = snapshot
if extra_config:
config_spec = cf.create('ns0:VirtualMachineConfigSpec')
config_spec.extraConfig = self._get_extra_config_option_values(
extra_config)
clone_spec.config = config_spec
LOG.debug("Spec for cloning the backing: %s.", clone_spec)
return clone_spec
def clone_backing(self, name, backing, snapshot, clone_type, datastore,
disk_type=None, host=None, resource_pool=None,
extra_config=None):
"""Clone backing.
If the clone_type is 'full', then a full clone of the source volume
backing will be created. Else, if it is 'linked', then a linked clone
of the source volume backing will be created.
:param name: Name for the clone
:param backing: Reference to the backing entity
:param snapshot: Snapshot point from which the clone should be done
:param clone_type: Whether a full clone or linked clone is to be made
:param datastore: Reference to the datastore entity
:param disk_type: Disk type of the clone
:param host: Target host
:param resource_pool: Target resource pool
:param extra_config: Key-value pairs to be written to backing's
extra-config
"""
LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, "
"clone type: %(type)s from snapshot: %(snap)s on "
"resource pool: %(resource_pool)s, host: %(host)s, "
"datastore: %(ds)s with disk type: %(disk_type)s.",
{'back': backing, 'name': name, 'type': clone_type,
'snap': snapshot, 'ds': datastore, 'disk_type': disk_type,
'host': host, 'resource_pool': resource_pool})
folder = self._get_folder(backing)
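        # A linked clone backs the new disks by a child (delta) disk of the snapshot,
        # while a full clone copies all disk backings to the target datastore without
        # sharing; the disk move type below selects between the two behaviors.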
if clone_type == LINKED_CLONE_TYPE:
disk_move_type = 'createNewChildDiskBacking'
else:
disk_move_type = 'moveAllDiskBackingsAndDisallowSharing'
clone_spec = self._get_clone_spec(
datastore, disk_move_type, snapshot, backing, disk_type, host=host,
resource_pool=resource_pool, extra_config=extra_config)
task = self._session.invoke_api(self._session.vim, 'CloneVM_Task',
backing, folder=folder, name=name,
spec=clone_spec)
LOG.debug("Initiated clone of backing: %s.", name)
task_info = self._session.wait_for_task(task)
new_backing = task_info.result
LOG.info(_LI("Successfully created clone: %s."), new_backing)
return new_backing
def _reconfigure_backing(self, backing, reconfig_spec):
"""Reconfigure backing VM with the given spec."""
LOG.debug("Reconfiguring backing VM: %(backing)s with spec: %(spec)s.",
{'backing': backing,
'spec': reconfig_spec})
reconfig_task = self._session.invoke_api(self._session.vim,
"ReconfigVM_Task",
backing,
spec=reconfig_spec)
LOG.debug("Task: %s created for reconfiguring backing VM.",
reconfig_task)
self._session.wait_for_task(reconfig_task)
def attach_disk_to_backing(self, backing, size_in_kb, disk_type,
adapter_type, vmdk_ds_file_path):
"""Attach an existing virtual disk to the backing VM.
:param backing: reference to the backing VM
:param size_in_kb: disk size in KB
:param disk_type: virtual disk type
:param adapter_type: disk adapter type
:param vmdk_ds_file_path: datastore file path of the virtual disk to
be attached
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to add new disk: "
"%(path)s with size (KB): %(size)d and adapter type: "
"%(adapter_type)s.",
{'backing': backing,
'path': vmdk_ds_file_path,
'size': size_in_kb,
'adapter_type': adapter_type})
cf = self._session.vim.client.factory
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
specs = self._create_specs_for_disk_add(size_in_kb,
disk_type,
adapter_type,
vmdk_ds_file_path)
reconfig_spec.deviceChange = specs
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %s reconfigured with new disk.", backing)
def rename_backing(self, backing, new_name):
"""Rename backing VM.
:param backing: VM to be renamed
:param new_name: new VM name
"""
LOG.info(_LI("Renaming backing VM: %(backing)s to %(new_name)s."),
{'backing': backing,
'new_name': new_name})
rename_task = self._session.invoke_api(self._session.vim,
"Rename_Task",
backing,
newName=new_name)
LOG.debug("Task: %s created for renaming VM.", rename_task)
self._session.wait_for_task(rename_task)
LOG.info(_LI("Backing VM: %(backing)s renamed to %(new_name)s."),
{'backing': backing,
'new_name': new_name})
def change_backing_profile(self, backing, profile_id):
"""Change storage profile of the backing VM.
The current profile is removed if the new profile is None.
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to change profile to:"
" %(profile)s.",
{'backing': backing,
'profile': profile_id})
cf = self._session.vim.client.factory
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
if profile_id is None:
vm_profile = cf.create('ns0:VirtualMachineEmptyProfileSpec')
vm_profile.dynamicType = 'profile'
else:
vm_profile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
vm_profile.profileId = profile_id.uniqueId
reconfig_spec.vmProfile = [vm_profile]
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %(backing)s reconfigured with new profile: "
"%(profile)s.",
{'backing': backing,
'profile': profile_id})
def update_backing_disk_uuid(self, backing, disk_uuid):
"""Update backing VM's disk UUID.
:param backing: Reference to backing VM
:param disk_uuid: New disk UUID
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to change disk UUID "
"to: %(disk_uuid)s.",
{'backing': backing,
'disk_uuid': disk_uuid})
disk_device = self._get_disk_device(backing)
disk_device.backing.uuid = disk_uuid
cf = self._session.vim.client.factory
disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
disk_spec.device = disk_device
disk_spec.operation = 'edit'
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
reconfig_spec.deviceChange = [disk_spec]
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %(backing)s reconfigured with new disk UUID: "
"%(disk_uuid)s.",
{'backing': backing,
'disk_uuid': disk_uuid})
def delete_file(self, file_path, datacenter=None):
"""Delete file or folder on the datastore.
:param file_path: Datastore path of the file or folder
"""
LOG.debug("Deleting file: %(file)s under datacenter: %(dc)s.",
{'file': file_path, 'dc': datacenter})
fileManager = self._session.vim.service_content.fileManager
task = self._session.invoke_api(self._session.vim,
'DeleteDatastoreFile_Task',
fileManager,
name=file_path,
datacenter=datacenter)
LOG.debug("Initiated deletion via task: %s.", task)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully deleted file: %s."), file_path)
def create_datastore_folder(self, ds_name, folder_path, datacenter):
"""Creates a datastore folder.
This method returns silently if the folder already exists.
:param ds_name: datastore name
:param folder_path: path of folder to create
:param datacenter: datacenter of target datastore
"""
fileManager = self._session.vim.service_content.fileManager
ds_folder_path = "[%s] %s" % (ds_name, folder_path)
LOG.debug("Creating datastore folder: %s.", ds_folder_path)
try:
self._session.invoke_api(self._session.vim,
'MakeDirectory',
fileManager,
name=ds_folder_path,
datacenter=datacenter)
LOG.info(_LI("Created datastore folder: %s."), folder_path)
except exceptions.FileAlreadyExistsException:
LOG.debug("Datastore folder: %s already exists.", folder_path)
def get_path_name(self, backing):
"""Get path name of the backing.
:param backing: Reference to the backing entity
:return: Path name of the backing
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, backing,
'config.files').vmPathName
def get_entity_name(self, entity):
"""Get name of the managed entity.
:param entity: Reference to the entity
:return: Name of the managed entity
"""
return self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim, entity, 'name')
def _get_disk_device(self, backing):
"""Get the virtual device corresponding to disk."""
hardware_devices = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
backing,
'config.hardware.device')
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if device.__class__.__name__ == "VirtualDisk":
return device
LOG.error(_LE("Virtual disk device of "
"backing: %s not found."), backing)
raise vmdk_exceptions.VirtualDiskNotFoundException()
def get_vmdk_path(self, backing):
"""Get the vmdk file name of the backing.
The vmdk file path of the backing returned is of the form:
"[datastore1] my_folder/my_vm.vmdk"
:param backing: Reference to the backing
:return: VMDK file path of the backing
"""
disk_device = self._get_disk_device(backing)
backing = disk_device.backing
if backing.__class__.__name__ != "VirtualDiskFlatVer2BackingInfo":
msg = _("Invalid disk backing: %s.") % backing.__class__.__name__
LOG.error(msg)
raise AssertionError(msg)
return backing.fileName
def get_disk_size(self, backing):
"""Get disk size of the backing.
:param backing: backing VM reference
:return: disk size in bytes
"""
disk_device = self._get_disk_device(backing)
return disk_device.capacityInKB * units.Ki
def _get_virtual_disk_create_spec(self, size_in_kb, adapter_type,
disk_type):
"""Return spec for file-backed virtual disk creation."""
cf = self._session.vim.client.factory
spec = cf.create('ns0:FileBackedVirtualDiskSpec')
spec.capacityKb = size_in_kb
spec.adapterType = VirtualDiskAdapterType.get_adapter_type(
adapter_type)
spec.diskType = VirtualDiskType.get_virtual_disk_type(disk_type)
return spec
def create_virtual_disk(self, dc_ref, vmdk_ds_file_path, size_in_kb,
adapter_type='busLogic', disk_type='preallocated'):
"""Create virtual disk with the given settings.
:param dc_ref: datacenter reference
:param vmdk_ds_file_path: datastore file path of the virtual disk
:param size_in_kb: disk size in KB
:param adapter_type: disk adapter type
:param disk_type: vmdk type
"""
virtual_disk_spec = self._get_virtual_disk_create_spec(size_in_kb,
adapter_type,
disk_type)
LOG.debug("Creating virtual disk with spec: %s.", virtual_disk_spec)
disk_manager = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'CreateVirtualDisk_Task',
disk_manager,
name=vmdk_ds_file_path,
datacenter=dc_ref,
spec=virtual_disk_spec)
LOG.debug("Task: %s created for virtual disk creation.", task)
self._session.wait_for_task(task)
LOG.debug("Created virtual disk with spec: %s.", virtual_disk_spec)
def create_flat_extent_virtual_disk_descriptor(
self, dc_ref, path, size_in_kb, adapter_type, disk_type):
"""Create descriptor for a single flat extent virtual disk.
To create the descriptor, we create a virtual disk and delete its flat
extent.
:param dc_ref: reference to the datacenter
:param path: descriptor datastore file path
:param size_in_kb: size of the virtual disk in KB
:param adapter_type: virtual disk adapter type
:param disk_type: type of the virtual disk
"""
LOG.debug("Creating descriptor: %(path)s with size (KB): %(size)s, "
"adapter_type: %(adapter_type)s and disk_type: "
"%(disk_type)s.",
{'path': path.get_descriptor_ds_file_path(),
'size': size_in_kb,
'adapter_type': adapter_type,
'disk_type': disk_type
})
self.create_virtual_disk(dc_ref, path.get_descriptor_ds_file_path(),
size_in_kb, adapter_type, disk_type)
self.delete_file(path.get_flat_extent_ds_file_path(), dc_ref)
LOG.debug("Created descriptor: %s.",
path.get_descriptor_ds_file_path())
def copy_vmdk_file(self, src_dc_ref, src_vmdk_file_path,
dest_vmdk_file_path, dest_dc_ref=None):
"""Copy contents of the src vmdk file to dest vmdk file.
:param src_dc_ref: Reference to datacenter containing src datastore
:param src_vmdk_file_path: Source vmdk file path
:param dest_vmdk_file_path: Destination vmdk file path
:param dest_dc_ref: Reference to datacenter of dest datastore.
If unspecified, source datacenter is used.
"""
LOG.debug('Copying disk: %(src)s to %(dest)s.',
{'src': src_vmdk_file_path,
'dest': dest_vmdk_file_path})
dest_dc_ref = dest_dc_ref or src_dc_ref
diskMgr = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'CopyVirtualDisk_Task',
diskMgr,
sourceName=src_vmdk_file_path,
sourceDatacenter=src_dc_ref,
destName=dest_vmdk_file_path,
destDatacenter=dest_dc_ref,
force=True)
LOG.debug("Initiated copying disk data via task: %s.", task)
self._session.wait_for_task(task)
LOG.info(_LI("Successfully copied disk at: %(src)s to: %(dest)s."),
{'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path})
def delete_vmdk_file(self, vmdk_file_path, dc_ref):
"""Delete given vmdk files.
:param vmdk_file_path: VMDK file path to be deleted
:param dc_ref: Reference to datacenter that contains this VMDK file
"""
LOG.debug("Deleting vmdk file: %s.", vmdk_file_path)
diskMgr = self._session.vim.service_content.virtualDiskManager
task = self._session.invoke_api(self._session.vim,
'DeleteVirtualDisk_Task',
diskMgr,
name=vmdk_file_path,
datacenter=dc_ref)
LOG.debug("Initiated deleting vmdk file via task: %s.", task)
self._session.wait_for_task(task)
LOG.info(_LI("Deleted vmdk file: %s."), vmdk_file_path)
def get_profile(self, backing):
"""Query storage profile associated with the given backing.
:param backing: backing reference
:return: profile name
"""
profile_ids = pbm.get_profiles(self._session, backing)
if profile_ids:
return pbm.get_profiles_by_ids(self._session, profile_ids)[0].name
def _get_all_clusters(self):
clusters = {}
retrieve_result = self._session.invoke_api(vim_util, 'get_objects',
self._session.vim,
'ClusterComputeResource',
self._max_objects)
while retrieve_result:
if retrieve_result.objects:
for cluster in retrieve_result.objects:
name = urllib.parse.unquote(cluster.propSet[0].val)
clusters[name] = cluster.obj
retrieve_result = self.continue_retrieval(retrieve_result)
return clusters
def get_cluster_refs(self, names):
"""Get references to given clusters.
:param names: list of cluster names
:return: Dictionary of cluster names to references
"""
clusters = self._get_all_clusters()
for name in names:
if name not in clusters:
LOG.error(_LE("Compute cluster: %s not found."), name)
raise vmdk_exceptions.ClusterNotFoundException(cluster=name)
return {name: clusters[name] for name in names}
def get_cluster_hosts(self, cluster):
"""Get hosts in the given cluster.
:param cluster: cluster reference
:return: references to hosts in the cluster
"""
hosts = self._session.invoke_api(vim_util,
'get_object_property',
self._session.vim,
cluster,
'host')
host_refs = []
if hosts and hosts.ManagedObjectReference:
host_refs.extend(hosts.ManagedObjectReference)
return host_refs
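# Illustrative usage sketch (not part of the driver). It shows how the clone
# helpers above are typically combined. Here `vops` is assumed to be an
# initialized volume-ops instance backed by a live vCenter session, and
# `backing`, `snapshot` and `datastore` are assumed to be managed object
# references obtained elsewhere; the clone names, the 'full' literal and the
# 'thin' disk type are placeholders for this example.
def _example_clone_flows(vops, backing, snapshot, datastore):
    # Linked clone: child disk backings are created against the snapshot
    # ('createNewChildDiskBacking'), so the clone is fast and space-efficient.
    linked = vops.clone_backing('vol-clone-linked', backing, snapshot,
                                LINKED_CLONE_TYPE, datastore)
    # Full clone: every disk backing is copied
    # ('moveAllDiskBackingsAndDisallowSharing'), giving an independent copy.
    full = vops.clone_backing('vol-clone-full', backing, snapshot,
                              'full', datastore, disk_type='thin')
    return linked, full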
|
apache-2.0
|
dirn/Simon
|
tests/test_query.py
|
1
|
14495
|
try:
import unittest2 as unittest
except ImportError:
import unittest
import collections
try:
from unittest import mock
except ImportError:
import mock
from pymongo.cursor import Cursor
from simon import connection, query
from simon._compat import range
from .utils import AN_OBJECT_ID, ModelFactory
DefaultModel = ModelFactory('DefaultModel')
MappedModel = ModelFactory('MappedModel', field_map={'fake': 'real'})
class TestQ(unittest.TestCase):
"""Test the `Q` class."""
def test___init__(self):
"""Test the `__init__()` method."""
q = query.Q(a=1)
self.assertEqual(q._filter, {'a': 1})
q = query.Q(a=1, b=2)
self.assertEqual(q._filter, {'a': 1, 'b': 2})
def test___and__(self):
"""Test the `__and__()` method."""
q1 = query.Q(a=1)
q1._add_filter = mock.Mock()
q2 = query.Q(b=2)
q1.__and__(q2)
q1._add_filter.assert_called_with(q2, '$and')
def test___or__(self):
"""Test the `__or__()` method."""
q1 = query.Q(a=1)
q1._add_filter = mock.Mock()
q2 = query.Q(b=2)
q1.__or__(q2)
q1._add_filter.assert_called_with(q2, '$or')
def test__add_filter(self):
"""Test the `_add_filter()` method."""
q1 = query.Q(a=1)
q2 = query.Q(a=1)
expected = {'a': 1}
actual = q1._add_filter(q2, query.Q.AND)._filter
self.assertEqual(actual, expected)
q1 = query.Q(a=1)
q2 = query.Q(b=2)
expected = {'$and': [{'a': 1}, {'b': 2}]}
actual = q1._add_filter(q2, query.Q.AND)._filter
self.assertEqual(actual, expected)
expected = {'$or': [{'a': 1}, {'b': 2}]}
actual = q1._add_filter(q2, query.Q.OR)._filter
self.assertEqual(actual, expected)
def test__add_filter_combine_conditions(self):
"""Test the `_add_filter()` method with different conditions."""
q1 = query.Q(a=1)
q2 = query.Q(b=2)
q3 = query.Q(c=3)
expected = {'$or': [{'$and': [{'a': 1}, {'b': 2}]}, {'c': 3}]}
tmp = q1._add_filter(q2, query.Q.AND)
actual = tmp._add_filter(q3, query.Q.OR)._filter
self.assertEqual(actual, expected)
expected = {'$and': [{'$or': [{'a': 1}, {'b': 2}]}, {'c': 3}]}
tmp = q1._add_filter(q2, query.Q.OR)
actual = tmp._add_filter(q3, query.Q.AND)._filter
self.assertEqual(actual, expected)
def test__add_filter_filter_doesnt_exist(self):
"""Test the `_add_filter()` method with a new filter."""
q1 = query.Q(a=1)
q2 = query.Q(b=2)
q3 = query.Q(c=3)
expected = {'$and': [{'a': 1}, {'b': 2}, {'c': 3}]}
tmp = q1._add_filter(q2, query.Q.AND)
actual = tmp._add_filter(q3, query.Q.AND)._filter
self.assertEqual(actual, expected)
expected = {'$or': [{'a': 1}, {'b': 2}, {'c': 3}]}
tmp = q1._add_filter(q2, query.Q.OR)
actual = tmp._add_filter(q3, query.Q.OR)._filter
self.assertEqual(actual, expected)
def test__add_filter_filter_exists(self):
"""Test the `_add_filter()` method with a filter that exists."""
q1 = query.Q(a=1)
q2 = query.Q(b=2)
expected = {'$and': [{'a': 1}, {'b': 2}]}
tmp = q1._add_filter(q2, query.Q.AND)
actual = tmp._add_filter(q2, query.Q.AND)._filter
self.assertEqual(actual, expected)
expected = {'$or': [{'a': 1}, {'b': 2}]}
tmp = q1._add_filter(q2, query.Q.OR)
actual = tmp._add_filter(q2, query.Q.OR)._filter
self.assertEqual(actual, expected)
def test__add_filter_typeerror(self):
"""Test that `_add_filter()` raises `TypeError`."""
q = query.Q(a=1)
with self.assertRaises(TypeError):
q._add_filter(1, query.Q.AND)
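# Illustrative sketch of the behaviour exercised by TestQ above: `Q` objects
# compose with `&` and `|` into MongoDB-style $and/$or filters. This mirrors
# the expected values asserted in the tests and is not itself a test case.
def _example_q_composition():
    combined = (query.Q(a=1) & query.Q(b=2)) | query.Q(c=3)
    # combined._filter == {'$or': [{'$and': [{'a': 1}, {'b': 2}]}, {'c': 3}]}
    return combined._filter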
class TestQuerySet(unittest.TestCase):
"""Test :class:`~simon.query.QuerySet` functionality"""
@classmethod
def setUpClass(cls):
with mock.patch('simon.connection.MongoClient'):
cls.connection = connection.connect('localhost', name='test-simon')
    def setUp(self):
        self.cursor = mock.MagicMock(spec=Cursor)
        self.qs = query.QuerySet(cursor=self.cursor)
        self.model_qs = query.QuerySet(cursor=self.cursor, cls=DefaultModel)
def test_count(self):
"""Test the `count()` method."""
self.qs.count()
self.cursor.count.assert_called_with(with_limit_and_skip=True)
# cursor.count() should get cached as qs._count, so it should
# only be called once by qs.count()
self.qs.count()
self.cursor.count.assert_not_called()
def test_count_typeerror(self):
"""Test that `count()` raises `TypeError`."""
qs = query.QuerySet()
with self.assertRaises(TypeError):
qs.count()
def test_distinct(self):
"""Test the `distinct()` method."""
self.qs.distinct('a')
self.cursor.distinct.assert_called_with('a')
def test_distinct_field_map(self):
"""Test the `distinct()` method with a name in `field_map`."""
self.model_qs._cls = MappedModel
self.model_qs.distinct('fake')
self.cursor.distinct.assert_called_with('real')
def test_distinct_nested_field(self):
"""Test the `distinct()` method with a nested field."""
self.model_qs.distinct('a__b')
self.cursor.distinct.assert_called_with('a.b')
def test_limit(self):
"""Test the `limit()` method."""
self.qs.limit(1)
self.cursor.clone.assert_called_with()
self.cursor.clone().limit.assert_called_with(1)
self.qs.limit(2)
self.cursor.clone.assert_called_with()
self.cursor.clone().limit.assert_called_with(2)
def test_skip(self):
"""Test the `skip()` method."""
self.qs.skip(1)
self.cursor.clone.assert_called_with()
self.cursor.clone().skip.assert_called_with(1)
self.qs.skip(2)
self.cursor.clone.assert_called_with()
self.cursor.clone().skip.assert_called_with(2)
def test_sort(self):
"""Test the `sort()` method."""
qs = self.qs.sort('_id')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('_id', 1)])
qs._cursor.sort.assert_not_called()
qs = self.qs.sort('-_id')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('_id', -1)])
qs._cursor.sort.assert_not_called()
def test_sort_field_map(self):
"""Test the `sort()` method with a name in `field_map`."""
self.model_qs._cls = MappedModel
qs = self.model_qs.sort('fake')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('real', 1)])
qs._cursor.sort.assert_not_called()
def test_sort_multiple_ascending(self):
"""Test the `sort()` method for multiple ascending keys."""
qs = self.qs.sort('a', 'b')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('a', 1), ('b', 1)])
qs._cursor.sort.assert_not_called()
def test_sort_multiple_descending(self):
"""Test the `sort()` method for multiple descending keys."""
qs = self.qs.sort('-a', '-b')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('a', -1), ('b', -1)])
qs._cursor.sort.assert_not_called()
def test_sort_multiple_ascending_then_descending(self):
"""Test the `sort()` method for multiple keys ascending first."""
qs = self.qs.sort('a', '-b')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('a', 1), ('b', -1)])
qs._cursor.sort.assert_not_called()
def test_sort_multiple_descending_then_ascending(self):
"""Test the `sort()` method for multiple keys descending first."""
qs = self.qs.sort('-a', 'b')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('a', -1), ('b', 1)])
qs._cursor.sort.assert_not_called()
def test_sort_nested_field(self):
"""Test the `sort()` method with a nested field."""
qs = self.model_qs.sort('a__b')
self.cursor.clone.assert_called_with()
self.assertEqual(qs._sorting, [('a.b', 1)])
qs._cursor.sort.assert_not_called()
def test__fill_to(self):
"""Test the `_fill_to()` method."""
self.cursor.count.return_value = 3
self.qs._fill_to(2)
self.assertEqual(len(self.qs._items), 3)
def test__fill_to_as_documents(self):
"""Test that `_fill_to()` stores documents."""
if hasattr(self.cursor, 'next'):
self.cursor.next.return_value = {'_id': AN_OBJECT_ID}
else:
self.cursor.__next__.return_value = {'_id': AN_OBJECT_ID}
self.cursor.count.return_value = 1
self.qs._fill_to(0)
self.assertIsInstance(self.qs._items[0], dict)
def test__fill_to_as_model(self):
"""Test that `_fill_to()` stores model instances."""
if hasattr(self.cursor, 'next'):
self.cursor.next.return_value = {'_id': AN_OBJECT_ID}
else:
self.cursor.__next__.return_value = {'_id': AN_OBJECT_ID}
self.cursor.count.return_value = 1
self.model_qs._fill_to(0)
self.assertIsInstance(self.model_qs._items[0], self.model_qs._cls)
def test__fill_to_indexes(self):
("Test that `_fill_to()` property fills to the specified "
"index.")
self.cursor.count.return_value = 3
for x in range(3):
self.qs._fill_to(x)
self.assertEqual(len(self.qs._items), x + 1)
def test__fill_to_overfill(self):
("Test that `_fill_to()` correctly handles indexes greater than"
" the maximum index of the result cache.")
self.cursor.count.return_value = 3
self.qs._fill_to(3)
self.assertEqual(len(self.qs._items), 3)
def test__fill_to_sort(self):
"""Test that `_fill_to()` correctly handles sorting."""
self.cursor.count.return_value = 3
self.qs._sorting = [('a', 1)]
self.qs._fill_to(0)
self.cursor.sort.assert_called_with([('a', 1)])
self.assertIsNone(self.qs._sorting)
def test__fill_to_twice(self):
"""Test that `_fill_to()` can be called multiple times."""
self.cursor.count.return_value = 3
self.qs._fill_to(0)
self.assertEqual(len(self.qs._items), 1)
self.qs._fill_to(0)
self.assertEqual(len(self.qs._items), 1)
self.qs._fill_to(3)
self.assertEqual(len(self.qs._items), 3)
self.qs._fill_to(3)
self.assertEqual(len(self.qs._items), 3)
def test___getitem__(self):
"""Test the `__getitem__()` method."""
self.cursor.count.return_value = 3
# qs._fill_to() would normally populate qs._items
self.qs._items = range(3)
with mock.patch.object(self.qs, '_fill_to') as _fill_to:
for x in range(3):
self.assertEqual(self.qs[x], self.qs._items[x])
_fill_to.assert_called_with(x)
def test___getitem___slice(self):
"""Test the `__getitem__()` method with slices."""
self.cursor.count.return_value = 3
# qs._fill_to() would normally populate qs._items
self.qs._items = [0, 1, 2]
with mock.patch.object(self.qs, '_fill_to') as _fill_to:
self.assertEqual(self.qs[1:], self.qs._items[1:])
_fill_to.assert_called_with(2)
self.assertEqual(self.qs[:1], self.qs._items[:1])
_fill_to.assert_called_with(0)
self.assertEqual(self.qs[1:2], self.qs._items[1:2])
_fill_to.assert_called_with(1)
self.assertEqual(self.qs[::2], self.qs._items[::2])
_fill_to.assert_called_with(2)
self.assertEqual(self.qs[1::2], self.qs._items[1::2])
_fill_to.assert_called_with(2)
self.assertEqual(self.qs[::], self.qs._items[::])
_fill_to.assert_called_with(2)
def test___getitem___indexerror(self):
"""Test that `__getitem__()` raises `IndexError`."""
self.cursor.count.return_value = 3
with self.assertRaises(IndexError) as e:
self.model_qs[3]
expected = "No such item in 'QuerySet' for 'DefaultModel' object"
actual = str(e.exception)
self.assertEqual(actual, expected)
with self.assertRaises(IndexError) as e:
self.qs[3]
expected = "No such item in 'QuerySet'"
actual = str(e.exception)
self.assertEqual(actual, expected)
def test___getitem___typeerror(self):
"""Test that `__getitem__()` raises `TypeError`."""
with self.assertRaises(TypeError):
self.qs[-1]
def test___iter__(self):
"""Test the `__iter__()` method."""
self.assertIsInstance(self.qs.__iter__(), collections.Iterable)
def test___iter___fills_cache(self):
"""Test that `__iter__()` fills the result cache."""
self.cursor.count.return_value = 3
def append_to_cache(v):
self.qs._items.append(v)
with mock.patch.object(self.qs, '_fill_to') as _fill_to:
_fill_to.side_effect = append_to_cache
i = 0
for x in self.qs:
_fill_to.assert_called_with(i)
i += 1
self.assertEqual(len(self.qs._items), 3)
    def test___iter___fills_cache_partial(self):
"""Test that `__iter__()` fills the rest of the result cache."""
self.cursor.count.return_value = 3
self.qs._items = [0]
def append_to_cache(v):
self.qs._items.append(v)
with mock.patch.object(self.qs, '_fill_to') as _fill_to:
_fill_to.side_effect = append_to_cache
i = 0
for x in self.qs:
if i == 0:
# qs._fill_to(0) will already have been called
_fill_to.assert_not_called()
else:
_fill_to.assert_called_with(i)
i += 1
self.assertEqual(len(self.qs._items), 3)
def test___len__(self):
"""Test the `__len__()` method."""
self.cursor.count.return_value = 3
self.assertEqual(len(self.qs), self.cursor.count())
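# Minimal, self-contained sketch of the result-cache pattern the QuerySet
# tests above exercise: items are pulled from the underlying cursor lazily,
# only up to the requested index, and kept in an in-memory list so repeated
# access never re-reads the cursor. This is an illustration of the idea, not
# the simon implementation.
class _LazyResultCache(object):
    def __init__(self, iterator):
        self._iterator = iterator
        self._items = []

    def _fill_to(self, index):
        # Pull items until the cache covers `index`; indexes past the end of
        # the iterator are handled gracefully, as in test__fill_to_overfill.
        try:
            while len(self._items) <= index:
                self._items.append(next(self._iterator))
        except StopIteration:
            pass

    def __getitem__(self, index):
        self._fill_to(index)
        return self._items[index]

# _LazyResultCache(iter(range(10)))[3] reads items 0..3 from the iterator;
# a later [1] is then served from the cache without touching the iterator.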
|
bsd-3-clause
|
sharad/calibre
|
src/calibre/utils/config_base.py
|
1
|
19477
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import os, re, cPickle, traceback
from functools import partial
from collections import defaultdict
from copy import deepcopy
from calibre.utils.lock import LockError, ExclusiveFile
from calibre.constants import config_dir, CONFIG_DIR_MODE
plugin_dir = os.path.join(config_dir, 'plugins')
def make_config_dir():
if not os.path.exists(plugin_dir):
os.makedirs(plugin_dir, mode=CONFIG_DIR_MODE)
class Option(object):
def __init__(self, name, switches=[], help='', type=None, choices=None,
check=None, group=None, default=None, action=None, metavar=None):
if choices:
type = 'choice'
self.name = name
self.switches = switches
self.help = help.replace('%default', repr(default)) if help else None
self.type = type
if self.type is None and action is None and choices is None:
if isinstance(default, float):
self.type = 'float'
elif isinstance(default, int) and not isinstance(default, bool):
self.type = 'int'
self.choices = choices
self.check = check
self.group = group
self.default = default
self.action = action
self.metavar = metavar
def __eq__(self, other):
return self.name == getattr(other, 'name', other)
def __repr__(self):
return 'Option: '+self.name
def __str__(self):
return repr(self)
class OptionValues(object):
def copy(self):
return deepcopy(self)
class OptionSet(object):
OVERRIDE_PAT = re.compile(r'#{3,100} Override Options #{15}(.*?)#{3,100} End Override #{3,100}',
re.DOTALL|re.IGNORECASE)
def __init__(self, description=''):
self.description = description
self.defaults = {}
self.preferences = []
self.group_list = []
self.groups = {}
self.set_buffer = {}
def has_option(self, name_or_option_object):
if name_or_option_object in self.preferences:
return True
for p in self.preferences:
if p.name == name_or_option_object:
return True
return False
def get_option(self, name_or_option_object):
idx = self.preferences.index(name_or_option_object)
if idx > -1:
return self.preferences[idx]
for p in self.preferences:
if p.name == name_or_option_object:
return p
def add_group(self, name, description=''):
if name in self.group_list:
raise ValueError('A group by the name %s already exists in this set'%name)
self.groups[name] = description
self.group_list.append(name)
return partial(self.add_opt, group=name)
def update(self, other):
for name in other.groups.keys():
self.groups[name] = other.groups[name]
if name not in self.group_list:
self.group_list.append(name)
for pref in other.preferences:
if pref in self.preferences:
self.preferences.remove(pref)
self.preferences.append(pref)
def smart_update(self, opts1, opts2):
'''
Updates the preference values in opts1 using only the non-default preference values in opts2.
'''
for pref in self.preferences:
new = getattr(opts2, pref.name, pref.default)
if new != pref.default:
setattr(opts1, pref.name, new)
def remove_opt(self, name):
if name in self.preferences:
self.preferences.remove(name)
def add_opt(self, name, switches=[], help=None, type=None, choices=None,
group=None, default=None, action=None, metavar=None):
'''
Add an option to this section.
:param name: The name of this option. Must be a valid Python identifier.
Must also be unique in this OptionSet and all its subsets.
:param switches: List of command line switches for this option
(as supplied to :module:`optparse`). If empty, this
option will not be added to the command line parser.
:param help: Help text.
:param type: Type checking of option values. Supported types are:
`None, 'choice', 'complex', 'float', 'int', 'string'`.
:param choices: List of strings or `None`.
:param group: Group this option belongs to. You must previously
have created this group with a call to :method:`add_group`.
:param default: The default value for this option.
:param action: The action to pass to optparse. Supported values are:
`None, 'count'`. For choices and boolean options,
action is automatically set correctly.
'''
pref = Option(name, switches=switches, help=help, type=type, choices=choices,
group=group, default=default, action=action, metavar=None)
if group is not None and group not in self.groups.keys():
raise ValueError('Group %s has not been added to this section'%group)
if pref in self.preferences:
raise ValueError('An option with the name %s already exists in this set.'%name)
self.preferences.append(pref)
self.defaults[name] = default
def retranslate_help(self):
t = _
for opt in self.preferences:
if opt.help:
opt.help = t(opt.help)
def option_parser(self, user_defaults=None, usage='', gui_mode=False):
from calibre.utils.config import OptionParser
parser = OptionParser(usage, gui_mode=gui_mode)
groups = defaultdict(lambda : parser)
for group, desc in self.groups.items():
groups[group] = parser.add_option_group(group.upper(), desc)
for pref in self.preferences:
if not pref.switches:
continue
g = groups[pref.group]
action = pref.action
if action is None:
action = 'store'
if pref.default is True or pref.default is False:
action = 'store_' + ('false' if pref.default else 'true')
args = dict(
dest=pref.name,
help=pref.help,
metavar=pref.metavar,
type=pref.type,
choices=pref.choices,
default=getattr(user_defaults, pref.name, pref.default),
action=action,
)
g.add_option(*pref.switches, **args)
return parser
def get_override_section(self, src):
match = self.OVERRIDE_PAT.search(src)
if match:
return match.group()
return ''
def parse_string(self, src):
options = {'cPickle':cPickle}
if src is not None:
try:
if not isinstance(src, unicode):
src = src.decode('utf-8')
src = src.replace(u'PyQt%d.QtCore' % 4, u'PyQt5.QtCore')
exec src in options
except:
print 'Failed to parse options string:'
print repr(src)
traceback.print_exc()
opts = OptionValues()
for pref in self.preferences:
val = options.get(pref.name, pref.default)
formatter = __builtins__.get(pref.type, None)
if callable(formatter):
val = formatter(val)
setattr(opts, pref.name, val)
return opts
def render_group(self, name, desc, opts):
prefs = [pref for pref in self.preferences if pref.group == name]
lines = ['### Begin group: %s'%(name if name else 'DEFAULT')]
if desc:
lines += map(lambda x: '# '+x, desc.split('\n'))
lines.append(' ')
for pref in prefs:
lines.append('# '+pref.name.replace('_', ' '))
if pref.help:
lines += map(lambda x: '# ' + x, pref.help.split('\n'))
lines.append('%s = %s'%(pref.name,
self.serialize_opt(getattr(opts, pref.name, pref.default))))
lines.append(' ')
return '\n'.join(lines)
def serialize_opt(self, val):
        if val is True or val is False or val is None or \
isinstance(val, (int, float, long, basestring)):
return repr(val)
pickle = cPickle.dumps(val, -1)
return 'cPickle.loads(%s)'%repr(pickle)
def serialize(self, opts):
src = '# %s\n\n'%(self.description.replace('\n', '\n# '))
groups = [self.render_group(name, self.groups.get(name, ''), opts)
for name in [None] + self.group_list]
return src + '\n\n'.join(groups)
class ConfigInterface(object):
def __init__(self, description):
self.option_set = OptionSet(description=description)
self.add_opt = self.option_set.add_opt
self.add_group = self.option_set.add_group
self.remove_opt = self.remove = self.option_set.remove_opt
self.parse_string = self.option_set.parse_string
self.get_option = self.option_set.get_option
self.preferences = self.option_set.preferences
def update(self, other):
self.option_set.update(other.option_set)
def option_parser(self, usage='', gui_mode=False):
return self.option_set.option_parser(user_defaults=self.parse(),
usage=usage, gui_mode=gui_mode)
def smart_update(self, opts1, opts2):
self.option_set.smart_update(opts1, opts2)
class Config(ConfigInterface):
'''
A file based configuration.
'''
def __init__(self, basename, description=''):
ConfigInterface.__init__(self, description)
self.config_file_path = os.path.join(config_dir, basename+'.py')
def parse(self):
src = ''
if os.path.exists(self.config_file_path):
try:
with ExclusiveFile(self.config_file_path) as f:
try:
src = f.read().decode('utf-8')
except ValueError:
print "Failed to parse", self.config_file_path
traceback.print_exc()
except LockError:
raise IOError('Could not lock config file: %s'%self.config_file_path)
return self.option_set.parse_string(src)
def as_string(self):
if not os.path.exists(self.config_file_path):
return ''
try:
with ExclusiveFile(self.config_file_path) as f:
return f.read().decode('utf-8')
except LockError:
raise IOError('Could not lock config file: %s'%self.config_file_path)
def set(self, name, val):
if not self.option_set.has_option(name):
raise ValueError('The option %s is not defined.'%name)
try:
if not os.path.exists(config_dir):
make_config_dir()
with ExclusiveFile(self.config_file_path) as f:
src = f.read()
opts = self.option_set.parse_string(src)
setattr(opts, name, val)
footer = self.option_set.get_override_section(src)
src = self.option_set.serialize(opts)+ '\n\n' + footer + '\n'
f.seek(0)
f.truncate()
if isinstance(src, unicode):
src = src.encode('utf-8')
f.write(src)
except LockError:
raise IOError('Could not lock config file: %s'%self.config_file_path)
class StringConfig(ConfigInterface):
'''
A string based configuration
'''
def __init__(self, src, description=''):
ConfigInterface.__init__(self, description)
self.src = src
def parse(self):
return self.option_set.parse_string(self.src)
def set(self, name, val):
if not self.option_set.has_option(name):
raise ValueError('The option %s is not defined.'%name)
opts = self.option_set.parse_string(self.src)
setattr(opts, name, val)
footer = self.option_set.get_override_section(self.src)
self.src = self.option_set.serialize(opts)+ '\n\n' + footer + '\n'
class ConfigProxy(object):
'''
A Proxy to minimize file reads for widely used config settings
'''
def __init__(self, config):
self.__config = config
self.__opts = None
@property
def defaults(self):
return self.__config.option_set.defaults
def refresh(self):
self.__opts = self.__config.parse()
def retranslate_help(self):
self.__config.option_set.retranslate_help()
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, val):
return self.set(key, val)
def __delitem__(self, key):
self.set(key, self.defaults[key])
def get(self, key):
if self.__opts is None:
self.refresh()
return getattr(self.__opts, key)
def set(self, key, val):
if self.__opts is None:
self.refresh()
setattr(self.__opts, key, val)
return self.__config.set(key, val)
def help(self, key):
return self.__config.get_option(key).help
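# Illustrative sketch of the Config/ConfigProxy pattern used for `prefs`
# below: declare a file-backed option set once, then read and write values
# through a proxy so the config file is parsed lazily and at most once per
# refresh. The option name and default here are made up for the example.
def _example_config_usage():
    c = Config('example', 'example preferences')
    c.add_opt('greeting', default='hello',
              help='An example option, not a real calibre preference')
    proxy = ConfigProxy(c)
    value = proxy['greeting']   # parses <config_dir>/example.py on first use
    proxy['greeting'] = 'hi'    # persists the new value back to the file
    return value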
def _prefs():
c = Config('global', 'calibre wide preferences')
c.add_opt('database_path',
default=os.path.expanduser('~/library1.db'),
help=_('Path to the database in which books are stored'))
c.add_opt('filename_pattern', default=ur'(?P<title>.+) - (?P<author>[^_]+)',
help=_('Pattern to guess metadata from filenames'))
c.add_opt('isbndb_com_key', default='',
help=_('Access key for isbndb.com'))
c.add_opt('network_timeout', default=5,
help=_('Default timeout for network operations (seconds)'))
c.add_opt('library_path', default=None,
help=_('Path to directory in which your library of books is stored'))
c.add_opt('language', default=None,
help=_('The language in which to display the user interface'))
c.add_opt('output_format', default='EPUB',
help=_('The default output format for ebook conversions.'))
c.add_opt('input_format_order', default=['EPUB', 'AZW3', 'MOBI', 'LIT', 'PRC',
'FB2', 'HTML', 'HTM', 'XHTM', 'SHTML', 'XHTML', 'ZIP', 'ODT', 'RTF', 'PDF',
'TXT'],
help=_('Ordered list of formats to prefer for input.'))
c.add_opt('read_file_metadata', default=True,
help=_('Read metadata from files'))
c.add_opt('worker_process_priority', default='normal',
help=_('The priority of worker processes. A higher priority '
'means they run faster and consume more resources. '
'Most tasks like conversion/news download/adding books/etc. '
'are affected by this setting.'))
c.add_opt('swap_author_names', default=False,
help=_('Swap author first and last names when reading metadata'))
c.add_opt('add_formats_to_existing', default=False,
help=_('Add new formats to existing book records'))
c.add_opt('check_for_dupes_on_ctl', default=False,
help=_('Check for duplicates when copying to another library'))
c.add_opt('installation_uuid', default=None, help='Installation UUID')
c.add_opt('new_book_tags', default=[], help=_('Tags to apply to books added to the library'))
c.add_opt('mark_new_books', default=False, help=_(
'Mark newly added books. The mark is a temporary mark that is automatically removed when calibre is restarted.'))
# these are here instead of the gui preferences because calibredb and
# calibre server can execute searches
c.add_opt('saved_searches', default={}, help=_('List of named saved searches'))
c.add_opt('user_categories', default={}, help=_('User-created tag browser categories'))
c.add_opt('manage_device_metadata', default='manual',
help=_('How and when calibre updates metadata on the device.'))
c.add_opt('limit_search_columns', default=False,
help=_('When searching for text without using lookup '
'prefixes, as for example, Red instead of title:Red, '
'limit the columns searched to those named below.'))
c.add_opt('limit_search_columns_to',
default=['title', 'authors', 'tags', 'series', 'publisher'],
help=_('Choose columns to be searched when not using prefixes, '
'as for example, when searching for Red instead of '
'title:Red. Enter a list of search/lookup names '
'separated by commas. Only takes effect if you set the option '
'to limit search columns above.'))
c.add_opt('use_primary_find_in_search', default=True,
help=_(u'Characters typed in the search box will match their '
'accented versions, based on the language you have chosen '
'for the calibre interface. For example, in '
u' English, searching for n will match %s and n, but if '
'your language is Spanish it will only match n. Note that '
'this is much slower than a simple search on very large '
'libraries.')%u'\xf1')
c.add_opt('migrated', default=False, help='For Internal use. Don\'t modify.')
return c
prefs = ConfigProxy(_prefs())
if prefs['installation_uuid'] is None:
import uuid
prefs['installation_uuid'] = str(uuid.uuid4())
# Read tweaks
def read_raw_tweaks():
make_config_dir()
default_tweaks = P('default_tweaks.py', data=True,
allow_user_override=False)
tweaks_file = os.path.join(config_dir, 'tweaks.py')
if not os.path.exists(tweaks_file):
with open(tweaks_file, 'wb') as f:
f.write(default_tweaks)
with open(tweaks_file, 'rb') as f:
return default_tweaks, f.read()
def read_tweaks():
default_tweaks, tweaks = read_raw_tweaks()
l, g = {}, {}
try:
exec tweaks in g, l
except:
import traceback
print 'Failed to load custom tweaks file'
traceback.print_exc()
dl, dg = {}, {}
exec default_tweaks in dg, dl
dl.update(l)
return dl
def write_tweaks(raw):
make_config_dir()
tweaks_file = os.path.join(config_dir, 'tweaks.py')
with open(tweaks_file, 'wb') as f:
f.write(raw)
tweaks = read_tweaks()
def reset_tweaks_to_default():
default_tweaks = P('default_tweaks.py', data=True,
allow_user_override=False)
dl, dg = {}, {}
exec default_tweaks in dg, dl
tweaks.clear()
tweaks.update(dl)
class Tweak(object):
def __init__(self, name, value):
self.name, self.value = name, value
def __enter__(self):
self.origval = tweaks[self.name]
tweaks[self.name] = self.value
def __exit__(self, *args):
tweaks[self.name] = self.origval
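# Illustrative sketch: `Tweak` is a context manager that temporarily
# overrides a single entry of the module-level `tweaks` dict and restores
# the previous value on exit. The name passed in must already exist in
# `tweaks`, since __enter__ saves its current value.
def _example_tweak_override(name, temporary_value):
    with Tweak(name, temporary_value):
        value_inside = tweaks[name]   # sees temporary_value here
    # outside the block the original value has been restored
    return value_inside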
|
gpl-3.0
|
Lawrence-Liu/scikit-learn
|
examples/cluster/plot_segmentation_toy.py
|
258
|
3336
|
"""
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi
# partition.
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
|
bsd-3-clause
|
vlachoudis/sl4a
|
python/src/Lib/bsddb/db.py
|
194
|
2730
|
#----------------------------------------------------------------------
# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA
# and Andrew Kuchling. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# o Redistributions of source code must retain the above copyright
# notice, this list of conditions, and the disclaimer that follows.
#
# o Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions, and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# o Neither the name of Digital Creations nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS
# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL
# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#----------------------------------------------------------------------
# This module is just a placeholder for possible future expansion, in
# case we ever want to augment the stuff in _db in any way. For now
# it simply imports everything from _db.
import sys
absolute_import = (sys.version_info[0] >= 3)
if not absolute_import :
if __name__.startswith('bsddb3.') :
# import _pybsddb binary as it should be the more recent version from
# a standalone pybsddb addon package than the version included with
# python as bsddb._bsddb.
from _pybsddb import *
from _pybsddb import __version__
else:
from _bsddb import *
from _bsddb import __version__
else :
    # Because this syntax is not valid before Python 2.5
if __name__.startswith('bsddb3.') :
exec("from ._pybsddb import *")
exec("from ._pybsddb import __version__")
else :
exec("from ._bsddb import *")
exec("from ._bsddb import __version__")
|
apache-2.0
|
agentxan/nzbToMedia
|
libs/beets/plugins.py
|
4
|
15698
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Support for beets plugins."""
from __future__ import division, absolute_import, print_function
import inspect
import traceback
import re
from collections import defaultdict
from functools import wraps
import beets
from beets import logging
from beets import mediafile
PLUGIN_NAMESPACE = 'beetsplug'
# Plugins using the Last.fm API can share the same API key.
LASTFM_KEY = '2dc3914abf35f0d9c92d97d8f8e42b43'
# Global logger.
log = logging.getLogger('beets')
class PluginConflictException(Exception):
"""Indicates that the services provided by one plugin conflict with
those of another.
For example two plugins may define different types for flexible fields.
"""
class PluginLogFilter(logging.Filter):
"""A logging filter that identifies the plugin that emitted a log
message.
"""
def __init__(self, plugin):
self.prefix = u'{0}: '.format(plugin.name)
def filter(self, record):
if hasattr(record.msg, 'msg') and isinstance(record.msg.msg,
basestring):
# A _LogMessage from our hacked-up Logging replacement.
record.msg.msg = self.prefix + record.msg.msg
elif isinstance(record.msg, basestring):
record.msg = self.prefix + record.msg
return True
# Managing the plugins themselves.
class BeetsPlugin(object):
"""The base class for all beets plugins. Plugins provide
functionality by defining a subclass of BeetsPlugin and overriding
the abstract methods defined here.
"""
def __init__(self, name=None):
"""Perform one-time plugin setup.
"""
self.name = name or self.__module__.split('.')[-1]
self.config = beets.config[self.name]
if not self.template_funcs:
self.template_funcs = {}
if not self.template_fields:
self.template_fields = {}
if not self.album_template_fields:
self.album_template_fields = {}
self.import_stages = []
self._log = log.getChild(self.name)
self._log.setLevel(logging.NOTSET) # Use `beets` logger level.
if not any(isinstance(f, PluginLogFilter) for f in self._log.filters):
self._log.addFilter(PluginLogFilter(self))
def commands(self):
"""Should return a list of beets.ui.Subcommand objects for
commands that should be added to beets' CLI.
"""
return ()
def get_import_stages(self):
"""Return a list of functions that should be called as importer
pipelines stages.
The callables are wrapped versions of the functions in
`self.import_stages`. Wrapping provides some bookkeeping for the
plugin: specifically, the logging level is adjusted to WARNING.
"""
return [self._set_log_level_and_params(logging.WARNING, import_stage)
for import_stage in self.import_stages]
def _set_log_level_and_params(self, base_log_level, func):
"""Wrap `func` to temporarily set this plugin's logger level to
`base_log_level` + config options (and restore it to its previous
value after the function returns). Also determines which params may not
be sent for backwards-compatibility.
"""
argspec = inspect.getargspec(func)
@wraps(func)
def wrapper(*args, **kwargs):
assert self._log.level == logging.NOTSET
verbosity = beets.config['verbose'].get(int)
log_level = max(logging.DEBUG, base_log_level - 10 * verbosity)
self._log.setLevel(log_level)
try:
try:
return func(*args, **kwargs)
except TypeError as exc:
if exc.args[0].startswith(func.__name__):
# caused by 'func' and not stuff internal to 'func'
kwargs = dict((arg, val) for arg, val in kwargs.items()
if arg in argspec.args)
return func(*args, **kwargs)
else:
raise
finally:
self._log.setLevel(logging.NOTSET)
return wrapper
def queries(self):
"""Should return a dict mapping prefixes to Query subclasses.
"""
return {}
def track_distance(self, item, info):
"""Should return a Distance object to be added to the
distance for every track comparison.
"""
return beets.autotag.hooks.Distance()
def album_distance(self, items, album_info, mapping):
"""Should return a Distance object to be added to the
distance for every album-level comparison.
"""
return beets.autotag.hooks.Distance()
def candidates(self, items, artist, album, va_likely):
"""Should return a sequence of AlbumInfo objects that match the
album whose items are provided.
"""
return ()
def item_candidates(self, item, artist, title):
"""Should return a sequence of TrackInfo objects that match the
item provided.
"""
return ()
def album_for_id(self, album_id):
"""Return an AlbumInfo object or None if no matching release was
found.
"""
return None
def track_for_id(self, track_id):
"""Return a TrackInfo object or None if no matching release was
found.
"""
return None
def add_media_field(self, name, descriptor):
"""Add a field that is synchronized between media files and items.
When a media field is added ``item.write()`` will set the name
property of the item's MediaFile to ``item[name]`` and save the
changes. Similarly ``item.read()`` will set ``item[name]`` to
the value of the name property of the media file.
``descriptor`` must be an instance of ``mediafile.MediaField``.
"""
        # Defer import to prevent a circular dependency
from beets import library
mediafile.MediaFile.add_field(name, descriptor)
library.Item._media_fields.add(name)
_raw_listeners = None
listeners = None
def register_listener(self, event, func):
"""Add a function as a listener for the specified event.
"""
wrapped_func = self._set_log_level_and_params(logging.WARNING, func)
cls = self.__class__
if cls.listeners is None or cls._raw_listeners is None:
cls._raw_listeners = defaultdict(list)
cls.listeners = defaultdict(list)
if func not in cls._raw_listeners[event]:
cls._raw_listeners[event].append(func)
cls.listeners[event].append(wrapped_func)
template_funcs = None
template_fields = None
album_template_fields = None
@classmethod
def template_func(cls, name):
"""Decorator that registers a path template function. The
function will be invoked as ``%name{}`` from path format
strings.
"""
def helper(func):
if cls.template_funcs is None:
cls.template_funcs = {}
cls.template_funcs[name] = func
return func
return helper
@classmethod
def template_field(cls, name):
"""Decorator that registers a path template field computation.
The value will be referenced as ``$name`` from path format
strings. The function must accept a single parameter, the Item
being formatted.
"""
def helper(func):
if cls.template_fields is None:
cls.template_fields = {}
cls.template_fields[name] = func
return func
return helper
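# Illustrative sketch (not shipped with beets): a minimal plugin built on the
# hooks defined above. The 'import' event name and its handler arguments are
# assumptions made for the example; consult the beets event documentation for
# the real signatures.
class _ExamplePlugin(BeetsPlugin):
    def __init__(self):
        super(_ExamplePlugin, self).__init__()
        # Run _on_import whenever the (assumed) 'import' event fires.
        self.register_listener('import', self._on_import)

    def _on_import(self, lib, paths):
        self._log.info(u'import finished: {0} paths', len(paths))


@_ExamplePlugin.template_func('upper')
def _tmpl_upper(text):
    # Usable from path format strings as %upper{...}.
    return text.upper()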
_classes = set()
def load_plugins(names=()):
"""Imports the modules for a sequence of plugin names. Each name
must be the name of a Python module under the "beetsplug" namespace
package in sys.path; the module indicated should contain the
BeetsPlugin subclasses desired.
"""
for name in names:
modname = '{0}.{1}'.format(PLUGIN_NAMESPACE, name)
try:
try:
namespace = __import__(modname, None, None)
except ImportError as exc:
# Again, this is hacky:
if exc.args[0].endswith(' ' + name):
log.warn(u'** plugin {0} not found', name)
else:
raise
else:
for obj in getattr(namespace, name).__dict__.values():
if isinstance(obj, type) and issubclass(obj, BeetsPlugin) \
and obj != BeetsPlugin and obj not in _classes:
_classes.add(obj)
except:
log.warn(
u'** error loading plugin {}:\n{}',
name,
traceback.format_exc(),
)
_instances = {}
def find_plugins():
"""Returns a list of BeetsPlugin subclass instances from all
currently loaded beets plugins. Loads the default plugin set
first.
"""
load_plugins()
plugins = []
for cls in _classes:
# Only instantiate each plugin class once.
if cls not in _instances:
_instances[cls] = cls()
plugins.append(_instances[cls])
return plugins
# Communication with plugins.
def commands():
"""Returns a list of Subcommand objects from all loaded plugins.
"""
out = []
for plugin in find_plugins():
out += plugin.commands()
return out
def queries():
"""Returns a dict mapping prefix strings to Query subclasses all loaded
plugins.
"""
out = {}
for plugin in find_plugins():
out.update(plugin.queries())
return out
def types(model_cls):
# Gives us `item_types` and `album_types`
attr_name = '{0}_types'.format(model_cls.__name__.lower())
types = {}
for plugin in find_plugins():
plugin_types = getattr(plugin, attr_name, {})
for field in plugin_types:
if field in types and plugin_types[field] != types[field]:
raise PluginConflictException(
u'Plugin {0} defines flexible field {1} '
u'which has already been defined with '
u'another type.'.format(plugin.name, field)
)
types.update(plugin_types)
return types
def track_distance(item, info):
"""Gets the track distance calculated by all loaded plugins.
Returns a Distance object.
"""
from beets.autotag.hooks import Distance
dist = Distance()
for plugin in find_plugins():
dist.update(plugin.track_distance(item, info))
return dist
def album_distance(items, album_info, mapping):
"""Returns the album distance calculated by plugins."""
from beets.autotag.hooks import Distance
dist = Distance()
for plugin in find_plugins():
dist.update(plugin.album_distance(items, album_info, mapping))
return dist
def candidates(items, artist, album, va_likely):
"""Gets MusicBrainz candidates for an album from each plugin.
"""
out = []
for plugin in find_plugins():
out.extend(plugin.candidates(items, artist, album, va_likely))
return out
def item_candidates(item, artist, title):
"""Gets MusicBrainz candidates for an item from the plugins.
"""
out = []
for plugin in find_plugins():
out.extend(plugin.item_candidates(item, artist, title))
return out
def album_for_id(album_id):
"""Get AlbumInfo objects for a given ID string.
"""
out = []
for plugin in find_plugins():
res = plugin.album_for_id(album_id)
if res:
out.append(res)
return out
def track_for_id(track_id):
"""Get TrackInfo objects for a given ID string.
"""
out = []
for plugin in find_plugins():
res = plugin.track_for_id(track_id)
if res:
out.append(res)
return out
def template_funcs():
"""Get all the template functions declared by plugins as a
dictionary.
"""
funcs = {}
for plugin in find_plugins():
if plugin.template_funcs:
funcs.update(plugin.template_funcs)
return funcs
def import_stages():
"""Get a list of import stage functions defined by plugins."""
stages = []
for plugin in find_plugins():
stages += plugin.get_import_stages()
return stages
# New-style (lazy) plugin-provided fields.
def item_field_getters():
"""Get a dictionary mapping field names to unary functions that
compute the field's value.
"""
funcs = {}
for plugin in find_plugins():
if plugin.template_fields:
funcs.update(plugin.template_fields)
return funcs
def album_field_getters():
"""As above, for album fields.
"""
funcs = {}
for plugin in find_plugins():
if plugin.album_template_fields:
funcs.update(plugin.album_template_fields)
return funcs
# Event dispatch.
def event_handlers():
"""Find all event handlers from plugins as a dictionary mapping
event names to sequences of callables.
"""
all_handlers = defaultdict(list)
for plugin in find_plugins():
if plugin.listeners:
for event, handlers in plugin.listeners.items():
all_handlers[event] += handlers
return all_handlers
def send(event, **arguments):
"""Send an event to all assigned event listeners.
`event` is the name of the event to send, all other named arguments
are passed along to the handlers.
Return a list of non-None values returned from the handlers.
"""
log.debug(u'Sending event: {0}', event)
results = []
for handler in event_handlers()[event]:
result = handler(**arguments)
if result is not None:
results.append(result)
return results
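# A rough usage sketch (the event name, library object and handler below are
# hypothetical; real handlers are registered by plugins via their `listeners`
# mapping rather than being called directly):
#
#     def on_paths_imported(lib=None, paths=None):
#         return len(paths or [])
#
#     results = send('paths_imported', lib=my_lib, paths=['/music/new'])
#     # `results` holds the non-None return values of all matching handlers.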
def feat_tokens(for_artist=True):
"""Return a regular expression that matches phrases like "featuring"
that separate a main artist or a song title from secondary artists.
The `for_artist` option determines whether the regex should be
suitable for matching artist fields (the default) or title fields.
"""
feat_words = ['ft', 'featuring', 'feat', 'feat.', 'ft.']
if for_artist:
feat_words += ['with', 'vs', 'and', 'con', '&']
    return r'(?<=\s)(?:{0})(?=\s)'.format(
'|'.join(re.escape(x) for x in feat_words)
)
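# An illustrative sketch of how the returned pattern behaves (approximate; the
# exact split depends on the whitespace around the "feat" token):
#
#     >>> re.split(feat_tokens(), 'Main Artist feat. Guest Artist')
#     ['Main Artist ', ' Guest Artist']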
def sanitize_choices(choices, choices_all):
"""Clean up a stringlist configuration attribute: keep only choices
elements present in choices_all, remove duplicate elements, expand '*'
wildcard while keeping original stringlist order.
"""
seen = set()
others = [x for x in choices_all if x not in choices]
res = []
for s in choices:
if s in list(choices_all) + ['*']:
if not (s in seen or seen.add(s)):
res.extend(list(others) if s == '*' else [s])
return res
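# A small worked example (a sketch): '*' expands to the remaining, not yet
# listed choices while the configured order is preserved.
#
#     >>> sanitize_choices(['b', '*'], ['a', 'b', 'c'])
#     ['b', 'a', 'c']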
|
gpl-3.0
|
mulkieran/pyblk
|
tests/test_traversal.py
|
1
|
4103
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <[email protected]>
"""
tests.test_traversal
====================
Tests traversing the sysfs hierarchy.
.. moduleauthor:: mulhern <[email protected]>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pyblk
import pytest
from hypothesis import given
from hypothesis import strategies
from hypothesis import Settings
from ._constants import BOTHS
from ._constants import CONTEXT
from ._constants import EITHERS
from ._constants import HOLDERS
from ._constants import SLAVES
NUM_TESTS = 5
# Use conditional to avoid processing tests if number of examples is too small.
# pytest.mark.skipif allows the test to be built, resulting in a hypothesis
# error if SLAVES or HOLDERS is empty.
if len(BOTHS) == 0:
@pytest.mark.skipif(
True,
reason="no slaves or holders data for tests"
)
class TestTraversal(object):
# pylint: disable=too-few-public-methods
"""
An empty test class which is always skipped.
"""
def test_dummy(self):
"""
A dummy test, for which pytest can show a skip message.
"""
pass
else:
class TestTraversal(object):
"""
A class for testing sysfs traversals.
"""
@given(
strategies.sampled_from(SLAVES),
settings=Settings(max_examples=NUM_TESTS)
)
def test_slaves(self, device):
"""
Verify slaves do not contain originating device.
"""
assert device not in pyblk.slaves(CONTEXT, device)
@given(
strategies.sampled_from(HOLDERS),
settings=Settings(max_examples=NUM_TESTS)
)
def test_holders(self, device):
"""
Verify holders do not contain originating device.
"""
assert device not in pyblk.holders(CONTEXT, device)
@given(
strategies.sampled_from(EITHERS),
strategies.booleans(),
settings=Settings(max_examples=2 * NUM_TESTS)
)
def test_inverse(self, device, recursive):
"""
Verify that a round-trip traversal will encounter the original
device.
:param device: the device to test
:param bool recursive: if True, test recursive relationship
If recursive is True, test ancestor/descendant relationship.
If recursive is False, tests parent/child relationship.
"""
# pylint: disable=too-many-function-args
slaves = list(pyblk.slaves(CONTEXT, device, recursive))
for slave in slaves:
assert device in list(
pyblk.holders(CONTEXT, slave, recursive)
)
holders = list(pyblk.holders(CONTEXT, device, recursive))
for holder in holders:
assert device in list(
pyblk.slaves(CONTEXT, holder, recursive)
)
|
gpl-2.0
|
zaina/nova
|
nova/tests/unit/api/ec2/test_ec2utils.py
|
84
|
2549
|
# Copyright 2014 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.ec2 import ec2utils
from nova import context
from nova import objects
from nova import test
class EC2UtilsTestCase(test.TestCase):
def setUp(self):
self.ctxt = context.get_admin_context()
ec2utils.reset_cache()
super(EC2UtilsTestCase, self).setUp()
def test_get_int_id_from_snapshot_uuid(self):
smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
smap.create()
smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
'fake-uuid')
self.assertEqual(smap.id, smap_id)
def test_get_int_id_from_snapshot_uuid_creates_mapping(self):
smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
'fake-uuid')
smap = objects.EC2SnapshotMapping.get_by_id(self.ctxt, smap_id)
self.assertEqual('fake-uuid', smap.uuid)
def test_get_snapshot_uuid_from_int_id(self):
smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
smap.create()
smap_uuid = ec2utils.get_snapshot_uuid_from_int_id(self.ctxt, smap.id)
self.assertEqual(smap.uuid, smap_uuid)
def test_id_to_glance_id(self):
s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
s3imap.create()
uuid = ec2utils.id_to_glance_id(self.ctxt, s3imap.id)
self.assertEqual(uuid, s3imap.uuid)
def test_glance_id_to_id(self):
s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
s3imap.create()
s3imap_id = ec2utils.glance_id_to_id(self.ctxt, s3imap.uuid)
self.assertEqual(s3imap_id, s3imap.id)
def test_glance_id_to_id_creates_mapping(self):
s3imap_id = ec2utils.glance_id_to_id(self.ctxt, 'fake-uuid')
s3imap = objects.S3ImageMapping.get_by_id(self.ctxt, s3imap_id)
self.assertEqual('fake-uuid', s3imap.uuid)
|
apache-2.0
|
thedemz/M101P
|
chapter2/hw2-2.py
|
1
|
1698
|
import sys
import os
file_path = os.path.abspath(__file__)
dir_path = os.path.dirname(file_path)
lib_path = os.path.join(dir_path, "lib")
sys.path.insert(0, lib_path)
import pymongo
# connect to the db on the standard port
connection = pymongo.MongoClient("mongodb://localhost")
db = connection.students # attach to db
def get_students():
    collection = db.grades # specify the collection
print("There are 200 students")
print("There should be 800 grades")
grades = collection.find().count()
print("Counted Grades:", grades)
result = collection.find({'type': 'homework'}, {"student_id": 1, "score": 1, "type":1, "_id": 1}).sort(
[("student_id", 1), ("score", 1)]
)
print("Counted With type Homework:", result.count())
return result
def mark_lowest_score( grades ):
    collection = db.grades # specify the collection
student_id = None
for ix in grades:
ix["lowest"] = False
print(student_id, ix["student_id"])
if student_id != ix["student_id"]:
student_id = ix["student_id"]
ix["lowest"] = True
print("True")
else:
print("False")
collection.save( ix )
def delete_lowest():
    collection = db.grades # specify the collection
grades = collection.find().count()
if grades == 800:
print("Removing lowest grades from total", grades)
collection.remove({"lowest": True})
else:
print("Already deleted!", grades)
if __name__ == "__main__":
print("import the data with:")
grades = get_students()
mark_lowest_score(grades)
delete_lowest()
|
apache-2.0
|
chiffa/numpy
|
numpy/distutils/conv_template.py
|
38
|
9684
|
#!/usr/bin/python
"""
takes templated file .xxx.src and produces .xxx file where .xxx is
.i or .c or .h, using the following template rules
/**begin repeat -- on a line by itself marks the start of a repeated code
segment
/**end repeat**/ -- on a line by itself marks its end
After the /**begin repeat and before the */, all the named templates are placed;
these should all have the same number of replacements
Repeat blocks can be nested, with each nested block labeled with its depth,
i.e.
/**begin repeat1
*....
*/
/**end repeat1**/
When using nested loops, you can optionally exclude particular
combinations of the variables using (inside the comment portion of the inner loop):
:exclude: var1=value1, var2=value2, ...
This will exclude the pattern where var1 is value1 and var2 is value2 when
the result is being generated.
In the main body each replace will use one entry from the list of named replacements
Note that all #..# forms in a block must have the same number of
comma-separated entries.
Example:
An input file containing
/**begin repeat
* #a = 1,2,3#
* #b = 1,2,3#
*/
/**begin repeat1
* #c = ted, jim#
*/
@a@, @b@, @c@
/**end repeat1**/
/**end repeat**/
produces
line 1 "template.c.src"
/*
*********************************************************************
** This file was autogenerated from a template DO NOT EDIT!!**
** Changes should be made to the original source (.src) file **
*********************************************************************
*/
#line 9
1, 1, ted
#line 9
1, 1, jim
#line 9
2, 2, ted
#line 9
2, 2, jim
#line 9
3, 3, ted
#line 9
3, 3, jim
"""
from __future__ import division, absolute_import, print_function
__all__ = ['process_str', 'process_file']
import os
import sys
import re
from numpy.distutils.compat import get_exception
# names for replacement that are already global.
global_names = {}
# header placed at the front of each processed file
header =\
"""
/*
*****************************************************************************
** This file was autogenerated from a template DO NOT EDIT!!!! **
** Changes should be made to the original source (.src) file **
*****************************************************************************
*/
"""
# Parse string for repeat loops
def parse_structure(astr, level):
"""
The returned line number is from the beginning of the string, starting
at zero. Returns an empty list if no loops found.
"""
if level == 0 :
loopbeg = "/**begin repeat"
loopend = "/**end repeat**/"
else :
loopbeg = "/**begin repeat%d" % level
loopend = "/**end repeat%d**/" % level
ind = 0
line = 0
spanlist = []
while True:
start = astr.find(loopbeg, ind)
if start == -1:
break
start2 = astr.find("*/", start)
start2 = astr.find("\n", start2)
fini1 = astr.find(loopend, start2)
fini2 = astr.find("\n", fini1)
line += astr.count("\n", ind, start2+1)
spanlist.append((start, start2+1, fini1, fini2+1, line))
line += astr.count("\n", start2+1, fini2)
ind = fini2
spanlist.sort()
return spanlist
def paren_repl(obj):
torep = obj.group(1)
numrep = obj.group(2)
return ','.join([torep]*int(numrep))
parenrep = re.compile(r"[(]([^)]*)[)]\*(\d+)")
plainrep = re.compile(r"([^*]+)\*(\d+)")
def parse_values(astr):
# replaces all occurrences of '(a,b,c)*4' in astr
    # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty parentheses generate
# empty values, i.e., ()*4 yields ',,,'. The result is
# split at ',' and a list of values returned.
astr = parenrep.sub(paren_repl, astr)
# replaces occurrences of xxx*3 with xxx, xxx, xxx
astr = ','.join([plainrep.sub(paren_repl, x.strip())
for x in astr.split(',')])
return astr.split(',')
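# A worked example of the expansion described above (a sketch of the intended
# behaviour, not part of the original file):
#
#     >>> parse_values('(a,b)*2, c*3')
#     ['a', 'b', 'a', 'b', 'c', 'c', 'c']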
stripast = re.compile(r"\n\s*\*?")
named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#")
exclude_vars_re = re.compile(r"(\w*)=(\w*)")
exclude_re = re.compile(":exclude:")
def parse_loop_header(loophead) :
"""Find all named replacements in the header
Returns a list of dictionaries, one for each loop iteration,
where each key is a name to be substituted and the corresponding
value is the replacement string.
Also return a list of exclusions. The exclusions are dictionaries
of key value pairs. There can be more than one exclusion.
[{'var1':'value1', 'var2', 'value2'[,...]}, ...]
"""
# Strip out '\n' and leading '*', if any, in continuation lines.
    # This should not affect code previous to this change as
# continuation lines were not allowed.
loophead = stripast.sub("", loophead)
# parse out the names and lists of values
names = []
reps = named_re.findall(loophead)
nsub = None
for rep in reps:
name = rep[0]
vals = parse_values(rep[1])
size = len(vals)
if nsub is None :
nsub = size
elif nsub != size :
msg = "Mismatch in number of values:\n%s = %s" % (name, vals)
raise ValueError(msg)
names.append((name, vals))
# Find any exclude variables
excludes = []
for obj in exclude_re.finditer(loophead):
span = obj.span()
# find next newline
endline = loophead.find('\n', span[1])
substr = loophead[span[1]:endline]
ex_names = exclude_vars_re.findall(substr)
excludes.append(dict(ex_names))
# generate list of dictionaries, one for each template iteration
dlist = []
if nsub is None :
raise ValueError("No substitution variables found")
for i in range(nsub) :
tmp = {}
for name, vals in names :
tmp[name] = vals[i]
dlist.append(tmp)
return dlist
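# A minimal example of the header parsing (a sketch; note that the exclusion
# list computed above is not part of the return value):
#
#     >>> parse_loop_header(" * #a = 1, 2#\n * #b = x, y#\n")
#     [{'a': '1', 'b': 'x'}, {'a': '2', 'b': 'y'}]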
replace_re = re.compile(r"@([\w]+)@")
def parse_string(astr, env, level, line) :
lineno = "#line %d\n" % line
# local function for string replacement, uses env
def replace(match):
name = match.group(1)
try :
val = env[name]
except KeyError:
msg = 'line %d: no definition of key "%s"'%(line, name)
raise ValueError(msg)
return val
code = [lineno]
struct = parse_structure(astr, level)
if struct :
# recurse over inner loops
oldend = 0
newlevel = level + 1
for sub in struct:
pref = astr[oldend:sub[0]]
head = astr[sub[0]:sub[1]]
text = astr[sub[1]:sub[2]]
oldend = sub[3]
newline = line + sub[4]
code.append(replace_re.sub(replace, pref))
try :
envlist = parse_loop_header(head)
except ValueError:
e = get_exception()
msg = "line %d: %s" % (newline, e)
raise ValueError(msg)
for newenv in envlist :
newenv.update(env)
newcode = parse_string(text, newenv, newlevel, newline)
code.extend(newcode)
suff = astr[oldend:]
code.append(replace_re.sub(replace, suff))
else :
# replace keys
code.append(replace_re.sub(replace, astr))
code.append('\n')
return ''.join(code)
def process_str(astr):
code = [header]
code.extend(parse_string(astr, global_names, 0, 1))
return ''.join(code)
include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
def resolve_includes(source):
d = os.path.dirname(source)
fid = open(source)
lines = []
for line in fid:
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d, fn)
if os.path.isfile(fn):
print('Including file', fn)
lines.extend(resolve_includes(fn))
else:
lines.append(line)
else:
lines.append(line)
fid.close()
return lines
def process_file(source):
lines = resolve_includes(source)
sourcefile = os.path.normcase(source).replace("\\", "\\\\")
try:
code = process_str(''.join(lines))
except ValueError:
e = get_exception()
raise ValueError('In "%s" loop at %s' % (sourcefile, e))
return '#line 1 "%s"\n%s' % (sourcefile, code)
def unique_key(adict):
# this obtains a unique key given a dictionary
# currently it works by appending together n of the letters of the
# current keys and increasing n until a unique key is found
# -- not particularly quick
allkeys = list(adict.keys())
done = False
n = 1
while not done:
newkey = "".join([x[:n] for x in allkeys])
if newkey in allkeys:
n += 1
else:
done = True
return newkey
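# A small worked example (a sketch; the key order of a plain dict is not
# guaranteed, so the generated key may vary between runs):
#
#     >>> unique_key({'beta': 1, 'gamma': 2})
#     'bg'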
if __name__ == "__main__":
try:
file = sys.argv[1]
except IndexError:
fid = sys.stdin
outfile = sys.stdout
else:
fid = open(file, 'r')
(base, ext) = os.path.splitext(file)
newname = base
outfile = open(newname, 'w')
allstr = fid.read()
try:
writestr = process_str(allstr)
except ValueError:
e = get_exception()
raise ValueError("In %s loop at %s" % (file, e))
outfile.write(writestr)
|
bsd-3-clause
|
spisneha25/django
|
tests/sitemaps_tests/test_https.py
|
205
|
3608
|
from __future__ import unicode_literals
from datetime import date
from django.test import ignore_warnings, override_settings
from django.utils.deprecation import RemovedInDjango110Warning
from .base import SitemapTestsBase
@override_settings(ROOT_URLCONF='sitemaps_tests.urls.https')
class HTTPSSitemapTests(SitemapTestsBase):
protocol = 'https'
@ignore_warnings(category=RemovedInDjango110Warning)
def test_secure_sitemap_index(self):
"A secure sitemap index can be rendered"
# The URL for views.sitemap in tests/urls/https.py has been updated
# with a name but since reversing by Python path is tried first
# before reversing by name and works since we're giving
# name='django.contrib.sitemaps.views.sitemap', we need to silence
# the erroneous warning until reversing by dotted path is removed.
# The test will work without modification when it's removed.
response = self.client.get('/secure/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/secure/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_secure_sitemap_section(self):
"A secure sitemap section can be rendered"
response = self.client.get('/secure/sitemap-simple.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@override_settings(SECURE_PROXY_SSL_HEADER=False)
class HTTPSDetectionSitemapTests(SitemapTestsBase):
extra = {'wsgi.url_scheme': 'https'}
@ignore_warnings(category=RemovedInDjango110Warning)
def test_sitemap_index_with_https_request(self):
"A sitemap index requested in HTTPS is rendered with HTTPS links"
# The URL for views.sitemap in tests/urls/https.py has been updated
# with a name but since reversing by Python path is tried first
# before reversing by name and works since we're giving
# name='django.contrib.sitemaps.views.sitemap', we need to silence
# the erroneous warning until reversing by dotted path is removed.
# The test will work without modification when it's removed.
response = self.client.get('/simple/index.xml', **self.extra)
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url.replace('http://', 'https://')
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_sitemap_section_with_https_request(self):
"A sitemap section requested in HTTPS is rendered with HTTPS links"
response = self.client.get('/simple/sitemap-simple.xml', **self.extra)
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url.replace('http://', 'https://'), date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
|
bsd-3-clause
|
catapult-project/catapult
|
third_party/gsutil/third_party/pyu2f/pyu2f/tests/hardware_test.py
|
7
|
7337
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pyu2f.hardware."""
import sys
import mock
from pyu2f import errors
from pyu2f import hardware
if sys.version_info[:2] < (2, 7):
import unittest2 as unittest # pylint: disable=g-import-not-at-top
else:
import unittest # pylint: disable=g-import-not-at-top
class HardwareTest(unittest.TestCase):
def testSimpleCommands(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
sk.CmdBlink(5)
mock_transport.SendBlink.assert_called_once_with(5)
sk.CmdWink()
mock_transport.SendWink.assert_called_once_with()
sk.CmdPing(bytearray(b'foo'))
mock_transport.SendPing.assert_called_once_with(bytearray(b'foo'))
def testRegisterInvalidParams(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
self.assertRaises(errors.InvalidRequestError, sk.CmdRegister, '1234',
'1234')
def testRegisterSuccess(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
challenge_param = b'01234567890123456789012345678901'
app_param = b'01234567890123456789012345678901'
mock_transport.SendMsgBytes.return_value = bytearray(
[0x01, 0x02, 0x90, 0x00])
reply = sk.CmdRegister(challenge_param, app_param)
self.assertEquals(reply, bytearray([0x01, 0x02]))
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
(sent_msg,), _ = mock_transport.SendMsgBytes.call_args
self.assertEquals(sent_msg[0:4], bytearray([0x00, 0x01, 0x03, 0x00]))
self.assertEquals(sent_msg[7:-2], bytearray(challenge_param + app_param))
def testRegisterTUPRequired(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
challenge_param = b'01234567890123456789012345678901'
app_param = b'01234567890123456789012345678901'
mock_transport.SendMsgBytes.return_value = bytearray([0x69, 0x85])
self.assertRaises(errors.TUPRequiredError, sk.CmdRegister, challenge_param,
app_param)
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
def testVersion(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
mock_transport.SendMsgBytes.return_value = bytearray(b'U2F_V2\x90\x00')
reply = sk.CmdVersion()
self.assertEquals(reply, bytearray(b'U2F_V2'))
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
(sent_msg,), _ = mock_transport.SendMsgBytes.call_args
self.assertEquals(sent_msg, bytearray(
[0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00]))
def testVersionFallback(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
mock_transport.SendMsgBytes.side_effect = [
bytearray([0x67, 0x00]),
bytearray(b'U2F_V2\x90\x00')]
reply = sk.CmdVersion()
self.assertEquals(reply, bytearray(b'U2F_V2'))
self.assertEquals(mock_transport.SendMsgBytes.call_count, 2)
(sent_msg,), _ = mock_transport.SendMsgBytes.call_args_list[0]
self.assertEquals(len(sent_msg), 7)
self.assertEquals(sent_msg[0:4], bytearray([0x00, 0x03, 0x00, 0x00]))
self.assertEquals(sent_msg[4:7], bytearray([0x00, 0x00, 0x00])) # Le
(sent_msg,), _ = mock_transport.SendMsgBytes.call_args_list[1]
self.assertEquals(len(sent_msg), 9)
self.assertEquals(sent_msg[0:4], bytearray([0x00, 0x03, 0x00, 0x00]))
self.assertEquals(sent_msg[4:7], bytearray([0x00, 0x00, 0x00])) # Lc
self.assertEquals(sent_msg[7:9], bytearray([0x00, 0x00])) # Le
def testVersionErrors(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
mock_transport.SendMsgBytes.return_value = bytearray([0xfa, 0x05])
self.assertRaises(errors.ApduError, sk.CmdVersion)
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
def testAuthenticateSuccess(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
challenge_param = b'01234567890123456789012345678901'
app_param = b'01234567890123456789012345678901'
key_handle = b'\x01\x02\x03\x04'
mock_transport.SendMsgBytes.return_value = bytearray(
[0x01, 0x02, 0x90, 0x00])
reply = sk.CmdAuthenticate(challenge_param, app_param, key_handle)
self.assertEquals(reply, bytearray([0x01, 0x02]))
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
(sent_msg,), _ = mock_transport.SendMsgBytes.call_args
self.assertEquals(sent_msg[0:4], bytearray([0x00, 0x02, 0x03, 0x00]))
self.assertEquals(
sent_msg[7:-2],
bytearray(challenge_param + app_param + bytearray([4, 1, 2, 3, 4])))
def testAuthenticateCheckOnly(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
challenge_param = b'01234567890123456789012345678901'
app_param = b'01234567890123456789012345678901'
key_handle = b'\x01\x02\x03\x04'
mock_transport.SendMsgBytes.return_value = bytearray(
[0x01, 0x02, 0x90, 0x00])
reply = sk.CmdAuthenticate(challenge_param,
app_param,
key_handle,
check_only=True)
self.assertEquals(reply, bytearray([0x01, 0x02]))
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
(sent_msg,), _ = mock_transport.SendMsgBytes.call_args
self.assertEquals(sent_msg[0:4], bytearray([0x00, 0x02, 0x07, 0x00]))
self.assertEquals(
sent_msg[7:-2],
bytearray(challenge_param + app_param + bytearray([4, 1, 2, 3, 4])))
def testAuthenticateTUPRequired(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
challenge_param = b'01234567890123456789012345678901'
app_param = b'01234567890123456789012345678901'
key_handle = b'\x01\x02\x03\x04'
mock_transport.SendMsgBytes.return_value = bytearray([0x69, 0x85])
self.assertRaises(errors.TUPRequiredError, sk.CmdAuthenticate,
challenge_param, app_param, key_handle)
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
def testAuthenticateInvalidKeyHandle(self):
mock_transport = mock.MagicMock()
sk = hardware.SecurityKey(mock_transport)
challenge_param = b'01234567890123456789012345678901'
app_param = b'01234567890123456789012345678901'
key_handle = b'\x01\x02\x03\x04'
mock_transport.SendMsgBytes.return_value = bytearray([0x6a, 0x80])
self.assertRaises(errors.InvalidKeyHandleError, sk.CmdAuthenticate,
challenge_param, app_param, key_handle)
self.assertEquals(mock_transport.SendMsgBytes.call_count, 1)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
ASlave2Audio/Restaurant-App
|
mingw/bin/lib/xdrlib.py
|
197
|
5563
|
"""Implements (a subset of) Sun XDR -- eXternal Data Representation.
See: RFC 1014
"""
import struct
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
__all__ = ["Error", "Packer", "Unpacker", "ConversionError"]
# exceptions
class Error(Exception):
"""Exception class for this module. Use:
except xdrlib.Error, var:
# var has the Error instance for the exception
Public ivars:
msg -- contains the message
"""
def __init__(self, msg):
self.msg = msg
def __repr__(self):
return repr(self.msg)
def __str__(self):
return str(self.msg)
class ConversionError(Error):
pass
class Packer:
"""Pack various data representations into a buffer."""
def __init__(self):
self.reset()
def reset(self):
self.__buf = _StringIO()
def get_buffer(self):
return self.__buf.getvalue()
# backwards compatibility
get_buf = get_buffer
def pack_uint(self, x):
self.__buf.write(struct.pack('>L', x))
def pack_int(self, x):
self.__buf.write(struct.pack('>l', x))
pack_enum = pack_int
def pack_bool(self, x):
if x: self.__buf.write('\0\0\0\1')
else: self.__buf.write('\0\0\0\0')
def pack_uhyper(self, x):
self.pack_uint(x>>32 & 0xffffffffL)
self.pack_uint(x & 0xffffffffL)
pack_hyper = pack_uhyper
def pack_float(self, x):
try: self.__buf.write(struct.pack('>f', x))
except struct.error, msg:
raise ConversionError, msg
def pack_double(self, x):
try: self.__buf.write(struct.pack('>d', x))
except struct.error, msg:
raise ConversionError, msg
def pack_fstring(self, n, s):
if n < 0:
raise ValueError, 'fstring size must be nonnegative'
data = s[:n]
n = ((n+3)//4)*4
data = data + (n - len(data)) * '\0'
self.__buf.write(data)
pack_fopaque = pack_fstring
def pack_string(self, s):
n = len(s)
self.pack_uint(n)
self.pack_fstring(n, s)
pack_opaque = pack_string
pack_bytes = pack_string
def pack_list(self, list, pack_item):
for item in list:
self.pack_uint(1)
pack_item(item)
self.pack_uint(0)
def pack_farray(self, n, list, pack_item):
if len(list) != n:
raise ValueError, 'wrong array size'
for item in list:
pack_item(item)
def pack_array(self, list, pack_item):
n = len(list)
self.pack_uint(n)
self.pack_farray(n, list, pack_item)
class Unpacker:
"""Unpacks various data representations from the given buffer."""
def __init__(self, data):
self.reset(data)
def reset(self, data):
self.__buf = data
self.__pos = 0
def get_position(self):
return self.__pos
def set_position(self, position):
self.__pos = position
def get_buffer(self):
return self.__buf
def done(self):
if self.__pos < len(self.__buf):
raise Error('unextracted data remains')
def unpack_uint(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
x = struct.unpack('>L', data)[0]
try:
return int(x)
except OverflowError:
return x
def unpack_int(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
return struct.unpack('>l', data)[0]
unpack_enum = unpack_int
def unpack_bool(self):
return bool(self.unpack_int())
def unpack_uhyper(self):
hi = self.unpack_uint()
lo = self.unpack_uint()
return long(hi)<<32 | lo
def unpack_hyper(self):
x = self.unpack_uhyper()
if x >= 0x8000000000000000L:
x = x - 0x10000000000000000L
return x
def unpack_float(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
return struct.unpack('>f', data)[0]
def unpack_double(self):
i = self.__pos
self.__pos = j = i+8
data = self.__buf[i:j]
if len(data) < 8:
raise EOFError
return struct.unpack('>d', data)[0]
def unpack_fstring(self, n):
if n < 0:
raise ValueError, 'fstring size must be nonnegative'
i = self.__pos
j = i + (n+3)//4*4
if j > len(self.__buf):
raise EOFError
self.__pos = j
return self.__buf[i:i+n]
unpack_fopaque = unpack_fstring
def unpack_string(self):
n = self.unpack_uint()
return self.unpack_fstring(n)
unpack_opaque = unpack_string
unpack_bytes = unpack_string
def unpack_list(self, unpack_item):
list = []
while 1:
x = self.unpack_uint()
if x == 0: break
if x != 1:
raise ConversionError, '0 or 1 expected, got %r' % (x,)
item = unpack_item()
list.append(item)
return list
def unpack_farray(self, n, unpack_item):
list = []
for i in range(n):
list.append(unpack_item())
return list
def unpack_array(self, unpack_item):
n = self.unpack_uint()
return self.unpack_farray(n, unpack_item)
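# A minimal round-trip sketch (not part of the original module, shown only to
# illustrate how Packer and Unpacker are meant to be paired):
#
#     >>> p = Packer()
#     >>> p.pack_uint(7)
#     >>> p.pack_string('spam')
#     >>> u = Unpacker(p.get_buffer())
#     >>> u.unpack_uint()
#     7
#     >>> u.unpack_string()
#     'spam'
#     >>> u.done()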
|
mit
|
dhhjx880713/GPy
|
GPy/likelihoods/mixed_noise.py
|
7
|
3266
|
# Copyright (c) 2012-2014 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy import stats, special
from . import link_functions
from .likelihood import Likelihood
from .gaussian import Gaussian
from ..core.parameterization import Param
from paramz.transformations import Logexp
from ..core.parameterization import Parameterized
import itertools
class MixedNoise(Likelihood):
def __init__(self, likelihoods_list, name='mixed_noise'):
        # NOTE: at the moment this likelihood only works when using a list of Gaussians
super(Likelihood, self).__init__(name=name)
self.link_parameters(*likelihoods_list)
self.likelihoods_list = likelihoods_list
self.log_concave = False
def gaussian_variance(self, Y_metadata):
assert all([isinstance(l, Gaussian) for l in self.likelihoods_list])
ind = Y_metadata['output_index'].flatten()
variance = np.zeros(ind.size)
for lik, j in zip(self.likelihoods_list, range(len(self.likelihoods_list))):
variance[ind==j] = lik.variance
return variance
def betaY(self,Y,Y_metadata):
#TODO not here.
return Y/self.gaussian_variance(Y_metadata=Y_metadata)[:,None]
def update_gradients(self, gradients):
self.gradient = gradients
def exact_inference_gradients(self, dL_dKdiag, Y_metadata):
assert all([isinstance(l, Gaussian) for l in self.likelihoods_list])
ind = Y_metadata['output_index'].flatten()
return np.array([dL_dKdiag[ind==i].sum() for i in range(len(self.likelihoods_list))])
def predictive_values(self, mu, var, full_cov=False, Y_metadata=None):
ind = Y_metadata['output_index'].flatten()
_variance = np.array([self.likelihoods_list[j].variance for j in ind ])
if full_cov:
var += np.eye(var.shape[0])*_variance
else:
var += _variance
return mu, var
def predictive_variance(self, mu, sigma, Y_metadata):
_variance = self.gaussian_variance(Y_metadata)
return _variance + sigma**2
def predictive_quantiles(self, mu, var, quantiles, Y_metadata):
ind = Y_metadata['output_index'].flatten()
outputs = np.unique(ind)
Q = np.zeros( (mu.size,len(quantiles)) )
for j in outputs:
q = self.likelihoods_list[j].predictive_quantiles(mu[ind==j,:],
var[ind==j,:],quantiles,Y_metadata=None)
Q[ind==j,:] = np.hstack(q)
return [q[:,None] for q in Q.T]
def samples(self, gp, Y_metadata):
"""
Returns a set of samples of observations based on a given value of the latent variable.
:param gp: latent variable
"""
N1, N2 = gp.shape
Ysim = np.zeros((N1,N2))
ind = Y_metadata['output_index'].flatten()
for j in np.unique(ind):
flt = ind==j
gp_filtered = gp[flt,:]
n1 = gp_filtered.shape[0]
lik = self.likelihoods_list[j]
_ysim = np.array([np.random.normal(lik.gp_link.transf(gpj), scale=np.sqrt(lik.variance), size=1) for gpj in gp_filtered.flatten()])
Ysim[flt,:] = _ysim.reshape(n1,N2)
return Ysim
|
bsd-3-clause
|
sdgathman/cjdns
|
node_build/dependencies/libuv/build/gyp/test/ninja/action_dependencies/gyptest-action-dependencies.py
|
54
|
1972
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that building an object file correctly depends on running actions in
dependent targets, but not the targets themselves.
"""
import os
import sys
import TestGyp
# NOTE(piman): This test will not work with other generators because:
# - it explicitly tests the optimization, which is not implemented (yet?) on
# other generators
# - it relies on the exact path to output object files, which is generator
# dependent, and actually, relies on the ability to build only that object file,
# which I don't think is available on all generators.
# TODO(piman): Extend to other generators when possible.
test = TestGyp.TestGyp(formats=['ninja'])
# xcode-ninja doesn't support building single object files by design.
if test.format == 'xcode-ninja':
test.skip_test()
test.run_gyp('action_dependencies.gyp', chdir='src')
chdir = 'relocate/src'
test.relocate('src', chdir)
objext = '.obj' if sys.platform == 'win32' else '.o'
test.build('action_dependencies.gyp',
os.path.join('obj', 'b.b' + objext),
chdir=chdir)
# The 'a' actions should be run (letting b.c compile), but the a static library
# should not be built.
test.built_file_must_not_exist('a', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_not_exist('b', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_exist(os.path.join('obj', 'b.b' + objext), chdir=chdir)
test.build('action_dependencies.gyp',
os.path.join('obj', 'c.c' + objext),
chdir=chdir)
# 'a' and 'b' should be built, so that the 'c' action succeeds, letting c.c
# compile
test.built_file_must_exist('a', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_exist('b', type=test.EXECUTABLE, chdir=chdir)
test.built_file_must_exist(os.path.join('obj', 'c.c' + objext), chdir=chdir)
test.pass_test()
|
gpl-3.0
|
Distrotech/bzr
|
bzrlib/tests/test_reconcile.py
|
2
|
2802
|
# Copyright (C) 2006, 2008-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for reconciliation behaviour that is repository independent."""
from bzrlib import (
bzrdir,
errors,
tests,
)
from bzrlib.reconcile import reconcile, Reconciler
from bzrlib.tests import per_repository
class TestWorksWithSharedRepositories(per_repository.TestCaseWithRepository):
def test_reweave_empty(self):
# we want a repo capable format
parent = bzrdir.BzrDirMetaFormat1().initialize('.')
parent.create_repository(shared=True)
parent.root_transport.mkdir('child')
child = bzrdir.BzrDirMetaFormat1().initialize('child')
self.assertRaises(errors.NoRepositoryPresent, child.open_repository)
reconciler = Reconciler(child)
reconciler.reconcile()
# smoke test for reconcile appears to work too.
reconcile(child)
# no inconsistent parents should have been found
# but the values should have been set.
self.assertEqual(0, reconciler.inconsistent_parents)
# and no garbage inventories
self.assertEqual(0, reconciler.garbage_inventories)
class TestReconciler(tests.TestCaseWithTransport):
def test_reconciler_with_no_branch(self):
repo = self.make_repository('repo')
reconciler = Reconciler(repo.bzrdir)
reconciler.reconcile()
# no inconsistent parents should have been found
# but the values should have been set.
self.assertEqual(0, reconciler.inconsistent_parents)
# and no garbage inventories
self.assertEqual(0, reconciler.garbage_inventories)
self.assertIs(None, reconciler.fixed_branch_history)
def test_reconciler_finds_branch(self):
a_branch = self.make_branch('a_branch')
reconciler = Reconciler(a_branch.bzrdir)
reconciler.reconcile()
# It should have checked the repository, and the branch
self.assertEqual(0, reconciler.inconsistent_parents)
self.assertEqual(0, reconciler.garbage_inventories)
self.assertIs(False, reconciler.fixed_branch_history)
|
gpl-2.0
|
aspidites/django
|
tests/multiple_database/models.py
|
282
|
2472
|
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Review(models.Model):
source = models.CharField(max_length=100)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
def __str__(self):
return self.source
class Meta:
ordering = ('source',)
class PersonManager(models.Manager):
def get_by_natural_key(self, name):
return self.get(name=name)
@python_2_unicode_compatible
class Person(models.Model):
objects = PersonManager()
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
# This book manager doesn't do anything interesting; it just
# exists to strip out the 'extra_arg' argument to certain
# calls. This argument is used to establish that the BookManager
# is actually getting used when it should be.
class BookManager(models.Manager):
def create(self, *args, **kwargs):
kwargs.pop('extra_arg', None)
return super(BookManager, self).create(*args, **kwargs)
def get_or_create(self, *args, **kwargs):
kwargs.pop('extra_arg', None)
return super(BookManager, self).get_or_create(*args, **kwargs)
@python_2_unicode_compatible
class Book(models.Model):
objects = BookManager()
title = models.CharField(max_length=100)
published = models.DateField()
authors = models.ManyToManyField(Person)
editor = models.ForeignKey(Person, models.SET_NULL, null=True, related_name='edited')
reviews = GenericRelation(Review)
pages = models.IntegerField(default=100)
def __str__(self):
return self.title
class Meta:
ordering = ('title',)
@python_2_unicode_compatible
class Pet(models.Model):
name = models.CharField(max_length=100)
owner = models.ForeignKey(Person, models.CASCADE)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
class UserProfile(models.Model):
user = models.OneToOneField(User, models.SET_NULL, null=True)
flavor = models.CharField(max_length=100)
class Meta:
ordering = ('flavor',)
|
bsd-3-clause
|
ryfx/modrana
|
modules/mod_cron.py
|
1
|
13149
|
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# A timing and scheduling module for modRana.
#----------------------------------------------------------------------------
# Copyright 2007, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
from __future__ import with_statement # for python 2.5
from modules.base_module import RanaModule
import threading
# only import GTK libs if GTK GUI is used
from core import gs
if gs.GUIString == "GTK":
import gobject
elif gs.GUIString.lower() == "qt5":
import pyotherside
elif gs.GUIString.lower() == "qml":
from PySide import QtCore
def getModule(*args, **kwargs):
"""
    Return the module version corresponding to the currently used toolkit
    (e.g. one that uses the timers provided by the toolkit
    - gobject.timeout_add, QTimer, etc.)
"""
if gs.GUIString.lower() == 'qt5':
return CronQt5(*args, **kwargs)
if gs.GUIString == 'QML':
return CronQt(*args, **kwargs)
elif gs.GUIString == 'GTK': # GTK for now
return CronGTK(*args, **kwargs)
else:
return Cron(*args, **kwargs)
class Cron(RanaModule):
"""A timing and scheduling module for modRana"""
# -> this is an abstract class
# that specifies and interface for concrete implementations
#
# Why is there a special module for timing ?
# The reason is twofold:
# Toolkit independence and power saving/monitoring.
#
# If all timing calls go through this module,
# the underlying engine (currently glibs gobject)
# can be more easily changed than rewriting code everywhere.
#
# Also, modRana targets mobile devices with limited power budget.
    # If all timing goes through this module, rogue modules with many
    # frequent timers can be easily identified.
    # It might also be possible to stop or pause some/all of the timers
# after a period of inactivity, or some such.
def __init__(self, *args, **kwargs):
RanaModule.__init__(self, *args, **kwargs)
def addIdle(self, callback, args):
"""add a callback that is called once the main loop becomes idle"""
pass
    def addTimeout(self, callback, timeout, caller, description, args=None):
        """the callback will be called after timeout + the time needed to execute
        the callback and other pending events"""
if not args: args = []
pass
def _doTimeout(self, timeoutId, callback, args):
"""wrapper about the timeout function, which makes it possible to check
if a timeout is still in progress from the "outside"
- like this, the underlying timer should also be easily replaceable
"""
if callback(*args) == False:
# the callback returned False,
# that means it wants to quit the timeout
# stop tracking
self.removeTimeout(timeoutId)
# propagate the quit signal
return False
else:
return True # just run the loop
def removeTimeout(self, timeoutId):
"""remove timeout with a given id"""
pass
def modifyTimeout(self, timeoutId, newTimeout):
"""modify the duration of a timeout in progress"""
pass
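# A rough usage sketch (the handler and the `cron` reference are hypothetical;
# modules normally obtain this object through modRana's module loader):
#
#     def _update_position(self):
#         ...                      # periodic work
#         return True              # keep the timer running; False cancels it
#
#     timeout_id = cron.addTimeout(self._update_position, 1000, self, "position update")
#     cron.modifyTimeout(timeout_id, 5000)   # later: slow the timer down to 5 s
#     cron.removeTimeout(timeout_id)         # or stop it explicitly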
class CronGTK(Cron):
"""A GTK timing and scheduling module for modRana"""
def __init__(self, *args, **kwargs):
Cron.__init__(self, *args, **kwargs)
gui = self.modrana.gui
self.nextId = 0
# cronTab and activeIds should be in sync
self.cronTab = {"idle": {}, "timeout": {}}
self.dataLock = threading.RLock()
    def _getID(self):
        """get a unique id for a timing-related request that can be
        returned to the callers and used as a handle
TODO: can int overflow in Python ?"""
timeoutId = self.nextId
self.nextId += 1
return timeoutId
def addIdle(self, callback, args):
"""add a callback that is called once the main loop becomes idle"""
gobject.idle_add(callback, *args)
    def addTimeout(self, callback, timeout, caller, description, args=None):
        """the callback will be called after timeout + the time needed to execute
        the callback and other pending events"""
if not args: args = []
timeoutId = self._getID()
realId = gobject.timeout_add(timeout, self._doTimeout, timeoutId, callback, args)
timeoutTuple = (callback, args, timeout, caller, description, realId)
with self.dataLock:
self.cronTab['timeout'][timeoutId] = timeoutTuple
return timeoutId
def removeTimeout(self, timeoutId):
"""remove timeout with a given id"""
with self.dataLock:
if timeoutId in self.cronTab['timeout'].keys():
(callback, args, timeout, caller, description, realId) = self.cronTab['timeout'][timeoutId]
del self.cronTab['timeout'][timeoutId]
gobject.source_remove(realId)
else:
self.log.error("can't remove timeout, wrong id: %s", timeoutId)
def modifyTimeout(self, timeoutId, newTimeout):
"""modify the duration of a timeout in progress"""
with self.dataLock:
if timeoutId in self.cronTab['timeout'].keys():
# load the timeout description
(callback, args, timeout, caller, description, realId) = self.cronTab['timeout'][timeoutId]
gobject.source_remove(realId) # remove the old timeout
realId = gobject.timeout_add(newTimeout, self._doTimeout, timeoutId, callback, args) # new timeout
# update the timeout description
self.cronTab['timeout'][timeoutId] = (callback, args, newTimeout, caller, description, realId)
else:
self.log.error("can't modify timeout, wrong id: %s", timeoutId)
class CronQt(Cron):
"""A Qt timing and scheduling module for modRana"""
def __init__(self, *args, **kwargs):
Cron.__init__(self, *args, **kwargs)
self.nextId = 0
# cronTab and activeIds should be in sync
self.cronTab = {"idle": {}, "timeout": {}}
self.dataLock = threading.RLock()
    def _getID(self):
        """get a unique id for a timing-related request that can be
        returned to the callers and used as a handle
TODO: can int overflow in Python ?
TODO: id recycling ?"""
with self.dataLock:
timeoutId = self.nextId
self.nextId += 1
return timeoutId
def addIdle(self, callback, args):
"""add a callback that is called once the main loop becomes idle"""
pass
    def addTimeout(self, callback, timeout, caller, description, args=None):
        """the callback will be called after timeout + the time needed to execute
        the callback and other pending events
"""
if not args: args = []
# create and configure the timer
timer = QtCore.QTimer()
# timer.setInterval(timeout)
timeoutId = self._getID()
# create a new function that calls the callback processing function
        # with the provided arguments
handleThisTimeout = lambda: self._doTimeout(timeoutId, callback, args)
# connect this function to the timeout
timer.timeout.connect(handleThisTimeout)
# store timer data
timeoutTuple = (callback, args, timeout, caller, description, timeoutId, timer)
with self.dataLock:
self.cronTab['timeout'][timeoutId] = timeoutTuple
# start the timer
timer.start(timeout)
# return the id
return timeoutId
def removeTimeout(self, timeoutId):
"""remove timeout with a given id"""
with self.dataLock:
if timeoutId in self.cronTab['timeout'].keys():
(callback, args, timeout, caller, description, timeoutId, timer) = self.cronTab['timeout'][timeoutId]
timer.stop()
del self.cronTab['timeout'][timeoutId]
else:
self.log.error("can't remove timeout, wrong id: %s", timeoutId)
def modifyTimeout(self, timeoutId, newTimeout):
"""modify the duration of a timeout in progress"""
with self.dataLock:
if timeoutId in self.cronTab['timeout'].keys():
# load the timeout data
(callback, args, timeout, caller, description, timeoutId, timer) = self.cronTab['timeout'][timeoutId]
# reset the timeout duration
timer.setInterval(newTimeout)
# update the timeout data
self.cronTab['timeout'][timeoutId] = (callback, args, newTimeout, caller, description, timeoutId, timer)
else:
self.log.error("can't modify timeout, wrong id: %s", timeoutId)
class CronQt5(Cron):
"""A Qt 5 timing and scheduling module for modRana"""
def __init__(self, *args, **kwargs):
Cron.__init__(self, *args, **kwargs)
self.nextId = 0
# cronTab and activeIds should be in sync
self.cronTab = {"idle": {}, "timeout": {}}
self.dataLock = threading.RLock()
def _timerTriggered(self, timerId):
with self.dataLock:
timerTuple = self.cronTab['timeout'].get(timerId)
if timerTuple:
call = timerTuple[6]
call()
else:
self.log.error("unknown timer triggered: %s", timerId)
    def _getID(self):
        """get a unique id for a timing-related request that can be
        returned to the callers and used as a handle
TODO: can int overflow in Python ?
TODO: id recycling ?"""
with self.dataLock:
timeoutId = self.nextId
self.nextId += 1
return timeoutId
    def addTimeout(self, callback, timeout, caller, description, args=None):
        """the callback will be called after timeout + the time needed to execute
        the callback and other pending events
"""
if not args: args = []
timeoutId = self._getID()
self.log.debug("qt5: adding a %s ms timeout from %s as %s", timeout, caller, timeoutId)
# create a new function that calls the callback processing function
        # with the provided arguments
handleThisTimeout = lambda: self._doTimeout(timeoutId, callback, args)
# store timer data
# - we don't actually have a Python-side timer object, so we just store
# the callback function and tell QML to add the timer
timeoutTuple = (callback, args, timeout, caller, description, timeoutId, handleThisTimeout)
with self.dataLock:
self.cronTab['timeout'][timeoutId] = timeoutTuple
pyotherside.send("addTimer", timeoutId, timeout)
# return the id
return timeoutId
def removeTimeout(self, timeoutId):
"""remove timeout with a given id"""
with self.dataLock:
if timeoutId in self.cronTab['timeout'].keys():
caller = self.cronTab['timeout'][timeoutId][3]
del self.cronTab['timeout'][timeoutId]
pyotherside.send("removeTimer", timeoutId)
self.log.debug("qt5: timeout %s from %s has been removed", timeoutId, caller)
else:
self.log.error("can't remove timeout, wrong id: %s", timeoutId)
def modifyTimeout(self, timeoutId, newTimeout):
"""modify the duration of a timeout in progress"""
with self.dataLock:
if timeoutId in self.cronTab['timeout'].keys():
# we don't store the timeout value Python-side,
# so we just notify QML about the change
pyotherside.send("modifyTimerTimeout", timeoutId, newTimeout)
else:
self.log.error("can't modify timeout, wrong id: %s", timeoutId)
# def _addInfo(self, id, info):
# """add a message for a timeout handler to read"""
# with self.dataLock:
# if id in self.info:
# self.info[id].append(info) # add message to queue
# else:
# self.info[id] = [info] # create message queue
#
# def _popInfo(self, id):
# with self.dataLock:
# if id in self.info:
# try:
# return self.info[id].pop() # try to return the message
# except IndexError:
# del self.info[id] # message queue empty, delete it
# return None
# else:
# return None
|
gpl-3.0
|
samueldotj/TeeRISC-Simulator
|
util/o3-pipeview.py
|
58
|
15788
|
#! /usr/bin/env python
# Copyright (c) 2011 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Giacomo Gabrielli
# Pipeline activity viewer for the O3 CPU model.
import optparse
import os
import sys
import copy
# Temporary storage for instructions. The queue is filled in out-of-order
# until it reaches 'max_threshold' number of instructions. It is then
# sorted out and instructions are printed out until their number drops to
# 'min_threshold'.
# It is assumed that the instructions are not out of order for more than
# 'min_threshold' places - otherwise they will appear out of order.
insts = {
'queue': [] , # Instructions to print.
'max_threshold':2000, # Instructions are sorted out and printed when
# their number reaches this threshold.
'min_threshold':1000, # Printing stops when this number is reached.
'sn_start':0, # The first instruction seq. number to be printed.
'sn_stop':0, # The last instruction seq. number to be printed.
'tick_start':0, # The first tick to be printed
'tick_stop':0, # The last tick to be printed
    'tick_drift':2000, # Used to calculate the start and the end of main
                       # loop. We assume here that the instructions are not
                       # out of order for more than 2000 CPU ticks,
                       # otherwise the print may not start/stop
                       # at the time specified by tick_start/stop.
'only_committed':0, # Set if only committed instructions are printed.
}
def process_trace(trace, outfile, cycle_time, width, color, timestamps,
committed_only, store_completions, start_tick, stop_tick, start_sn, stop_sn):
global insts
insts['sn_start'] = start_sn
insts['sn_stop'] = stop_sn
insts['tick_start'] = start_tick
insts['tick_stop'] = stop_tick
insts['tick_drift'] = insts['tick_drift'] * cycle_time
insts['only_committed'] = committed_only
line = None
fields = None
# Skip lines up to the starting tick
if start_tick != 0:
while True:
line = trace.readline()
if not line: return
fields = line.split(':')
if fields[0] != 'O3PipeView': continue
if int(fields[2]) >= start_tick: break
elif start_sn != 0:
while True:
line = trace.readline()
if not line: return
fields = line.split(':')
if fields[0] != 'O3PipeView': continue
if fields[1] == 'fetch' and int(fields[5]) >= start_sn: break
else:
line = trace.readline()
if not line: return
fields = line.split(':')
# Skip lines up to next instruction fetch
while fields[0] != 'O3PipeView' or fields[1] != 'fetch':
line = trace.readline()
if not line: return
fields = line.split(':')
# Print header
outfile.write('// f = fetch, d = decode, n = rename, p = dispatch, '
'i = issue, c = complete, r = retire')
if store_completions:
outfile.write(', s = store-complete')
outfile.write('\n\n')
outfile.write(' ' + 'timeline'.center(width) +
' ' + 'tick'.center(15) +
' ' + 'pc.upc'.center(12) +
' ' + 'disasm'.ljust(25) +
' ' + 'seq_num'.center(10))
if timestamps:
outfile.write('timestamps'.center(25))
outfile.write('\n')
# Region of interest
curr_inst = {}
while True:
if fields[0] == 'O3PipeView':
curr_inst[fields[1]] = int(fields[2])
if fields[1] == 'fetch':
if ((stop_tick > 0 and int(fields[2]) > stop_tick+insts['tick_drift']) or
(stop_sn > 0 and int(fields[5]) > (stop_sn+insts['max_threshold']))):
print_insts(outfile, cycle_time, width, color, timestamps, store_completions, 0)
return
(curr_inst['pc'], curr_inst['upc']) = fields[3:5]
curr_inst['sn'] = int(fields[5])
curr_inst['disasm'] = ' '.join(fields[6][:-1].split())
elif fields[1] == 'retire':
if curr_inst['retire'] == 0:
curr_inst['disasm'] = '-----' + curr_inst['disasm']
if store_completions:
curr_inst[fields[3]] = int(fields[4])
queue_inst(outfile, curr_inst, cycle_time, width, color, timestamps, store_completions)
line = trace.readline()
if not line:
print_insts(outfile, cycle_time, width, color, timestamps, store_completions, 0)
return
fields = line.split(':')
#Sorts out instructions according to sequence number
def compare_by_sn(a, b):
return cmp(a['sn'], b['sn'])
# Puts new instruction into the print queue.
# Sorts out and prints instructions when their number reaches threshold value
def queue_inst(outfile, inst, cycle_time, width, color, timestamps, store_completions):
global insts
l_copy = copy.deepcopy(inst)
insts['queue'].append(l_copy)
if len(insts['queue']) > insts['max_threshold']:
print_insts(outfile, cycle_time, width, color, timestamps, store_completions, insts['min_threshold'])
# Sorts out and prints instructions in print queue
def print_insts(outfile, cycle_time, width, color, timestamps, store_completions, lower_threshold):
global insts
insts['queue'].sort(compare_by_sn)
while len(insts['queue']) > lower_threshold:
print_item=insts['queue'].pop(0)
# As the instructions are processed out of order the main loop starts
# earlier than specified by start_sn/tick and finishes later than what
# is defined in stop_sn/tick.
# Therefore, here we have to filter out instructions that reside out of
# the specified boundaries.
if (insts['sn_start'] > 0 and print_item['sn'] < insts['sn_start']):
continue  # earlier than the starting sequence number
if (insts['sn_stop'] > 0 and print_item['sn'] > insts['sn_stop']):
continue  # later than the ending sequence number
if (insts['tick_start'] > 0 and print_item['fetch'] < insts['tick_start']):
continue  # earlier than the starting tick number
if (insts['tick_stop'] > 0 and print_item['fetch'] > insts['tick_stop']):
continue  # later than the ending tick number
if (insts['only_committed'] != 0 and print_item['retire'] == 0):
continue; # retire is set to zero if it hasn't been completed
print_inst(outfile, print_item, cycle_time, width, color, timestamps, store_completions)
# Prints a single instruction
def print_inst(outfile, inst, cycle_time, width, color, timestamps, store_completions):
if color:
from m5.util.terminal import termcap
else:
from m5.util.terminal import no_termcap as termcap
# Pipeline stages
stages = [{'name': 'fetch',
'color': termcap.Blue + termcap.Reverse,
'shorthand': 'f'},
{'name': 'decode',
'color': termcap.Yellow + termcap.Reverse,
'shorthand': 'd'},
{'name': 'rename',
'color': termcap.Magenta + termcap.Reverse,
'shorthand': 'n'},
{'name': 'dispatch',
'color': termcap.Green + termcap.Reverse,
'shorthand': 'p'},
{'name': 'issue',
'color': termcap.Red + termcap.Reverse,
'shorthand': 'i'},
{'name': 'complete',
'color': termcap.Cyan + termcap.Reverse,
'shorthand': 'c'},
{'name': 'retire',
'color': termcap.Blue + termcap.Reverse,
'shorthand': 'r'}
]
if store_completions:
stages.append(
{'name': 'store',
'color': termcap.Yellow + termcap.Reverse,
'shorthand': 's'})
# Print
time_width = width * cycle_time
base_tick = (inst['fetch'] / time_width) * time_width
# Find out the time of the last event - it may not
# be 'retire' if the instruction is not completed.
last_event_time = max(inst['fetch'], inst['decode'],inst['rename'],
inst['dispatch'],inst['issue'], inst['complete'], inst['retire'])
if store_completions:
last_event_time = max(last_event_time, inst['store'])
# Timeline shorter than time_width is printed in compact form where
# the print continues at the start of the same line.
if ((last_event_time - inst['fetch']) < time_width):
num_lines = 1 # compact form
else:
num_lines = ((last_event_time - base_tick) / time_width) + 1
curr_color = termcap.Normal
# This will visually distinguish completed and abandoned instructions.
if inst['retire'] == 0: dot = '=' # abandoned instruction
else: dot = '.' # completed instruction
for i in range(num_lines):
start_tick = base_tick + i * time_width
end_tick = start_tick + time_width
if num_lines == 1: # compact form
end_tick += (inst['fetch'] - base_tick)
events = []
for stage_idx in range(len(stages)):
tick = inst[stages[stage_idx]['name']]
if tick != 0:
if tick >= start_tick and tick < end_tick:
events.append((tick % time_width,
stages[stage_idx]['name'],
stage_idx, tick))
events.sort()
outfile.write('[')
pos = 0
if num_lines == 1 and events[0][2] != 0: # event is not fetch
curr_color = stages[events[0][2] - 1]['color']
for event in events:
if (stages[event[2]]['name'] == 'dispatch' and
inst['dispatch'] == inst['issue']):
continue
outfile.write(curr_color + dot * ((event[0] / cycle_time) - pos))
outfile.write(stages[event[2]]['color'] +
stages[event[2]]['shorthand'])
if event[3] != last_event_time: # event is not the last one
curr_color = stages[event[2]]['color']
else:
curr_color = termcap.Normal
pos = (event[0] / cycle_time) + 1
outfile.write(curr_color + dot * (width - pos) + termcap.Normal +
']-(' + str(base_tick + i * time_width).rjust(15) + ') ')
if i == 0:
outfile.write('%s.%s %s [%s]' % (
inst['pc'].rjust(10),
inst['upc'],
inst['disasm'].ljust(25),
str(inst['sn']).rjust(10)))
if timestamps:
outfile.write(' f=%s, r=%s' % (inst['fetch'], inst['retire']))
outfile.write('\n')
else:
outfile.write('...'.center(12) + '\n')
def validate_range(my_range):
my_range = [int(i) for i in my_range.split(':')]
if (len(my_range) != 2 or
my_range[0] < 0 or
my_range[1] > 0 and my_range[0] >= my_range[1]):
return None
return my_range
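# Illustrative behaviour of validate_range (the values are examples only):
#   validate_range('0:-1')      -> [0, -1]      (from the start, no upper bound)
#   validate_range('1000:2000') -> [1000, 2000]
#   validate_range('5:2')       -> None          (rejected: start >= stop)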
def main():
# Parse options
usage = ('%prog [OPTION]... TRACE_FILE')
parser = optparse.OptionParser(usage=usage)
parser.add_option(
'-o',
dest='outfile',
default=os.path.join(os.getcwd(), 'o3-pipeview.out'),
help="output file (default: '%default')")
parser.add_option(
'-t',
dest='tick_range',
default='0:-1',
help="tick range (default: '%default'; -1 == inf.)")
parser.add_option(
'-i',
dest='inst_range',
default='0:-1',
help="instruction range (default: '%default'; -1 == inf.)")
parser.add_option(
'-w',
dest='width',
type='int', default=80,
help="timeline width (default: '%default')")
parser.add_option(
'--color',
action='store_true', default=False,
help="enable colored output (default: '%default')")
parser.add_option(
'-c', '--cycle-time',
type='int', default=1000,
help="CPU cycle time in ticks (default: '%default')")
parser.add_option(
'--timestamps',
action='store_true', default=False,
help="print fetch and retire timestamps (default: '%default')")
parser.add_option(
'--only_committed',
action='store_true', default=False,
help="display only committed (completed) instructions (default: '%default')")
parser.add_option(
'--store_completions',
action='store_true', default=False,
help="additionally display store completion ticks (default: '%default')")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error('incorrect number of arguments')
sys.exit(1)
tick_range = validate_range(options.tick_range)
if not tick_range:
parser.error('invalid range')
sys.exit(1)
inst_range = validate_range(options.inst_range)
if not inst_range:
parser.error('invalid range')
sys.exit(1)
# Process trace
print 'Processing trace... ',
with open(args[0], 'r') as trace:
with open(options.outfile, 'w') as out:
process_trace(trace, out, options.cycle_time, options.width,
options.color, options.timestamps,
options.only_committed, options.store_completions,
*(tick_range + inst_range))
print 'done!'
if __name__ == '__main__':
sys.path.append(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..', 'src', 'python'))
main()
|
bsd-3-clause
|
drewp/tahoe-lafs
|
setuptools-0.6c16dev3.egg/setuptools/command/develop.py
|
7
|
5478
|
from setuptools.command.easy_install import easy_install
from distutils.util import convert_path
from pkg_resources import Distribution, PathMetadata, normalize_path
from distutils import log
from distutils.errors import *
import sys, os, setuptools, glob
class develop(easy_install):
"""Set up package for development"""
description = "install package in 'development mode'"
user_options = easy_install.user_options + [
("uninstall", "u", "Uninstall this source package"),
("egg-path=", None, "Set the path to be used in the .egg-link file"),
]
boolean_options = easy_install.boolean_options + ['uninstall']
command_consumes_arguments = False # override base
def run(self):
self.old_run()
if sys.platform == "win32":
from setuptools.command.scriptsetup import do_scriptsetup
do_scriptsetup()
def old_run(self):
if self.uninstall:
self.multi_version = True
self.uninstall_link()
else:
self.install_for_development()
self.warn_deprecated_options()
def initialize_options(self):
self.uninstall = None
self.egg_path = None
easy_install.initialize_options(self)
self.setup_path = None
self.always_copy_from = '.' # always copy eggs installed in curdir
def finalize_options(self):
ei = self.get_finalized_command("egg_info")
if ei.broken_egg_info:
raise DistutilsError(
"Please rename %r to %r before using 'develop'"
% (ei.egg_info, ei.broken_egg_info)
)
self.args = [ei.egg_name]
easy_install.finalize_options(self)
# pick up setup-dir .egg files only: no .egg-info
self.package_index.scan(glob.glob('*.egg'))
self.egg_link = os.path.join(self.install_dir, ei.egg_name+'.egg-link')
self.egg_base = ei.egg_base
if self.egg_path is None:
self.egg_path = os.path.abspath(ei.egg_base)
target = normalize_path(self.egg_base)
if normalize_path(os.path.join(self.install_dir, self.egg_path)) != target:
raise DistutilsOptionError(
"--egg-path must be a relative path from the install"
" directory to "+target
)
# Make a distribution for the package's source
self.dist = Distribution(
target,
PathMetadata(target, os.path.abspath(ei.egg_info)),
project_name = ei.egg_name
)
p = self.egg_base.replace(os.sep,'/')
if p!= os.curdir:
p = '../' * (p.count('/')+1)
self.setup_path = p
p = normalize_path(os.path.join(self.install_dir, self.egg_path, p))
if p != normalize_path(os.curdir):
raise DistutilsOptionError(
"Can't get a consistent path to setup script from"
" installation directory", p, normalize_path(os.curdir))
def install_for_development(self):
# Ensure metadata is up-to-date
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
self.install_site_py() # ensure that target dir is site-safe
if setuptools.bootstrap_install_from:
self.easy_install(setuptools.bootstrap_install_from)
setuptools.bootstrap_install_from = None
# create an .egg-link in the installation dir, pointing to our egg
log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
if not self.dry_run:
f = open(self.egg_link,"w")
f.write(self.egg_path + "\n" + self.setup_path)
f.close()
# postprocess the installed distro, fixing up .pth, installing scripts,
# and handling requirements
self.process_distribution(None, self.dist, not self.no_deps)
def uninstall_link(self):
if os.path.exists(self.egg_link):
log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
contents = [line.rstrip() for line in file(self.egg_link)]
if contents not in ([self.egg_path], [self.egg_path, self.setup_path]):
log.warn("Link points to %s: uninstall aborted", contents)
return
if not self.dry_run:
os.unlink(self.egg_link)
if not self.dry_run:
self.update_pth(self.dist) # remove any .pth link to us
if self.distribution.scripts:
# XXX should also check for entry point scripts!
log.warn("Note: you must uninstall or replace scripts manually!")
def install_egg_scripts(self, dist):
if dist is not self.dist:
# Installing a dependency, so fall back to normal behavior
return easy_install.install_egg_scripts(self,dist)
# create wrapper scripts in the script dir, pointing to dist.scripts
# new-style...
self.install_wrapper_scripts(dist)
# ...and old-style
for script_name in self.distribution.scripts or []:
script_path = os.path.abspath(convert_path(script_name))
script_name = os.path.basename(script_path)
f = open(script_path,'rU')
script_text = f.read()
f.close()
self.install_script(dist, script_name, script_text, script_path)
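# Illustrative usage (assuming a project whose setup.py uses setuptools):
#   python setup.py develop      # install the package in 'development mode'
#   python setup.py develop -u   # remove the .egg-link installed above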
|
gpl-2.0
|
odoousers2014/LibrERP
|
account_financial_report_webkit/wizard/general_ledger_wizard.py
|
2
|
6208
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi, Guewen Baconnier
# Copyright Camptocamp SA 2011
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from osv import fields, osv
class AccountReportGeneralLedgerWizard(osv.osv_memory):
"""Will launch general ledger report and pass required args"""
_inherit = "account.common.account.report"
_name = "general.ledger.webkit"
_description = "General Ledger Report"
def _get_account_ids(self, cr, uid, context=None):
res = False
if context.get('active_model', False) == 'account.account' and context.get('active_ids', False):
res = context['active_ids']
return res
_columns = {
'amount_currency': fields.boolean("With Currency",
help="It adds the currency column"),
'display_account': fields.selection([('bal_all', 'All'),
('bal_mix', 'With transactions or non zero balance')],
'Display accounts',
required=True),
'account_ids': fields.many2many('account.account', string='Filter on accounts',
help="""Only selected accounts will be printed. Leave empty to print all accounts."""),
'centralize': fields.boolean('Activate Centralization', help='Uncheck to display all the details of centralized accounts.')
}
_defaults = {
'amount_currency': False,
'display_account': 'bal_mix',
'account_ids': _get_account_ids,
'centralize': True,
}
def _check_fiscalyear(self, cr, uid, ids, context=None):
obj = self.read(cr, uid, ids[0], ['fiscalyear_id', 'filter'], context=context)
if not obj['fiscalyear_id'] and obj['filter'] == 'filter_no':
return False
return True
_constraints = [
(_check_fiscalyear, 'When no Fiscal year is selected, you must choose to filter by periods or by date.', ['filter']),
]
def pre_print_report(self, cr, uid, ids, data, context=None):
data = super(AccountReportGeneralLedgerWizard, self).pre_print_report(cr, uid, ids, data, context)
if context is None:
context = {}
# will be used to attach the report on the main account
data['ids'] = [data['form']['chart_account_id']]
vals = self.read(cr, uid, ids,
['amount_currency',
'display_account',
'account_ids',
'centralize'],
context=context)[0]
data['form'].update(vals)
return data
def onchange_filter(self, cr, uid, ids, filter='filter_no', fiscalyear_id=False, context=None):
res = {}
if filter == 'filter_no':
res['value'] = {'period_from': False, 'period_to': False, 'date_from': False ,'date_to': False}
if filter == 'filter_date':
if fiscalyear_id:
fyear = self.pool.get('account.fiscalyear').browse(cr, uid, fiscalyear_id, context=context)
date_from = fyear.date_start
date_to = fyear.date_stop > time.strftime('%Y-%m-%d') and time.strftime('%Y-%m-%d') or fyear.date_stop
else:
date_from, date_to = time.strftime('%Y-01-01'), time.strftime('%Y-%m-%d')
res['value'] = {'period_from': False, 'period_to': False, 'date_from': date_from, 'date_to': date_to}
if filter == 'filter_period' and fiscalyear_id:
start_period = end_period = False
cr.execute('''
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND COALESCE(p.special, FALSE) = FALSE
ORDER BY p.date_start ASC
LIMIT 1) AS period_start
UNION ALL
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND p.date_start < NOW()
AND COALESCE(p.special, FALSE) = FALSE
ORDER BY p.date_stop DESC
LIMIT 1) AS period_stop''', (fiscalyear_id, fiscalyear_id))
periods = [i[0] for i in cr.fetchall()]
if periods:
start_period = end_period = periods[0]
if len(periods) > 1:
end_period = periods[1]
res['value'] = {'period_from': start_period, 'period_to': end_period, 'date_from': False, 'date_to': False}
return res
def _print_report(self, cursor, uid, ids, data, context=None):
context = context or {}
# we update form with display account value
data = self.pre_print_report(cursor, uid, ids, data, context=context)
return {'type': 'ir.actions.report.xml',
'report_name': 'account.account_report_general_ledger_webkit',
'datas': data}
AccountReportGeneralLedgerWizard()
|
agpl-3.0
|
john-wang-metro/metro-openerp
|
metro_mrp/__openerp__.py
|
2
|
2231
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Metro MRP',
'version': '1.0',
'category': 'Metro',
'description': """
Metro MRP Extension:
1. Add CNC Work Order
(Ported to OpenERP v7.0 by Metro Tower Trucks.)
""",
'author': 'Metro Tower Trucks',
'website': 'http://www.metrotowtrucks.com',
'depends': ["metro", "sale", "metro_stock", "product_manufacturer", "document", "mrp_operations", "procurement", "mrp","project"],
'data': [
'security/ir.model.access.csv',
'security/mrp_security.xml',
'res_config_view.xml',
'wizard/work_order_cnc_line_done_view.xml',
'wizard/wo_material_request_view.xml',
'wizard/mo_actions_view.xml',
'work_order_cnc_view.xml',
'mrp_view.xml',
'mrp_sequence.xml',
'wizard/add_common_bom_view.xml',
'wizard/bom_import_view.xml',
'mrp_workflow.xml',
'pdm.xml',
'procurement_view.xml'
],
'test': [],
'demo': [],
'installable': True,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
thispc/download-manager
|
module/plugins/crypter/CryptCat.py
|
8
|
1949
|
# -*- coding: utf-8 -*-
import re
from ..internal.SimpleCrypter import SimpleCrypter
class CryptCat(SimpleCrypter):
__name__ = "CryptCat"
__type__ = "crypter"
__version__ = "0.04"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?crypt\.cat/\w+'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("folder_per_package", "Default;Yes;No",
"Create folder for each package", "Default"),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """crypt.cat decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
OFFLINE_PATTERN = r'Folder not available!'
LINK_PATTERN = r'<input .+?readonly="" value="\s*(.+?)" type="text">'
def get_links(self):
baseurl = self.req.http.lastEffectiveURL
url, inputs = self.parse_html_form()
if ">Enter your password.<" in self.data:
password = self.get_password()
if not password:
self.fail(_("Password required"))
inputs['Pass1'] = password
elif "Enter Captcha" in self.data:
m = re.search(r'<img src="(.+?)"', self.data)
if m is not None:
captcha_code = self.captcha.decrypt(
m.group(1), input_type="jpeg")
inputs['security_code'] = captcha_code
else:
return []
else:
return []
self.data = self.load(baseurl, post=inputs, ref=baseurl)
if "You have entered an incorrect password." in self.data:
self.fail(_("Wrong password"))
elif "Your filled the captcha wrongly!" in self.data:
self.retry_captcha()
return re.findall(self.LINK_PATTERN, self.data)
|
gpl-3.0
|
simonmonk/squid
|
build/lib.linux-armv6l-2.7/squid.py
|
1
|
1424
|
#squid.py Library
import RPi.GPIO as GPIO
import time
WHITE = (30, 30, 30)
OFF = (0, 0, 0)
RED = (100, 0, 0)
GREEN = (0, 100, 0)
BLUE = (0, 0, 100)
YELLOW = (50, 50, 0)
PURPLE = (50, 0, 50)
CYAN = (0, 50, 50)
class Squid:
RED_PIN = 0
GREEN_PIN = 0
BLUE_PIN = 0
red_pwm = 0
green_pwm = 0
blue_pwm = 0
def __init__(self, red_pin, green_pin, blue_pin):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
self.RED_PIN, self.GREEN_PIN, self.BLUE_PIN = red_pin, green_pin, blue_pin
GPIO.setup(self.RED_PIN, GPIO.OUT)
self.red_pwm = GPIO.PWM(self.RED_PIN, 500)
self.red_pwm.start(0)
GPIO.setup(self.GREEN_PIN, GPIO.OUT)
self.green_pwm = GPIO.PWM(self.GREEN_PIN, 500)
self.green_pwm.start(0)
GPIO.setup(self.BLUE_PIN, GPIO.OUT)
self.blue_pwm = GPIO.PWM(self.BLUE_PIN, 500)
self.blue_pwm.start(0)
def set_red(self, brightness):
self.red_pwm.ChangeDutyCycle(brightness)
def set_green(self, brightness):
self.green_pwm.ChangeDutyCycle(brightness)
def set_blue(self, brightness):
self.blue_pwm.ChangeDutyCycle(brightness)
def set_color(self, (r, g, b), brightness = 100):
self.set_red(r * brightness / 100)
self.set_green(g * brightness / 100)
self.set_blue(b * brightness / 100)
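# Illustrative usage (the BCM pin numbers 18, 23 and 24 are assumptions for a
# typical wiring, not mandated by this library):
#   rgb = Squid(18, 23, 24)
#   rgb.set_color(RED)        # full-brightness red
#   rgb.set_color(BLUE, 50)   # blue at half brightness
#   rgb.set_color(OFF)        # turn the LED off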
|
mit
|
hyowon/servo
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_mock.py
|
496
|
5168
|
#!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for mock module."""
import Queue
import threading
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from test import mock
class MockConnTest(unittest.TestCase):
"""A unittest for MockConn class."""
def setUp(self):
self._conn = mock.MockConn('ABC\r\nDEFG\r\n\r\nHIJK')
def test_readline(self):
self.assertEqual('ABC\r\n', self._conn.readline())
self.assertEqual('DEFG\r\n', self._conn.readline())
self.assertEqual('\r\n', self._conn.readline())
self.assertEqual('HIJK', self._conn.readline())
self.assertEqual('', self._conn.readline())
def test_read(self):
self.assertEqual('ABC\r\nD', self._conn.read(6))
self.assertEqual('EFG\r\n\r\nHI', self._conn.read(9))
self.assertEqual('JK', self._conn.read(10))
self.assertEqual('', self._conn.read(10))
def test_read_and_readline(self):
self.assertEqual('ABC\r\nD', self._conn.read(6))
self.assertEqual('EFG\r\n', self._conn.readline())
self.assertEqual('\r\nHIJK', self._conn.read(9))
self.assertEqual('', self._conn.readline())
def test_write(self):
self._conn.write('Hello\r\n')
self._conn.write('World\r\n')
self.assertEqual('Hello\r\nWorld\r\n', self._conn.written_data())
class MockBlockingConnTest(unittest.TestCase):
"""A unittest for MockBlockingConn class."""
def test_read(self):
"""Tests that data put to MockBlockingConn by put_bytes method can be
read from it.
"""
class LineReader(threading.Thread):
"""A test class that launches a thread, calls readline on the
specified conn repeatedly and puts the read data to the specified
queue.
"""
def __init__(self, conn, queue):
threading.Thread.__init__(self)
self._queue = queue
self._conn = conn
self.setDaemon(True)
self.start()
def run(self):
while True:
data = self._conn.readline()
self._queue.put(data)
conn = mock.MockBlockingConn()
queue = Queue.Queue()
reader = LineReader(conn, queue)
self.failUnless(queue.empty())
conn.put_bytes('Foo bar\r\n')
read = queue.get()
self.assertEqual('Foo bar\r\n', read)
class MockTableTest(unittest.TestCase):
"""A unittest for MockTable class."""
def test_create_from_dict(self):
table = mock.MockTable({'Key': 'Value'})
self.assertEqual('Value', table.get('KEY'))
self.assertEqual('Value', table['key'])
def test_create_from_list(self):
table = mock.MockTable([('Key', 'Value')])
self.assertEqual('Value', table.get('KEY'))
self.assertEqual('Value', table['key'])
def test_create_from_tuple(self):
table = mock.MockTable((('Key', 'Value'),))
self.assertEqual('Value', table.get('KEY'))
self.assertEqual('Value', table['key'])
def test_set_and_get(self):
table = mock.MockTable()
self.assertEqual(None, table.get('Key'))
table['Key'] = 'Value'
self.assertEqual('Value', table.get('Key'))
self.assertEqual('Value', table.get('key'))
self.assertEqual('Value', table.get('KEY'))
self.assertEqual('Value', table['Key'])
self.assertEqual('Value', table['key'])
self.assertEqual('Value', table['KEY'])
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
|
mpl-2.0
|
hlieberman/debian-ansible
|
docsite/build-site.py
|
35
|
3249
|
#!/usr/bin/env python
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of the Ansible Documentation
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
__docformat__ = 'restructuredtext'
import os
import sys
import traceback
try:
from sphinx.application import Sphinx
except ImportError:
print "#################################"
print "Dependency missing: Python Sphinx"
print "#################################"
sys.exit(1)
import os
class SphinxBuilder(object):
"""
Creates HTML documentation using Sphinx.
"""
def __init__(self):
"""
Run the DocCommand.
"""
print "Creating html documentation ..."
try:
buildername = 'html'
outdir = os.path.abspath(os.path.join(os.getcwd(), "htmlout"))
# Create the output directory if it doesn't exist
if not os.access(outdir, os.F_OK):
os.mkdir(outdir)
doctreedir = os.path.join('./', '.doctrees')
confdir = os.path.abspath('./')
srcdir = os.path.abspath('rst')
freshenv = True
# Create the builder
app = Sphinx(srcdir,
confdir,
outdir,
doctreedir,
buildername,
{},
sys.stdout,
sys.stderr,
freshenv)
app.builder.build_all()
except ImportError, ie:
traceback.print_exc()
except Exception, ex:
print >> sys.stderr, "FAIL! exiting ... (%s)" % ex
def build_docs(self):
self.app.builder.build_all()
def build_rst_docs():
docgen = SphinxBuilder()
if __name__ == '__main__':
if '-h' in sys.argv or '--help' in sys.argv:
print "This script builds the html documentation from rst/asciidoc sources.\n"
print " Run 'make docs' to build everything."
print " Run 'make viewdocs' to build and then preview in a web browser."
sys.exit(0)
# The 'htmldocs' make target will call this script with the 'rst'
# parameter. We don't need to run the 'htmlman' target then.
if "rst" in sys.argv:
build_rst_docs()
else:
# By default, perform the rst->html transformation and then
# the asciidoc->html transformation
build_rst_docs()
if "view" in sys.argv:
import webbrowser
if not webbrowser.open('htmlout/index.html'):
print >> sys.stderr, "Could not open in your web browser."
|
gpl-3.0
|
bestvibes/neo4j-social-network
|
mac_env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/chardistribution.py
|
2755
|
9226
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
# we only care about 2-byte characters in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
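# Illustrative numbers (the 0.75 ratio is an assumed typical distribution
# ratio, not taken from a specific frequency table): with 1000 analysed
# two-byte characters of which 900 fall in the 512 most frequent orders,
#   r = 900 / ((1000 - 900) * 0.75) = 12.0
# which exceeds SURE_YES, so get_confidence() returns 0.99.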
def got_enough_data(self):
# It is not necessary to receive all data to draw conclusion.
# For charset detection, certain amount of data is enough
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
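# Illustrative only (byte values chosen to lie inside the ranges above): for
# the EUC-TW byte pair 0xC5 0xA3, get_order returns
#   94 * (0xC5 - 0xC4) + 0xA3 - 0xA1 = 96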
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
# second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
|
mit
|
Maximilian-Reuter/SickRage-1
|
lib/twilio/rest/resources/pricing/voice.py
|
35
|
4195
|
from .. import NextGenInstanceResource, NextGenListResource
class Voice(object):
"""Holds references to the Voice pricing resources."""
name = "Voice"
key = "voice"
def __init__(self, base_uri, auth, timeout):
self.uri = "%s/Voice" % base_uri
self.countries = VoiceCountries(self.uri, auth, timeout)
self.numbers = VoiceNumbers(self.uri, auth, timeout)
class VoiceCountry(NextGenInstanceResource):
"""Pricing information for Twilio Voice services in a specific country.
.. attribute:: country
The full name of the country.
.. attribute:: iso_country
The country's 2-character ISO code.
.. attribute:: price_unit
The currency in which prices are measured, in ISO 4217 format
(e.g. 'usd', 'eur', 'jpy').
.. attribute:: outbound_prefix_prices
A list of dicts containing pricing information as follows:
- prefix_list: a list of number prefixes in the requested country
that have the same pricing
- friendly_name: a descriptive name for this prefix set
- call_base_price: the base price per minute for calls to numbers
matching any of these prefixes
- call_current_price: the current price per minute (including
volume discounts, etc.) for your account to make calls to
numbers matching these prefixes
.. attribute:: inbound_call_prices
A list of dicts containing pricing information for inbound calls:
- number_type: 'local', 'mobile', 'national', or 'toll_free'
- call_base_price: the base price per minute to receive a call
to this number type
- call_current_price: the current price per minute (including
volume discounts, etc.) for your account to receive a call
to this number type
"""
id_key = "iso_country"
class VoiceCountries(NextGenListResource):
instance = VoiceCountry
key = "countries"
name = "Countries"
def get(self, iso_country):
"""Retrieve pricing information for Twilio Voice in the specified
country.
:param iso_country: The two-letter ISO code for the country
"""
return self.get_instance(iso_country)
def list(self):
"""Retrieve the list of countries in which Twilio Voice is
available."""
resp, page = self.request("GET", self.uri)
return [self.load_instance(i) for i in page[self.key]]
class VoiceNumber(NextGenInstanceResource):
"""Pricing information for Twilio Voice services to and from a given
phone number.
.. attribute:: phone_number
The E.164-formatted phone number this pricing information applies to
.. attribute:: country
The name of the country this phone number belongs to
.. attribute:: iso_country
The two-character ISO code for the country
.. attribute:: outbound_call_price
A dict containing pricing information for outbound calls to this
number:
- base_price: the base price per minute for a call to this number
- current_price: the current price per minute (including discounts,
etc.) for a call to this number
.. attribute:: inbound_call_price
A dict containing pricing information for inbound calls to this number,
or null if this number is not Twilio-hosted.
- number_type: "local", "mobile", "national", or "toll_free"
- call_base_price: the base price per minute to receive a call to
this number
- call_current_price: the current price per minute (including
discounts, etc.) to receive a call to this number
"""
id_key = "number"
class VoiceNumbers(NextGenListResource):
instance = VoiceNumber
key = "numbers"
name = "Numbers"
def get(self, phone_number):
""" Retrieve pricing information for a specific phone number.
:param phone_number: the E.164-formatted number to retrieve info for
:return: a :class:`VoiceNumber` instance
"""
return self.get_instance(phone_number)
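# Illustrative usage (how the Voice pricing tree is reached from the REST
# client is an assumption; only the constructor defined above is shown here):
#   voice = Voice(base_uri, auth, timeout)
#   us = voice.countries.get('US')               # -> VoiceCountry instance
#   num = voice.numbers.get('+15105551234')      # -> VoiceNumber instance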
|
gpl-3.0
|
taedori81/django-oscar
|
src/oscar/apps/address/migrations/0001_initial.py
|
58
|
4480
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oscar.models.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Country',
fields=[
('iso_3166_1_a2', models.CharField(primary_key=True, max_length=2, verbose_name='ISO 3166-1 alpha-2', serialize=False)),
('iso_3166_1_a3', models.CharField(max_length=3, verbose_name='ISO 3166-1 alpha-3', blank=True)),
('iso_3166_1_numeric', models.CharField(max_length=3, verbose_name='ISO 3166-1 numeric', blank=True)),
('printable_name', models.CharField(max_length=128, verbose_name='Country name')),
('name', models.CharField(max_length=128, verbose_name='Official name')),
('display_order', models.PositiveSmallIntegerField(default=0, verbose_name='Display order', db_index=True, help_text='Higher the number, higher the country in the list.')),
('is_shipping_country', models.BooleanField(default=False, db_index=True, verbose_name='Is shipping country')),
],
options={
'ordering': ('-display_order', 'printable_name'),
'verbose_name_plural': 'Countries',
'verbose_name': 'Country',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(verbose_name='Title', max_length=64, blank=True, choices=[('Mr', 'Mr'), ('Miss', 'Miss'), ('Mrs', 'Mrs'), ('Ms', 'Ms'), ('Dr', 'Dr')])),
('first_name', models.CharField(max_length=255, verbose_name='First name', blank=True)),
('last_name', models.CharField(max_length=255, verbose_name='Last name', blank=True)),
('line1', models.CharField(max_length=255, verbose_name='First line of address')),
('line2', models.CharField(max_length=255, verbose_name='Second line of address', blank=True)),
('line3', models.CharField(max_length=255, verbose_name='Third line of address', blank=True)),
('line4', models.CharField(max_length=255, verbose_name='City', blank=True)),
('state', models.CharField(max_length=255, verbose_name='State/County', blank=True)),
('postcode', oscar.models.fields.UppercaseCharField(max_length=64, verbose_name='Post/Zip-code', blank=True)),
('search_text', models.TextField(editable=False, verbose_name='Search text - used only for searching addresses')),
('phone_number', oscar.models.fields.PhoneNumberField(verbose_name='Phone number', help_text='In case we need to call you about your order', blank=True)),
('notes', models.TextField(verbose_name='Instructions', help_text='Tell us anything we should know when delivering your order.', blank=True)),
('is_default_for_shipping', models.BooleanField(default=False, verbose_name='Default shipping address?')),
('is_default_for_billing', models.BooleanField(default=False, verbose_name='Default billing address?')),
('num_orders', models.PositiveIntegerField(default=0, verbose_name='Number of Orders')),
('hash', models.CharField(max_length=255, editable=False, db_index=True, verbose_name='Address Hash')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
('country', models.ForeignKey(verbose_name='Country', to='address.Country')),
('user', models.ForeignKey(verbose_name='User', related_name='addresses', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-num_orders'],
'verbose_name_plural': 'User addresses',
'verbose_name': 'User address',
'abstract': False,
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='useraddress',
unique_together=set([('user', 'hash')]),
),
]
|
bsd-3-clause
|
ssavvides/posix-omni-parser
|
parsers/Parser.py
|
1
|
4623
|
"""
<Started>
July 2013
<Author>
Savvas Savvides <[email protected]>
<Purpose>
Acts as the parent for all parsers. Defines some abstract methods required by
all parsers and some helper methods that can be used by any parser.
"""
import pickle
class Parser():
def __init__(self, trace_path):
"""
<Purpose>
Creates a Parser object which acts as the parent of parsers targeting
specific interposition utilities.
<Arguments>
trace_path:
The path to the trace file containing the traced system calls. This file
should contain the output of the strace utility.
<Exceptions>
IOError:
If the pickle file containing the system call definitions is not found.
(this file should come as part of this program)
<Side Effects>
None
<Returns>
None
"""
self.trace_path = trace_path
# get the system call definitions from the pickle file. These will be used
# to parse the parameters of each system call.
self.syscall_definitions = None
pickle_file = None
try:
pickle_file = open("syscall_definitions.pickle", 'rb')
self.syscall_definitions = pickle.load(pickle_file)
except IOError:
raise IOError("The pickle file holding the system call definitions " +
"was not found.")
finally:
if pickle_file != None:
pickle_file.close()
# detect the options used with the tracing utility. These options will later be used to
# parse all the trace lines of the file.
self.trace_options = self._detect_trace_options()
# get the HOME environment variable. Normally the environment variable appears as arguments
# of the execve system call. execve syscall should be the first system call in a trace. The
# HOME environment variable is useful as general information about the trace and in
# particular when a file bundle needs to be generated. To generate a file bundle all the
# files referenced in the trace must be located and included in the bundle. The location of
# these files is relative to the HOME variable if one is found, otherwise the home
# directory is assumed to be the current directory (pwd)
self.home_env = self._get_home_environment()
""" ABSTRACT METHODS """
def _get_home_environment(self):
raise NotImplementedError
def _detect_trace_options(self):
raise NotImplementedError
def parse_trace(self):
raise NotImplementedError
def _merge_quote_args(self, args_list):
"""
<Purpose>
Used to fix errors on parsed arguments. Specifically, if a string value in
the trace contains ", " the string will be wrongly split in two arguments.
This method searches for arguments that start with a double quote and if
that argument does not end with a double quote (an un-escaped double quote)
then the argument must have been wrongly split into two. Reconstruct the
original argument by joining the current part of the argument with the next
part in the arguments list.
<Arguments>
args_list:
A list of string arguments.
<Exceptions>
None
<Side Effects>
None
<Returns>
line_parts:
The updated line_parts.
"""
if len(args_list) <= 1:
return args_list
index = 0
while index < len(args_list):
# if the current argument starts with a quote but does not end with a quote,
# then the argument must have been wrongly split.
if args_list[index].startswith("\""):
while index + 1 < len(args_list):
if self._ends_in_unescaped_quote(args_list[index].strip(".")):
break
args_list[index] += ", " + args_list[index + 1]
args_list.pop(index + 1)
index += 1
return args_list
def _ends_in_unescaped_quote(self, string):
"""
Helper method for _merge_quote_args
"""
if not string or string[-1] != '"':
return False
for index in range(-2, -len(string) - 1, -1):
if string[index] != '\\':
return index % 2 == 0
return False
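# Illustrative behaviour of _merge_quote_args (the argument values are made
# up; in practice they come from splitting a traced system call line on ", "):
#   ['3', '"hello', 'world"', '42']  -> ['3', '"hello, world"', '42']
#   ['3', '"hello, world"', '42']    -> unchanged (the quote is already closed)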
|
apache-2.0
|
atsao72/sympy
|
sympy/matrices/expressions/hadamard.py
|
91
|
2443
|
from __future__ import print_function, division
from sympy.core import Mul, sympify
from sympy.strategies import unpack, flatten, condition, exhaust, do_one
from sympy.matrices.expressions.matexpr import MatrixExpr, ShapeError
def hadamard_product(*matrices):
"""
Return the elementwise (aka Hadamard) product of matrices.
Examples
========
>>> from sympy.matrices import hadamard_product, MatrixSymbol
>>> A = MatrixSymbol('A', 2, 3)
>>> B = MatrixSymbol('B', 2, 3)
>>> hadamard_product(A)
A
>>> hadamard_product(A, B)
A.*B
>>> hadamard_product(A, B)[0, 1]
A[0, 1]*B[0, 1]
"""
if not matrices:
raise TypeError("Empty Hadamard product is undefined")
validate(*matrices)
if len(matrices) == 1:
return matrices[0]
else:
return HadamardProduct(*matrices).doit()
class HadamardProduct(MatrixExpr):
"""
Elementwise product of matrix expressions
This is a symbolic object that simply stores its argument without
evaluating it. To actually compute the product, use the function
``hadamard_product()``.
>>> from sympy.matrices import hadamard_product, HadamardProduct, MatrixSymbol
>>> A = MatrixSymbol('A', 5, 5)
>>> B = MatrixSymbol('B', 5, 5)
>>> isinstance(hadamard_product(A, B), HadamardProduct)
True
"""
is_HadamardProduct = True
def __new__(cls, *args, **kwargs):
args = list(map(sympify, args))
check = kwargs.get('check' , True)
if check:
validate(*args)
return super(HadamardProduct, cls).__new__(cls, *args)
@property
def shape(self):
return self.args[0].shape
def _entry(self, i, j):
return Mul(*[arg._entry(i, j) for arg in self.args])
def _eval_transpose(self):
from sympy.matrices.expressions.transpose import transpose
return HadamardProduct(*list(map(transpose, self.args)))
def doit(self, **ignored):
return canonicalize(self)
def validate(*args):
if not all(arg.is_Matrix for arg in args):
raise TypeError("Mix of Matrix and Scalar symbols")
A = args[0]
for B in args[1:]:
if A.shape != B.shape:
raise ShapeError("Matrices %s and %s are not aligned" % (A, B))
rules = (unpack,
flatten)
canonicalize = exhaust(condition(lambda x: isinstance(x, HadamardProduct),
do_one(*rules)))
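# Illustrative effect of canonicalize (reached via HadamardProduct.doit()):
# nested products are flattened and trivial one-argument products unpacked, e.g.
#   HadamardProduct(HadamardProduct(A, B), C).doit()  ->  A.*B.*C
#   HadamardProduct(A).doit()                          ->  A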
|
bsd-3-clause
|
paplorinc/intellij-community
|
python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/pgen2/driver.py
|
212
|
5164
|
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser driver.
This provides a high-level interface to parse a file into a syntax tree.
"""
__author__ = "Guido van Rossum <[email protected]>"
__all__ = ["Driver", "load_grammar"]
# Python imports
import codecs
import os
import logging
import StringIO
import sys
# Pgen imports
from . import grammar, parse, token, tokenize, pgen
class Driver(object):
def __init__(self, grammar, convert=None, logger=None):
self.grammar = grammar
if logger is None:
logger = logging.getLogger()
self.logger = logger
self.convert = convert
def parse_tokens(self, tokens, debug=False):
"""Parse a series of tokens and return the syntax tree."""
# XXX Move the prefix computation into a wrapper around tokenize.
p = parse.Parser(self.grammar, self.convert)
p.setup()
lineno = 1
column = 0
type = value = start = end = line_text = None
prefix = u""
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if start != (lineno, column):
assert (lineno, column) <= start, ((lineno, column), start)
s_lineno, s_column = start
if lineno < s_lineno:
prefix += "\n" * (s_lineno - lineno)
lineno = s_lineno
column = 0
if column < s_column:
prefix += line_text[column:s_column]
column = s_column
if type in (tokenize.COMMENT, tokenize.NL):
prefix += value
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
continue
if type == token.OP:
type = grammar.opmap[value]
if debug:
self.logger.debug("%s %r (prefix=%r)",
token.tok_name[type], value, prefix)
if p.addtoken(type, value, (prefix, start)):
if debug:
self.logger.debug("Stop.")
break
prefix = ""
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
else:
# We never broke out -- EOF is too soon (how can this happen???)
raise parse.ParseError("incomplete input",
type, value, (prefix, start))
return p.rootnode
def parse_stream_raw(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline)
return self.parse_tokens(tokens, debug)
def parse_stream(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)
def parse_file(self, filename, encoding=None, debug=False):
"""Parse a file and return the syntax tree."""
stream = codecs.open(filename, "r", encoding)
try:
return self.parse_stream(stream, debug)
finally:
stream.close()
def parse_string(self, text, debug=False):
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
return self.parse_tokens(tokens, debug)
def load_grammar(gt="Grammar.txt", gp=None,
save=True, force=False, logger=None):
"""Load the grammar (maybe from a pickle)."""
if logger is None:
logger = logging.getLogger()
if gp is None:
head, tail = os.path.splitext(gt)
if tail == ".txt":
tail = ""
gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
if force or not _newer(gp, gt):
logger.info("Generating grammar tables from %s", gt)
g = pgen.generate_grammar(gt)
if save:
logger.info("Writing grammar tables to %s", gp)
try:
g.dump(gp)
except IOError, e:
logger.info("Writing failed:"+str(e))
else:
g = grammar.Grammar()
g.load(gp)
return g
def _newer(a, b):
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
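# Illustrative usage (the grammar file name is the default shipped with
# lib2to3; the source string is an arbitrary example):
#   g = load_grammar("Grammar.txt")
#   d = Driver(g, convert=None)
#   tree = d.parse_string(u"x = 1\n")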
def main(*args):
"""Main program, when run as a script: produce grammar pickle files.
Calls load_grammar for each argument, a path to a grammar text file.
"""
if not args:
args = sys.argv[1:]
logging.basicConfig(level=logging.INFO, stream=sys.stdout,
format='%(message)s')
for gt in args:
load_grammar(gt, save=True, force=True)
return True
if __name__ == "__main__":
sys.exit(int(not main()))
|
apache-2.0
|
Forage/Gramps
|
gramps/gen/db/backup.py
|
1
|
6924
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# gen/db/backup.py
# $Id$
#
"""
Description
===========
This module provides backup and restore functions for a database. The
backup function saves the data into backup files, while the restore
function loads the data back into a database.
You should only restore the data into an empty database.
Implementation
==============
Not all of the database tables need to be backed up, since many are
automatically generated from the others. The tables that are backed up
are the primary tables and the metadata table.
The database consists of a table of "pickled" tuples. Each of the
primary tables is "walked", and the pickled tuple is extracted, and
written to the backup file.
Restoring the data is just as simple. The backup file is parsed one
entry at a time, and each entry is inserted into the associated database table. The
derived tables are built automatically as the items are entered into
db.
"""
#-------------------------------------------------------------------------
#
# load standard python libraries
#
#-------------------------------------------------------------------------
import os
import sys
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import pickle
#------------------------------------------------------------------------
#
# Gramps libs
#
#------------------------------------------------------------------------
from .exceptions import DbException
from .write import FAMILY_TBL, PLACES_TBL, SOURCES_TBL, MEDIA_TBL, \
EVENTS_TBL, PERSON_TBL, REPO_TBL, NOTE_TBL, TAG_TBL, META, CITATIONS_TBL
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
LOG = logging.getLogger(".Backup")
def backup(database):
"""
Exports the database to a set of backup files. These files consist
of the pickled database tables, one file for each table.
The heavy lifting is done by the private __do_export function. The
purpose of this function is to catch any exceptions that occur.
@param database: database instance to backup
@type database: DbDir
"""
try:
__do_export(database)
except (OSError, IOError) as msg:
raise DbException(str(msg))
def __mk_backup_name(database, base):
"""
Return the backup name of the database table
@param database: database instance
@type database: DbDir
@param base: base name of the table
@type base: str
"""
return os.path.join(database.get_save_path(), base + ".gbkp")
def __mk_tmp_name(database, base):
"""
Return the temporary backup name of the database table
@param database: database instance
@type database: DbDir
@param base: base name of the table
@type base: str
"""
return os.path.join(database.get_save_path(), base + ".gbkp.new")
def __do_export(database):
"""
Loop through each table of the database, saving the pickled data
to a file.
@param database: database instance to backup
@type database: DbDir
"""
try:
for (base, tbl) in __build_tbl_map(database):
backup_name = __mk_tmp_name(database, base)
backup_table = open(backup_name, 'wb')
cursor = tbl.cursor()
data = cursor.first()
while data:
pickle.dump(data, backup_table, 2)
data = cursor.next()
cursor.close()
backup_table.close()
except (IOError,OSError):
return
for (base, tbl) in __build_tbl_map(database):
new_name = __mk_backup_name(database, base)
old_name = __mk_tmp_name(database, base)
if os.path.isfile(new_name):
os.unlink(new_name)
os.rename(old_name, new_name)
def restore(database):
"""
Restores the database from a set of backup files. These files consist
of the pickled database tables, one file for each table.
The heavy lifting is done by the private __do_restore function. The
purpose of this function is to catch any exceptions that occur.
@param database: database instance to restore
@type database: DbDir
"""
try:
__do_restore(database)
except (OSError, IOError) as msg:
raise DbException(str(msg))
def __do_restore(database):
"""
Loop through each table of the database, restoring the pickled data
to the appropriate database file.
@param database: database instance to backup
@type database: DbDir
"""
for (base, tbl) in __build_tbl_map(database):
backup_name = __mk_backup_name(database, base)
backup_table = open(backup_name, 'rb')
__load_tbl_txn(database, backup_table, tbl)
database.rebuild_secondary()
def __load_tbl_txn(database, backup_table, tbl):
"""
Load the pickled backup data from the backup file into the database table
@param database: database instance
@type database: DbDir
@param backup_table: file containing the backup data
@type backup_table: file
@param tbl: Berkeley db database table
@type tbl: Berkeley db database table
"""
try:
while True:
data = pickle.load(backup_table)
txn = database.env.txn_begin()
tbl.put(data[0], data[1], txn=txn)
txn.commit()
except EOFError:
backup_table.close()
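# Illustrative sketch (not part of Gramps): a standalone reader for the
# ".gbkp" format described in the module docstring -- each backup file is
# just a sequence of pickled (handle, data) tuples, read back with
# pickle.load() until EOFError. This helper is for inspection only and is
# not used by backup() or restore().
def _example_read_gbkp(path):
    """Return the list of pickled entries stored in one backup file."""
    entries = []
    backup_file = open(path, 'rb')
    try:
        while True:
            entries.append(pickle.load(backup_file))
    except EOFError:
        backup_file.close()
    return entries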
def __build_tbl_map(database):
"""
Builds a table map of names to database tables.
@param database: database instance to backup
@type database: DbDir
"""
return [
( PERSON_TBL, database.person_map.db),
( FAMILY_TBL, database.family_map.db),
( PLACES_TBL, database.place_map.db),
( SOURCES_TBL, database.source_map.db),
( CITATIONS_TBL, database.citation_map.db),
( REPO_TBL, database.repository_map.db),
( NOTE_TBL, database.note_map.db),
( MEDIA_TBL, database.media_map.db),
( EVENTS_TBL, database.event_map.db),
( TAG_TBL, database.tag_map.db),
( META, database.metadata.db),
]
|
gpl-2.0
|
cd80/UtilizedLLVM
|
tools/clang/bindings/python/clang/enumerations.py
|
307
|
1077
|
#===- enumerations.py - Python Enumerations ------------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
"""
Clang Enumerations
==================
This module provides static definitions of enumerations that exist in libclang.
Enumerations are typically defined as a list of tuples. The exported values are
typically munged into other types or classes at module load time.
All enumerations are centrally defined in this file so they are all grouped
together and easier to audit. And, maybe even one day this file will be
automatically generated by scanning the libclang headers!
"""
# Maps to CXTokenKind. Note that libclang maintains a separate set of token
# enumerations from the C++ API.
TokenKinds = [
('PUNCTUATION', 0),
('KEYWORD', 1),
('IDENTIFIER', 2),
('LITERAL', 3),
('COMMENT', 4),
]
__all__ = ['TokenKinds']
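# Illustrative sketch (not part of the libclang bindings): one way the
# (name, value) pairs above are typically "munged" at load time by a
# consumer module, e.g. attaching constants to a class and building a
# reverse lookup. The TokenKind class here is a stand-in for illustration,
# not the real cindex.TokenKind.
if __name__ == '__main__':
    class TokenKind(object):
        pass
    for _name, _value in TokenKinds:
        setattr(TokenKind, _name, _value)
    _by_value = dict((v, n) for n, v in TokenKinds)
    assert TokenKind.KEYWORD == 1
    assert _by_value[3] == 'LITERAL'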
|
unlicense
|
erhuabushuo/crossbar
|
crossbar/router/test/test_observation.py
|
2
|
12951
|
#####################################################################################
#
# Copyright (C) Tavendo GmbH
#
# Unless a separate license agreement exists between you and Tavendo GmbH (e.g. you
# have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, then the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU Affero General Public License Version 3 for more details.
#
# You should have received a copy of the GNU Affero General Public license along
# with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################
from __future__ import absolute_import
import unittest
import pickle
from io import BytesIO as StringIO
from autobahn.wamp.message import Subscribe
from crossbar._compat import long
from crossbar.router.observation import ExactUriObservation, \
PrefixUriObservation, WildcardUriObservation, UriObservationMap
class FakeObserver:
pass
class TestObservation(unittest.TestCase):
def test_create_exact(self):
"""
Create an exact-matching observation.
"""
obs1 = ExactUriObservation(u"com.example.uri1")
self.assertTrue(isinstance(obs1.id, (int, long)))
self.assertEqual(obs1.uri, u"com.example.uri1")
self.assertEqual(obs1.match, u"exact")
self.assertEqual(obs1.observers, set())
def test_create_prefix(self):
"""
Create a prefix-matching observation.
"""
obs1 = PrefixUriObservation(u"com.example.uri1")
self.assertTrue(isinstance(obs1.id, (int, long)))
self.assertEqual(obs1.uri, u"com.example.uri1")
self.assertEqual(obs1.match, u"prefix")
self.assertEqual(obs1.observers, set())
def test_create_wildcard(self):
"""
Create a wildcard-matching observation.
"""
obs1 = WildcardUriObservation(u"com.example..create")
self.assertTrue(isinstance(obs1.id, (int, long)))
self.assertEqual(obs1.uri, u"com.example..create")
self.assertEqual(obs1.match, u"wildcard")
self.assertEqual(obs1.observers, set())
self.assertEqual(obs1.pattern, (False, False, True, False))
self.assertEqual(obs1.pattern_len, 4)
def test_pickle(self):
"""
Test pickling of observations (__getstate__, __setstate__).
"""
obsvs = [
ExactUriObservation(u"com.example.uri1"),
PrefixUriObservation(u"com.example.uri1"),
WildcardUriObservation(u"com.example..create"),
]
for sub in obsvs:
data = StringIO()
pickle.dump(sub, data)
read_fd = StringIO(data.getvalue())
obs2 = pickle.load(read_fd)
self.assertEqual(sub.id, obs2.id)
self.assertEqual(sub.uri, obs2.uri)
self.assertEqual(sub.match, obs2.match)
self.assertEqual(obs2.observers, set())
class TestUriObservationMap(unittest.TestCase):
def test_match_observations_empty(self):
"""
An empty observer map returns an empty observer set for any URI.
"""
obs_map = UriObservationMap()
for uri in [u"com.example.uri1", u"com.example.uri2", u""]:
obsvs = obs_map.match_observations(uri)
self.assertEqual(obsvs, [])
def test_add_observer(self):
"""
When an observer is added, an observation is returned.
"""
obs_map = UriObservationMap()
uri1 = u"com.example.uri1"
obs1 = FakeObserver()
observation, was_already_observed, is_first_observer = obs_map.add_observer(obs1, uri1)
self.assertIsInstance(observation, ExactUriObservation)
self.assertFalse(was_already_observed)
self.assertTrue(is_first_observer)
def test_add_observer_was_already_observed(self):
"""
When an observer is added, the ``was_already_observed`` flag in
the return is correct.
"""
obs_map = UriObservationMap()
uri1 = u"com.example.uri1"
obs1 = FakeObserver()
observation1, was_already_observed, _ = obs_map.add_observer(obs1, uri1)
self.assertFalse(was_already_observed)
observation2, was_already_observed, _ = obs_map.add_observer(obs1, uri1)
self.assertTrue(was_already_observed)
self.assertEqual(observation1, observation2)
def test_add_observer_is_first_observer(self):
"""
When an observer is added, the ``is_first_observer`` flag in the
return is correct.
"""
obs_map = UriObservationMap()
uri1 = u"com.example.uri1"
obs1 = FakeObserver()
obs2 = FakeObserver()
_, _, is_first_observer = obs_map.add_observer(obs1, uri1)
self.assertTrue(is_first_observer)
_, _, is_first_observer = obs_map.add_observer(obs2, uri1)
self.assertFalse(is_first_observer)
def test_match_observations_match_exact(self):
"""
When an observer observes a URI (match exact), the observer
is returned for the URI upon lookup.
"""
obs_map = UriObservationMap()
uri1 = u"com.example.uri1"
obs1 = FakeObserver()
observation1, _, _ = obs_map.add_observer(obs1, uri1)
observations = obs_map.match_observations(uri1)
self.assertEqual(observations, [observation1])
def test_match_observations_match_exact_same(self):
"""
When multiple different observers observe the same URI (match exact),
all get the same observation.
"""
obs_map = UriObservationMap()
uri1 = u"com.example.uri1"
obs1 = FakeObserver()
obs2 = FakeObserver()
obs3 = FakeObserver()
observation1, _, _ = obs_map.add_observer(obs1, uri1)
observation2, _, _ = obs_map.add_observer(obs2, uri1)
observation3, _, _ = obs_map.add_observer(obs3, uri1)
observations = obs_map.match_observations(uri1)
self.assertEqual(observations, [observation1])
self.assertEqual(observations[0].observers, set([obs1, obs2, obs3]))
def test_match_observations_match_exact_multi(self):
"""
When the same observer is added multiple times to the same URI (match exact),
the observation is only returned once, and every time the same observation ID is returned.
"""
obs_map = UriObservationMap()
uri1 = u"com.example.uri1"
obs1 = FakeObserver()
observation1, _, _ = obs_map.add_observer(obs1, uri1)
observation2, _, _ = obs_map.add_observer(obs1, uri1)
observation3, _, _ = obs_map.add_observer(obs1, uri1)
self.assertEqual(observation1, observation2)
self.assertEqual(observation1, observation3)
observations = obs_map.match_observations(uri1)
self.assertEqual(observations, [observation1])
self.assertEqual(observations[0].observers, set([obs1]))
def test_match_observations_match_prefix(self):
"""
When an observer observes a URI (match prefix), the observer is
returned for all URIs upon lookup where the observed URI is a prefix.
"""
obs_map = UriObservationMap()
obs1 = FakeObserver()
observation1, _, _ = obs_map.add_observer(obs1, u"com.example", match=Subscribe.MATCH_PREFIX)
# test matches
for uri in [u"com.example.uri1.foobar.barbaz",
u"com.example.uri1.foobar",
u"com.example.uri1",
u"com.example.topi",
u"com.example.",
u"com.example2",
u"com.example"]:
observations = obs_map.match_observations(uri)
self.assertEqual(observations, [observation1])
self.assertEqual(observations[0].observers, set([obs1]))
# test non-matches
for uri in [u"com.foobar.uri1",
u"com.exampl.uri1",
u"com.exampl",
u"com",
u""]:
observations = obs_map.match_observations(uri)
self.assertEqual(observations, [])
def test_match_observations_match_wildcard_single(self):
"""
When an observer observes a URI (match wildcard), the observer is
returned for all URIs upon lookup where the looked-up URI matches
the wildcard pattern.
"""
obs_map = UriObservationMap()
obs1 = FakeObserver()
observation1, _, _ = obs_map.add_observer(obs1, u"com.example..create", match=Subscribe.MATCH_WILDCARD)
# test matches
for uri in [u"com.example.foobar.create",
u"com.example.1.create"
]:
observations = obs_map.match_observations(uri)
self.assertEqual(observations, [observation1])
self.assertEqual(observations[0].observers, set([obs1]))
# test non-matches
for uri in [u"com.example.foobar.delete",
u"com.example.foobar.create2",
u"com.example.foobar.create.barbaz"
u"com.example.foobar",
u"com.example.create",
u"com.example"
]:
observations = obs_map.match_observations(uri)
self.assertEqual(observations, [])
def test_match_observations_match_wildcard_multi(self):
"""
Test with multiple wildcards in a wildcard-matching observation.
"""
obs_map = UriObservationMap()
obs1 = FakeObserver()
observation1, _, _ = obs_map.add_observer(obs1, u"com...create", match=Subscribe.MATCH_WILDCARD)
# test matches
for uri in [u"com.example.foobar.create",
u"com.example.1.create",
u"com.myapp.foobar.create",
u"com.myapp.1.create",
]:
observations = obs_map.match_observations(uri)
self.assertEqual(observations, [observation1])
self.assertEqual(observations[0].observers, set([obs1]))
# test non-matches
for uri in [u"com.example.foobar.delete",
u"com.example.foobar.create2",
u"com.example.foobar.create.barbaz"
u"com.example.foobar",
u"org.example.foobar.create",
u"org.example.1.create",
u"org.myapp.foobar.create",
u"org.myapp.1.create",
]:
observations = obs_map.match_observations(uri)
self.assertEqual(observations, [])
def test_match_observations_match_multimode(self):
"""
When an observer is added to multiple observations, each matching
a given URI being looked up, the observer is returned in each observation.
"""
obs_map = UriObservationMap()
obs1 = FakeObserver()
observation1, _, _ = obs_map.add_observer(obs1, u"com.example.product.create", match=Subscribe.MATCH_EXACT)
observation2, _, _ = obs_map.add_observer(obs1, u"com.example.product", match=Subscribe.MATCH_PREFIX)
observation3, _, _ = obs_map.add_observer(obs1, u"com.example..create", match=Subscribe.MATCH_WILDCARD)
observations = obs_map.match_observations(u"com.example.product.create")
self.assertEqual(observations, [observation1, observation2, observation3])
self.assertEqual(observations[0].observers, set([obs1]))
self.assertEqual(observations[1].observers, set([obs1]))
self.assertEqual(observations[2].observers, set([obs1]))
observations = obs_map.match_observations(u"com.example.foobar.create")
self.assertEqual(observations, [observation3])
self.assertEqual(observations[0].observers, set([obs1]))
observations = obs_map.match_observations(u"com.example.product.delete")
self.assertEqual(observations, [observation2])
self.assertEqual(observations[0].observers, set([obs1]))
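# Illustrative sketch (not part of crossbar): how a wildcard URI such as
# u"com.example..create" corresponds to the pattern tuple asserted in
# test_create_wildcard above -- empty URI components act as wildcards.
# This is a simplified re-derivation for clarity, not crossbar's own code.
def _example_wildcard_pattern(uri):
    components = uri.split(u".")
    pattern = tuple(component == u"" for component in components)
    return pattern, len(pattern)
# _example_wildcard_pattern(u"com.example..create")
# -> ((False, False, True, False), 4)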
|
agpl-3.0
|
hesseltuinhof/mxnet
|
example/gluon/dcgan.py
|
1
|
8010
|
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
import argparse
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd
import numpy as np
import logging
from datetime import datetime
import os
import time
def fill_buf(buf, i, img, shape):
n = buf.shape[0]//shape[1]
m = buf.shape[1]//shape[0]
sx = (i%m)*shape[0]
sy = (i//m)*shape[1]
buf[sy:sy+shape[1], sx:sx+shape[0], :] = img
return None
def visual(title, X, name):
assert len(X.shape) == 4
X = X.transpose((0, 2, 3, 1))
X = np.clip((X - np.min(X))*(255.0/(np.max(X) - np.min(X))), 0, 255).astype(np.uint8)
n = np.ceil(np.sqrt(X.shape[0]))
buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
buff = buff[:,:,::-1]
plt.imshow(buff)
plt.title(title)
plt.savefig(name)
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset to use. options are cifar10 and imagenet.')
parser.add_argument('--batch-size', type=int, default=64, help='input batch size')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='./results', help='folder to output images and model checkpoints')
parser.add_argument('--check-point', default=True, help="save results at each epoch or not")
opt = parser.parse_args()
print(opt)
logging.basicConfig(level=logging.DEBUG)
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 3
if opt.cuda:
ctx = mx.gpu(0)
else:
ctx = mx.cpu()
check_point = bool(opt.check_point)
outf = opt.outf
if not os.path.exists(outf):
os.makedirs(outf)
def transformer(data, label):
# resize to 64x64
data = mx.image.imresize(data, 64, 64)
# transpose from (64, 64, 3) to (3, 64, 64)
data = mx.nd.transpose(data, (2,0,1))
# normalize to [-1, 1]
data = data.astype(np.float32)/128 - 1
# if image is greyscale, repeat 3 times to get RGB image.
if data.shape[0] == 1:
data = mx.nd.tile(data, (3, 1, 1))
return data, label
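# Illustrative sketch (not part of the original example): a small helper to
# sanity-check what transformer() produces -- a (3, 64, 64) float32 array
# scaled into roughly [-1, 1]. It is defined here for clarity and is not
# called anywhere in this script.
def _check_transformed_sample(data, label):
    out, _ = transformer(data, label)
    assert out.shape == (3, 64, 64)
    assert out.dtype == np.float32
    values = out.asnumpy()
    assert values.min() >= -1.0 and values.max() <= 1.0
    return out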
train_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=True, transform=transformer),
batch_size=opt.batch_size, shuffle=True, last_batch='discard')
val_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=False, transform=transformer),
batch_size=opt.batch_size, shuffle=False)
# build the generator
netG = nn.Sequential()
with netG.name_scope():
# input is Z, going into a convolution
netG.add(nn.Conv2DTranspose(ngf * 8, 4, 1, 0, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 4 x 4
netG.add(nn.Conv2DTranspose(ngf * 4, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 8 x 8
netG.add(nn.Conv2DTranspose(ngf * 2, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 16 x 16
netG.add(nn.Conv2DTranspose(ngf, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 32 x 32
netG.add(nn.Conv2DTranspose(nc, 4, 2, 1, use_bias=False))
netG.add(nn.Activation('tanh'))
# state size. (nc) x 64 x 64
# build the discriminator
netD = nn.Sequential()
with netD.name_scope():
# input is (nc) x 64 x 64
netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False))
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 32 x 32
netD.add(nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 16 x 16
netD.add(nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 8 x 8
netD.add(nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 4 x 4
netD.add(nn.Conv2D(2, 4, 1, 0, use_bias=False))
# loss
loss = gluon.loss.SoftmaxCrossEntropyLoss()
# initialize the generator and the discriminator
netG.initialize(mx.init.Normal(0.02), ctx=ctx)
netD.initialize(mx.init.Normal(0.02), ctx=ctx)
# trainer for the generator and the discriminator
trainerG = gluon.Trainer(netG.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
# ============printing==============
real_label = mx.nd.ones((opt.batch_size,), ctx=ctx)
fake_label = mx.nd.zeros((opt.batch_size,), ctx=ctx)
metric = mx.metric.Accuracy()
print('Training... ')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
iter = 0
for epoch in range(opt.nepoch):
tic = time.time()
btic = time.time()
for data, _ in train_data:
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real_t
data = data.as_in_context(ctx)
noise = mx.nd.random_normal(0, 1, shape=(opt.batch_size, nz, 1, 1), ctx=ctx)
with autograd.record():
output = netD(data)
output = output.reshape((opt.batch_size, 2))
errD_real = loss(output, real_label)
metric.update([real_label,], [output,])
fake = netG(noise)
output = netD(fake.detach())
output = output.reshape((opt.batch_size, 2))
errD_fake = loss(output, fake_label)
errD = errD_real + errD_fake
errD.backward()
metric.update([fake_label,], [output,])
trainerD.step(opt.batch_size)
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
with autograd.record():
output = netD(fake)
output = output.reshape((-1, 2))
errG = loss(output, real_label)
errG.backward()
trainerG.step(opt.batch_size)
name, acc = metric.get()
# logging.info('speed: {} samples/s'.format(opt.batch_size / (time.time() - btic)))
logging.info('discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d' %(mx.nd.mean(errD).asscalar(), mx.nd.mean(errG).asscalar(), acc, iter, epoch))
if iter % 1 == 0:
visual('gout', fake.asnumpy(), name=os.path.join(outf,'fake_img_iter_%d.png' %iter))
visual('data', data.asnumpy(), name=os.path.join(outf,'real_img_iter_%d.png' %iter))
iter = iter + 1
btic = time.time()
name, acc = metric.get()
metric.reset()
logging.info('\nbinary training acc at epoch %d: %s=%f' % (epoch, name, acc))
logging.info('time: %f' % (time.time() - tic))
if check_point:
netG.save_params(os.path.join(outf,'generator_epoch_%d.params' %epoch))
netD.save_params(os.path.join(outf,'discriminator_epoch_%d.params' % epoch))
netG.save_params(os.path.join(outf, 'generator.params'))
netD.save_params(os.path.join(outf, 'discriminator.params'))
|
apache-2.0
|
frankinit/ThinkStats2
|
code/chap14soln.py
|
68
|
4396
|
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import numpy as np
import random
import first
import normal
import thinkstats2
import thinkplot
def PlotPregLengths(live, firsts, others):
"""Plots sampling distributions under the null and alternate hypotheses.
live, firsts, others: DataFrames
Results:
null hypothesis N(0, 0.00319708)
0.0837707042554 0.0837707042554 (90% CI)
estimated params N(0.0780373, 0.00321144)
-0.0151758158699 0.171250349425 (90% CI)
Sampling distribution under the null hypothesis is centered
around 0.
Sampling distribution under the estimated parameters is centered
around the observed difference, 0.078.
The variance of the two distributions is very similar; in practice,
you could reasonably compute whichever one is easier.
"""
print('prglngth example')
delta = firsts.prglngth.mean() - others.prglngth.mean()
print(delta)
dist1 = normal.SamplingDistMean(live.prglngth, len(firsts))
dist2 = normal.SamplingDistMean(live.prglngth, len(others))
dist = dist1 - dist2
print('null hypothesis', dist)
print(dist.Prob(-delta), 1 - dist.Prob(delta))
thinkplot.PrePlot(2)
thinkplot.Plot(dist, label='null hypothesis')
dist1 = normal.SamplingDistMean(firsts.prglngth, len(firsts))
dist2 = normal.SamplingDistMean(others.prglngth, len(others))
dist = dist1 - dist2
print('estimated params', dist)
print(dist.Percentile(5), dist.Percentile(95))
thinkplot.Plot(dist, label='estimated params')
thinkplot.Show(xlabel='difference in means (weeks)',
ylabel='CDF')
def GenerateAdultWeight(birth_weights, n):
"""Generate a random adult weight by simulating annual gain.
birth_weights: sequence of birth weights in lbs
n: number of years to simulate
returns: adult weight in lbs
"""
bw = random.choice(birth_weights)
factors = np.random.normal(1.09, 0.03, n)
aw = bw * np.prod(factors)
return aw
def PlotAdultWeights(live):
"""Makes a normal probability plot of log10 adult weight.
live: DataFrame of live births
results:
With n=40 the distribution is approximately lognormal except for
the lowest weights.
Actual distribution might deviate from lognormal because it is
a mixture of people at different ages, or because annual weight
gains are correlated.
"""
birth_weights = live.totalwgt_lb.dropna().values
aws = [GenerateAdultWeight(birth_weights, 40) for _ in range(1000)]
log_aws = np.log10(aws)
thinkstats2.NormalProbabilityPlot(log_aws)
thinkplot.Show(xlabel='standard normal values',
ylabel='adult weight (log10 lbs)')
def TestIntervention():
"""Tests whether reported changes are statistically significant.
Results:
-1.66 4.73095323208e-05
-0.26 0.125267987207
1.4 0.00182694836898
Conclusions:
1) Gender gap before intervention was 1.66 points (p-value 5e-5)
2) Gender gap after was 0.26 points (p-value 0.13, not significant)
3) Change in gender gap was 1.4 points (p-value 0.002, significant).
"""
male_before = normal.Normal(3.57, 0.28**2)
male_after = normal.Normal(3.44, 0.16**2)
female_before = normal.Normal(1.91, 0.32**2)
female_after = normal.Normal(3.18, 0.16**2)
diff_before = female_before - male_before
print('mean, p-value', diff_before.mu, 1-diff_before.Prob(0))
print('CI', diff_before.Percentile(5), diff_before.Percentile(95))
print('stderr', diff_before.sigma)
diff_after = female_after - male_after
print('mean, p-value', diff_after.mu, 1-diff_after.Prob(0))
print('CI', diff_after.Percentile(5), diff_after.Percentile(95))
print('stderr', diff_after.sigma)
diff = diff_after - diff_before
print('mean, p-value', diff.mu, diff.Prob(0))
print('CI', diff.Percentile(5), diff.Percentile(95))
print('stderr', diff.sigma)
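# Illustrative sketch (not part of the book code): subtracting two Normal
# objects above assumes independent normals, so the difference has mean
# mu1 - mu2 and variance sigma1**2 + sigma2**2. Recomputing the "before"
# gap by hand gives the -1.66 reported in the docstring.
def ExampleGapBefore():
    """Recompute the before-intervention gender gap parameters by hand."""
    mu = 1.91 - 3.57  # -1.66 points
    sigma2 = 0.32**2 + 0.28**2  # 0.1808
    return normal.Normal(mu, sigma2)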
def main():
thinkstats2.RandomSeed(17)
TestIntervention()
return
live, firsts, others = first.MakeFrames()
PlotAdultWeights(live)
PlotPregLengths(live, firsts, others)
if __name__ == '__main__':
main()
|
gpl-3.0
|
dob71/x2swn
|
skeinforge/skeinforge_application/skeinforge_plugins/craft_plugins/coil.py
|
8
|
9991
|
"""
This page is in the table of contents.
Coil is a script to coil wire or filament around an object.
==Operation==
The default 'Activate Coil' checkbox is on. When it is on, the functions described below will work, when it is off, the functions will not be called.
==Settings==
===Minimum Tool Distance===
Default is twenty millimeters.
Defines the minimum distance between the wire dispenser and the object. The 'Minimum Tool Distance' should be set to the maximum radius of the wire dispenser, times at least 1.3 to get a reasonable safety margin.
==Examples==
The following examples coil the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and coil.py.
> python coil.py
This brings up the coil dialog.
> python coil.py Screw Holder Bottom.stl
The coil tool is parsing the file:
Screw Holder Bottom.stl
..
The coil tool has created the file:
Screw Holder Bottom_coil.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.geometry.solids import triangle_mesh
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import os
import sys
__author__ = 'Enrique Perez ([email protected])'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
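# Illustrative sketch (not part of skeinforge): the rule of thumb from the
# settings documentation above -- 'Minimum Tool Distance' should be at least
# the maximum dispenser radius times 1.3. The radius used here is only an
# example value.
def exampleMinimumToolDistance(dispenserRadius=15.0, safetyFactor=1.3):
    'Return a suggested minimum tool distance in millimeters.'
    return dispenserRadius * safetyFactor  # 15.0 mm * 1.3 = 19.5 mm, near the 20 mm default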
def getCraftedText( fileName, gcodeText = '', repository=None):
"Coil the file or gcodeText."
return getCraftedTextFromText( archive.getTextIfEmpty(fileName, gcodeText), repository )
def getCraftedTextFromText(gcodeText, repository=None):
"Coil a gcode linear move gcodeText."
if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'coil'):
return gcodeText
if repository == None:
repository = settings.getReadRepository( CoilRepository() )
if not repository.activateCoil.value:
return gcodeText
return CoilSkein().getCraftedGcode(gcodeText, repository)
def getNewRepository():
'Get new repository.'
return CoilRepository()
def writeOutput(fileName, shouldAnalyze=True):
"Coil a gcode linear move file."
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'coil', shouldAnalyze)
class CoilRepository:
"A class to handle the coil settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.coil.html', self )
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Coil', self, '')
self.activateCoil = settings.BooleanSetting().getFromValue('Activate Coil', self, True )
self.minimumToolDistance = settings.FloatSpin().getFromValue( 10.0, 'Minimum Tool Distance (millimeters):', self, 50.0, 20.0 )
self.executeTitle = 'Coil'
def execute(self):
"Coil button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class CoilSkein:
"A class to coil a skein of extrusions."
def __init__(self):
self.boundaryLayers = []
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.edgeWidth = 0.6
self.lineIndex = 0
self.lines = None
self.oldLocationComplex = complex()
self.shutdownLines = []
def addCoilLayer( self, boundaryLayers, radius, z ):
"Add a coil layer."
self.distanceFeedRate.addLine('(<layer> %s )' % z ) # Indicate that a new layer is starting.
self.distanceFeedRate.addLine('(<nestedRing>)')
thread = []
for boundaryLayerIndex in xrange(1, len(boundaryLayers) - 1):
boundaryLayer = boundaryLayers[boundaryLayerIndex]
boundaryLayerBegin = boundaryLayers[boundaryLayerIndex - 1]
boundaryLayerEnd = boundaryLayers[boundaryLayerIndex + 1]
beginLocation = Vector3(0.0, 0.0, 0.5 * (boundaryLayerBegin.z + boundaryLayer.z))
outsetLoop = intercircle.getLargestInsetLoopFromLoop(boundaryLayer.loops[0], - radius)
self.addCoilToThread(beginLocation, 0.5 * (boundaryLayer.z + boundaryLayerEnd.z), outsetLoop, thread)
self.addGcodeFromThread(thread)
self.distanceFeedRate.addLine('(</nestedRing>)')
self.distanceFeedRate.addLine('(</layer>)')
def addCoilLayers(self):
"Add the coil layers."
numberOfLayersFloat = round( self.edgeWidth / self.layerHeight )
numberOfLayers = int( numberOfLayersFloat )
halfLayerThickness = 0.5 * self.layerHeight
startOutset = self.repository.minimumToolDistance.value + halfLayerThickness
startZ = self.boundaryLayers[0].z + halfLayerThickness
zRange = self.boundaryLayers[-1].z - self.boundaryLayers[0].z
zIncrement = 0.0
if zRange >= 0.0:
zIncrement = zRange / numberOfLayersFloat
for layerIndex in xrange( numberOfLayers ):
settings.printProgressByNumber(layerIndex, numberOfLayers, 'coil')
boundaryLayers = self.boundaryLayers
if layerIndex % 2 == 1:
boundaryLayers = self.boundaryReverseLayers
radius = startOutset + layerIndex * self.layerHeight
z = startZ + layerIndex * zIncrement
self.addCoilLayer( boundaryLayers, radius, z )
def addCoilToThread(self, beginLocation, endZ, loop, thread):
"Add a coil to the thread."
if len(loop) < 1:
return
loop = euclidean.getLoopStartingClosest(self.halfEdgeWidth, self.oldLocationComplex, loop)
length = euclidean.getLoopLength(loop)
if length <= 0.0:
return
oldPoint = loop[0]
pathLength = 0.0
for point in loop[1 :]:
pathLength += abs(point - oldPoint)
along = pathLength / length
z = (1.0 - along) * beginLocation.z + along * endZ
location = Vector3(point.real, point.imag, z)
thread.append(location)
oldPoint = point
self.oldLocationComplex = loop[-1]
def addGcodeFromThread( self, thread ):
"Add a thread to the output."
if len(thread) > 0:
firstLocation = thread[0]
self.distanceFeedRate.addGcodeMovementZ( firstLocation.dropAxis(), firstLocation.z )
else:
print("zero length vertex positions array which was skipped over, this should never happen")
if len(thread) < 2:
print("thread of only one point in addGcodeFromThread in coil, this should never happen")
print(thread)
return
self.distanceFeedRate.addLine('M101') # Turn extruder on.
for location in thread[1 :]:
self.distanceFeedRate.addGcodeMovementZ( location.dropAxis(), location.z )
self.distanceFeedRate.addLine('M103') # Turn extruder off.
def getCraftedGcode(self, gcodeText, repository):
"Parse gcode text and store the coil gcode."
self.repository = repository
self.lines = archive.getTextLines(gcodeText)
self.parseInitialization()
self.parseBoundaries()
self.parseUntilLayer()
self.addCoilLayers()
self.distanceFeedRate.addLines( self.shutdownLines )
return self.distanceFeedRate.output.getvalue()
def parseBoundaries(self):
"Parse the boundaries and add them to the boundary layers."
boundaryLoop = None
boundaryLayer = None
for line in self.lines[self.lineIndex :]:
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if len( self.shutdownLines ) > 0:
self.shutdownLines.append(line)
if firstWord == '(</boundaryPerimeter>)':
boundaryLoop = None
elif firstWord == '(<boundaryPoint>':
location = gcodec.getLocationFromSplitLine(None, splitLine)
if boundaryLoop == None:
boundaryLoop = []
boundaryLayer.loops.append(boundaryLoop)
boundaryLoop.append(location.dropAxis())
elif firstWord == '(<layer>':
boundaryLayer = euclidean.LoopLayer(float(splitLine[1]))
self.boundaryLayers.append(boundaryLayer)
elif firstWord == '(</crafting>)':
self.shutdownLines = [ line ]
for boundaryLayer in self.boundaryLayers:
if not euclidean.isWiddershins( boundaryLayer.loops[0] ):
boundaryLayer.loops[0].reverse()
self.boundaryReverseLayers = self.boundaryLayers[:]
self.boundaryReverseLayers.reverse()
def parseInitialization(self):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addTagBracketedProcedure('coil')
return
elif firstWord == '(<layerHeight>':
self.layerHeight = float(splitLine[1])
elif firstWord == '(<edgeWidth>':
self.edgeWidth = float(splitLine[1])
self.halfEdgeWidth = 0.5 * self.edgeWidth
self.distanceFeedRate.addLine(line)
def parseUntilLayer(self):
"Parse until the layer line and add it to the coil skein."
for self.lineIndex in xrange(self.lineIndex, len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(<layer>':
return
self.distanceFeedRate.addLine(line)
def main():
"Display the coil dialog."
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()
|
gpl-3.0
|
vnsofthe/odoo-dev
|
addons/hr_gamification/__openerp__.py
|
320
|
1676
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'HR Gamification',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'hidden',
'website': 'https://www.odoo.com/page/employees',
'depends': ['gamification', 'hr'],
'description': """Use the HR resources for the gamification process.
The HR officer can now manage challenges and badges.
This allows the user to send badges to employees instead of simple users.
Badges received are displayed on the user profile.
""",
'data': [
'security/ir.model.access.csv',
'security/gamification_security.xml',
'wizard/grant_badge.xml',
'views/gamification.xml',
'views/hr_gamification.xml',
],
'auto_install': True,
}
|
agpl-3.0
|
dostavro/dotfiles
|
sublime2/Packages/SublimeCodeIntel/libs/process.py
|
2
|
26248
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import os
import sys
import time
import types
if sys.platform != "win32":
import signal # used by kill() method on Linux/Mac
import logging
import threading
import warnings
#-------- Globals -----------#
log = logging.getLogger("process")
# log.setLevel(logging.DEBUG)
try:
from subprocess32 import Popen, PIPE
except ImportError:
# Not available on Windows - fallback to using regular subprocess module.
from subprocess import Popen, PIPE
if sys.platform != "win32":
log.warn(
"Could not import subprocess32 module, falling back to subprocess module")
CREATE_NEW_CONSOLE = 0x10 # same as win32process.CREATE_NEW_CONSOLE
CREATE_NEW_PROCESS_GROUP = 0x200 # same as win32process.CREATE_NEW_PROCESS_GROUP
CREATE_NO_WINDOW = 0x8000000 # same as win32process.CREATE_NO_WINDOW
CTRL_BREAK_EVENT = 1 # same as win32con.CTRL_BREAK_EVENT
WAIT_TIMEOUT = 258 # same as win32event.WAIT_TIMEOUT
#-------- Classes -----------#
# XXX - TODO: Work out what exceptions are raised by SubProcess and turn them into
# ProcessError?
class ProcessError(Exception):
def __init__(self, msg, errno=-1):
Exception.__init__(self, msg)
self.errno = errno
# Check if this is Windows NT and above.
if sys.platform == "win32" and sys.getwindowsversion()[3] == 2:
import winprocess
from subprocess import pywintypes, list2cmdline, STARTUPINFO
try:
# These subprocess variables have moved around between Python versions.
from subprocess import (SW_HIDE,
STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW,
GetVersion, CreateProcess, TerminateProcess)
except ImportError:
import subprocess
SW_HIDE = subprocess._subprocess.SW_HIDE
STARTF_USESTDHANDLES = subprocess._subprocess.STARTF_USESTDHANDLES
STARTF_USESHOWWINDOW = subprocess._subprocess.STARTF_USESHOWWINDOW
GetVersion = subprocess._subprocess.GetVersion
CreateProcess = subprocess._subprocess.CreateProcess
TerminateProcess = subprocess._subprocess.TerminateProcess
# This fix is for killing child processes on windows, based on:
# http://www.microsoft.com/msj/0698/win320698.aspx
# It works by creating a uniquely named job object that will contain our
# process(es), starts the process in a suspended state, maps the process
# to a specific job object, resumes the process, from now on every child
# it will create will be assigned to the same job object. We can then
# later terminate this job object (and all of its child processes).
#
# This code is based upon Benjamin Smedberg's killableprocess, see:
# http://benjamin.smedbergs.us/blog/2006-12-11/killableprocesspy/
class WindowsKillablePopen(Popen):
_job = None
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (MS Windows version)"""
if not isinstance(args, types.StringTypes):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = comspec + " /c " + args
if (GetVersion() >= 0x80000000L or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
# stability of your system. Cost is Ctrl+C won't
# kill children.
creationflags |= CREATE_NEW_CONSOLE
# We create a new job for this process, so that we can kill
# the process and any sub-processes
self._job = winprocess.CreateJobObject()
creationflags |= winprocess.CREATE_SUSPENDED
# Vista will launch Komodo in a job object itself, so we need
# to specify that the created process is not part of the Komodo
# job object, but instead specify that it will be using a
# separate breakaway job object, bug 83001.
creationflags |= winprocess.CREATE_BREAKAWAY_FROM_JOB
# Start the process
try:
hp, ht, pid, tid = CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error, e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or similar), but
# how can this be done from Python?
raise WindowsError(*e.args)
except WindowsError:
log.error(
"process.py: can't execute %r (%s)", executable, args)
raise
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = hp
self.pid = pid
if self._job:
# Resume the thread.
winprocess.AssignProcessToJobObject(self._job, int(hp))
winprocess.ResumeThread(int(ht))
ht.Close()
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread is not None:
p2cread.Close()
if c2pwrite is not None:
c2pwrite.Close()
if errwrite is not None:
errwrite.Close()
def terminate(self):
"""Terminates the process"""
if self._job:
winprocess.TerminateJobObject(self._job, 127)
self.returncode = 127
else:
# Cannot call the parent class, as there is no terminate method
# defined at the class level (it's added upon instantiation),
# so this is a copy of subprocess.Popen.terminate() code.
TerminateProcess(self._handle, 1)
kill = terminate
# Use our own killable process instead of the regular Popen.
Popen = WindowsKillablePopen
class ProcessOpen(Popen):
def __init__(self, cmd, cwd=None, env=None, flags=None,
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=True):
"""Create a child process.
"cmd" is the command to run, either a list of arguments or a string.
"cwd" is a working directory in which to start the child process.
"env" is an environment dictionary for the child.
"flags" are system-specific process creation flags. On Windows
this can be a bitwise-OR of any of the win32process.CREATE_*
constants (Note: win32process.CREATE_NEW_PROCESS_GROUP is always
OR'd in). On Unix, this is currently ignored.
"stdin", "stdout", "stderr" can be used to specify file objects
to handle read (stdout/stderr) and write (stdin) events from/to
the child. By default a file handle will be created for each
io channel automatically, unless set explicitly to None. When set
to None, the parent io handles will be used, which can mean the
output is redirected to Komodo's log files.
"universal_newlines": On by default (the opposite of subprocess).
"""
self._child_created = False
self.__use_killpg = False
auto_piped_stdin = False
preexec_fn = None
shell = False
if not isinstance(cmd, (list, tuple)):
# The cmd is already formatted, ready for the shell. Otherwise
# subprocess.Popen will treat this as simply one command with
# no arguments, resulting in an unknown command.
shell = True
if sys.platform.startswith("win"):
# On Windows, cmd requires some special handling of multiple quoted
# arguments, as this is what cmd will do:
# See if the first character is a quote character and if so,
# strip the leading character and remove the last quote character
# on the command line, preserving any text after the last quote
# character.
if cmd and shell and cmd.count('"') > 2:
if not cmd.startswith('""') or not cmd.endswith('""'):
# Needs to be a re-quoted with additional double quotes.
# http://bugs.activestate.com/show_bug.cgi?id=75467
cmd = '"%s"' % (cmd, )
# XXX - subprocess needs to be updated to use the wide string API.
# subprocess uses a Windows API that does not accept unicode, so
# we need to convert all the environment variables to strings
# before we make the call. Temporary fix to bug:
# http://bugs.activestate.com/show_bug.cgi?id=72311
if env:
encoding = sys.getfilesystemencoding()
_enc_env = {}
for key, value in env.items():
try:
_enc_env[key.encode(encoding)] = value.encode(encoding)
except (UnicodeEncodeError, UnicodeDecodeError):
# Could not encode it, warn we are dropping it.
log.warn("Could not encode environment variable %r "
"so removing it", key)
env = _enc_env
if flags is None:
flags = CREATE_NO_WINDOW
# If we don't have standard handles to pass to the child process
# (e.g. we don't have a console app), then
# `subprocess.GetStdHandle(...)` will return None. `subprocess.py`
# handles that (http://bugs.python.org/issue1124861)
#
# However, if Komodo is started from the command line, then
# the shell's stdin handle is inherited, i.e. in subprocess.py:
# p2cread = GetStdHandle(STD_INPUT_HANDLE) # p2cread == 3
# A few lines later this leads to:
# Traceback (most recent call last):
# ...
# File "...\lib\mozilla\python\komodo\process.py", line 130, in __init__
# creationflags=flags)
# File "...\lib\python\lib\subprocess.py", line 588, in __init__
# errread, errwrite) = self._get_handles(stdin, stdout, stderr)
# File "...\lib\python\lib\subprocess.py", line 709, in _get_handles
# p2cread = self._make_inheritable(p2cread)
# File "...\lib\python\lib\subprocess.py", line 773, in _make_inheritable
# DUPLICATE_SAME_ACCESS)
# WindowsError: [Error 6] The handle is invalid
#
# I suspect this indicates that the stdin handle inherited by
# the subsystem:windows komodo.exe process is invalid -- perhaps
# because of mis-used of the Windows API for passing that handle
# through. The same error can be demonstrated in PythonWin:
# from _subprocess import *
# from subprocess import *
# h = GetStdHandle(STD_INPUT_HANDLE)
# p = Popen("python -c '1'")
# p._make_inheritable(h)
#
# I don't understand why the inherited stdin is invalid for
# `DuplicateHandle`, but here is how we are working around this:
# If we detect the condition where this can fail, then work around
# it by setting the handle to `subprocess.PIPE`, resulting in
# a different and workable code path.
if self._needToHackAroundStdHandles() \
and not (flags & CREATE_NEW_CONSOLE):
if self._checkFileObjInheritable(sys.stdin, "STD_INPUT_HANDLE"):
stdin = PIPE
auto_piped_stdin = True
if self._checkFileObjInheritable(sys.stdout, "STD_OUTPUT_HANDLE"):
stdout = PIPE
if self._checkFileObjInheritable(sys.stderr, "STD_ERROR_HANDLE"):
stderr = PIPE
else:
# Set flags to 0, subprocess raises an exception otherwise.
flags = 0
# Set a preexec function, this will make the sub-process create its
# own session and process group - bug 80651, bug 85693.
preexec_fn = os.setsid
# Mark as requiring process-group killing. This will allow us to
# later kill both the spawned shell and the sub-process in one go
# (see the kill method) - bug 85693.
self.__use_killpg = True
# Internal attributes.
self.__cmd = cmd
self.__retval = None
self.__hasTerminated = threading.Condition()
# Launch the process.
# print "Process: %r in %r" % (cmd, cwd)
Popen.__init__(self, cmd, cwd=cwd, env=env, shell=shell,
stdin=stdin, stdout=stdout, stderr=stderr,
preexec_fn=preexec_fn,
universal_newlines=universal_newlines,
creationflags=flags)
if auto_piped_stdin:
self.stdin.close()
__needToHackAroundStdHandles = None
@classmethod
def _needToHackAroundStdHandles(cls):
if cls.__needToHackAroundStdHandles is None:
if sys.platform != "win32":
cls.__needToHackAroundStdHandles = False
else:
from _subprocess import GetStdHandle, STD_INPUT_HANDLE
stdin_handle = GetStdHandle(STD_INPUT_HANDLE)
if stdin_handle is not None:
cls.__needToHackAroundStdHandles = True
if stdin_handle != 3:
log.warn("`GetStdHandle(STD_INPUT_HANDLE)` != 3: "
"something has changed w.r.t. std handle "
"inheritance in Komodo that may affect "
"subprocess launching")
else:
cls.__needToHackAroundStdHandles = False
return cls.__needToHackAroundStdHandles
@classmethod
def _checkFileObjInheritable(cls, fileobj, handle_name):
"""Check if a given file-like object (or whatever else subprocess.Popen
takes as a handle/stream) can be correctly inherited by a child process.
This just duplicates the code in subprocess.Popen._get_handles to make
sure we go down the correct code path; this is to catch some non-standard
corner cases."""
import _subprocess
import ctypes
import msvcrt
new_handle = None
try:
if fileobj is None:
handle = _subprocess.GetStdHandle(getattr(_subprocess,
handle_name))
if handle is None:
return True # No need to check things we create
elif fileobj == subprocess.PIPE:
return True # No need to check things we create
elif isinstance(fileobj, int):
handle = msvcrt.get_osfhandle(fileobj)
else:
# Assuming file-like object
handle = msvcrt.get_osfhandle(fileobj.fileno())
new_handle = self._make_inheritable(handle)
return True
except:
return False
finally:
CloseHandle = ctypes.windll.kernel32.CloseHandle
if new_handle is not None:
CloseHandle(new_handle)
# Override the returncode handler (used by subprocess.py), this is so
# we can notify any listeners when the process has finished.
def _getReturncode(self):
return self.__returncode
def _setReturncode(self, value):
self.__returncode = value
if value is not None:
# Notify that the process is done.
self.__hasTerminated.acquire()
self.__hasTerminated.notifyAll()
self.__hasTerminated.release()
returncode = property(fget=_getReturncode, fset=_setReturncode)
# Setup the retval handler. This is a readonly wrapper around returncode.
def _getRetval(self):
# Ensure the returncode is set by subprocess if the process is
# finished.
self.poll()
return self.returncode
retval = property(fget=_getRetval)
def wait(self, timeout=None):
"""Wait for the started process to complete.
"timeout" is a floating point number of seconds after
which to timeout. Default is None, which is to never timeout.
If the wait times out it will raise a ProcessError. Otherwise it
will return the child's exit value. Note that in the case of a timeout,
the process is still running. Use kill() to forcibly stop the process.
"""
if timeout is None or timeout < 0:
# Use the parent call.
try:
return Popen.wait(self)
except OSError, ex:
# If the process has already ended, that is fine. This is
# possible when wait is called from a different thread.
if ex.errno != 10: # No child process
raise
return self.returncode
# We poll for the retval, as we cannot rely on self.__hasTerminated
# to be called, as there are some code paths that do not trigger it.
# The accuracy of this wait call is between 0.1 and 1 second.
time_now = time.time()
time_end = time_now + timeout
# These values will be used to incrementally increase the wait period
# of the polling check, starting from the end of the list and working
# towards the front. This is to avoid waiting for a long period on
# processes that finish quickly, see bug 80794.
time_wait_values = [1.0, 0.5, 0.2, 0.1]
while time_now < time_end:
result = self.poll()
if result is not None:
return result
# We use hasTerminated here to get a faster notification.
self.__hasTerminated.acquire()
if time_wait_values:
wait_period = time_wait_values.pop()
self.__hasTerminated.wait(wait_period)
self.__hasTerminated.release()
time_now = time.time()
# last chance
result = self.poll()
if result is not None:
return result
raise ProcessError("Process timeout: waited %d seconds, "
"process not yet finished." % (timeout,),
WAIT_TIMEOUT)
# For backward compatibility with older process.py
def close(self):
pass
# For backward compatibility with older process.py
def kill(self, exitCode=-1, gracePeriod=None, sig=None):
"""Kill process.
"exitCode" this sets what the process return value will be.
"gracePeriod" [deprecated, not supported]
"sig" (Unix only) is the signal to use to kill the process. Defaults
to signal.SIGKILL. See os.kill() for more information.
"""
if gracePeriod is not None:
import warnings
warnings.warn("process.kill() gracePeriod is no longer used",
DeprecationWarning)
# Need to ensure stdin is closed, makes it easier to end the process.
if self.stdin is not None:
self.stdin.close()
if sys.platform.startswith("win"):
# TODO: 1) It would be nice if we could give the process(es) a
# chance to exit gracefully first, rather than having to
# resort to a hard kill.
# 2) May need to send a WM_CLOSE event in the case of a GUI
# application, like the older process.py was doing.
Popen.kill(self)
else:
if sig is None:
sig = signal.SIGKILL
try:
if self.__use_killpg:
os.killpg(self.pid, sig)
else:
os.kill(self.pid, sig)
except OSError, ex:
if ex.errno != 3:
# Ignore: OSError: [Errno 3] No such process
raise
self.returncode = exitCode
class AbortableProcessHelper(object):
"""A helper class that is able to run a process and have the process be
killed/aborted (possibly by another thread) if it is still running.
"""
STATUS_INITIALIZED = 0 # Ready to run.
STATUS_RUNNING = 1 # A process is running.
STATUS_FINISHED_NORMALLY = 2 # The command/process finished normally.
STATUS_ABORTED = 3 # The command/process was aborted.
def __init__(self):
self._process = None
self._process_status = self.STATUS_INITIALIZED
self._process_status_lock = threading.Lock()
def ProcessOpen(self, *args, **kwargs):
"""Create a new process and return it."""
self._process_status_lock.acquire()
try:
self._process_status = self.STATUS_RUNNING
self._process = ProcessOpen(*args, **kwargs)
return self._process
finally:
self._process_status_lock.release()
def ProcessDone(self):
"""Mark the process as being completed, does not need to be aborted."""
self._process_status_lock.acquire()
try:
self._process = None
self._process_status = self.STATUS_FINISHED_NORMALLY
finally:
self._process_status_lock.release()
def ProcessAbort(self):
"""Kill the process if it is still running."""
self._process_status_lock.acquire()
try:
self._process_status = self.STATUS_ABORTED
if self._process:
self._process.kill()
self._process = None
finally:
self._process_status_lock.release()
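# A minimal usage sketch for AbortableProcessHelper (not part of the original
# module). It assumes ProcessOpen accepts a command list in the same way as
# subprocess.Popen; the command shown is purely illustrative:
#
#   helper = AbortableProcessHelper()
#   p = helper.ProcessOpen(["echo", "hello"])
#   try:
#       p.wait(timeout=30)
#   finally:
#       helper.ProcessDone()
#   # From another thread, helper.ProcessAbort() would kill the process if it
#   # were still running.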
## Deprecated process classes ##
class Process(ProcessOpen):
def __init__(self, *args, **kwargs):
warnings.warn("'process.%s' is now deprecated. Please use 'process.ProcessOpen'." %
(self.__class__.__name__))
ProcessOpen.__init__(self, *args, **kwargs)
class ProcessProxy(Process):
pass
|
mit
|
ZenithDK/mopidy
|
tests/http/test_handlers.py
|
17
|
3205
|
from __future__ import absolute_import, unicode_literals
import os
import mock
import tornado.testing
import tornado.web
import tornado.websocket
import mopidy
from mopidy.http import handlers
class StaticFileHandlerTest(tornado.testing.AsyncHTTPTestCase):
def get_app(self):
return tornado.web.Application([
(r'/(.*)', handlers.StaticFileHandler, {
'path': os.path.dirname(__file__),
'default_filename': 'test_handlers.py'
})
])
def test_static_handler(self):
response = self.fetch('/test_handlers.py', method='GET')
self.assertEqual(200, response.code)
self.assertEqual(
response.headers['X-Mopidy-Version'], mopidy.__version__)
self.assertEqual(
response.headers['Cache-Control'], 'no-cache')
def test_static_default_filename(self):
response = self.fetch('/', method='GET')
self.assertEqual(200, response.code)
self.assertEqual(
response.headers['X-Mopidy-Version'], mopidy.__version__)
self.assertEqual(
response.headers['Cache-Control'], 'no-cache')
# We aren't bothering with skipIf as then we would need to "backport" gen_test
if hasattr(tornado.websocket, 'websocket_connect'):
class WebSocketHandlerTest(tornado.testing.AsyncHTTPTestCase):
def get_app(self):
self.core = mock.Mock()
return tornado.web.Application([
(r'/ws/?', handlers.WebSocketHandler, {'core': self.core})
])
def connection(self):
url = self.get_url('/ws').replace('http', 'ws')
return tornado.websocket.websocket_connect(url, self.io_loop)
@tornado.testing.gen_test
def test_invalid_json_rpc_request_doesnt_crash_handler(self):
# An uncaught error would result in no message, so this is just a
# simplistic test to verify this.
conn = yield self.connection()
conn.write_message('invalid request')
message = yield conn.read_message()
self.assertTrue(message)
@tornado.testing.gen_test
def test_broadcast_makes_it_to_client(self):
conn = yield self.connection()
handlers.WebSocketHandler.broadcast('message')
message = yield conn.read_message()
self.assertEqual(message, 'message')
@tornado.testing.gen_test
def test_broadcast_to_client_that_just_closed_connection(self):
conn = yield self.connection()
conn.stream.close()
handlers.WebSocketHandler.broadcast('message')
@tornado.testing.gen_test
def test_broadcast_to_client_without_ws_connection_present(self):
yield self.connection()
# Tornado checks for ws_connection and raises WebSocketClosedError
# if it is missing, this test case simulates winning a race where
# this has happened but we have not yet been removed from clients.
for client in handlers.WebSocketHandler.clients:
client.ws_connection = None
handlers.WebSocketHandler.broadcast('message')
|
apache-2.0
|
clovett/MissionPlanner
|
Lib/site-packages/numpy/random/__init__.py
|
54
|
4688
|
"""
========================
Random Number Generation
========================
==================== =========================================================
Utility functions
==============================================================================
random Uniformly distributed values of a given shape.
bytes Uniformly distributed random bytes.
random_integers Uniformly distributed integers in a given range.
random_sample Uniformly distributed floats in a given range.
permutation Randomly permute a sequence / generate a random sequence.
shuffle Randomly permute a sequence in place.
seed Seed the random number generator.
==================== =========================================================
==================== =========================================================
Compatibility functions
==============================================================================
rand Uniformly distributed values.
randn Normally distributed values.
ranf Uniformly distributed floating point numbers.
randint Uniformly distributed integers in a given range.
==================== =========================================================
==================== =========================================================
Univariate distributions
==============================================================================
beta Beta distribution over ``[0, 1]``.
binomial Binomial distribution.
chisquare :math:`\\chi^2` distribution.
exponential Exponential distribution.
f F (Fisher-Snedecor) distribution.
gamma Gamma distribution.
geometric Geometric distribution.
gumbel Gumbel distribution.
hypergeometric Hypergeometric distribution.
laplace Laplace distribution.
logistic Logistic distribution.
lognormal Log-normal distribution.
logseries Logarithmic series distribution.
negative_binomial Negative binomial distribution.
noncentral_chisquare Non-central chi-square distribution.
noncentral_f Non-central F distribution.
normal Normal / Gaussian distribution.
pareto Pareto distribution.
poisson Poisson distribution.
power Power distribution.
rayleigh Rayleigh distribution.
triangular Triangular distribution.
uniform Uniform distribution.
vonmises Von Mises circular distribution.
wald Wald (inverse Gaussian) distribution.
weibull Weibull distribution.
zipf Zipf's distribution over ranked data.
==================== =========================================================
==================== =========================================================
Multivariate distributions
==============================================================================
dirichlet Multivariate generalization of Beta distribution.
multinomial Multivariate generalization of the binomial distribution.
multivariate_normal Multivariate generalization of the normal distribution.
==================== =========================================================
==================== =========================================================
Standard distributions
==============================================================================
standard_cauchy Standard Cauchy-Lorentz distribution.
standard_exponential Standard exponential distribution.
standard_gamma Standard Gamma distribution.
standard_normal Standard normal distribution.
standard_t Standard Student's t-distribution.
==================== =========================================================
==================== =========================================================
Internal functions
==============================================================================
get_state Get tuple representing internal state of generator.
set_state Set state of generator.
==================== =========================================================
"""
# To get sub-modules
from info import __doc__, __all__
from mtrand import *
# Some aliases:
ranf = random = sample = random_sample
__all__.extend(['ranf','random','sample'])
def __RandomState_ctor():
"""Return a RandomState instance.
This function exists solely to assist (un)pickling.
"""
return RandomState()
from numpy.testing import Tester
test = Tester(__file__).test
bench = Tester(__file__).bench
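# A short usage sketch (not part of the original module); the values below are
# illustrative only:
#
#   >>> from numpy import random
#   >>> random.seed(42)             # seed the generator for reproducibility
#   >>> random.random_sample(3)     # three uniform floats in [0, 1)
#   >>> random.randint(0, 10, 5)    # five integers drawn from [0, 10)
#   >>> random.normal(loc=0.0, scale=1.0, size=2)  # two normal deviates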
|
gpl-3.0
|
drammock/mne-python
|
tutorials/preprocessing/59_head_positions.py
|
5
|
3467
|
"""
.. _tut-head-pos:
================================================
Extracting and visualizing subject head movement
================================================
Continuous head movement can be encoded during MEG recordings by use of
HPI coils that continuously emit sinusoidal signals. These signals can then be
extracted from the recording and used to estimate head position as a function
of time. Here we show an example of how to do this, and how to visualize
the result.
HPI frequencies
---------------
First let's load a short bit of raw data where the subject intentionally moved
their head during the recording. Its power spectral density shows five peaks
(most clearly visible in the gradiometers) corresponding to the HPI coil
frequencies, plus other peaks related to power line interference (60 Hz and
harmonics).
"""
# Authors: Eric Larson <[email protected]>
# Richard Höchenberger <[email protected]>
#
# License: BSD (3-clause)
from os import path as op
import mne
print(__doc__)
data_path = op.join(mne.datasets.testing.data_path(verbose=True), 'SSS')
fname_raw = op.join(data_path, 'test_move_anon_raw.fif')
raw = mne.io.read_raw_fif(fname_raw, allow_maxshield='yes').load_data()
raw.plot_psd()
###############################################################################
# We can use `mne.chpi.get_chpi_info` to retrieve the coil frequencies,
# the index of the channel indicating when each coil was switched on, and the
# respective "event codes" associated with each coil's activity.
chpi_freqs, ch_idx, chpi_codes = mne.chpi.get_chpi_info(info=raw.info)
print(f'cHPI coil frequencies extracted from raw: {chpi_freqs} Hz')
###############################################################################
# Estimating continuous head position
# -----------------------------------
#
# First, let's extract the HPI coil amplitudes as a function of time:
chpi_amplitudes = mne.chpi.compute_chpi_amplitudes(raw)
###############################################################################
# Second, let's compute time-varying HPI coil locations from these:
chpi_locs = mne.chpi.compute_chpi_locs(raw.info, chpi_amplitudes)
###############################################################################
# Lastly, compute head positions from the coil locations:
head_pos = mne.chpi.compute_head_pos(raw.info, chpi_locs, verbose=True)
###############################################################################
# Note that these can then be written to disk or read from disk with
# :func:`mne.chpi.write_head_pos` and :func:`mne.chpi.read_head_pos`,
# respectively.
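# A minimal sketch (not part of the original tutorial; the file name below is
# an assumption used only for illustration)::
#
#     mne.chpi.write_head_pos('sample_head_pos.pos', head_pos)
#     head_pos = mne.chpi.read_head_pos('sample_head_pos.pos')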
#
# Visualizing continuous head position
# ------------------------------------
#
# We can plot as traces, which is especially useful for long recordings:
# sphinx_gallery_thumbnail_number = 2
mne.viz.plot_head_positions(head_pos, mode='traces')
###############################################################################
# Or we can visualize them as a continuous field (with the vectors pointing
# in the head-upward direction):
mne.viz.plot_head_positions(head_pos, mode='field')
###############################################################################
# These head positions can then be used with
# :func:`mne.preprocessing.maxwell_filter` to compensate for movement,
# or with :func:`mne.preprocessing.annotate_movement` to mark segments as
# bad that deviate too much from the average head position.
|
bsd-3-clause
|
nanolearningllc/edx-platform-cypress-2
|
common/djangoapps/third_party_auth/tests/specs/base.py
|
36
|
38777
|
"""Base integration test for provider implementations."""
import unittest
import json
import mock
from django import test
from django.contrib import auth
from django.contrib.auth import models as auth_models
from django.contrib.messages.storage import fallback
from django.contrib.sessions.backends import cache
from django.test import utils as django_utils
from django.conf import settings as django_settings
from edxmako.tests import mako_middleware_process_request
from social import actions, exceptions
from social.apps.django_app import utils as social_utils
from social.apps.django_app import views as social_views
from student import models as student_models
from student import views as student_views
from student_account.views import account_settings_context
from third_party_auth import middleware, pipeline
from third_party_auth import settings as auth_settings
from third_party_auth.tests import testutil
@unittest.skipUnless(
testutil.AUTH_FEATURES_KEY in django_settings.FEATURES, testutil.AUTH_FEATURES_KEY + ' not in settings.FEATURES')
@django_utils.override_settings() # For settings reversion on a method-by-method basis.
class IntegrationTest(testutil.TestCase, test.TestCase):
"""Abstract base class for provider integration tests."""
# Override setUp and set this:
provider = None
# Methods you must override in your children.
def get_response_data(self):
"""Gets a dict of response data of the form given by the provider.
To determine what the provider returns, drop into a debugger in your
provider's do_auth implementation. Providers may merge different kinds
of data (for example, data about the user and data about the user's
credentials).
"""
raise NotImplementedError
def get_username(self):
"""Gets username based on response data from a provider.
Each provider has different logic for username generation. Sadly,
this is not extracted into its own method in python-social-auth, so we
must provide a getter ourselves.
Note that this is the *initial* value the framework will attempt to use.
If it collides, the pipeline will generate a new username. We extract
it here so we can force collisions in a polymorphic way.
"""
raise NotImplementedError
# Asserts you can optionally override and make more specific.
def assert_redirect_to_provider_looks_correct(self, response):
"""Asserts the redirect to the provider's site looks correct.
When we hit /auth/login/<provider>, we should be redirected to the
provider's site. Here we check that we're redirected, but we don't know
enough about the provider to check what we're redirected to. Child test
implementations may optionally strengthen this assertion with, for
example, more details about the format of the Location header.
"""
self.assertEqual(302, response.status_code)
self.assertTrue(response.has_header('Location'))
def assert_register_response_in_pipeline_looks_correct(self, response, pipeline_kwargs):
"""Performs spot checks of the rendered register.html page.
When we display the new account registration form after the user signs
in with a third party, we prepopulate the form with values sent back
from the provider. The exact set of values varies on a provider-by-
provider basis and is generated by
provider.BaseProvider.get_register_form_data. We provide some stock
assertions based on the provider's implementation; if you want more
assertions in your test, override this method.
"""
self.assertEqual(200, response.status_code)
# Check that the correct provider was selected.
self.assertIn('successfully signed in with <strong>%s</strong>' % self.provider.name, response.content)
# Expect that each truthy value we've prepopulated the register form
# with is actually present.
for prepopulated_form_value in self.provider.get_register_form_data(pipeline_kwargs).values():
if prepopulated_form_value:
self.assertIn(prepopulated_form_value, response.content)
# Implementation details and actual tests past this point -- no more
# configuration needed.
def setUp(self):
super(IntegrationTest, self).setUp()
self.request_factory = test.RequestFactory()
@property
def backend_name(self):
""" Shortcut for the backend name """
return self.provider.backend_name
# pylint: disable=invalid-name
def assert_account_settings_context_looks_correct(self, context, _user, duplicate=False, linked=None):
"""Asserts the user's account settings page context is in the expected state.
If duplicate is True, we expect context['duplicate_provider'] to contain
the duplicate provider backend name. If linked is passed, we conditionally
check that the provider is included in context['auth']['providers'] and
its connected state is correct.
"""
if duplicate:
self.assertEqual(context['duplicate_provider'], self.provider.backend_name)
else:
self.assertIsNone(context['duplicate_provider'])
if linked is not None:
expected_provider = [
provider for provider in context['auth']['providers'] if provider['name'] == self.provider.name
][0]
self.assertIsNotNone(expected_provider)
self.assertEqual(expected_provider['connected'], linked)
def assert_exception_redirect_looks_correct(self, expected_uri, auth_entry=None):
"""Tests middleware conditional redirection.
middleware.ExceptionMiddleware makes sure the user ends up in the right
place when they cancel authentication via the provider's UX.
"""
exception_middleware = middleware.ExceptionMiddleware()
request, _ = self.get_request_and_strategy(auth_entry=auth_entry)
response = exception_middleware.process_exception(
request, exceptions.AuthCanceled(request.backend))
location = response.get('Location')
self.assertEqual(302, response.status_code)
self.assertIn('canceled', location)
self.assertIn(self.backend_name, location)
self.assertTrue(location.startswith(expected_uri + '?'))
def assert_first_party_auth_trumps_third_party_auth(self, email=None, password=None, success=None):
"""Asserts first party auth was used in place of third party auth.
Args:
email: string. The user's email. If not None, will be set on POST.
password: string. The user's password. If not None, will be set on
POST.
success: None or bool. Whether we expect auth to be successful. Set
to None to indicate we expect the request to be invalid (meaning
one of username or password will be missing).
"""
_, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
self.create_user_models_for_existing_account(
strategy, email, password, self.get_username(), skip_social_auth=True)
strategy.request.POST = dict(strategy.request.POST)
if email:
strategy.request.POST['email'] = email
if password:
strategy.request.POST['password'] = 'bad_' + password if success is False else password
self.assert_pipeline_running(strategy.request)
payload = json.loads(student_views.login_user(strategy.request).content)
if success is None:
# Request malformed -- just one of email/password given.
self.assertFalse(payload.get('success'))
self.assertIn('There was an error receiving your login information', payload.get('value'))
elif success:
# Request well-formed and credentials good.
self.assertTrue(payload.get('success'))
else:
# Request well-formed but credentials bad.
self.assertFalse(payload.get('success'))
self.assertIn('incorrect', payload.get('value'))
def assert_json_failure_response_is_inactive_account(self, response):
"""Asserts failure on /login for inactive account looks right."""
self.assertEqual(200, response.status_code) # Yes, it's a 200 even though it's a failure.
payload = json.loads(response.content)
self.assertFalse(payload.get('success'))
self.assertIn('This account has not been activated', payload.get('value'))
def assert_json_failure_response_is_missing_social_auth(self, response):
"""Asserts failure on /login for missing social auth looks right."""
self.assertEqual(403, response.status_code)
self.assertIn(
"successfully logged into your %s account, but this account isn't linked" % self.provider.name,
response.content
)
def assert_json_failure_response_is_username_collision(self, response):
"""Asserts the json response indicates a username collision."""
self.assertEqual(400, response.status_code)
payload = json.loads(response.content)
self.assertFalse(payload.get('success'))
self.assertIn('already exists', payload.get('value'))
def assert_json_success_response_looks_correct(self, response):
"""Asserts the json response indicates success and redirection."""
self.assertEqual(200, response.status_code)
payload = json.loads(response.content)
self.assertTrue(payload.get('success'))
self.assertEqual(pipeline.get_complete_url(self.provider.backend_name), payload.get('redirect_url'))
def assert_login_response_before_pipeline_looks_correct(self, response):
"""Asserts a GET of /login not in the pipeline looks correct."""
self.assertEqual(200, response.status_code)
# The combined login/registration page dynamically generates the login button,
# but we can still check that the provider name is passed in the data attribute
# for the container element.
self.assertIn(self.provider.name, response.content)
def assert_login_response_in_pipeline_looks_correct(self, response):
"""Asserts a GET of /login in the pipeline looks correct."""
self.assertEqual(200, response.status_code)
def assert_password_overridden_by_pipeline(self, username, password):
"""Verifies that the given password is not correct.
The pipeline overrides POST['password'], if any, with random data.
"""
self.assertIsNone(auth.authenticate(password=password, username=username))
def assert_pipeline_running(self, request):
"""Makes sure the given request is running an auth pipeline."""
self.assertTrue(pipeline.running(request))
def assert_redirect_to_dashboard_looks_correct(self, response):
"""Asserts a response would redirect to /dashboard."""
self.assertEqual(302, response.status_code)
# pylint: disable=protected-access
self.assertEqual(auth_settings._SOCIAL_AUTH_LOGIN_REDIRECT_URL, response.get('Location'))
def assert_redirect_to_login_looks_correct(self, response):
"""Asserts a response would redirect to /login."""
self.assertEqual(302, response.status_code)
self.assertEqual('/login', response.get('Location'))
def assert_redirect_to_register_looks_correct(self, response):
"""Asserts a response would redirect to /register."""
self.assertEqual(302, response.status_code)
self.assertEqual('/register', response.get('Location'))
def assert_register_response_before_pipeline_looks_correct(self, response):
"""Asserts a GET of /register not in the pipeline looks correct."""
self.assertEqual(200, response.status_code)
# The combined login/registration page dynamically generates the register button,
# but we can still check that the provider name is passed in the data attribute
# for the container element.
self.assertIn(self.provider.name, response.content)
def assert_social_auth_does_not_exist_for_user(self, user, strategy):
"""Asserts a user does not have an auth with the expected provider."""
social_auths = strategy.storage.user.get_social_auth_for_user(
user, provider=self.provider.backend_name)
self.assertEqual(0, len(social_auths))
def assert_social_auth_exists_for_user(self, user, strategy):
"""Asserts a user has a social auth with the expected provider."""
social_auths = strategy.storage.user.get_social_auth_for_user(
user, provider=self.provider.backend_name)
self.assertEqual(1, len(social_auths))
self.assertEqual(self.backend_name, social_auths[0].provider)
def create_user_models_for_existing_account(self, strategy, email, password, username, skip_social_auth=False):
"""Creates user, profile, registration, and (usually) social auth.
This synthesizes what happens during /register.
See student.views.register and student.views._do_create_account.
"""
response_data = self.get_response_data()
uid = strategy.request.backend.get_user_id(response_data, response_data)
user = social_utils.Storage.user.create_user(email=email, password=password, username=username)
profile = student_models.UserProfile(user=user)
profile.save()
registration = student_models.Registration()
registration.register(user)
registration.save()
if not skip_social_auth:
social_utils.Storage.user.create_social_auth(user, uid, self.provider.backend_name)
return user
def fake_auth_complete(self, strategy):
"""Fake implementation of social.backends.BaseAuth.auth_complete.
Unlike what the docs say, it does not need to return a user instance.
Sometimes (like when directing users to the /register form) it instead
returns a response that 302s to /register.
"""
args = ()
kwargs = {
'request': strategy.request,
'backend': strategy.request.backend,
'user': None,
'response': self.get_response_data(),
}
return strategy.authenticate(*args, **kwargs)
def get_registration_post_vars(self, overrides=None):
"""POST vars generated by the registration form."""
defaults = {
'username': 'username',
'name': 'First Last',
'gender': '',
'year_of_birth': '',
'level_of_education': '',
'goals': '',
'honor_code': 'true',
'terms_of_service': 'true',
'password': 'password',
'mailing_address': '',
'email': '[email protected]',
}
if overrides:
defaults.update(overrides)
return defaults
def get_request_and_strategy(self, auth_entry=None, redirect_uri=None):
"""Gets a fully-configured request and strategy.
These two objects contain circular references, so we create them
together. The references themselves are a mixture of normal __init__
stuff and monkey-patching done by python-social-auth. See, for example,
social.apps.django_apps.utils.strategy().
"""
request = self.request_factory.get(
pipeline.get_complete_url(self.backend_name) +
'?redirect_state=redirect_state_value&code=code_value&state=state_value')
request.user = auth_models.AnonymousUser()
request.session = cache.SessionStore()
request.session[self.backend_name + '_state'] = 'state_value'
if auth_entry:
request.session[pipeline.AUTH_ENTRY_KEY] = auth_entry
strategy = social_utils.load_strategy(request=request)
request.social_strategy = strategy
request.backend = social_utils.load_backend(strategy, self.backend_name, redirect_uri)
return request, strategy
def get_user_by_email(self, strategy, email):
"""Gets a user by email, using the given strategy."""
return strategy.storage.user.user_model().objects.get(email=email)
def assert_logged_in_cookie_redirect(self, response):
"""Verify that the user was redirected in order to set the logged in cookie. """
self.assertEqual(response.status_code, 302)
self.assertEqual(
response["Location"],
pipeline.get_complete_url(self.provider.backend_name)
)
self.assertEqual(response.cookies[django_settings.EDXMKTG_LOGGED_IN_COOKIE_NAME].value, 'true')
self.assertIn(django_settings.EDXMKTG_USER_INFO_COOKIE_NAME, response.cookies)
def set_logged_in_cookies(self, request):
"""Simulate setting the marketing site cookie on the request. """
request.COOKIES[django_settings.EDXMKTG_LOGGED_IN_COOKIE_NAME] = 'true'
request.COOKIES[django_settings.EDXMKTG_USER_INFO_COOKIE_NAME] = json.dumps({
'version': django_settings.EDXMKTG_USER_INFO_COOKIE_VERSION,
})
# Actual tests, executed once per child.
def test_canceling_authentication_redirects_to_login_when_auth_entry_login(self):
self.assert_exception_redirect_looks_correct('/login', auth_entry=pipeline.AUTH_ENTRY_LOGIN)
def test_canceling_authentication_redirects_to_register_when_auth_entry_register(self):
self.assert_exception_redirect_looks_correct('/register', auth_entry=pipeline.AUTH_ENTRY_REGISTER)
def test_canceling_authentication_redirects_to_account_settings_when_auth_entry_account_settings(self):
self.assert_exception_redirect_looks_correct(
'/account/settings', auth_entry=pipeline.AUTH_ENTRY_ACCOUNT_SETTINGS
)
def test_canceling_authentication_redirects_to_root_when_auth_entry_not_set(self):
self.assert_exception_redirect_looks_correct('/')
def test_full_pipeline_succeeds_for_linking_account(self):
# First, create the request and strategy that store pipeline state,
# configure the backend, and mock out wire traffic.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
pipeline.analytics.track = mock.MagicMock()
request.user = self.create_user_models_for_existing_account(
strategy, '[email protected]', 'password', self.get_username(), skip_social_auth=True)
# Instrument the pipeline to get to the dashboard with the full
# expected state.
self.client.get(
pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN))
actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access
mako_middleware_process_request(strategy.request)
student_views.signin_user(strategy.request)
student_views.login_user(strategy.request)
actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access
# First we expect that we're in the unlinked state, and that there
# really is no association in the backend.
self.assert_account_settings_context_looks_correct(account_settings_context(request), request.user, linked=False)
self.assert_social_auth_does_not_exist_for_user(request.user, strategy)
# We should be redirected back to the complete page, setting
# the "logged in" cookie for the marketing site.
self.assert_logged_in_cookie_redirect(actions.do_complete(
request.backend, social_views._do_login, request.user, None, # pylint: disable=protected-access
redirect_field_name=auth.REDIRECT_FIELD_NAME
))
# Set the cookie and try again
self.set_logged_in_cookies(request)
# Fire off the auth pipeline to link.
self.assert_redirect_to_dashboard_looks_correct(actions.do_complete(
request.backend, social_views._do_login, request.user, None, # pylint: disable=protected-access
redirect_field_name=auth.REDIRECT_FIELD_NAME))
# Now we expect to be in the linked state, with a backend entry.
self.assert_social_auth_exists_for_user(request.user, strategy)
self.assert_account_settings_context_looks_correct(account_settings_context(request), request.user, linked=True)
def test_full_pipeline_succeeds_for_unlinking_account(self):
# First, create the request and strategy that store pipeline state,
# configure the backend, and mock out wire traffic.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
user = self.create_user_models_for_existing_account(
strategy, '[email protected]', 'password', self.get_username())
self.assert_social_auth_exists_for_user(user, strategy)
# We're already logged in, so simulate that the cookie is set correctly
self.set_logged_in_cookies(request)
# Instrument the pipeline to get to the dashboard with the full
# expected state.
self.client.get(
pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN))
actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access
mako_middleware_process_request(strategy.request)
student_views.signin_user(strategy.request)
student_views.login_user(strategy.request)
actions.do_complete(request.backend, social_views._do_login, user=user) # pylint: disable=protected-access
# First we expect that we're in the linked state, with a backend entry.
self.assert_account_settings_context_looks_correct(account_settings_context(request), user, linked=True)
self.assert_social_auth_exists_for_user(request.user, strategy)
# Fire off the disconnect pipeline to unlink.
self.assert_redirect_to_dashboard_looks_correct(actions.do_disconnect(
request.backend, request.user, None, redirect_field_name=auth.REDIRECT_FIELD_NAME))
# Now we expect to be in the unlinked state, with no backend entry.
self.assert_account_settings_context_looks_correct(account_settings_context(request), user, linked=False)
self.assert_social_auth_does_not_exist_for_user(user, strategy)
def test_linking_already_associated_account_raises_auth_already_associated(self):
# This is of a piece with
# test_already_associated_exception_populates_dashboard_with_error. It
# verifies the exception gets raised when we expect; the latter test
# covers exception handling.
email = '[email protected]'
password = 'password'
username = self.get_username()
_, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
backend = strategy.request.backend
backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
linked_user = self.create_user_models_for_existing_account(strategy, email, password, username)
unlinked_user = social_utils.Storage.user.create_user(
email='other_' + email, password=password, username='other_' + username)
self.assert_social_auth_exists_for_user(linked_user, strategy)
self.assert_social_auth_does_not_exist_for_user(unlinked_user, strategy)
with self.assertRaises(exceptions.AuthAlreadyAssociated):
# pylint: disable=protected-access
actions.do_complete(backend, social_views._do_login, user=unlinked_user)
def test_already_associated_exception_populates_dashboard_with_error(self):
# Instrument the pipeline with an exception. We test that the
# exception is raised correctly separately, so it's ok that we're
# raising it artificially here. This makes the linked=True artificial
# in the final assert because in practice the account would be
# unlinked, but getting that behavior is cumbersome here and already
# covered in other tests. Using linked=True does, however, let us test
# that the duplicate error has no effect on the state of the controls.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
user = self.create_user_models_for_existing_account(
strategy, '[email protected]', 'password', self.get_username())
self.assert_social_auth_exists_for_user(user, strategy)
self.client.get('/login')
self.client.get(pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN))
actions.do_complete(request.backend, social_views._do_login) # pylint: disable=protected-access
mako_middleware_process_request(strategy.request)
student_views.signin_user(strategy.request)
student_views.login_user(strategy.request)
actions.do_complete(request.backend, social_views._do_login, user=user) # pylint: disable=protected-access
# Monkey-patch storage for messaging; pylint: disable=protected-access
request._messages = fallback.FallbackStorage(request)
middleware.ExceptionMiddleware().process_exception(
request,
exceptions.AuthAlreadyAssociated(self.provider.backend_name, 'account is already in use.'))
self.assert_account_settings_context_looks_correct(
account_settings_context(request), user, duplicate=True, linked=True)
def test_full_pipeline_succeeds_for_signing_in_to_existing_active_account(self):
# First, create the request and strategy that store pipeline state,
# configure the backend, and mock out wire traffic.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
pipeline.analytics.track = mock.MagicMock()
user = self.create_user_models_for_existing_account(
strategy, '[email protected]', 'password', self.get_username())
self.assert_social_auth_exists_for_user(user, strategy)
self.assertTrue(user.is_active)
# Begin! Ensure that the login form contains expected controls before
# the user starts the pipeline.
self.assert_login_response_before_pipeline_looks_correct(self.client.get('/login'))
# The pipeline starts by a user GETting /auth/login/<provider>.
# Synthesize that request and check that it redirects to the correct
# provider page.
self.assert_redirect_to_provider_looks_correct(self.client.get(
pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN)))
# Next, the provider makes a request against /auth/complete/<provider>
# to resume the pipeline.
# pylint: disable=protected-access
self.assert_redirect_to_login_looks_correct(actions.do_complete(request.backend, social_views._do_login))
mako_middleware_process_request(strategy.request)
# At this point we know the pipeline has resumed correctly. Next we
# fire off the view that displays the login form and posts it via JS.
self.assert_login_response_in_pipeline_looks_correct(student_views.signin_user(strategy.request))
# Next, we invoke the view that handles the POST, and expect it
# redirects to /auth/complete. In the browser ajax handlers will
# redirect the user to the dashboard; we invoke it manually here.
self.assert_json_success_response_looks_correct(student_views.login_user(strategy.request))
# We should be redirected back to the complete page, setting
# the "logged in" cookie for the marketing site.
self.assert_logged_in_cookie_redirect(actions.do_complete(
request.backend, social_views._do_login, request.user, None, # pylint: disable=protected-access
redirect_field_name=auth.REDIRECT_FIELD_NAME
))
# Set the cookie and try again
self.set_logged_in_cookies(request)
self.assert_redirect_to_dashboard_looks_correct(
actions.do_complete(request.backend, social_views._do_login, user=user))
self.assert_account_settings_context_looks_correct(account_settings_context(request), user)
def test_signin_fails_if_account_not_active(self):
_, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
user = self.create_user_models_for_existing_account(strategy, '[email protected]', 'password', self.get_username())
user.is_active = False
user.save()
mako_middleware_process_request(strategy.request)
self.assert_json_failure_response_is_inactive_account(student_views.login_user(strategy.request))
def test_signin_fails_if_no_account_associated(self):
_, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
self.create_user_models_for_existing_account(
strategy, '[email protected]', 'password', self.get_username(), skip_social_auth=True)
self.assert_json_failure_response_is_missing_social_auth(student_views.login_user(strategy.request))
def test_first_party_auth_trumps_third_party_auth_but_is_invalid_when_only_email_in_request(self):
self.assert_first_party_auth_trumps_third_party_auth(email='[email protected]')
def test_first_party_auth_trumps_third_party_auth_but_is_invalid_when_only_password_in_request(self):
self.assert_first_party_auth_trumps_third_party_auth(password='password')
def test_first_party_auth_trumps_third_party_auth_and_fails_when_credentials_bad(self):
self.assert_first_party_auth_trumps_third_party_auth(
email='[email protected]', password='password', success=False)
def test_first_party_auth_trumps_third_party_auth_and_succeeds_when_credentials_good(self):
self.assert_first_party_auth_trumps_third_party_auth(
email='[email protected]', password='password', success=True)
def test_full_pipeline_succeeds_registering_new_account(self):
# First, create the request and strategy that store pipeline state.
# Mock out wire traffic.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_REGISTER, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
# Begin! Grab the registration page and check the login control on it.
self.assert_register_response_before_pipeline_looks_correct(self.client.get('/register'))
# The pipeline starts by a user GETting /auth/login/<provider>.
# Synthesize that request and check that it redirects to the correct
# provider page.
self.assert_redirect_to_provider_looks_correct(self.client.get(
pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN)))
# Next, the provider makes a request against /auth/complete/<provider>.
# pylint: disable=protected-access
self.assert_redirect_to_register_looks_correct(actions.do_complete(request.backend, social_views._do_login))
mako_middleware_process_request(strategy.request)
# At this point we know the pipeline has resumed correctly. Next we
# fire off the view that displays the registration form.
self.assert_register_response_in_pipeline_looks_correct(
student_views.register_user(strategy.request), pipeline.get(request)['kwargs'])
# Next, we invoke the view that handles the POST. Not all providers
# supply email. Manually add it as the user would have to; this
# also serves as a test of overriding provider values. Always provide a
# password for us to check that we override it properly.
overridden_password = strategy.request.POST.get('password')
email = '[email protected]'
if not strategy.request.POST.get('email'):
strategy.request.POST = self.get_registration_post_vars({'email': email})
# The user must not exist yet...
with self.assertRaises(auth_models.User.DoesNotExist):
self.get_user_by_email(strategy, email)
# ...but when we invoke create_account the existing edX view will make
# it, but not social auths. The pipeline creates those later.
self.assert_json_success_response_looks_correct(student_views.create_account(strategy.request))
# We've overridden the user's password, so authenticate() with the old
# value won't work:
created_user = self.get_user_by_email(strategy, email)
self.assert_password_overridden_by_pipeline(overridden_password, created_user.username)
# At this point the user object exists, but there is no associated
# social auth.
self.assert_social_auth_does_not_exist_for_user(created_user, strategy)
# We should be redirected back to the complete page, setting
# the "logged in" cookie for the marketing site.
self.assert_logged_in_cookie_redirect(actions.do_complete(
request.backend, social_views._do_login, request.user, None, # pylint: disable=protected-access
redirect_field_name=auth.REDIRECT_FIELD_NAME
))
# Set the cookie and try again
self.set_logged_in_cookies(request)
self.assert_redirect_to_dashboard_looks_correct(
actions.do_complete(strategy.request.backend, social_views._do_login, user=created_user))
# Now the user has been redirected to the dashboard. Their third party account should now be linked.
self.assert_social_auth_exists_for_user(created_user, strategy)
self.assert_account_settings_context_looks_correct(account_settings_context(request), created_user, linked=True)
def test_new_account_registration_assigns_distinct_username_on_collision(self):
original_username = self.get_username()
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_REGISTER, redirect_uri='social:complete')
# Create a colliding username in the backend, then proceed with
# assignment via pipeline to make sure a distinct username is created.
strategy.storage.user.create_user(username=self.get_username(), email='[email protected]', password='password')
backend = strategy.request.backend
backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
# pylint: disable=protected-access
self.assert_redirect_to_register_looks_correct(actions.do_complete(backend, social_views._do_login))
distinct_username = pipeline.get(request)['kwargs']['username']
self.assertNotEqual(original_username, distinct_username)
def test_new_account_registration_fails_if_email_exists(self):
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_REGISTER, redirect_uri='social:complete')
backend = strategy.request.backend
backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
# pylint: disable=protected-access
self.assert_redirect_to_register_looks_correct(actions.do_complete(backend, social_views._do_login))
mako_middleware_process_request(strategy.request)
self.assert_register_response_in_pipeline_looks_correct(
student_views.register_user(strategy.request), pipeline.get(request)['kwargs'])
strategy.request.POST = self.get_registration_post_vars()
# Create twice: once successfully, and once causing a collision.
student_views.create_account(strategy.request)
self.assert_json_failure_response_is_username_collision(student_views.create_account(strategy.request))
def test_pipeline_raises_auth_entry_error_if_auth_entry_invalid(self):
auth_entry = 'invalid'
self.assertNotIn(auth_entry, pipeline._AUTH_ENTRY_CHOICES) # pylint: disable=protected-access
_, strategy = self.get_request_and_strategy(auth_entry=auth_entry, redirect_uri='social:complete')
with self.assertRaises(pipeline.AuthEntryError):
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
def test_pipeline_raises_auth_entry_error_if_auth_entry_missing(self):
_, strategy = self.get_request_and_strategy(auth_entry=None, redirect_uri='social:complete')
with self.assertRaises(pipeline.AuthEntryError):
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
class Oauth2IntegrationTest(IntegrationTest): # pylint: disable=abstract-method
"""Base test case for integration tests of Oauth2 providers."""
# Dict of string -> object. Information about the token granted to the
# user. Override with test values in subclass; None to force a throw.
TOKEN_RESPONSE_DATA = None
# Dict of string -> object. Information about the user themselves. Override
# with test values in subclass; None to force a throw.
USER_RESPONSE_DATA = None
def get_response_data(self):
"""Gets dict (string -> object) of merged data about the user."""
response_data = dict(self.TOKEN_RESPONSE_DATA)
response_data.update(self.USER_RESPONSE_DATA)
return response_data
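# A hedged sketch (not from the original file) of how a concrete provider test
# might fill in the required overrides; the provider data shown is invented:
#
#   class ExampleOauth2IntegrationTest(Oauth2IntegrationTest):
#       provider = None  # would be set to a configured test provider in setUp
#       TOKEN_RESPONSE_DATA = {'access_token': 'fake_access_token'}
#       USER_RESPONSE_DATA = {'id': 1, 'email': 'user@example.com',
#                             'name': 'First Last'}
#
#       def get_username(self):
#           return self.get_response_data().get('email').split('@')[0]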
|
agpl-3.0
|
bratsche/Neutron-Drive
|
google_appengine/lib/webob_1_1_1/docs/wiki-example-code/example.py
|
6
|
5829
|
import os
import re
from webob import Request, Response
from webob import exc
from tempita import HTMLTemplate
VIEW_TEMPLATE = HTMLTemplate("""\
<html>
<head>
<title>{{page.title}}</title>
</head>
<body>
<h1>{{page.title}}</h1>
{{if message}}
<div style="background-color: #99f">{{message}}</div>
{{endif}}
<div>{{page.content|html}}</div>
<hr>
<a href="{{req.url}}?action=edit">Edit</a>
</body>
</html>
""")
EDIT_TEMPLATE = HTMLTemplate("""\
<html>
<head>
<title>Edit: {{page.title}}</title>
</head>
<body>
{{if page.exists}}
<h1>Edit: {{page.title}}</h1>
{{else}}
<h1>Create: {{page.title}}</h1>
{{endif}}
<form action="{{req.path_url}}" method="POST">
<input type="hidden" name="mtime" value="{{page.mtime}}">
Title: <input type="text" name="title" style="width: 70%" value="{{page.title}}"><br>
Content: <input type="submit" value="Save">
<a href="{{req.path_url}}">Cancel</a>
<br>
<textarea name="content" style="width: 100%; height: 75%" rows="40">{{page.content}}</textarea>
<br>
<input type="submit" value="Save">
<a href="{{req.path_url}}">Cancel</a>
</form>
</body></html>
""")
class WikiApp(object):
view_template = VIEW_TEMPLATE
edit_template = EDIT_TEMPLATE
def __init__(self, storage_dir):
self.storage_dir = os.path.abspath(os.path.normpath(storage_dir))
def __call__(self, environ, start_response):
req = Request(environ)
action = req.params.get('action', 'view')
page = self.get_page(req.path_info)
try:
try:
meth = getattr(self, 'action_%s_%s' % (action, req.method))
except AttributeError:
raise exc.HTTPBadRequest('No such action %r' % action)
resp = meth(req, page)
except exc.HTTPException, e:
resp = e
return resp(environ, start_response)
def get_page(self, path):
path = path.lstrip('/')
if not path:
path = 'index'
path = os.path.join(self.storage_dir, path)
path = os.path.normpath(path)
if path.endswith('/'):
path += 'index'
if not path.startswith(self.storage_dir):
raise exc.HTTPBadRequest("Bad path")
path += '.html'
return Page(path)
def action_view_GET(self, req, page):
if not page.exists:
return exc.HTTPTemporaryRedirect(
location=req.url + '?action=edit')
if req.cookies.get('message'):
message = req.cookies['message']
else:
message = None
text = self.view_template.substitute(
page=page, req=req, message=message)
resp = Response(text)
if message:
resp.delete_cookie('message')
else:
resp.last_modified = page.mtime
resp.conditional_response = True
return resp
def action_view_POST(self, req, page):
submit_mtime = int(req.params.get('mtime') or '0') or None
if page.mtime != submit_mtime:
return exc.HTTPPreconditionFailed(
"The page has been updated since you started editing it")
page.set(
title=req.params['title'],
content=req.params['content'])
resp = exc.HTTPSeeOther(
location=req.path_url)
resp.set_cookie('message', 'Page updated')
return resp
def action_edit_GET(self, req, page):
text = self.edit_template.substitute(
page=page, req=req)
return Response(text)
class Page(object):
def __init__(self, filename):
self.filename = filename
@property
def exists(self):
return os.path.exists(self.filename)
@property
def title(self):
if not self.exists:
# we need to guess the title
basename = os.path.splitext(os.path.basename(self.filename))[0]
basename = re.sub(r'[_-]', ' ', basename)
return basename.capitalize()
content = self.full_content
match = re.search(r'<title>(.*?)</title>', content, re.I|re.S)
return match.group(1)
@property
def full_content(self):
f = open(self.filename, 'rb')
try:
return f.read()
finally:
f.close()
@property
def content(self):
if not self.exists:
return ''
content = self.full_content
match = re.search(r'<body[^>]*>(.*?)</body>', content, re.I|re.S)
return match.group(1)
@property
def mtime(self):
if not self.exists:
return None
else:
return os.stat(self.filename).st_mtime
def set(self, title, content):
dir = os.path.dirname(self.filename)
if not os.path.exists(dir):
os.makedirs(dir)
new_content = """<html><head><title>%s</title></head><body>%s</body></html>""" % (
title, content)
f = open(self.filename, 'wb')
f.write(new_content)
f.close()
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser(
usage='%prog --port=PORT'
)
parser.add_option(
'-p', '--port',
default='8080',
dest='port',
type='int',
help='Port to serve on (default 8080)')
parser.add_option(
'--wiki-data',
default='./wiki',
dest='wiki_data',
help='Place to put wiki data into (default ./wiki/)')
options, args = parser.parse_args()
print 'Writing wiki pages to %s' % options.wiki_data
app = WikiApp(options.wiki_data)
from wsgiref.simple_server import make_server
httpd = make_server('localhost', options.port, app)
print 'Serving on http://localhost:%s' % options.port
try:
httpd.serve_forever()
except KeyboardInterrupt:
print '^C'
|
bsd-3-clause
|
MBoustani/climate
|
ocw/data_source/esgf.py
|
1
|
5050
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import urllib2
from ocw.esgf.constants import DEFAULT_ESGF_SEARCH
from ocw.esgf.download import download
from ocw.esgf.logon import logon
from ocw.esgf.search import SearchClient
import ocw.data_source.local as local
from bs4 import BeautifulSoup
import requests
def load_dataset(dataset_id,
variable,
esgf_username,
esgf_password,
search_url=DEFAULT_ESGF_SEARCH,
elevation_index=0,
name='',
save_path='/tmp',
**additional_constraints):
''' Load an ESGF dataset.
:param dataset_id: The ESGF ID of the dataset to load.
:type dataset_id: :mod:`string`
:param variable: The variable to load.
:type variable: :mod:`string`
:param esgf_username: ESGF OpenID value to use for authentication.
:type esgf_username: :mod:`string`
:param esgf_password: ESGF Password to use for authentication.
:type esgf_password: :mod:`string`
:param search_url: (Optional) The ESGF node to use for searching. Defaults
to the Jet Propulsion Laboratory node.
:type search_url: :mod:`string`
:param elevation_index: (Optional) The elevation level to strip out when
loading the dataset using ocw.data_source.local.
:type elevation_index: :class:`int`
:param name: (Optional) A name for the loaded dataset.
:type name: :mod:`string`
:param save_path: (Optional) Path to where downloaded files should be saved.
:type save_path: :mod:`string`
:param additional_constraints: (Optional) Additional key,value pairs to
pass as constraints to the search wrapper. These can be anything found
on the ESGF metadata page for a dataset.
:returns: A :class:`list` of :class:`dataset.Dataset` containing the
requested dataset. If the dataset is stored in multiple files each will
be loaded into a separate :class:`dataset.Dataset`.
:raises ValueError: If no dataset can be found for the supplied ID and
variable, or if the requested dataset is a multi-file dataset.
'''
download_data = _get_file_download_data(url=search_url,
dataset_id=dataset_id,
variable=variable)
datasets = []
for url, var in download_data:
_download_files([url],
esgf_username,
esgf_password,
download_directory=save_path)
file_save_path = os.path.join(save_path, url.split('/')[-1])
datasets.append(local.load_file(file_save_path,
var,
name=name,
elevation_index=elevation_index))
origin = {
'source': 'esgf',
'dataset_id': dataset_id,
'variable': variable
}
for ds in datasets:
ds.origin = origin
return datasets
def _get_file_download_data(dataset_id, variable, url=DEFAULT_ESGF_SEARCH):
'''Return (url, variable) pairs for the files that make up the dataset.'''
url += '?type=File&dataset_id={}&variable={}'
url = url.format(dataset_id, variable)
r = requests.get(url)
xml = BeautifulSoup(r.content, "html.parser")
dont_have_results = not bool(xml.response.result['numfound'])
if dont_have_results:
err = "esgf.load_dataset: No files found for specified dataset."
raise ValueError(err)
# Split out URLs for dataset download along with variable names for each
# of those files.
url_groups = xml.response.result.findAll('arr', {'name': 'url'})
variable_groups = xml.response.result.findAll('arr', {'name': 'variable'})
urls = [group.findAll('str')[0].string.split('|')[0]
for group in url_groups]
variables = [group.findAll('str')[0].string
for group in variable_groups]
return zip(urls, variables)
def _download_files(file_urls, username, password, download_directory='/tmp'):
'''Log onto ESGF and download each URL in file_urls into download_directory.'''
try:
logon(username, password)
except urllib2.HTTPError:
raise ValueError('esgf._download_files: Invalid login credentials')
for url in file_urls:
download(url, toDirectory=download_directory)
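# A hedged usage sketch (not part of the original module); the dataset ID,
# variable, and credentials below are placeholders:
#
#   datasets = load_dataset('placeholder.esgf.dataset.id',
#                           'ta',
#                           esgf_username='openid-placeholder',
#                           esgf_password='password-placeholder')
#   for ds in datasets:
#       print ds.name, ds.values.shape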
|
apache-2.0
|
peguin40/zulip
|
zerver/logging_handlers.py
|
12
|
3082
|
from __future__ import absolute_import
from django.conf import settings
import logging
import traceback
import platform
from django.core import mail
from django.http import HttpRequest
from django.utils.log import AdminEmailHandler
from django.views.debug import ExceptionReporter, get_exception_reporter_filter
from zerver.lib.queue import queue_json_publish
class AdminZulipHandler(logging.Handler):
"""An exception log handler that sends the exception to the queue to be
sent to the Zulip feedback server.
"""
# adapted in part from django/utils/log.py
def __init__(self):
# type: () -> None
logging.Handler.__init__(self)
def emit(self, record):
# type: (ExceptionReporter) -> None
try:
request = record.request # type: HttpRequest
filter = get_exception_reporter_filter(request)
if record.exc_info:
stack_trace = ''.join(traceback.format_exception(*record.exc_info))
else:
stack_trace = None
try:
user_profile = request.user
user_full_name = user_profile.full_name
user_email = user_profile.email
except Exception:
traceback.print_exc()
# Error was triggered by an anonymous user.
user_full_name = None
user_email = None
report = dict(
node = platform.node(),
method = request.method,
path = request.path,
data = request.GET if request.method == 'GET'
else
(filter.get_post_parameters(request)),
remote_addr = request.META.get('REMOTE_ADDR', None),
query_string = request.META.get('QUERY_STRING', None),
server_name = request.META.get('SERVER_NAME', None),
message = record.getMessage(),
stack_trace = stack_trace,
user_full_name = user_full_name,
user_email = user_email,
)
except:
traceback.print_exc()
report = dict(
node = platform.node(),
message = record.getMessage(),
)
try:
if settings.STAGING_ERROR_NOTIFICATIONS:
# On staging, process the report directly so it can happen inside this
# try/except to prevent looping
from zilencer.error_notify import notify_server_error
notify_server_error(report)
else:
queue_json_publish('error_reports', dict(
type = "server",
report = report,
), lambda x: None)
except:
# If this breaks, complain loudly but don't pass the traceback up the stream
# However, we *don't* want to use logging.exception since that could trigger a loop.
logging.warning("Reporting an exception triggered an exception!", exc_info=True)
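# A minimal, hedged sketch of how a handler like this is typically wired into
# Django's LOGGING setting (handler/logger names are illustrative, not taken
# from Zulip's actual settings):
#
# LOGGING = {
#     'version': 1,
#     'handlers': {
#         'zulip_admins': {
#             'level': 'ERROR',
#             'class': 'zerver.logging_handlers.AdminZulipHandler',
#         },
#     },
#     'loggers': {
#         'django.request': {'handlers': ['zulip_admins'], 'level': 'ERROR'},
#     },
# }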
|
apache-2.0
|
Changaco/oh-mainline
|
vendor/packages/Django/django/test/simple.py
|
78
|
13939
|
import unittest as real_unittest
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_app, get_apps
from django.test import _doctest as doctest
from django.test.utils import setup_test_environment, teardown_test_environment
from django.test.testcases import OutputChecker, DocTestRunner
from django.utils import unittest
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
__all__ = ('DjangoTestSuiteRunner',)
# The module name for tests outside models.py
TEST_MODULE = 'tests'
doctestOutputChecker = OutputChecker()
def get_tests(app_module):
parts = app_module.__name__.split('.')
prefix, last = parts[:-1], parts[-1]
try:
test_module = import_module('.'.join(prefix + [TEST_MODULE]))
except ImportError:
# Couldn't import tests.py. Was it due to a missing file, or
# due to an import error in a tests.py that actually exists?
# app_module either points to a models.py file, or models/__init__.py
# Tests are therefore either in same directory, or one level up
if last == 'models':
app_root = import_module('.'.join(prefix))
else:
app_root = app_module
if not module_has_submodule(app_root, TEST_MODULE):
test_module = None
else:
# The module exists, so there must be an import error in the test
# module itself.
raise
return test_module
def build_suite(app_module):
"""
Create a complete Django test suite for the provided application module.
"""
suite = unittest.TestSuite()
# Load unit and doctests in the models.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(app_module, 'suite'):
suite.addTest(app_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
app_module))
try:
suite.addTest(doctest.DocTestSuite(app_module,
checker=doctestOutputChecker,
runner=DocTestRunner))
except ValueError:
# No doc tests in models.py
pass
# Check to see if a separate 'tests' module exists parallel to the
# models module
test_module = get_tests(app_module)
if test_module:
# Load unit and doctests in the tests.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(test_module, 'suite'):
suite.addTest(test_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
test_module))
try:
suite.addTest(doctest.DocTestSuite(
test_module, checker=doctestOutputChecker,
runner=DocTestRunner))
except ValueError:
# No doc tests in tests.py
pass
return suite
def build_test(label):
"""
Construct a test case with the specified label. Label should be of the
form model.TestClass or model.TestClass.test_method. Returns an
instantiated test or test suite corresponding to the label provided.
"""
parts = label.split('.')
if len(parts) < 2 or len(parts) > 3:
raise ValueError("Test label '%s' should be of the form app.TestCase "
"or app.TestCase.test_method" % label)
#
# First, look for TestCase instances with a name that matches
#
app_module = get_app(parts[0])
test_module = get_tests(app_module)
TestClass = getattr(app_module, parts[1], None)
# Couldn't find the test class in models.py; look in tests.py
if TestClass is None:
if test_module:
TestClass = getattr(test_module, parts[1], None)
try:
if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
if len(parts) == 2: # label is app.TestClass
try:
return unittest.TestLoader().loadTestsFromTestCase(
TestClass)
except TypeError:
raise ValueError(
"Test label '%s' does not refer to a test class"
% label)
else: # label is app.TestClass.test_method
return TestClass(parts[2])
except TypeError:
# TestClass isn't a TestClass - it must be a method or normal class
pass
#
# If there isn't a TestCase, look for a doctest that matches
#
tests = []
for module in app_module, test_module:
try:
doctests = doctest.DocTestSuite(module,
checker=doctestOutputChecker,
runner=DocTestRunner)
# Now iterate over the suite, looking for doctests whose name
# matches the pattern that was given
for test in doctests:
if test._dt_test.name in (
'%s.%s' % (module.__name__, '.'.join(parts[1:])),
'%s.__test__.%s' % (
module.__name__, '.'.join(parts[1:]))):
tests.append(test)
except ValueError:
# No doctests found.
pass
# If no tests were found, then we were given a bad test label.
if not tests:
raise ValueError("Test label '%s' does not refer to a test" % label)
# Construct a suite out of the tests that matched.
return unittest.TestSuite(tests)
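# Hedged illustration of the label forms build_test() accepts, per its
# docstring above (the app and test names are made up):
#
#   build_test('polls.PollTests')                      # all methods of a TestCase
#   build_test('polls.PollTests.test_was_published')   # a single test method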
def partition_suite(suite, classes, bins):
"""
Partitions a test suite by test type.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
Tests of type classes[i] are added to bins[i],
tests with no match found in classes are placed in bins[-1]
"""
for test in suite:
if isinstance(test, unittest.TestSuite):
partition_suite(test, classes, bins)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].addTest(test)
break
else:
bins[-1].addTest(test)
def reorder_suite(suite, classes):
"""
Reorders a test suite by test type.
`classes` is a sequence of types
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
"""
class_count = len(classes)
bins = [unittest.TestSuite() for i in range(class_count+1)]
partition_suite(suite, classes, bins)
for i in range(class_count):
bins[0].addTests(bins[i+1])
return bins[0]
def dependency_ordered(test_databases, dependencies):
"""
Reorder test_databases into an order that honors the dependencies
described in TEST_DEPENDENCIES.
"""
ordered_test_databases = []
resolved_databases = set()
# Maps db signature to dependencies of all its aliases
dependencies_map = {}
# sanity check - no DB can depend on its own alias
for sig, (_, aliases) in test_databases:
all_deps = set()
for alias in aliases:
all_deps.update(dependencies.get(alias, []))
if not all_deps.isdisjoint(aliases):
raise ImproperlyConfigured(
"Circular dependency: databases %r depend on each other, "
"but are aliases." % aliases)
dependencies_map[sig] = all_deps
while test_databases:
changed = False
deferred = []
# Try to find a DB that has all its dependencies met
for signature, (db_name, aliases) in test_databases:
if dependencies_map[signature].issubset(resolved_databases):
resolved_databases.update(aliases)
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured(
"Circular dependency in TEST_DEPENDENCIES")
test_databases = deferred
return ordered_test_databases
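# Hedged illustration of the TEST_DEPENDENCIES entries that dependency_ordered()
# honours (the aliases and backends are illustrative, not from a real project):
#
# DATABASES = {
#     'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'a.db'},
#     'diamonds': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'b.db',
#                  'TEST_DEPENDENCIES': []},
#     'clubs': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'c.db',
#               'TEST_DEPENDENCIES': ['diamonds']},
# }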
class DjangoTestSuiteRunner(object):
def __init__(self, verbosity=1, interactive=True, failfast=True, **kwargs):
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
def setup_test_environment(self, **kwargs):
setup_test_environment()
settings.DEBUG = False
unittest.installHandler()
def build_suite(self, test_labels, extra_tests=None, **kwargs):
suite = unittest.TestSuite()
if test_labels:
for label in test_labels:
if '.' in label:
suite.addTest(build_test(label))
else:
app = get_app(label)
suite.addTest(build_suite(app))
else:
for app in get_apps():
suite.addTest(build_suite(app))
if extra_tests:
for test in extra_tests:
suite.addTest(test)
return reorder_suite(suite, (unittest.TestCase,))
def setup_databases(self, **kwargs):
from django.db import connections, DEFAULT_DB_ALIAS
# First pass -- work out which databases actually need to be created,
# and which ones are test mirrors or duplicate entries in DATABASES
mirrored_aliases = {}
test_databases = {}
dependencies = {}
default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
for alias in connections:
connection = connections[alias]
if connection.settings_dict['TEST_MIRROR']:
# If the database is marked as a test mirror, save
# the alias.
mirrored_aliases[alias] = (
connection.settings_dict['TEST_MIRROR'])
else:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], set())
)
item[1].add(alias)
if 'TEST_DEPENDENCIES' in connection.settings_dict:
dependencies[alias] = (
connection.settings_dict['TEST_DEPENDENCIES'])
else:
if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig:
dependencies[alias] = connection.settings_dict.get(
'TEST_DEPENDENCIES', [DEFAULT_DB_ALIAS])
# Second pass -- actually create the databases.
old_names = []
mirrors = []
for signature, (db_name, aliases) in dependency_ordered(
test_databases.items(), dependencies):
test_db_name = None
# Actually create the database for the first connection
for alias in aliases:
connection = connections[alias]
if test_db_name is None:
test_db_name = connection.creation.create_test_db(
self.verbosity, autoclobber=not self.interactive)
destroy = True
else:
connection.settings_dict['NAME'] = test_db_name
destroy = False
old_names.append((connection, db_name, destroy))
for alias, mirror_alias in mirrored_aliases.items():
mirrors.append((alias, connections[alias].settings_dict['NAME']))
connections[alias].settings_dict['NAME'] = (
connections[mirror_alias].settings_dict['NAME'])
return old_names, mirrors
def run_suite(self, suite, **kwargs):
return unittest.TextTestRunner(
verbosity=self.verbosity, failfast=self.failfast).run(suite)
def teardown_databases(self, old_config, **kwargs):
"""
Destroys all the non-mirror databases.
"""
old_names, mirrors = old_config
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, self.verbosity)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Labels must be of the form:
- app.TestClass.test_method
Run a single specific test method
- app.TestClass
Run all the test methods in a given class
- app
Search for doctests and unittests in the named application.
When looking for tests, the test runner will look in the models and
tests modules for the application.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
old_config = self.setup_databases()
result = self.run_suite(suite)
self.teardown_databases(old_config)
self.teardown_test_environment()
return self.suite_result(suite, result)
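# A hedged end-to-end sketch of driving the runner directly (labels are
# illustrative; normally `manage.py test` builds the runner for you):
#
# runner = DjangoTestSuiteRunner(verbosity=2, interactive=False)
# failures = runner.run_tests(['polls', 'polls.PollTests.test_was_published'])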
|
agpl-3.0
|
ptsneves/ardupilot
|
Tools/ardupilotwaf/gtest.py
|
52
|
1267
|
#!/usr/bin/env python
# encoding: utf-8
"""
gtest is a Waf tool for test builds in Ardupilot
"""
from waflib import Utils
from waflib.Configure import conf
import boards
def configure(cfg):
cfg.env.HAS_GTEST = False
if cfg.options.disable_tests:
return
board = cfg.get_board()
if isinstance(board, boards.px4):
# toolchain is currently broken for gtest
cfg.msg(
'Gtest',
'PX4 boards currently don\'t support compiling gtest',
color='YELLOW',
)
return
if cfg.env.STATIC_LINKING:
# gtest uses a function (getaddrinfo) that is supposed to be linked
# dynamically
cfg.msg(
'Gtest',
'statically linked tests not supported',
color='YELLOW',
)
return
cfg.env.append_value('GIT_SUBMODULES', 'gtest')
cfg.env.HAS_GTEST = True
@conf
def libgtest(bld, **kw):
kw['cxxflags'] = Utils.to_list(kw.get('cxxflags', [])) + ['-Wno-undef']
kw.update(
source='modules/gtest/src/gtest-all.cc',
target='gtest/gtest',
includes='modules/gtest/ modules/gtest/include',
export_includes='modules/gtest/include',
name='GTEST',
)
return bld.stlib(**kw)
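# Hedged usage sketch: @conf makes the helper available on the build context,
# so a project wscript might do something like the following (guard and call
# are illustrative):
#
# def build(bld):
#     if bld.env.HAS_GTEST:
#         bld.libgtest()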
|
gpl-3.0
|
NickRuiz/wikitrans-pootle
|
strings.py
|
5
|
1461
|
# These are some strings that we need for successful extraction. They come from
# Django and are not otherwise included in our POT file. This file itself is not
# used by a running Pootle.
# Don't change any of these strings unless they changed in Django. Adding extra
# comments to help translators is fine.
_('Hold down "Control", or "Command" on a Mac, to select more than one.')
_("Enter a valid e-mail address.")
_("That e-mail address doesn't have an associated user account. Are you sure you've registered?")
_("This field is required.")
_("Ensure this value has at most %(max)d characters (it has %(length)d).")
_("Save and add another")
_("Save and continue editing")
_("Add")
_("Change")
# l10n: This is a constructed string that might end up saying "Add language" or "Add project". If you need to translate that differently, you might want to try to translate it as "Add an entry to the %(name)s table" or "Add an object of type "%(name)s".
_("Add %(name)s")
# l10n: This link takes the user to the page where the currently configured option is displayed (such as a project or language)
_("View on site")
_("first name")
_("last name")
_("e-mail address")
_("active")
# l10n: Please translate this simply as "Administrator". This string is needed by Django, but 'superuser' is very technical and should be avoided.
_("superuser status")
# l10n: The language from which translation will happen
_("Source Language")
_("Android strings")
|
gpl-2.0
|
Embisto/lightblue-0.4
|
src/series60/_obex.py
|
47
|
4585
|
# Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
import socket as _socket
import os
import types
import _lightbluecommon
from _obexcommon import OBEXError
# public attributes
__all__ = ("sendfile", "recvfile")
def sendfile(address, channel, source):
if not isinstance(source, (types.StringTypes, types.FileType)):
raise TypeError("source must be string or built-in file object")
if isinstance(source, types.StringTypes):
try:
_socket.bt_obex_send_file(address, channel, unicode(source))
except Exception, e:
raise OBEXError(str(e))
else:
# given file object
if hasattr(source, "name"):
localpath = _tempfilename(source.name)
else:
localpath = _tempfilename()
try:
# write the source file object's data into a file, then send it
f = file(localpath, "wb")
f.write(source.read())
f.close()
try:
_socket.bt_obex_send_file(address, channel, unicode(localpath))
except Exception, e:
raise OBEXError(str(e))
finally:
# remove temporary file
if os.path.isfile(localpath):
try:
os.remove(localpath)
except Exception, e:
print "[lightblue.obex] unable to remove temporary file %s: %s" %\
(localpath, str(e))
def recvfile(sock, dest):
if not isinstance(dest, (types.StringTypes, types.FileType)):
raise TypeError("dest must be string or built-in file object")
if isinstance(dest, types.StringTypes):
_recvfile(sock, dest)
else:
# given file object
localpath = _tempfilename()
try:
# receive a file and then read it into the file object
_recvfile(sock, localpath)
recvdfile = file(localpath, "rb")
dest.write(recvdfile.read())
recvdfile.close()
finally:
# remove temporary file
if os.path.isfile(localpath):
try:
os.remove(localpath)
except Exception, e:
print "[lightblue.obex] unable to remove temporary file %s: %s" %\
(localpath, str(e))
# receives file and saves to local path
def _recvfile(sock, localpath):
# PyS60's bt_obex_receive() won't receive the file if given a file path
# that already exists (it tells the client there's a conflict error). So
# we need to handle this somehow, and preferably backup the original file
# so that we can put it back if the recv fails.
if os.path.isfile(localpath):
# if given an existing path, rename existing file
temppath = _tempfilename(localpath)
os.rename(localpath, temppath)
else:
temppath = None
try:
# receive a file (get internal _sock cos sock is our own SocketWrapper
# object)
_socket.bt_obex_receive(sock._sock, unicode(localpath))
except _socket.error, e:
try:
if temppath is not None:
# recv failed, put original file back
os.rename(temppath, localpath)
finally:
# if the renaming of the original file fails, this will still
# get raised
raise OBEXError(str(e))
else:
# recv successful, remove the original file
if temppath is not None:
os.remove(temppath)
# Must point to C:\ because can't write in start-up dir (on Z:?)
def _tempfilename(basename="C:\\lightblue_obex_received_file"):
version = 1
while os.path.isfile(basename):
version += 1
basename = basename[:-1] + str(version)
return basename
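# Hedged usage sketch (PyS60-era Python 2; the address, RFCOMM channel and file
# path are illustrative):
#
# sendfile("00:11:22:33:44:55", 10, "C:\\Data\\photo.jpg")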
|
gpl-3.0
|
kyvinh/home-assistant
|
homeassistant/components/light/mqtt_template.py
|
3
|
10024
|
"""
Support for MQTT Template lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.mqtt_template/
"""
import logging
import voluptuous as vol
import homeassistant.components.mqtt as mqtt
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_EFFECT, ATTR_FLASH, ATTR_RGB_COLOR, ATTR_TRANSITION,
PLATFORM_SCHEMA, SUPPORT_BRIGHTNESS, SUPPORT_EFFECT, SUPPORT_FLASH,
SUPPORT_RGB_COLOR, SUPPORT_TRANSITION, Light)
from homeassistant.const import CONF_NAME, CONF_OPTIMISTIC, STATE_ON, STATE_OFF
from homeassistant.components.mqtt import (
CONF_STATE_TOPIC, CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'mqtt_template'
DEPENDENCIES = ['mqtt']
DEFAULT_NAME = 'MQTT Template Light'
DEFAULT_OPTIMISTIC = False
CONF_EFFECT_LIST = "effect_list"
CONF_COMMAND_ON_TEMPLATE = 'command_on_template'
CONF_COMMAND_OFF_TEMPLATE = 'command_off_template'
CONF_STATE_TEMPLATE = 'state_template'
CONF_BRIGHTNESS_TEMPLATE = 'brightness_template'
CONF_RED_TEMPLATE = 'red_template'
CONF_GREEN_TEMPLATE = 'green_template'
CONF_BLUE_TEMPLATE = 'blue_template'
CONF_EFFECT_TEMPLATE = 'effect_template'
SUPPORT_MQTT_TEMPLATE = (SUPPORT_BRIGHTNESS | SUPPORT_EFFECT | SUPPORT_FLASH |
SUPPORT_RGB_COLOR | SUPPORT_TRANSITION)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_EFFECT_LIST): vol.All(cv.ensure_list, [cv.string]),
vol.Required(CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Required(CONF_COMMAND_ON_TEMPLATE): cv.template,
vol.Required(CONF_COMMAND_OFF_TEMPLATE): cv.template,
vol.Optional(CONF_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_BRIGHTNESS_TEMPLATE): cv.template,
vol.Optional(CONF_RED_TEMPLATE): cv.template,
vol.Optional(CONF_GREEN_TEMPLATE): cv.template,
vol.Optional(CONF_BLUE_TEMPLATE): cv.template,
vol.Optional(CONF_EFFECT_TEMPLATE): cv.template,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_QOS, default=mqtt.DEFAULT_QOS):
vol.All(vol.Coerce(int), vol.In([0, 1, 2])),
vol.Optional(CONF_RETAIN, default=mqtt.DEFAULT_RETAIN): cv.boolean
})
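# Hedged example of a configuration this schema is meant to validate (topic
# names and templates are illustrative, not from the official docs):
#
# light:
#   - platform: mqtt_template
#     name: "Bedroom light"
#     command_topic: "home/bedroom/light/set"
#     state_topic: "home/bedroom/light/state"
#     command_on_template: "on,{{ brightness|d }}"
#     command_off_template: "off"
#     state_template: "{{ value.split(',')[0] }}"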
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup a MQTT Template light."""
add_devices([MqttTemplate(
hass,
config.get(CONF_NAME),
config.get(CONF_EFFECT_LIST),
{
key: config.get(key) for key in (
CONF_STATE_TOPIC,
CONF_COMMAND_TOPIC
)
},
{
key: config.get(key) for key in (
CONF_COMMAND_ON_TEMPLATE,
CONF_COMMAND_OFF_TEMPLATE,
CONF_STATE_TEMPLATE,
CONF_BRIGHTNESS_TEMPLATE,
CONF_RED_TEMPLATE,
CONF_GREEN_TEMPLATE,
CONF_BLUE_TEMPLATE,
CONF_EFFECT_TEMPLATE
)
},
config.get(CONF_OPTIMISTIC),
config.get(CONF_QOS),
config.get(CONF_RETAIN)
)])
class MqttTemplate(Light):
"""Representation of a MQTT Template light."""
def __init__(self, hass, name, effect_list, topics, templates, optimistic,
qos, retain):
"""Initialize MQTT Template light."""
self._hass = hass
self._name = name
self._effect_list = effect_list
self._topics = topics
self._templates = templates
for tpl in self._templates.values():
if tpl is not None:
tpl.hass = hass
self._optimistic = optimistic or topics[CONF_STATE_TOPIC] is None \
or templates[CONF_STATE_TEMPLATE] is None
self._qos = qos
self._retain = retain
# features
self._state = False
if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None:
self._brightness = 255
else:
self._brightness = None
if (self._templates[CONF_RED_TEMPLATE] is not None and
self._templates[CONF_GREEN_TEMPLATE] is not None and
self._templates[CONF_BLUE_TEMPLATE] is not None):
self._rgb = [0, 0, 0]
else:
self._rgb = None
self._effect = None
def state_received(topic, payload, qos):
"""A new MQTT message has been received."""
# read state
state = self._templates[CONF_STATE_TEMPLATE].\
render_with_possible_json_value(payload)
if state == STATE_ON:
self._state = True
elif state == STATE_OFF:
self._state = False
else:
_LOGGER.warning('Invalid state value received')
# read brightness
if self._brightness is not None:
try:
self._brightness = int(
self._templates[CONF_BRIGHTNESS_TEMPLATE].
render_with_possible_json_value(payload)
)
except ValueError:
_LOGGER.warning('Invalid brightness value received')
# read color
if self._rgb is not None:
try:
self._rgb[0] = int(
self._templates[CONF_RED_TEMPLATE].
render_with_possible_json_value(payload))
self._rgb[1] = int(
self._templates[CONF_GREEN_TEMPLATE].
render_with_possible_json_value(payload))
self._rgb[2] = int(
self._templates[CONF_BLUE_TEMPLATE].
render_with_possible_json_value(payload))
except ValueError:
_LOGGER.warning('Invalid color value received')
# read effect
if self._templates[CONF_EFFECT_TEMPLATE] is not None:
effect = self._templates[CONF_EFFECT_TEMPLATE].\
render_with_possible_json_value(payload)
# validate effect value
if effect in self._effect_list:
self._effect = effect
else:
_LOGGER.warning('Unsupported effect value received')
self.update_ha_state()
if self._topics[CONF_STATE_TOPIC] is not None:
mqtt.subscribe(self._hass, self._topics[CONF_STATE_TOPIC],
state_received, self._qos)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def rgb_color(self):
"""Return the RGB color value [int, int, int]."""
return self._rgb
@property
def should_poll(self):
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return False
@property
def name(self):
"""Return the name of the entity."""
return self._name
@property
def is_on(self):
"""Return True if entity is on."""
return self._state
@property
def assumed_state(self):
"""Return True if unable to access real state of the entity."""
return self._optimistic
@property
def effect_list(self):
"""Return the list of supported effects."""
return self._effect_list
@property
def effect(self):
"""Return the current effect."""
return self._effect
def turn_on(self, **kwargs):
"""Turn the entity on."""
# state
values = {'state': True}
if self._optimistic:
self._state = True
# brightness
if ATTR_BRIGHTNESS in kwargs:
values['brightness'] = int(kwargs[ATTR_BRIGHTNESS])
if self._optimistic:
self._brightness = kwargs[ATTR_BRIGHTNESS]
# color
if ATTR_RGB_COLOR in kwargs:
values['red'] = kwargs[ATTR_RGB_COLOR][0]
values['green'] = kwargs[ATTR_RGB_COLOR][1]
values['blue'] = kwargs[ATTR_RGB_COLOR][2]
if self._optimistic:
self._rgb = kwargs[ATTR_RGB_COLOR]
# effect
if ATTR_EFFECT in kwargs:
values['effect'] = kwargs.get(ATTR_EFFECT)
# flash
if ATTR_FLASH in kwargs:
values['flash'] = kwargs.get(ATTR_FLASH)
# transition
if ATTR_TRANSITION in kwargs:
values['transition'] = kwargs[ATTR_TRANSITION]
mqtt.publish(
self._hass, self._topics[CONF_COMMAND_TOPIC],
self._templates[CONF_COMMAND_ON_TEMPLATE].render(**values),
self._qos, self._retain
)
if self._optimistic:
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the entity off."""
# state
values = {'state': False}
if self._optimistic:
self._state = False
# transition
if ATTR_TRANSITION in kwargs:
values['transition'] = kwargs[ATTR_TRANSITION]
mqtt.publish(
self._hass, self._topics[CONF_COMMAND_TOPIC],
self._templates[CONF_COMMAND_OFF_TEMPLATE].render(**values),
self._qos, self._retain
)
if self._optimistic:
self.schedule_update_ha_state()
@property
def supported_features(self):
"""Flag supported features."""
features = 0
if self._brightness is not None:
features = features | SUPPORT_BRIGHTNESS
if self._rgb is not None:
features = features | SUPPORT_RGB_COLOR
if self._effect_list is not None:
features = features | SUPPORT_EFFECT
return features
|
apache-2.0
|
inspirehep/invenio-accounts
|
invenio_accounts/alembic/e12419831262_add_new_columns_on_sessionactivity.py
|
3
|
1487
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Add new columns on SessionActivity."""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'e12419831262'
down_revision = '9848d0149abd'
branch_labels = ()
depends_on = None
def upgrade():
"""Upgrade database."""
with op.batch_alter_table('accounts_user_session_activity') as batch_op:
batch_op.add_column(sa.Column('browser', sa.String(80), nullable=True))
batch_op.add_column(
sa.Column('browser_version', sa.String(30), nullable=True))
batch_op.add_column(
sa.Column('country', sa.String(3), nullable=True))
batch_op.add_column(
sa.Column('device', sa.String(80), nullable=True))
batch_op.add_column(
sa.Column('ip', sa.String(80), nullable=True))
batch_op.add_column(
sa.Column('os', sa.String(80), nullable=True))
def downgrade():
"""Downgrade database."""
with op.batch_alter_table('accounts_user_session_activity') as batch_op:
batch_op.drop_column('os')
batch_op.drop_column('ip')
batch_op.drop_column('device')
batch_op.drop_column('country')
batch_op.drop_column('browser_version')
batch_op.drop_column('browser')
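# Hedged note: with a plain Alembic setup this revision would be applied with
# something like ``alembic upgrade e12419831262`` and reverted with
# ``alembic downgrade 9848d0149abd``; Invenio projects typically drive Alembic
# through their own CLI wrapper.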
|
mit
|
nacc/autotest
|
cli/acl.py
|
6
|
7654
|
#
# Copyright 2008 Google Inc. All Rights Reserved.
"""
The acl module contains the objects and methods used to
manage ACLs in Autotest.
The valid actions are:
add: adds users or hosts to an ACL
remove: removes users or hosts from an ACL
create: creates acl(s)
delete: deletes acl(s)
list: lists acl(s)
The common options are:
--alist / -A: file containing a list of ACLs
See topic_common.py for a High Level Design and Algorithm.
"""
import os, sys
from autotest.cli import topic_common, action_common
class acl(topic_common.atest):
"""ACL class
atest acl [create|delete|list|add|remove] <options>"""
usage_action = '[create|delete|list|add|remove]'
topic = 'acl_group'
msg_topic = 'ACL'
msg_items = '<acls>'
def __init__(self):
"""Add to the parser the options common to all the ACL actions"""
super(acl, self).__init__()
self.parser.add_option('-A', '--alist',
help='File listing the ACLs',
type='string',
default=None,
metavar='ACL_FLIST')
self.topic_parse_info = topic_common.item_parse_info(
attribute_name='acls',
filename_option='alist',
use_leftover=True)
def get_items(self):
return self.acls
class acl_help(acl):
"""Just here to get the atest logic working.
Usage is set by its parent"""
pass
class acl_list(action_common.atest_list, acl):
"""atest acl list [--verbose]
[--user <users>|--mach <machine>|--alist <file>] [<acls>]"""
def __init__(self):
super(acl_list, self).__init__()
self.parser.add_option('-u', '--user',
help='List ACLs containing USER',
type='string',
metavar='USER')
self.parser.add_option('-m', '--machine',
help='List ACLs containing MACHINE',
type='string',
metavar='MACHINE')
def parse(self):
user_info = topic_common.item_parse_info(attribute_name='users',
inline_option='user')
host_info = topic_common.item_parse_info(attribute_name='hosts',
inline_option='machine')
(options, leftover) = super(acl_list, self).parse([user_info,
host_info])
users = getattr(self, 'users')
hosts = getattr(self, 'hosts')
acls = getattr(self, 'acls')
if ((users and (hosts or acls)) or
(hosts and acls)):
self.invalid_syntax('Only specify one of --user,'
' --machine or ACL')
if len(users) > 1:
self.invalid_syntax('Only specify one <user>')
if len(hosts) > 1:
self.invalid_syntax('Only specify one <machine>')
try:
self.users = users[0]
except IndexError:
pass
try:
self.hosts = hosts[0]
except IndexError:
pass
return (options, leftover)
def execute(self):
filters = {}
check_results = {}
if self.acls:
filters['name__in'] = self.acls
check_results['name__in'] = 'name'
if self.users:
filters['users__login'] = self.users
check_results['users__login'] = None
if self.hosts:
filters['hosts__hostname'] = self.hosts
check_results['hosts__hostname'] = None
return super(acl_list,
self).execute(op='get_acl_groups',
filters=filters,
check_results=check_results)
def output(self, results):
# If an ACL was specified, always print its details
if self.acls or self.verbose:
sublist_keys = ('hosts', 'users')
else:
sublist_keys = ()
super(acl_list, self).output(results,
keys=('name', 'description'),
sublist_keys=sublist_keys)
class acl_create(action_common.atest_create, acl):
"""atest acl create <acl> --desc <description>"""
def __init__(self):
super(acl_create, self).__init__()
self.parser.add_option('-d', '--desc',
help='Creates the ACL with the DESCRIPTION',
type='string')
self.parser.remove_option('--alist')
def parse(self):
(options, leftover) = super(acl_create, self).parse(req_items='acls')
if not options.desc:
self.invalid_syntax('Must specify a description to create an ACL.')
self.data_item_key = 'name'
self.data['description'] = options.desc
if len(self.acls) > 1:
self.invalid_syntax('Can only create one ACL at a time')
return (options, leftover)
class acl_delete(action_common.atest_delete, acl):
"""atest acl delete [<acls> | --alist <file>"""
pass
class acl_add_or_remove(acl):
def __init__(self):
super(acl_add_or_remove, self).__init__()
# Get the appropriate help for adding or removing.
words = self.usage_words
lower_words = tuple(word.lower() for word in words)
self.parser.add_option('-u', '--user',
help='%s USER(s) %s the ACL' % words,
type='string',
metavar='USER')
self.parser.add_option('-U', '--ulist',
help='File containing users to %s %s '
'the ACL' % lower_words,
type='string',
metavar='USER_FLIST')
self.parser.add_option('-m', '--machine',
help='%s MACHINE(s) %s the ACL' % words,
type='string',
metavar='MACHINE')
self.parser.add_option('-M', '--mlist',
help='File containing machines to %s %s '
'the ACL' % lower_words,
type='string',
metavar='MACHINE_FLIST')
def parse(self):
user_info = topic_common.item_parse_info(attribute_name='users',
inline_option='user',
filename_option='ulist')
host_info = topic_common.item_parse_info(attribute_name='hosts',
inline_option='machine',
filename_option='mlist')
(options, leftover) = super(acl_add_or_remove,
self).parse([user_info, host_info],
req_items='acls')
if (not getattr(self, 'users', None) and
not getattr(self, 'hosts', None)):
self.invalid_syntax('Specify at least one USER or MACHINE')
return (options, leftover)
class acl_add(action_common.atest_add, acl_add_or_remove):
"""atest acl add <acl> --user <user>|
--machine <machine>|--mlist <FILE>]"""
pass
class acl_remove(action_common.atest_remove, acl_add_or_remove):
"""atest acl remove [<acls> | --alist <file>
--user <user> | --machine <machine> | --mlist <FILE>]"""
pass
|
gpl-2.0
|
schemaorg/schemaorg
|
software/SchemaTerms/example-code/protobufs/google/protobuf/internal/more_messages_pb2.py
|
4
|
253271
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/internal/more_messages.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/internal/more_messages.proto',
package='google.protobuf.internal',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n,google/protobuf/internal/more_messages.proto\x12\x18google.protobuf.internal\"h\n\x10OutOfOrderFields\x12\x17\n\x0foptional_sint32\x18\x05 \x01(\x11\x12\x17\n\x0foptional_uint32\x18\x03 \x01(\r\x12\x16\n\x0eoptional_int32\x18\x01 \x01(\x05*\x04\x08\x04\x10\x05*\x04\x08\x02\x10\x03\"\xc3\x02\n\x05\x63lass\x12\x11\n\tint_field\x18\x01 \x01(\x05\x12\n\n\x02if\x18\x02 \x01(\x05\x12(\n\x02\x61s\x18\x03 \x01(\x0e\x32\x1c.google.protobuf.internal.is\x12\x30\n\nenum_field\x18\x04 \x01(\x0e\x32\x1c.google.protobuf.internal.is\x12>\n\x11nested_enum_field\x18\x05 \x01(\x0e\x32#.google.protobuf.internal.class.for\x12;\n\x0enested_message\x18\x06 \x01(\x0b\x32#.google.protobuf.internal.class.try\x1a\x1c\n\x03try\x12\r\n\x05\x66ield\x18\x01 \x01(\x05*\x06\x08\xe7\x07\x10\x90N\"\x1c\n\x03\x66or\x12\x0b\n\x07\x64\x65\x66\x61ult\x10\x00\x12\x08\n\x04True\x10\x01*\x06\x08\xe7\x07\x10\x90N\"?\n\x0b\x45xtendClass20\n\x06return\x12\x1f.google.protobuf.internal.class\x18\xea\x07 \x01(\x05\"~\n\x0fTestFullKeyword\x12:\n\x06\x66ield1\x18\x01 \x01(\x0b\x32*.google.protobuf.internal.OutOfOrderFields\x12/\n\x06\x66ield2\x18\x02 \x01(\x0b\x32\x1f.google.protobuf.internal.class\"\xa5\x0f\n\x11LotsNestedMessage\x1a\x04\n\x02\x42\x30\x1a\x04\n\x02\x42\x31\x1a\x04\n\x02\x42\x32\x1a\x04\n\x02\x42\x33\x1a\x04\n\x02\x42\x34\x1a\x04\n\x02\x42\x35\x1a\x04\n\x02\x42\x36\x1a\x04\n\x02\x42\x37\x1a\x04\n\x02\x42\x38\x1a\x04\n\x02\x42\x39\x1a\x05\n\x03\x42\x31\x30\x1a\x05\n\x03\x42\x31\x31\x1a\x05\n\x03\x42\x31\x32\x1a\x05\n\x03\x42\x31\x33\x1a\x05\n\x03\x42\x31\x34\x1a\x05\n\x03\x42\x31\x35\x1a\x05\n\x03\x42\x31\x36\x1a\x05\n\x03\x42\x31\x37\x1a\x05\n\x03\x42\x31\x38\x1a\x05\n\x03\x42\x31\x39\x1a\x05\n\x03\x42\x32\x30\x1a\x05\n\x03\x42\x32\x31\x1a\x05\n\x03\x42\x32\x32\x1a\x05\n\x03\x42\x32\x33\x1a\x05\n\x03\x42\x32\x34\x1a\x05\n\x03\x42\x32\x35\x1a\x05\n\x03\x42\x32\x36\x1a\x05\n\x03\x42\x32\x37\x1a\x05\n\x03\x42\x32\x38\x1a\x05\n\x03\x42\x32\x39\x1a\x05\n\x03\x42\x33\x30\x1a\x05\n\x03\x42\x33\x31\x1a\x05\n\x03\x42\x33\x32\x1a\x05\n\x03\x42\x33\x33\x1a\x05\n\x03\x42\x33\x34\x1a\x05\n\x03\x42\x33\x35\x1a\x05\n\x03\x42\x33\x36\x1a\x05\n\x03\x42\x33\x37\x1a\x05\n\x03\x42\x33\x38\x1a\x05\n\x03\x42\x33\x39\x1a\x05\n\x03\x42\x34\x30\x1a\x05\n\x03\x42\x34\x31\x1a\x05\n\x03\x42\x34\x32\x1a\x05\n\x03\x42\x34\x33\x1a\x05\n\x03\x42\x34\x34\x1a\x05\n\x03\x42\x34\x35\x1a\x05\n\x03\x42\x34\x36\x1a\x05\n\x03\x42\x34\x37\x1a\x05\n\x03\x42\x34\x38\x1a\x05\n\x03\x42\x34\x39\x1a\x05\n\x03\x42\x35\x30\x1a\x05\n\x03\x42\x35\x31\x1a\x05\n\x03\x42\x35\x32\x1a\x05\n\x03\x42\x35\x33\x1a\x05\n\x03\x42\x35\x34\x1a\x05\n\x03\x42\x35\x35\x1a\x05\n\x03\x42\x35\x36\x1a\x05\n\x03\x42\x35\x37\x1a\x05\n\x03\x42\x35\x38\x1a\x05\n\x03\x42\x35\x39\x1a\x05\n\x03\x42\x36\x30\x1a\x05\n\x03\x42\x36\x31\x1a\x05\n\x03\x42\x36\x32\x1a\x05\n\x03\x42\x36\x33\x1a\x05\n\x03\x42\x36\x34\x1a\x05\n\x03\x42\x36\x35\x1a\x05\n\x03\x42\x36\x36\x1a\x05\n\x03\x42\x36\x37\x1a\x05\n\x03\x42\x36\x38\x1a\x05\n\x03\x42\x36\x39\x1a\x05\n\x03\x42\x37\x30\x1a\x05\n\x03\x42\x37\x31\x1a\x05\n\x03\x42\x37\x32\x1a\x05\n\x03\x42\x37\x33\x1a\x05\n\x03\x42\x37\x34\x1a\x05\n\x03\x42\x37\x35\x1a\x05\n\x03\x42\x37\x36\x1a\x05\n\x03\x42\x37\x37\x1a\x05\n\x03\x42\x37\x38\x1a\x05\n\x03\x42\x37\x39\x1a\x05\n\x03\x42\x38\x30\x1a\x05\n\x03\x42\x38\x31\x1a\x05\n\x03\x42\x38\x32\x1a\x05\n\x03\x42\x38\x33\x1a\x05\n\x03\x42\x38\x34\x1a\x05\n\x03\x42\x38\x35\x1a\x05\n\x03\x42\x38\x36\x1a\x05\n\x03\x42\x38\x37\x1a\x05\n\x03\x42\x38\x38\x1a\x05\n\x03\x42\x38\x39\x1a\x05\n\x03\x42\x39\x30\x1a\x05\n
\x03\x42\x39\x31\x1a\x05\n\x03\x42\x39\x32\x1a\x05\n\x03\x42\x39\x33\x1a\x05\n\x03\x42\x39\x34\x1a\x05\n\x03\x42\x39\x35\x1a\x05\n\x03\x42\x39\x36\x1a\x05\n\x03\x42\x39\x37\x1a\x05\n\x03\x42\x39\x38\x1a\x05\n\x03\x42\x39\x39\x1a\x06\n\x04\x42\x31\x30\x30\x1a\x06\n\x04\x42\x31\x30\x31\x1a\x06\n\x04\x42\x31\x30\x32\x1a\x06\n\x04\x42\x31\x30\x33\x1a\x06\n\x04\x42\x31\x30\x34\x1a\x06\n\x04\x42\x31\x30\x35\x1a\x06\n\x04\x42\x31\x30\x36\x1a\x06\n\x04\x42\x31\x30\x37\x1a\x06\n\x04\x42\x31\x30\x38\x1a\x06\n\x04\x42\x31\x30\x39\x1a\x06\n\x04\x42\x31\x31\x30\x1a\x06\n\x04\x42\x31\x31\x31\x1a\x06\n\x04\x42\x31\x31\x32\x1a\x06\n\x04\x42\x31\x31\x33\x1a\x06\n\x04\x42\x31\x31\x34\x1a\x06\n\x04\x42\x31\x31\x35\x1a\x06\n\x04\x42\x31\x31\x36\x1a\x06\n\x04\x42\x31\x31\x37\x1a\x06\n\x04\x42\x31\x31\x38\x1a\x06\n\x04\x42\x31\x31\x39\x1a\x06\n\x04\x42\x31\x32\x30\x1a\x06\n\x04\x42\x31\x32\x31\x1a\x06\n\x04\x42\x31\x32\x32\x1a\x06\n\x04\x42\x31\x32\x33\x1a\x06\n\x04\x42\x31\x32\x34\x1a\x06\n\x04\x42\x31\x32\x35\x1a\x06\n\x04\x42\x31\x32\x36\x1a\x06\n\x04\x42\x31\x32\x37\x1a\x06\n\x04\x42\x31\x32\x38\x1a\x06\n\x04\x42\x31\x32\x39\x1a\x06\n\x04\x42\x31\x33\x30\x1a\x06\n\x04\x42\x31\x33\x31\x1a\x06\n\x04\x42\x31\x33\x32\x1a\x06\n\x04\x42\x31\x33\x33\x1a\x06\n\x04\x42\x31\x33\x34\x1a\x06\n\x04\x42\x31\x33\x35\x1a\x06\n\x04\x42\x31\x33\x36\x1a\x06\n\x04\x42\x31\x33\x37\x1a\x06\n\x04\x42\x31\x33\x38\x1a\x06\n\x04\x42\x31\x33\x39\x1a\x06\n\x04\x42\x31\x34\x30\x1a\x06\n\x04\x42\x31\x34\x31\x1a\x06\n\x04\x42\x31\x34\x32\x1a\x06\n\x04\x42\x31\x34\x33\x1a\x06\n\x04\x42\x31\x34\x34\x1a\x06\n\x04\x42\x31\x34\x35\x1a\x06\n\x04\x42\x31\x34\x36\x1a\x06\n\x04\x42\x31\x34\x37\x1a\x06\n\x04\x42\x31\x34\x38\x1a\x06\n\x04\x42\x31\x34\x39\x1a\x06\n\x04\x42\x31\x35\x30\x1a\x06\n\x04\x42\x31\x35\x31\x1a\x06\n\x04\x42\x31\x35\x32\x1a\x06\n\x04\x42\x31\x35\x33\x1a\x06\n\x04\x42\x31\x35\x34\x1a\x06\n\x04\x42\x31\x35\x35\x1a\x06\n\x04\x42\x31\x35\x36\x1a\x06\n\x04\x42\x31\x35\x37\x1a\x06\n\x04\x42\x31\x35\x38\x1a\x06\n\x04\x42\x31\x35\x39\x1a\x06\n\x04\x42\x31\x36\x30\x1a\x06\n\x04\x42\x31\x36\x31\x1a\x06\n\x04\x42\x31\x36\x32\x1a\x06\n\x04\x42\x31\x36\x33\x1a\x06\n\x04\x42\x31\x36\x34\x1a\x06\n\x04\x42\x31\x36\x35\x1a\x06\n\x04\x42\x31\x36\x36\x1a\x06\n\x04\x42\x31\x36\x37\x1a\x06\n\x04\x42\x31\x36\x38\x1a\x06\n\x04\x42\x31\x36\x39\x1a\x06\n\x04\x42\x31\x37\x30\x1a\x06\n\x04\x42\x31\x37\x31\x1a\x06\n\x04\x42\x31\x37\x32\x1a\x06\n\x04\x42\x31\x37\x33\x1a\x06\n\x04\x42\x31\x37\x34\x1a\x06\n\x04\x42\x31\x37\x35\x1a\x06\n\x04\x42\x31\x37\x36\x1a\x06\n\x04\x42\x31\x37\x37\x1a\x06\n\x04\x42\x31\x37\x38\x1a\x06\n\x04\x42\x31\x37\x39\x1a\x06\n\x04\x42\x31\x38\x30\x1a\x06\n\x04\x42\x31\x38\x31\x1a\x06\n\x04\x42\x31\x38\x32\x1a\x06\n\x04\x42\x31\x38\x33\x1a\x06\n\x04\x42\x31\x38\x34\x1a\x06\n\x04\x42\x31\x38\x35\x1a\x06\n\x04\x42\x31\x38\x36\x1a\x06\n\x04\x42\x31\x38\x37\x1a\x06\n\x04\x42\x31\x38\x38\x1a\x06\n\x04\x42\x31\x38\x39\x1a\x06\n\x04\x42\x31\x39\x30\x1a\x06\n\x04\x42\x31\x39\x31\x1a\x06\n\x04\x42\x31\x39\x32\x1a\x06\n\x04\x42\x31\x39\x33\x1a\x06\n\x04\x42\x31\x39\x34\x1a\x06\n\x04\x42\x31\x39\x35\x1a\x06\n\x04\x42\x31\x39\x36\x1a\x06\n\x04\x42\x31\x39\x37\x1a\x06\n\x04\x42\x31\x39\x38\x1a\x06\n\x04\x42\x31\x39\x39\x1a\x06\n\x04\x42\x32\x30\x30\x1a\x06\n\x04\x42\x32\x30\x31\x1a\x06\n\x04\x42\x32\x30\x32\x1a\x06\n\x04\x42\x32\x30\x33\x1a\x06\n\x04\x42\x32\x30\x34\x1a\x06\n\x04\x42\x32\x30\x35\x1a\x06\n\x04\x42\x32\x30\x36\x1a\x06\n\x04\x42\x32\x30\x37\x1a\x06\n\x04\x42\x32\x30\x38\x1a\x06\n\x04\x42\x32\x30\x39\x1a\x06\n\x04\x42\x32\x31\x30\
x1a\x06\n\x04\x42\x32\x31\x31\x1a\x06\n\x04\x42\x32\x31\x32\x1a\x06\n\x04\x42\x32\x31\x33\x1a\x06\n\x04\x42\x32\x31\x34\x1a\x06\n\x04\x42\x32\x31\x35\x1a\x06\n\x04\x42\x32\x31\x36\x1a\x06\n\x04\x42\x32\x31\x37\x1a\x06\n\x04\x42\x32\x31\x38\x1a\x06\n\x04\x42\x32\x31\x39\x1a\x06\n\x04\x42\x32\x32\x30\x1a\x06\n\x04\x42\x32\x32\x31\x1a\x06\n\x04\x42\x32\x32\x32\x1a\x06\n\x04\x42\x32\x32\x33\x1a\x06\n\x04\x42\x32\x32\x34\x1a\x06\n\x04\x42\x32\x32\x35\x1a\x06\n\x04\x42\x32\x32\x36\x1a\x06\n\x04\x42\x32\x32\x37\x1a\x06\n\x04\x42\x32\x32\x38\x1a\x06\n\x04\x42\x32\x32\x39\x1a\x06\n\x04\x42\x32\x33\x30\x1a\x06\n\x04\x42\x32\x33\x31\x1a\x06\n\x04\x42\x32\x33\x32\x1a\x06\n\x04\x42\x32\x33\x33\x1a\x06\n\x04\x42\x32\x33\x34\x1a\x06\n\x04\x42\x32\x33\x35\x1a\x06\n\x04\x42\x32\x33\x36\x1a\x06\n\x04\x42\x32\x33\x37\x1a\x06\n\x04\x42\x32\x33\x38\x1a\x06\n\x04\x42\x32\x33\x39\x1a\x06\n\x04\x42\x32\x34\x30\x1a\x06\n\x04\x42\x32\x34\x31\x1a\x06\n\x04\x42\x32\x34\x32\x1a\x06\n\x04\x42\x32\x34\x33\x1a\x06\n\x04\x42\x32\x34\x34\x1a\x06\n\x04\x42\x32\x34\x35\x1a\x06\n\x04\x42\x32\x34\x36\x1a\x06\n\x04\x42\x32\x34\x37\x1a\x06\n\x04\x42\x32\x34\x38\x1a\x06\n\x04\x42\x32\x34\x39\x1a\x06\n\x04\x42\x32\x35\x30\x1a\x06\n\x04\x42\x32\x35\x31\x1a\x06\n\x04\x42\x32\x35\x32\x1a\x06\n\x04\x42\x32\x35\x33\x1a\x06\n\x04\x42\x32\x35\x34\x1a\x06\n\x04\x42\x32\x35\x35*\x1b\n\x02is\x12\x0b\n\x07\x64\x65\x66\x61ult\x10\x00\x12\x08\n\x04\x65lse\x10\x01:C\n\x0foptional_uint64\x12*.google.protobuf.internal.OutOfOrderFields\x18\x04 \x01(\x04:B\n\x0eoptional_int64\x12*.google.protobuf.internal.OutOfOrderFields\x18\x02 \x01(\x03:2\n\x08\x63ontinue\x12\x1f.google.protobuf.internal.class\x18\xe9\x07 \x01(\x05:2\n\x04with\x12#.google.protobuf.internal.class.try\x18\xe9\x07 \x01(\x05'
)
_IS = _descriptor.EnumDescriptor(
name='is',
full_name='google.protobuf.internal.is',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='default', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='else', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=2659,
serialized_end=2686,
)
_sym_db.RegisterEnumDescriptor(_IS)
globals()['is'] = enum_type_wrapper.EnumTypeWrapper(_IS)
default = 0
globals()['else'] = 1
OPTIONAL_UINT64_FIELD_NUMBER = 4
optional_uint64 = _descriptor.FieldDescriptor(
name='optional_uint64', full_name='google.protobuf.internal.optional_uint64', index=0,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
OPTIONAL_INT64_FIELD_NUMBER = 2
optional_int64 = _descriptor.FieldDescriptor(
name='optional_int64', full_name='google.protobuf.internal.optional_int64', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
CONTINUE_FIELD_NUMBER = 1001
globals()['continue'] = _descriptor.FieldDescriptor(
name='continue', full_name='google.protobuf.internal.continue', index=2,
number=1001, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
WITH_FIELD_NUMBER = 1001
globals()['with'] = _descriptor.FieldDescriptor(
name='with', full_name='google.protobuf.internal.with', index=3,
number=1001, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
_CLASS_FOR = _descriptor.EnumDescriptor(
name='for',
full_name='google.protobuf.internal.class.for',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='default', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='True', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=468,
serialized_end=496,
)
_sym_db.RegisterEnumDescriptor(_CLASS_FOR)
_OUTOFORDERFIELDS = _descriptor.Descriptor(
name='OutOfOrderFields',
full_name='google.protobuf.internal.OutOfOrderFields',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='optional_sint32', full_name='google.protobuf.internal.OutOfOrderFields.optional_sint32', index=0,
number=5, type=17, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='optional_uint32', full_name='google.protobuf.internal.OutOfOrderFields.optional_uint32', index=1,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='optional_int32', full_name='google.protobuf.internal.OutOfOrderFields.optional_int32', index=2,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(4, 5), (2, 3), ],
oneofs=[
],
serialized_start=74,
serialized_end=178,
)
_CLASS_TRY = _descriptor.Descriptor(
name='try',
full_name='google.protobuf.internal.class.try',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='field', full_name='google.protobuf.internal.class.try.field', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(999, 10000), ],
oneofs=[
],
serialized_start=438,
serialized_end=466,
)
_CLASS = _descriptor.Descriptor(
name='class',
full_name='google.protobuf.internal.class',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='int_field', full_name='google.protobuf.internal.class.int_field', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='if', full_name='google.protobuf.internal.class.if', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='as', full_name='google.protobuf.internal.class.as', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enum_field', full_name='google.protobuf.internal.class.enum_field', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='nested_enum_field', full_name='google.protobuf.internal.class.nested_enum_field', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='nested_message', full_name='google.protobuf.internal.class.nested_message', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_CLASS_TRY, ],
enum_types=[
_CLASS_FOR,
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(999, 10000), ],
oneofs=[
],
serialized_start=181,
serialized_end=504,
)
_EXTENDCLASS = _descriptor.Descriptor(
name='ExtendClass',
full_name='google.protobuf.internal.ExtendClass',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
_descriptor.FieldDescriptor(
name='return', full_name='google.protobuf.internal.ExtendClass.return', index=0,
number=1002, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=506,
serialized_end=569,
)
_TESTFULLKEYWORD = _descriptor.Descriptor(
name='TestFullKeyword',
full_name='google.protobuf.internal.TestFullKeyword',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='field1', full_name='google.protobuf.internal.TestFullKeyword.field1', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='field2', full_name='google.protobuf.internal.TestFullKeyword.field2', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=571,
serialized_end=697,
)
_LOTSNESTEDMESSAGE_B0 = _descriptor.Descriptor(
name='B0',
full_name='google.protobuf.internal.LotsNestedMessage.B0',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=721,
serialized_end=725,
)
_LOTSNESTEDMESSAGE_B1 = _descriptor.Descriptor(
name='B1',
full_name='google.protobuf.internal.LotsNestedMessage.B1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=727,
serialized_end=731,
)
_LOTSNESTEDMESSAGE_B2 = _descriptor.Descriptor(
name='B2',
full_name='google.protobuf.internal.LotsNestedMessage.B2',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=733,
serialized_end=737,
)
_LOTSNESTEDMESSAGE_B3 = _descriptor.Descriptor(
name='B3',
full_name='google.protobuf.internal.LotsNestedMessage.B3',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=739,
serialized_end=743,
)
_LOTSNESTEDMESSAGE_B4 = _descriptor.Descriptor(
name='B4',
full_name='google.protobuf.internal.LotsNestedMessage.B4',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=745,
serialized_end=749,
)
_LOTSNESTEDMESSAGE_B5 = _descriptor.Descriptor(
name='B5',
full_name='google.protobuf.internal.LotsNestedMessage.B5',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=751,
serialized_end=755,
)
_LOTSNESTEDMESSAGE_B6 = _descriptor.Descriptor(
name='B6',
full_name='google.protobuf.internal.LotsNestedMessage.B6',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=757,
serialized_end=761,
)
_LOTSNESTEDMESSAGE_B7 = _descriptor.Descriptor(
name='B7',
full_name='google.protobuf.internal.LotsNestedMessage.B7',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=763,
serialized_end=767,
)
_LOTSNESTEDMESSAGE_B8 = _descriptor.Descriptor(
name='B8',
full_name='google.protobuf.internal.LotsNestedMessage.B8',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=769,
serialized_end=773,
)
_LOTSNESTEDMESSAGE_B9 = _descriptor.Descriptor(
name='B9',
full_name='google.protobuf.internal.LotsNestedMessage.B9',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=775,
serialized_end=779,
)
_LOTSNESTEDMESSAGE_B10 = _descriptor.Descriptor(
name='B10',
full_name='google.protobuf.internal.LotsNestedMessage.B10',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=781,
serialized_end=786,
)
_LOTSNESTEDMESSAGE_B11 = _descriptor.Descriptor(
name='B11',
full_name='google.protobuf.internal.LotsNestedMessage.B11',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=788,
serialized_end=793,
)
_LOTSNESTEDMESSAGE_B12 = _descriptor.Descriptor(
name='B12',
full_name='google.protobuf.internal.LotsNestedMessage.B12',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=795,
serialized_end=800,
)
_LOTSNESTEDMESSAGE_B13 = _descriptor.Descriptor(
name='B13',
full_name='google.protobuf.internal.LotsNestedMessage.B13',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=802,
serialized_end=807,
)
_LOTSNESTEDMESSAGE_B14 = _descriptor.Descriptor(
name='B14',
full_name='google.protobuf.internal.LotsNestedMessage.B14',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=809,
serialized_end=814,
)
_LOTSNESTEDMESSAGE_B15 = _descriptor.Descriptor(
name='B15',
full_name='google.protobuf.internal.LotsNestedMessage.B15',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=816,
serialized_end=821,
)
_LOTSNESTEDMESSAGE_B16 = _descriptor.Descriptor(
name='B16',
full_name='google.protobuf.internal.LotsNestedMessage.B16',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=823,
serialized_end=828,
)
_LOTSNESTEDMESSAGE_B17 = _descriptor.Descriptor(
name='B17',
full_name='google.protobuf.internal.LotsNestedMessage.B17',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=830,
serialized_end=835,
)
_LOTSNESTEDMESSAGE_B18 = _descriptor.Descriptor(
name='B18',
full_name='google.protobuf.internal.LotsNestedMessage.B18',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=837,
serialized_end=842,
)
_LOTSNESTEDMESSAGE_B19 = _descriptor.Descriptor(
name='B19',
full_name='google.protobuf.internal.LotsNestedMessage.B19',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=844,
serialized_end=849,
)
_LOTSNESTEDMESSAGE_B20 = _descriptor.Descriptor(
name='B20',
full_name='google.protobuf.internal.LotsNestedMessage.B20',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=851,
serialized_end=856,
)
_LOTSNESTEDMESSAGE_B21 = _descriptor.Descriptor(
name='B21',
full_name='google.protobuf.internal.LotsNestedMessage.B21',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=858,
serialized_end=863,
)
_LOTSNESTEDMESSAGE_B22 = _descriptor.Descriptor(
name='B22',
full_name='google.protobuf.internal.LotsNestedMessage.B22',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=865,
serialized_end=870,
)
_LOTSNESTEDMESSAGE_B23 = _descriptor.Descriptor(
name='B23',
full_name='google.protobuf.internal.LotsNestedMessage.B23',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=872,
serialized_end=877,
)
_LOTSNESTEDMESSAGE_B24 = _descriptor.Descriptor(
name='B24',
full_name='google.protobuf.internal.LotsNestedMessage.B24',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=879,
serialized_end=884,
)
_LOTSNESTEDMESSAGE_B25 = _descriptor.Descriptor(
name='B25',
full_name='google.protobuf.internal.LotsNestedMessage.B25',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=886,
serialized_end=891,
)
_LOTSNESTEDMESSAGE_B26 = _descriptor.Descriptor(
name='B26',
full_name='google.protobuf.internal.LotsNestedMessage.B26',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=893,
serialized_end=898,
)
_LOTSNESTEDMESSAGE_B27 = _descriptor.Descriptor(
name='B27',
full_name='google.protobuf.internal.LotsNestedMessage.B27',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=900,
serialized_end=905,
)
_LOTSNESTEDMESSAGE_B28 = _descriptor.Descriptor(
name='B28',
full_name='google.protobuf.internal.LotsNestedMessage.B28',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=907,
serialized_end=912,
)
_LOTSNESTEDMESSAGE_B29 = _descriptor.Descriptor(
name='B29',
full_name='google.protobuf.internal.LotsNestedMessage.B29',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=914,
serialized_end=919,
)
_LOTSNESTEDMESSAGE_B30 = _descriptor.Descriptor(
name='B30',
full_name='google.protobuf.internal.LotsNestedMessage.B30',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=921,
serialized_end=926,
)
_LOTSNESTEDMESSAGE_B31 = _descriptor.Descriptor(
name='B31',
full_name='google.protobuf.internal.LotsNestedMessage.B31',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=928,
serialized_end=933,
)
_LOTSNESTEDMESSAGE_B32 = _descriptor.Descriptor(
name='B32',
full_name='google.protobuf.internal.LotsNestedMessage.B32',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=935,
serialized_end=940,
)
_LOTSNESTEDMESSAGE_B33 = _descriptor.Descriptor(
name='B33',
full_name='google.protobuf.internal.LotsNestedMessage.B33',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=942,
serialized_end=947,
)
_LOTSNESTEDMESSAGE_B34 = _descriptor.Descriptor(
name='B34',
full_name='google.protobuf.internal.LotsNestedMessage.B34',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=949,
serialized_end=954,
)
_LOTSNESTEDMESSAGE_B35 = _descriptor.Descriptor(
name='B35',
full_name='google.protobuf.internal.LotsNestedMessage.B35',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=956,
serialized_end=961,
)
_LOTSNESTEDMESSAGE_B36 = _descriptor.Descriptor(
name='B36',
full_name='google.protobuf.internal.LotsNestedMessage.B36',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=963,
serialized_end=968,
)
_LOTSNESTEDMESSAGE_B37 = _descriptor.Descriptor(
name='B37',
full_name='google.protobuf.internal.LotsNestedMessage.B37',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=970,
serialized_end=975,
)
_LOTSNESTEDMESSAGE_B38 = _descriptor.Descriptor(
name='B38',
full_name='google.protobuf.internal.LotsNestedMessage.B38',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=977,
serialized_end=982,
)
_LOTSNESTEDMESSAGE_B39 = _descriptor.Descriptor(
name='B39',
full_name='google.protobuf.internal.LotsNestedMessage.B39',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=984,
serialized_end=989,
)
_LOTSNESTEDMESSAGE_B40 = _descriptor.Descriptor(
name='B40',
full_name='google.protobuf.internal.LotsNestedMessage.B40',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=991,
serialized_end=996,
)
_LOTSNESTEDMESSAGE_B41 = _descriptor.Descriptor(
name='B41',
full_name='google.protobuf.internal.LotsNestedMessage.B41',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=998,
serialized_end=1003,
)
_LOTSNESTEDMESSAGE_B42 = _descriptor.Descriptor(
name='B42',
full_name='google.protobuf.internal.LotsNestedMessage.B42',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1005,
serialized_end=1010,
)
_LOTSNESTEDMESSAGE_B43 = _descriptor.Descriptor(
name='B43',
full_name='google.protobuf.internal.LotsNestedMessage.B43',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1012,
serialized_end=1017,
)
_LOTSNESTEDMESSAGE_B44 = _descriptor.Descriptor(
name='B44',
full_name='google.protobuf.internal.LotsNestedMessage.B44',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1019,
serialized_end=1024,
)
_LOTSNESTEDMESSAGE_B45 = _descriptor.Descriptor(
name='B45',
full_name='google.protobuf.internal.LotsNestedMessage.B45',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1026,
serialized_end=1031,
)
_LOTSNESTEDMESSAGE_B46 = _descriptor.Descriptor(
name='B46',
full_name='google.protobuf.internal.LotsNestedMessage.B46',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1033,
serialized_end=1038,
)
_LOTSNESTEDMESSAGE_B47 = _descriptor.Descriptor(
name='B47',
full_name='google.protobuf.internal.LotsNestedMessage.B47',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1040,
serialized_end=1045,
)
_LOTSNESTEDMESSAGE_B48 = _descriptor.Descriptor(
name='B48',
full_name='google.protobuf.internal.LotsNestedMessage.B48',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1047,
serialized_end=1052,
)
_LOTSNESTEDMESSAGE_B49 = _descriptor.Descriptor(
name='B49',
full_name='google.protobuf.internal.LotsNestedMessage.B49',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1054,
serialized_end=1059,
)
_LOTSNESTEDMESSAGE_B50 = _descriptor.Descriptor(
name='B50',
full_name='google.protobuf.internal.LotsNestedMessage.B50',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1061,
serialized_end=1066,
)
_LOTSNESTEDMESSAGE_B51 = _descriptor.Descriptor(
name='B51',
full_name='google.protobuf.internal.LotsNestedMessage.B51',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1068,
serialized_end=1073,
)
_LOTSNESTEDMESSAGE_B52 = _descriptor.Descriptor(
name='B52',
full_name='google.protobuf.internal.LotsNestedMessage.B52',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1075,
serialized_end=1080,
)
_LOTSNESTEDMESSAGE_B53 = _descriptor.Descriptor(
name='B53',
full_name='google.protobuf.internal.LotsNestedMessage.B53',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1082,
serialized_end=1087,
)
_LOTSNESTEDMESSAGE_B54 = _descriptor.Descriptor(
name='B54',
full_name='google.protobuf.internal.LotsNestedMessage.B54',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1089,
serialized_end=1094,
)
_LOTSNESTEDMESSAGE_B55 = _descriptor.Descriptor(
name='B55',
full_name='google.protobuf.internal.LotsNestedMessage.B55',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1096,
serialized_end=1101,
)
_LOTSNESTEDMESSAGE_B56 = _descriptor.Descriptor(
name='B56',
full_name='google.protobuf.internal.LotsNestedMessage.B56',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1103,
serialized_end=1108,
)
_LOTSNESTEDMESSAGE_B57 = _descriptor.Descriptor(
name='B57',
full_name='google.protobuf.internal.LotsNestedMessage.B57',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1110,
serialized_end=1115,
)
_LOTSNESTEDMESSAGE_B58 = _descriptor.Descriptor(
name='B58',
full_name='google.protobuf.internal.LotsNestedMessage.B58',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1117,
serialized_end=1122,
)
_LOTSNESTEDMESSAGE_B59 = _descriptor.Descriptor(
name='B59',
full_name='google.protobuf.internal.LotsNestedMessage.B59',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1124,
serialized_end=1129,
)
_LOTSNESTEDMESSAGE_B60 = _descriptor.Descriptor(
name='B60',
full_name='google.protobuf.internal.LotsNestedMessage.B60',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1131,
serialized_end=1136,
)
_LOTSNESTEDMESSAGE_B61 = _descriptor.Descriptor(
name='B61',
full_name='google.protobuf.internal.LotsNestedMessage.B61',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1138,
serialized_end=1143,
)
_LOTSNESTEDMESSAGE_B62 = _descriptor.Descriptor(
name='B62',
full_name='google.protobuf.internal.LotsNestedMessage.B62',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1145,
serialized_end=1150,
)
_LOTSNESTEDMESSAGE_B63 = _descriptor.Descriptor(
name='B63',
full_name='google.protobuf.internal.LotsNestedMessage.B63',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1152,
serialized_end=1157,
)
_LOTSNESTEDMESSAGE_B64 = _descriptor.Descriptor(
name='B64',
full_name='google.protobuf.internal.LotsNestedMessage.B64',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1159,
serialized_end=1164,
)
_LOTSNESTEDMESSAGE_B65 = _descriptor.Descriptor(
name='B65',
full_name='google.protobuf.internal.LotsNestedMessage.B65',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1166,
serialized_end=1171,
)
_LOTSNESTEDMESSAGE_B66 = _descriptor.Descriptor(
name='B66',
full_name='google.protobuf.internal.LotsNestedMessage.B66',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1173,
serialized_end=1178,
)
_LOTSNESTEDMESSAGE_B67 = _descriptor.Descriptor(
name='B67',
full_name='google.protobuf.internal.LotsNestedMessage.B67',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1180,
serialized_end=1185,
)
_LOTSNESTEDMESSAGE_B68 = _descriptor.Descriptor(
name='B68',
full_name='google.protobuf.internal.LotsNestedMessage.B68',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1187,
serialized_end=1192,
)
_LOTSNESTEDMESSAGE_B69 = _descriptor.Descriptor(
name='B69',
full_name='google.protobuf.internal.LotsNestedMessage.B69',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1194,
serialized_end=1199,
)
_LOTSNESTEDMESSAGE_B70 = _descriptor.Descriptor(
name='B70',
full_name='google.protobuf.internal.LotsNestedMessage.B70',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1201,
serialized_end=1206,
)
_LOTSNESTEDMESSAGE_B71 = _descriptor.Descriptor(
name='B71',
full_name='google.protobuf.internal.LotsNestedMessage.B71',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1208,
serialized_end=1213,
)
_LOTSNESTEDMESSAGE_B72 = _descriptor.Descriptor(
name='B72',
full_name='google.protobuf.internal.LotsNestedMessage.B72',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1215,
serialized_end=1220,
)
_LOTSNESTEDMESSAGE_B73 = _descriptor.Descriptor(
name='B73',
full_name='google.protobuf.internal.LotsNestedMessage.B73',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1222,
serialized_end=1227,
)
_LOTSNESTEDMESSAGE_B74 = _descriptor.Descriptor(
name='B74',
full_name='google.protobuf.internal.LotsNestedMessage.B74',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1229,
serialized_end=1234,
)
_LOTSNESTEDMESSAGE_B75 = _descriptor.Descriptor(
name='B75',
full_name='google.protobuf.internal.LotsNestedMessage.B75',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1236,
serialized_end=1241,
)
_LOTSNESTEDMESSAGE_B76 = _descriptor.Descriptor(
name='B76',
full_name='google.protobuf.internal.LotsNestedMessage.B76',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1243,
serialized_end=1248,
)
_LOTSNESTEDMESSAGE_B77 = _descriptor.Descriptor(
name='B77',
full_name='google.protobuf.internal.LotsNestedMessage.B77',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1250,
serialized_end=1255,
)
_LOTSNESTEDMESSAGE_B78 = _descriptor.Descriptor(
name='B78',
full_name='google.protobuf.internal.LotsNestedMessage.B78',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1257,
serialized_end=1262,
)
_LOTSNESTEDMESSAGE_B79 = _descriptor.Descriptor(
name='B79',
full_name='google.protobuf.internal.LotsNestedMessage.B79',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1264,
serialized_end=1269,
)
_LOTSNESTEDMESSAGE_B80 = _descriptor.Descriptor(
name='B80',
full_name='google.protobuf.internal.LotsNestedMessage.B80',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1271,
serialized_end=1276,
)
_LOTSNESTEDMESSAGE_B81 = _descriptor.Descriptor(
name='B81',
full_name='google.protobuf.internal.LotsNestedMessage.B81',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1278,
serialized_end=1283,
)
_LOTSNESTEDMESSAGE_B82 = _descriptor.Descriptor(
name='B82',
full_name='google.protobuf.internal.LotsNestedMessage.B82',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1285,
serialized_end=1290,
)
_LOTSNESTEDMESSAGE_B83 = _descriptor.Descriptor(
name='B83',
full_name='google.protobuf.internal.LotsNestedMessage.B83',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1292,
serialized_end=1297,
)
_LOTSNESTEDMESSAGE_B84 = _descriptor.Descriptor(
name='B84',
full_name='google.protobuf.internal.LotsNestedMessage.B84',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1299,
serialized_end=1304,
)
_LOTSNESTEDMESSAGE_B85 = _descriptor.Descriptor(
name='B85',
full_name='google.protobuf.internal.LotsNestedMessage.B85',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1306,
serialized_end=1311,
)
_LOTSNESTEDMESSAGE_B86 = _descriptor.Descriptor(
name='B86',
full_name='google.protobuf.internal.LotsNestedMessage.B86',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1313,
serialized_end=1318,
)
_LOTSNESTEDMESSAGE_B87 = _descriptor.Descriptor(
name='B87',
full_name='google.protobuf.internal.LotsNestedMessage.B87',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1320,
serialized_end=1325,
)
_LOTSNESTEDMESSAGE_B88 = _descriptor.Descriptor(
name='B88',
full_name='google.protobuf.internal.LotsNestedMessage.B88',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1327,
serialized_end=1332,
)
_LOTSNESTEDMESSAGE_B89 = _descriptor.Descriptor(
name='B89',
full_name='google.protobuf.internal.LotsNestedMessage.B89',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1334,
serialized_end=1339,
)
_LOTSNESTEDMESSAGE_B90 = _descriptor.Descriptor(
name='B90',
full_name='google.protobuf.internal.LotsNestedMessage.B90',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1341,
serialized_end=1346,
)
_LOTSNESTEDMESSAGE_B91 = _descriptor.Descriptor(
name='B91',
full_name='google.protobuf.internal.LotsNestedMessage.B91',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1348,
serialized_end=1353,
)
_LOTSNESTEDMESSAGE_B92 = _descriptor.Descriptor(
name='B92',
full_name='google.protobuf.internal.LotsNestedMessage.B92',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1355,
serialized_end=1360,
)
_LOTSNESTEDMESSAGE_B93 = _descriptor.Descriptor(
name='B93',
full_name='google.protobuf.internal.LotsNestedMessage.B93',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1362,
serialized_end=1367,
)
_LOTSNESTEDMESSAGE_B94 = _descriptor.Descriptor(
name='B94',
full_name='google.protobuf.internal.LotsNestedMessage.B94',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1369,
serialized_end=1374,
)
_LOTSNESTEDMESSAGE_B95 = _descriptor.Descriptor(
name='B95',
full_name='google.protobuf.internal.LotsNestedMessage.B95',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1376,
serialized_end=1381,
)
_LOTSNESTEDMESSAGE_B96 = _descriptor.Descriptor(
name='B96',
full_name='google.protobuf.internal.LotsNestedMessage.B96',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1383,
serialized_end=1388,
)
_LOTSNESTEDMESSAGE_B97 = _descriptor.Descriptor(
name='B97',
full_name='google.protobuf.internal.LotsNestedMessage.B97',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1390,
serialized_end=1395,
)
_LOTSNESTEDMESSAGE_B98 = _descriptor.Descriptor(
name='B98',
full_name='google.protobuf.internal.LotsNestedMessage.B98',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1397,
serialized_end=1402,
)
_LOTSNESTEDMESSAGE_B99 = _descriptor.Descriptor(
name='B99',
full_name='google.protobuf.internal.LotsNestedMessage.B99',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1404,
serialized_end=1409,
)
_LOTSNESTEDMESSAGE_B100 = _descriptor.Descriptor(
name='B100',
full_name='google.protobuf.internal.LotsNestedMessage.B100',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1411,
serialized_end=1417,
)
_LOTSNESTEDMESSAGE_B101 = _descriptor.Descriptor(
name='B101',
full_name='google.protobuf.internal.LotsNestedMessage.B101',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1419,
serialized_end=1425,
)
_LOTSNESTEDMESSAGE_B102 = _descriptor.Descriptor(
name='B102',
full_name='google.protobuf.internal.LotsNestedMessage.B102',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1427,
serialized_end=1433,
)
_LOTSNESTEDMESSAGE_B103 = _descriptor.Descriptor(
name='B103',
full_name='google.protobuf.internal.LotsNestedMessage.B103',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1435,
serialized_end=1441,
)
_LOTSNESTEDMESSAGE_B104 = _descriptor.Descriptor(
name='B104',
full_name='google.protobuf.internal.LotsNestedMessage.B104',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1443,
serialized_end=1449,
)
_LOTSNESTEDMESSAGE_B105 = _descriptor.Descriptor(
name='B105',
full_name='google.protobuf.internal.LotsNestedMessage.B105',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1451,
serialized_end=1457,
)
_LOTSNESTEDMESSAGE_B106 = _descriptor.Descriptor(
name='B106',
full_name='google.protobuf.internal.LotsNestedMessage.B106',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1459,
serialized_end=1465,
)
_LOTSNESTEDMESSAGE_B107 = _descriptor.Descriptor(
name='B107',
full_name='google.protobuf.internal.LotsNestedMessage.B107',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1467,
serialized_end=1473,
)
_LOTSNESTEDMESSAGE_B108 = _descriptor.Descriptor(
name='B108',
full_name='google.protobuf.internal.LotsNestedMessage.B108',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1475,
serialized_end=1481,
)
_LOTSNESTEDMESSAGE_B109 = _descriptor.Descriptor(
name='B109',
full_name='google.protobuf.internal.LotsNestedMessage.B109',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1483,
serialized_end=1489,
)
_LOTSNESTEDMESSAGE_B110 = _descriptor.Descriptor(
name='B110',
full_name='google.protobuf.internal.LotsNestedMessage.B110',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1491,
serialized_end=1497,
)
_LOTSNESTEDMESSAGE_B111 = _descriptor.Descriptor(
name='B111',
full_name='google.protobuf.internal.LotsNestedMessage.B111',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1499,
serialized_end=1505,
)
_LOTSNESTEDMESSAGE_B112 = _descriptor.Descriptor(
name='B112',
full_name='google.protobuf.internal.LotsNestedMessage.B112',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1507,
serialized_end=1513,
)
_LOTSNESTEDMESSAGE_B113 = _descriptor.Descriptor(
name='B113',
full_name='google.protobuf.internal.LotsNestedMessage.B113',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1515,
serialized_end=1521,
)
_LOTSNESTEDMESSAGE_B114 = _descriptor.Descriptor(
name='B114',
full_name='google.protobuf.internal.LotsNestedMessage.B114',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1523,
serialized_end=1529,
)
_LOTSNESTEDMESSAGE_B115 = _descriptor.Descriptor(
name='B115',
full_name='google.protobuf.internal.LotsNestedMessage.B115',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1531,
serialized_end=1537,
)
_LOTSNESTEDMESSAGE_B116 = _descriptor.Descriptor(
name='B116',
full_name='google.protobuf.internal.LotsNestedMessage.B116',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1539,
serialized_end=1545,
)
_LOTSNESTEDMESSAGE_B117 = _descriptor.Descriptor(
name='B117',
full_name='google.protobuf.internal.LotsNestedMessage.B117',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1547,
serialized_end=1553,
)
_LOTSNESTEDMESSAGE_B118 = _descriptor.Descriptor(
name='B118',
full_name='google.protobuf.internal.LotsNestedMessage.B118',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1555,
serialized_end=1561,
)
_LOTSNESTEDMESSAGE_B119 = _descriptor.Descriptor(
name='B119',
full_name='google.protobuf.internal.LotsNestedMessage.B119',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1563,
serialized_end=1569,
)
_LOTSNESTEDMESSAGE_B120 = _descriptor.Descriptor(
name='B120',
full_name='google.protobuf.internal.LotsNestedMessage.B120',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1571,
serialized_end=1577,
)
_LOTSNESTEDMESSAGE_B121 = _descriptor.Descriptor(
name='B121',
full_name='google.protobuf.internal.LotsNestedMessage.B121',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1579,
serialized_end=1585,
)
_LOTSNESTEDMESSAGE_B122 = _descriptor.Descriptor(
name='B122',
full_name='google.protobuf.internal.LotsNestedMessage.B122',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1587,
serialized_end=1593,
)
_LOTSNESTEDMESSAGE_B123 = _descriptor.Descriptor(
name='B123',
full_name='google.protobuf.internal.LotsNestedMessage.B123',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1595,
serialized_end=1601,
)
_LOTSNESTEDMESSAGE_B124 = _descriptor.Descriptor(
name='B124',
full_name='google.protobuf.internal.LotsNestedMessage.B124',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1603,
serialized_end=1609,
)
_LOTSNESTEDMESSAGE_B125 = _descriptor.Descriptor(
name='B125',
full_name='google.protobuf.internal.LotsNestedMessage.B125',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1611,
serialized_end=1617,
)
_LOTSNESTEDMESSAGE_B126 = _descriptor.Descriptor(
name='B126',
full_name='google.protobuf.internal.LotsNestedMessage.B126',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1619,
serialized_end=1625,
)
_LOTSNESTEDMESSAGE_B127 = _descriptor.Descriptor(
name='B127',
full_name='google.protobuf.internal.LotsNestedMessage.B127',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1627,
serialized_end=1633,
)
_LOTSNESTEDMESSAGE_B128 = _descriptor.Descriptor(
name='B128',
full_name='google.protobuf.internal.LotsNestedMessage.B128',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1635,
serialized_end=1641,
)
_LOTSNESTEDMESSAGE_B129 = _descriptor.Descriptor(
name='B129',
full_name='google.protobuf.internal.LotsNestedMessage.B129',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1643,
serialized_end=1649,
)
_LOTSNESTEDMESSAGE_B130 = _descriptor.Descriptor(
name='B130',
full_name='google.protobuf.internal.LotsNestedMessage.B130',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1651,
serialized_end=1657,
)
_LOTSNESTEDMESSAGE_B131 = _descriptor.Descriptor(
name='B131',
full_name='google.protobuf.internal.LotsNestedMessage.B131',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1659,
serialized_end=1665,
)
_LOTSNESTEDMESSAGE_B132 = _descriptor.Descriptor(
name='B132',
full_name='google.protobuf.internal.LotsNestedMessage.B132',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1667,
serialized_end=1673,
)
_LOTSNESTEDMESSAGE_B133 = _descriptor.Descriptor(
name='B133',
full_name='google.protobuf.internal.LotsNestedMessage.B133',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1675,
serialized_end=1681,
)
_LOTSNESTEDMESSAGE_B134 = _descriptor.Descriptor(
name='B134',
full_name='google.protobuf.internal.LotsNestedMessage.B134',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1683,
serialized_end=1689,
)
_LOTSNESTEDMESSAGE_B135 = _descriptor.Descriptor(
name='B135',
full_name='google.protobuf.internal.LotsNestedMessage.B135',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1691,
serialized_end=1697,
)
_LOTSNESTEDMESSAGE_B136 = _descriptor.Descriptor(
name='B136',
full_name='google.protobuf.internal.LotsNestedMessage.B136',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1699,
serialized_end=1705,
)
_LOTSNESTEDMESSAGE_B137 = _descriptor.Descriptor(
name='B137',
full_name='google.protobuf.internal.LotsNestedMessage.B137',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1707,
serialized_end=1713,
)
_LOTSNESTEDMESSAGE_B138 = _descriptor.Descriptor(
name='B138',
full_name='google.protobuf.internal.LotsNestedMessage.B138',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1715,
serialized_end=1721,
)
_LOTSNESTEDMESSAGE_B139 = _descriptor.Descriptor(
name='B139',
full_name='google.protobuf.internal.LotsNestedMessage.B139',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1723,
serialized_end=1729,
)
_LOTSNESTEDMESSAGE_B140 = _descriptor.Descriptor(
name='B140',
full_name='google.protobuf.internal.LotsNestedMessage.B140',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1731,
serialized_end=1737,
)
_LOTSNESTEDMESSAGE_B141 = _descriptor.Descriptor(
name='B141',
full_name='google.protobuf.internal.LotsNestedMessage.B141',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1739,
serialized_end=1745,
)
_LOTSNESTEDMESSAGE_B142 = _descriptor.Descriptor(
name='B142',
full_name='google.protobuf.internal.LotsNestedMessage.B142',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1747,
serialized_end=1753,
)
_LOTSNESTEDMESSAGE_B143 = _descriptor.Descriptor(
name='B143',
full_name='google.protobuf.internal.LotsNestedMessage.B143',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1755,
serialized_end=1761,
)
_LOTSNESTEDMESSAGE_B144 = _descriptor.Descriptor(
name='B144',
full_name='google.protobuf.internal.LotsNestedMessage.B144',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1763,
serialized_end=1769,
)
_LOTSNESTEDMESSAGE_B145 = _descriptor.Descriptor(
name='B145',
full_name='google.protobuf.internal.LotsNestedMessage.B145',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1771,
serialized_end=1777,
)
_LOTSNESTEDMESSAGE_B146 = _descriptor.Descriptor(
name='B146',
full_name='google.protobuf.internal.LotsNestedMessage.B146',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1779,
serialized_end=1785,
)
_LOTSNESTEDMESSAGE_B147 = _descriptor.Descriptor(
name='B147',
full_name='google.protobuf.internal.LotsNestedMessage.B147',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1787,
serialized_end=1793,
)
_LOTSNESTEDMESSAGE_B148 = _descriptor.Descriptor(
name='B148',
full_name='google.protobuf.internal.LotsNestedMessage.B148',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1795,
serialized_end=1801,
)
_LOTSNESTEDMESSAGE_B149 = _descriptor.Descriptor(
name='B149',
full_name='google.protobuf.internal.LotsNestedMessage.B149',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1803,
serialized_end=1809,
)
_LOTSNESTEDMESSAGE_B150 = _descriptor.Descriptor(
name='B150',
full_name='google.protobuf.internal.LotsNestedMessage.B150',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1811,
serialized_end=1817,
)
_LOTSNESTEDMESSAGE_B151 = _descriptor.Descriptor(
name='B151',
full_name='google.protobuf.internal.LotsNestedMessage.B151',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1819,
serialized_end=1825,
)
_LOTSNESTEDMESSAGE_B152 = _descriptor.Descriptor(
name='B152',
full_name='google.protobuf.internal.LotsNestedMessage.B152',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1827,
serialized_end=1833,
)
_LOTSNESTEDMESSAGE_B153 = _descriptor.Descriptor(
name='B153',
full_name='google.protobuf.internal.LotsNestedMessage.B153',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1835,
serialized_end=1841,
)
_LOTSNESTEDMESSAGE_B154 = _descriptor.Descriptor(
name='B154',
full_name='google.protobuf.internal.LotsNestedMessage.B154',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1843,
serialized_end=1849,
)
_LOTSNESTEDMESSAGE_B155 = _descriptor.Descriptor(
name='B155',
full_name='google.protobuf.internal.LotsNestedMessage.B155',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1851,
serialized_end=1857,
)
_LOTSNESTEDMESSAGE_B156 = _descriptor.Descriptor(
name='B156',
full_name='google.protobuf.internal.LotsNestedMessage.B156',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1859,
serialized_end=1865,
)
_LOTSNESTEDMESSAGE_B157 = _descriptor.Descriptor(
name='B157',
full_name='google.protobuf.internal.LotsNestedMessage.B157',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1867,
serialized_end=1873,
)
_LOTSNESTEDMESSAGE_B158 = _descriptor.Descriptor(
name='B158',
full_name='google.protobuf.internal.LotsNestedMessage.B158',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1875,
serialized_end=1881,
)
_LOTSNESTEDMESSAGE_B159 = _descriptor.Descriptor(
name='B159',
full_name='google.protobuf.internal.LotsNestedMessage.B159',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1883,
serialized_end=1889,
)
_LOTSNESTEDMESSAGE_B160 = _descriptor.Descriptor(
name='B160',
full_name='google.protobuf.internal.LotsNestedMessage.B160',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1891,
serialized_end=1897,
)
_LOTSNESTEDMESSAGE_B161 = _descriptor.Descriptor(
name='B161',
full_name='google.protobuf.internal.LotsNestedMessage.B161',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1899,
serialized_end=1905,
)
_LOTSNESTEDMESSAGE_B162 = _descriptor.Descriptor(
name='B162',
full_name='google.protobuf.internal.LotsNestedMessage.B162',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1907,
serialized_end=1913,
)
_LOTSNESTEDMESSAGE_B163 = _descriptor.Descriptor(
name='B163',
full_name='google.protobuf.internal.LotsNestedMessage.B163',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1915,
serialized_end=1921,
)
_LOTSNESTEDMESSAGE_B164 = _descriptor.Descriptor(
name='B164',
full_name='google.protobuf.internal.LotsNestedMessage.B164',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1923,
serialized_end=1929,
)
_LOTSNESTEDMESSAGE_B165 = _descriptor.Descriptor(
name='B165',
full_name='google.protobuf.internal.LotsNestedMessage.B165',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1931,
serialized_end=1937,
)
_LOTSNESTEDMESSAGE_B166 = _descriptor.Descriptor(
name='B166',
full_name='google.protobuf.internal.LotsNestedMessage.B166',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1939,
serialized_end=1945,
)
_LOTSNESTEDMESSAGE_B167 = _descriptor.Descriptor(
name='B167',
full_name='google.protobuf.internal.LotsNestedMessage.B167',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1947,
serialized_end=1953,
)
_LOTSNESTEDMESSAGE_B168 = _descriptor.Descriptor(
name='B168',
full_name='google.protobuf.internal.LotsNestedMessage.B168',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1955,
serialized_end=1961,
)
_LOTSNESTEDMESSAGE_B169 = _descriptor.Descriptor(
name='B169',
full_name='google.protobuf.internal.LotsNestedMessage.B169',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1963,
serialized_end=1969,
)
_LOTSNESTEDMESSAGE_B170 = _descriptor.Descriptor(
name='B170',
full_name='google.protobuf.internal.LotsNestedMessage.B170',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1971,
serialized_end=1977,
)
_LOTSNESTEDMESSAGE_B171 = _descriptor.Descriptor(
name='B171',
full_name='google.protobuf.internal.LotsNestedMessage.B171',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1979,
serialized_end=1985,
)
_LOTSNESTEDMESSAGE_B172 = _descriptor.Descriptor(
name='B172',
full_name='google.protobuf.internal.LotsNestedMessage.B172',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1987,
serialized_end=1993,
)
_LOTSNESTEDMESSAGE_B173 = _descriptor.Descriptor(
name='B173',
full_name='google.protobuf.internal.LotsNestedMessage.B173',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1995,
serialized_end=2001,
)
_LOTSNESTEDMESSAGE_B174 = _descriptor.Descriptor(
name='B174',
full_name='google.protobuf.internal.LotsNestedMessage.B174',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2003,
serialized_end=2009,
)
_LOTSNESTEDMESSAGE_B175 = _descriptor.Descriptor(
name='B175',
full_name='google.protobuf.internal.LotsNestedMessage.B175',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2011,
serialized_end=2017,
)
_LOTSNESTEDMESSAGE_B176 = _descriptor.Descriptor(
name='B176',
full_name='google.protobuf.internal.LotsNestedMessage.B176',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2019,
serialized_end=2025,
)
_LOTSNESTEDMESSAGE_B177 = _descriptor.Descriptor(
name='B177',
full_name='google.protobuf.internal.LotsNestedMessage.B177',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2027,
serialized_end=2033,
)
_LOTSNESTEDMESSAGE_B178 = _descriptor.Descriptor(
name='B178',
full_name='google.protobuf.internal.LotsNestedMessage.B178',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2035,
serialized_end=2041,
)
_LOTSNESTEDMESSAGE_B179 = _descriptor.Descriptor(
name='B179',
full_name='google.protobuf.internal.LotsNestedMessage.B179',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2043,
serialized_end=2049,
)
_LOTSNESTEDMESSAGE_B180 = _descriptor.Descriptor(
name='B180',
full_name='google.protobuf.internal.LotsNestedMessage.B180',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2051,
serialized_end=2057,
)
_LOTSNESTEDMESSAGE_B181 = _descriptor.Descriptor(
name='B181',
full_name='google.protobuf.internal.LotsNestedMessage.B181',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2059,
serialized_end=2065,
)
_LOTSNESTEDMESSAGE_B182 = _descriptor.Descriptor(
name='B182',
full_name='google.protobuf.internal.LotsNestedMessage.B182',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2067,
serialized_end=2073,
)
_LOTSNESTEDMESSAGE_B183 = _descriptor.Descriptor(
name='B183',
full_name='google.protobuf.internal.LotsNestedMessage.B183',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2075,
serialized_end=2081,
)
_LOTSNESTEDMESSAGE_B184 = _descriptor.Descriptor(
name='B184',
full_name='google.protobuf.internal.LotsNestedMessage.B184',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2083,
serialized_end=2089,
)
_LOTSNESTEDMESSAGE_B185 = _descriptor.Descriptor(
name='B185',
full_name='google.protobuf.internal.LotsNestedMessage.B185',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2091,
serialized_end=2097,
)
_LOTSNESTEDMESSAGE_B186 = _descriptor.Descriptor(
name='B186',
full_name='google.protobuf.internal.LotsNestedMessage.B186',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2099,
serialized_end=2105,
)
_LOTSNESTEDMESSAGE_B187 = _descriptor.Descriptor(
name='B187',
full_name='google.protobuf.internal.LotsNestedMessage.B187',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2107,
serialized_end=2113,
)
_LOTSNESTEDMESSAGE_B188 = _descriptor.Descriptor(
name='B188',
full_name='google.protobuf.internal.LotsNestedMessage.B188',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2115,
serialized_end=2121,
)
_LOTSNESTEDMESSAGE_B189 = _descriptor.Descriptor(
name='B189',
full_name='google.protobuf.internal.LotsNestedMessage.B189',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2123,
serialized_end=2129,
)
_LOTSNESTEDMESSAGE_B190 = _descriptor.Descriptor(
name='B190',
full_name='google.protobuf.internal.LotsNestedMessage.B190',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2131,
serialized_end=2137,
)
_LOTSNESTEDMESSAGE_B191 = _descriptor.Descriptor(
name='B191',
full_name='google.protobuf.internal.LotsNestedMessage.B191',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2139,
serialized_end=2145,
)
_LOTSNESTEDMESSAGE_B192 = _descriptor.Descriptor(
name='B192',
full_name='google.protobuf.internal.LotsNestedMessage.B192',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2147,
serialized_end=2153,
)
_LOTSNESTEDMESSAGE_B193 = _descriptor.Descriptor(
name='B193',
full_name='google.protobuf.internal.LotsNestedMessage.B193',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2155,
serialized_end=2161,
)
_LOTSNESTEDMESSAGE_B194 = _descriptor.Descriptor(
name='B194',
full_name='google.protobuf.internal.LotsNestedMessage.B194',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2163,
serialized_end=2169,
)
_LOTSNESTEDMESSAGE_B195 = _descriptor.Descriptor(
name='B195',
full_name='google.protobuf.internal.LotsNestedMessage.B195',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2171,
serialized_end=2177,
)
_LOTSNESTEDMESSAGE_B196 = _descriptor.Descriptor(
name='B196',
full_name='google.protobuf.internal.LotsNestedMessage.B196',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2179,
serialized_end=2185,
)
_LOTSNESTEDMESSAGE_B197 = _descriptor.Descriptor(
name='B197',
full_name='google.protobuf.internal.LotsNestedMessage.B197',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2187,
serialized_end=2193,
)
_LOTSNESTEDMESSAGE_B198 = _descriptor.Descriptor(
name='B198',
full_name='google.protobuf.internal.LotsNestedMessage.B198',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2195,
serialized_end=2201,
)
_LOTSNESTEDMESSAGE_B199 = _descriptor.Descriptor(
name='B199',
full_name='google.protobuf.internal.LotsNestedMessage.B199',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2203,
serialized_end=2209,
)
_LOTSNESTEDMESSAGE_B200 = _descriptor.Descriptor(
name='B200',
full_name='google.protobuf.internal.LotsNestedMessage.B200',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2211,
serialized_end=2217,
)
_LOTSNESTEDMESSAGE_B201 = _descriptor.Descriptor(
name='B201',
full_name='google.protobuf.internal.LotsNestedMessage.B201',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2219,
serialized_end=2225,
)
_LOTSNESTEDMESSAGE_B202 = _descriptor.Descriptor(
name='B202',
full_name='google.protobuf.internal.LotsNestedMessage.B202',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2227,
serialized_end=2233,
)
_LOTSNESTEDMESSAGE_B203 = _descriptor.Descriptor(
name='B203',
full_name='google.protobuf.internal.LotsNestedMessage.B203',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2235,
serialized_end=2241,
)
_LOTSNESTEDMESSAGE_B204 = _descriptor.Descriptor(
name='B204',
full_name='google.protobuf.internal.LotsNestedMessage.B204',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2243,
serialized_end=2249,
)
_LOTSNESTEDMESSAGE_B205 = _descriptor.Descriptor(
name='B205',
full_name='google.protobuf.internal.LotsNestedMessage.B205',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2251,
serialized_end=2257,
)
_LOTSNESTEDMESSAGE_B206 = _descriptor.Descriptor(
name='B206',
full_name='google.protobuf.internal.LotsNestedMessage.B206',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2259,
serialized_end=2265,
)
_LOTSNESTEDMESSAGE_B207 = _descriptor.Descriptor(
name='B207',
full_name='google.protobuf.internal.LotsNestedMessage.B207',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2267,
serialized_end=2273,
)
_LOTSNESTEDMESSAGE_B208 = _descriptor.Descriptor(
name='B208',
full_name='google.protobuf.internal.LotsNestedMessage.B208',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2275,
serialized_end=2281,
)
_LOTSNESTEDMESSAGE_B209 = _descriptor.Descriptor(
name='B209',
full_name='google.protobuf.internal.LotsNestedMessage.B209',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2283,
serialized_end=2289,
)
_LOTSNESTEDMESSAGE_B210 = _descriptor.Descriptor(
name='B210',
full_name='google.protobuf.internal.LotsNestedMessage.B210',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2291,
serialized_end=2297,
)
_LOTSNESTEDMESSAGE_B211 = _descriptor.Descriptor(
name='B211',
full_name='google.protobuf.internal.LotsNestedMessage.B211',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2299,
serialized_end=2305,
)
_LOTSNESTEDMESSAGE_B212 = _descriptor.Descriptor(
name='B212',
full_name='google.protobuf.internal.LotsNestedMessage.B212',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2307,
serialized_end=2313,
)
_LOTSNESTEDMESSAGE_B213 = _descriptor.Descriptor(
name='B213',
full_name='google.protobuf.internal.LotsNestedMessage.B213',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2315,
serialized_end=2321,
)
_LOTSNESTEDMESSAGE_B214 = _descriptor.Descriptor(
name='B214',
full_name='google.protobuf.internal.LotsNestedMessage.B214',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2323,
serialized_end=2329,
)
_LOTSNESTEDMESSAGE_B215 = _descriptor.Descriptor(
name='B215',
full_name='google.protobuf.internal.LotsNestedMessage.B215',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2331,
serialized_end=2337,
)
_LOTSNESTEDMESSAGE_B216 = _descriptor.Descriptor(
name='B216',
full_name='google.protobuf.internal.LotsNestedMessage.B216',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2339,
serialized_end=2345,
)
_LOTSNESTEDMESSAGE_B217 = _descriptor.Descriptor(
name='B217',
full_name='google.protobuf.internal.LotsNestedMessage.B217',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2347,
serialized_end=2353,
)
_LOTSNESTEDMESSAGE_B218 = _descriptor.Descriptor(
name='B218',
full_name='google.protobuf.internal.LotsNestedMessage.B218',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2355,
serialized_end=2361,
)
_LOTSNESTEDMESSAGE_B219 = _descriptor.Descriptor(
name='B219',
full_name='google.protobuf.internal.LotsNestedMessage.B219',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2363,
serialized_end=2369,
)
_LOTSNESTEDMESSAGE_B220 = _descriptor.Descriptor(
name='B220',
full_name='google.protobuf.internal.LotsNestedMessage.B220',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2371,
serialized_end=2377,
)
_LOTSNESTEDMESSAGE_B221 = _descriptor.Descriptor(
name='B221',
full_name='google.protobuf.internal.LotsNestedMessage.B221',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2379,
serialized_end=2385,
)
_LOTSNESTEDMESSAGE_B222 = _descriptor.Descriptor(
name='B222',
full_name='google.protobuf.internal.LotsNestedMessage.B222',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2387,
serialized_end=2393,
)
_LOTSNESTEDMESSAGE_B223 = _descriptor.Descriptor(
name='B223',
full_name='google.protobuf.internal.LotsNestedMessage.B223',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2395,
serialized_end=2401,
)
_LOTSNESTEDMESSAGE_B224 = _descriptor.Descriptor(
name='B224',
full_name='google.protobuf.internal.LotsNestedMessage.B224',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2403,
serialized_end=2409,
)
_LOTSNESTEDMESSAGE_B225 = _descriptor.Descriptor(
name='B225',
full_name='google.protobuf.internal.LotsNestedMessage.B225',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2411,
serialized_end=2417,
)
_LOTSNESTEDMESSAGE_B226 = _descriptor.Descriptor(
name='B226',
full_name='google.protobuf.internal.LotsNestedMessage.B226',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2419,
serialized_end=2425,
)
_LOTSNESTEDMESSAGE_B227 = _descriptor.Descriptor(
name='B227',
full_name='google.protobuf.internal.LotsNestedMessage.B227',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2427,
serialized_end=2433,
)
_LOTSNESTEDMESSAGE_B228 = _descriptor.Descriptor(
name='B228',
full_name='google.protobuf.internal.LotsNestedMessage.B228',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2435,
serialized_end=2441,
)
_LOTSNESTEDMESSAGE_B229 = _descriptor.Descriptor(
name='B229',
full_name='google.protobuf.internal.LotsNestedMessage.B229',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2443,
serialized_end=2449,
)
_LOTSNESTEDMESSAGE_B230 = _descriptor.Descriptor(
name='B230',
full_name='google.protobuf.internal.LotsNestedMessage.B230',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2451,
serialized_end=2457,
)
_LOTSNESTEDMESSAGE_B231 = _descriptor.Descriptor(
name='B231',
full_name='google.protobuf.internal.LotsNestedMessage.B231',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2459,
serialized_end=2465,
)
_LOTSNESTEDMESSAGE_B232 = _descriptor.Descriptor(
name='B232',
full_name='google.protobuf.internal.LotsNestedMessage.B232',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2467,
serialized_end=2473,
)
_LOTSNESTEDMESSAGE_B233 = _descriptor.Descriptor(
name='B233',
full_name='google.protobuf.internal.LotsNestedMessage.B233',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2475,
serialized_end=2481,
)
_LOTSNESTEDMESSAGE_B234 = _descriptor.Descriptor(
name='B234',
full_name='google.protobuf.internal.LotsNestedMessage.B234',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2483,
serialized_end=2489,
)
_LOTSNESTEDMESSAGE_B235 = _descriptor.Descriptor(
name='B235',
full_name='google.protobuf.internal.LotsNestedMessage.B235',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2491,
serialized_end=2497,
)
_LOTSNESTEDMESSAGE_B236 = _descriptor.Descriptor(
name='B236',
full_name='google.protobuf.internal.LotsNestedMessage.B236',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2499,
serialized_end=2505,
)
_LOTSNESTEDMESSAGE_B237 = _descriptor.Descriptor(
name='B237',
full_name='google.protobuf.internal.LotsNestedMessage.B237',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2507,
serialized_end=2513,
)
_LOTSNESTEDMESSAGE_B238 = _descriptor.Descriptor(
name='B238',
full_name='google.protobuf.internal.LotsNestedMessage.B238',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2515,
serialized_end=2521,
)
_LOTSNESTEDMESSAGE_B239 = _descriptor.Descriptor(
name='B239',
full_name='google.protobuf.internal.LotsNestedMessage.B239',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2523,
serialized_end=2529,
)
_LOTSNESTEDMESSAGE_B240 = _descriptor.Descriptor(
name='B240',
full_name='google.protobuf.internal.LotsNestedMessage.B240',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2531,
serialized_end=2537,
)
_LOTSNESTEDMESSAGE_B241 = _descriptor.Descriptor(
name='B241',
full_name='google.protobuf.internal.LotsNestedMessage.B241',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2539,
serialized_end=2545,
)
_LOTSNESTEDMESSAGE_B242 = _descriptor.Descriptor(
name='B242',
full_name='google.protobuf.internal.LotsNestedMessage.B242',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2547,
serialized_end=2553,
)
_LOTSNESTEDMESSAGE_B243 = _descriptor.Descriptor(
name='B243',
full_name='google.protobuf.internal.LotsNestedMessage.B243',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2555,
serialized_end=2561,
)
_LOTSNESTEDMESSAGE_B244 = _descriptor.Descriptor(
name='B244',
full_name='google.protobuf.internal.LotsNestedMessage.B244',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2563,
serialized_end=2569,
)
_LOTSNESTEDMESSAGE_B245 = _descriptor.Descriptor(
name='B245',
full_name='google.protobuf.internal.LotsNestedMessage.B245',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2571,
serialized_end=2577,
)
_LOTSNESTEDMESSAGE_B246 = _descriptor.Descriptor(
name='B246',
full_name='google.protobuf.internal.LotsNestedMessage.B246',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2579,
serialized_end=2585,
)
_LOTSNESTEDMESSAGE_B247 = _descriptor.Descriptor(
name='B247',
full_name='google.protobuf.internal.LotsNestedMessage.B247',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2587,
serialized_end=2593,
)
_LOTSNESTEDMESSAGE_B248 = _descriptor.Descriptor(
name='B248',
full_name='google.protobuf.internal.LotsNestedMessage.B248',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2595,
serialized_end=2601,
)
_LOTSNESTEDMESSAGE_B249 = _descriptor.Descriptor(
name='B249',
full_name='google.protobuf.internal.LotsNestedMessage.B249',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2603,
serialized_end=2609,
)
_LOTSNESTEDMESSAGE_B250 = _descriptor.Descriptor(
name='B250',
full_name='google.protobuf.internal.LotsNestedMessage.B250',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2611,
serialized_end=2617,
)
_LOTSNESTEDMESSAGE_B251 = _descriptor.Descriptor(
name='B251',
full_name='google.protobuf.internal.LotsNestedMessage.B251',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2619,
serialized_end=2625,
)
_LOTSNESTEDMESSAGE_B252 = _descriptor.Descriptor(
name='B252',
full_name='google.protobuf.internal.LotsNestedMessage.B252',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2627,
serialized_end=2633,
)
_LOTSNESTEDMESSAGE_B253 = _descriptor.Descriptor(
name='B253',
full_name='google.protobuf.internal.LotsNestedMessage.B253',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2635,
serialized_end=2641,
)
_LOTSNESTEDMESSAGE_B254 = _descriptor.Descriptor(
name='B254',
full_name='google.protobuf.internal.LotsNestedMessage.B254',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2643,
serialized_end=2649,
)
_LOTSNESTEDMESSAGE_B255 = _descriptor.Descriptor(
name='B255',
full_name='google.protobuf.internal.LotsNestedMessage.B255',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2651,
serialized_end=2657,
)
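# Parent descriptor for LotsNestedMessage: the nested_types list below registers
# all 256 generated nested message descriptors (_LOTSNESTEDMESSAGE_B0 .. _B255).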
_LOTSNESTEDMESSAGE = _descriptor.Descriptor(
name='LotsNestedMessage',
full_name='google.protobuf.internal.LotsNestedMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[_LOTSNESTEDMESSAGE_B0, _LOTSNESTEDMESSAGE_B1, _LOTSNESTEDMESSAGE_B2, _LOTSNESTEDMESSAGE_B3, _LOTSNESTEDMESSAGE_B4, _LOTSNESTEDMESSAGE_B5, _LOTSNESTEDMESSAGE_B6, _LOTSNESTEDMESSAGE_B7, _LOTSNESTEDMESSAGE_B8, _LOTSNESTEDMESSAGE_B9, _LOTSNESTEDMESSAGE_B10, _LOTSNESTEDMESSAGE_B11, _LOTSNESTEDMESSAGE_B12, _LOTSNESTEDMESSAGE_B13, _LOTSNESTEDMESSAGE_B14, _LOTSNESTEDMESSAGE_B15, _LOTSNESTEDMESSAGE_B16, _LOTSNESTEDMESSAGE_B17, _LOTSNESTEDMESSAGE_B18, _LOTSNESTEDMESSAGE_B19, _LOTSNESTEDMESSAGE_B20, _LOTSNESTEDMESSAGE_B21, _LOTSNESTEDMESSAGE_B22, _LOTSNESTEDMESSAGE_B23, _LOTSNESTEDMESSAGE_B24, _LOTSNESTEDMESSAGE_B25, _LOTSNESTEDMESSAGE_B26, _LOTSNESTEDMESSAGE_B27, _LOTSNESTEDMESSAGE_B28, _LOTSNESTEDMESSAGE_B29, _LOTSNESTEDMESSAGE_B30, _LOTSNESTEDMESSAGE_B31, _LOTSNESTEDMESSAGE_B32, _LOTSNESTEDMESSAGE_B33, _LOTSNESTEDMESSAGE_B34, _LOTSNESTEDMESSAGE_B35, _LOTSNESTEDMESSAGE_B36, _LOTSNESTEDMESSAGE_B37, _LOTSNESTEDMESSAGE_B38, _LOTSNESTEDMESSAGE_B39, _LOTSNESTEDMESSAGE_B40, _LOTSNESTEDMESSAGE_B41, _LOTSNESTEDMESSAGE_B42, _LOTSNESTEDMESSAGE_B43, _LOTSNESTEDMESSAGE_B44, _LOTSNESTEDMESSAGE_B45, _LOTSNESTEDMESSAGE_B46, _LOTSNESTEDMESSAGE_B47, _LOTSNESTEDMESSAGE_B48, _LOTSNESTEDMESSAGE_B49, _LOTSNESTEDMESSAGE_B50, _LOTSNESTEDMESSAGE_B51, _LOTSNESTEDMESSAGE_B52, _LOTSNESTEDMESSAGE_B53, _LOTSNESTEDMESSAGE_B54, _LOTSNESTEDMESSAGE_B55, _LOTSNESTEDMESSAGE_B56, _LOTSNESTEDMESSAGE_B57, _LOTSNESTEDMESSAGE_B58, _LOTSNESTEDMESSAGE_B59, _LOTSNESTEDMESSAGE_B60, _LOTSNESTEDMESSAGE_B61, _LOTSNESTEDMESSAGE_B62, _LOTSNESTEDMESSAGE_B63, _LOTSNESTEDMESSAGE_B64, _LOTSNESTEDMESSAGE_B65, _LOTSNESTEDMESSAGE_B66, _LOTSNESTEDMESSAGE_B67, _LOTSNESTEDMESSAGE_B68, _LOTSNESTEDMESSAGE_B69, _LOTSNESTEDMESSAGE_B70, _LOTSNESTEDMESSAGE_B71, _LOTSNESTEDMESSAGE_B72, _LOTSNESTEDMESSAGE_B73, _LOTSNESTEDMESSAGE_B74, _LOTSNESTEDMESSAGE_B75, _LOTSNESTEDMESSAGE_B76, _LOTSNESTEDMESSAGE_B77, _LOTSNESTEDMESSAGE_B78, _LOTSNESTEDMESSAGE_B79, _LOTSNESTEDMESSAGE_B80, _LOTSNESTEDMESSAGE_B81, _LOTSNESTEDMESSAGE_B82, _LOTSNESTEDMESSAGE_B83, _LOTSNESTEDMESSAGE_B84, _LOTSNESTEDMESSAGE_B85, _LOTSNESTEDMESSAGE_B86, _LOTSNESTEDMESSAGE_B87, _LOTSNESTEDMESSAGE_B88, _LOTSNESTEDMESSAGE_B89, _LOTSNESTEDMESSAGE_B90, _LOTSNESTEDMESSAGE_B91, _LOTSNESTEDMESSAGE_B92, _LOTSNESTEDMESSAGE_B93, _LOTSNESTEDMESSAGE_B94, _LOTSNESTEDMESSAGE_B95, _LOTSNESTEDMESSAGE_B96, _LOTSNESTEDMESSAGE_B97, _LOTSNESTEDMESSAGE_B98, _LOTSNESTEDMESSAGE_B99, _LOTSNESTEDMESSAGE_B100, _LOTSNESTEDMESSAGE_B101, _LOTSNESTEDMESSAGE_B102, _LOTSNESTEDMESSAGE_B103, _LOTSNESTEDMESSAGE_B104, _LOTSNESTEDMESSAGE_B105, _LOTSNESTEDMESSAGE_B106, _LOTSNESTEDMESSAGE_B107, _LOTSNESTEDMESSAGE_B108, _LOTSNESTEDMESSAGE_B109, _LOTSNESTEDMESSAGE_B110, _LOTSNESTEDMESSAGE_B111, _LOTSNESTEDMESSAGE_B112, _LOTSNESTEDMESSAGE_B113, _LOTSNESTEDMESSAGE_B114, _LOTSNESTEDMESSAGE_B115, _LOTSNESTEDMESSAGE_B116, _LOTSNESTEDMESSAGE_B117, _LOTSNESTEDMESSAGE_B118, _LOTSNESTEDMESSAGE_B119, _LOTSNESTEDMESSAGE_B120, _LOTSNESTEDMESSAGE_B121, _LOTSNESTEDMESSAGE_B122, _LOTSNESTEDMESSAGE_B123, _LOTSNESTEDMESSAGE_B124, _LOTSNESTEDMESSAGE_B125, _LOTSNESTEDMESSAGE_B126, _LOTSNESTEDMESSAGE_B127, _LOTSNESTEDMESSAGE_B128, _LOTSNESTEDMESSAGE_B129, _LOTSNESTEDMESSAGE_B130, _LOTSNESTEDMESSAGE_B131, _LOTSNESTEDMESSAGE_B132, _LOTSNESTEDMESSAGE_B133, _LOTSNESTEDMESSAGE_B134, _LOTSNESTEDMESSAGE_B135, _LOTSNESTEDMESSAGE_B136, _LOTSNESTEDMESSAGE_B137, _LOTSNESTEDMESSAGE_B138, _LOTSNESTEDMESSAGE_B139, _LOTSNESTEDMESSAGE_B140, _LOTSNESTEDMESSAGE_B141, _LOTSNESTEDMESSAGE_B142, _LOTSNESTEDMESSAGE_B143, _LOTSNESTEDMESSAGE_B144, _LOTSNESTEDMESSAGE_B145, 
_LOTSNESTEDMESSAGE_B146, _LOTSNESTEDMESSAGE_B147, _LOTSNESTEDMESSAGE_B148, _LOTSNESTEDMESSAGE_B149, _LOTSNESTEDMESSAGE_B150, _LOTSNESTEDMESSAGE_B151, _LOTSNESTEDMESSAGE_B152, _LOTSNESTEDMESSAGE_B153, _LOTSNESTEDMESSAGE_B154, _LOTSNESTEDMESSAGE_B155, _LOTSNESTEDMESSAGE_B156, _LOTSNESTEDMESSAGE_B157, _LOTSNESTEDMESSAGE_B158, _LOTSNESTEDMESSAGE_B159, _LOTSNESTEDMESSAGE_B160, _LOTSNESTEDMESSAGE_B161, _LOTSNESTEDMESSAGE_B162, _LOTSNESTEDMESSAGE_B163, _LOTSNESTEDMESSAGE_B164, _LOTSNESTEDMESSAGE_B165, _LOTSNESTEDMESSAGE_B166, _LOTSNESTEDMESSAGE_B167, _LOTSNESTEDMESSAGE_B168, _LOTSNESTEDMESSAGE_B169, _LOTSNESTEDMESSAGE_B170, _LOTSNESTEDMESSAGE_B171, _LOTSNESTEDMESSAGE_B172, _LOTSNESTEDMESSAGE_B173, _LOTSNESTEDMESSAGE_B174, _LOTSNESTEDMESSAGE_B175, _LOTSNESTEDMESSAGE_B176, _LOTSNESTEDMESSAGE_B177, _LOTSNESTEDMESSAGE_B178, _LOTSNESTEDMESSAGE_B179, _LOTSNESTEDMESSAGE_B180, _LOTSNESTEDMESSAGE_B181, _LOTSNESTEDMESSAGE_B182, _LOTSNESTEDMESSAGE_B183, _LOTSNESTEDMESSAGE_B184, _LOTSNESTEDMESSAGE_B185, _LOTSNESTEDMESSAGE_B186, _LOTSNESTEDMESSAGE_B187, _LOTSNESTEDMESSAGE_B188, _LOTSNESTEDMESSAGE_B189, _LOTSNESTEDMESSAGE_B190, _LOTSNESTEDMESSAGE_B191, _LOTSNESTEDMESSAGE_B192, _LOTSNESTEDMESSAGE_B193, _LOTSNESTEDMESSAGE_B194, _LOTSNESTEDMESSAGE_B195, _LOTSNESTEDMESSAGE_B196, _LOTSNESTEDMESSAGE_B197, _LOTSNESTEDMESSAGE_B198, _LOTSNESTEDMESSAGE_B199, _LOTSNESTEDMESSAGE_B200, _LOTSNESTEDMESSAGE_B201, _LOTSNESTEDMESSAGE_B202, _LOTSNESTEDMESSAGE_B203, _LOTSNESTEDMESSAGE_B204, _LOTSNESTEDMESSAGE_B205, _LOTSNESTEDMESSAGE_B206, _LOTSNESTEDMESSAGE_B207, _LOTSNESTEDMESSAGE_B208, _LOTSNESTEDMESSAGE_B209, _LOTSNESTEDMESSAGE_B210, _LOTSNESTEDMESSAGE_B211, _LOTSNESTEDMESSAGE_B212, _LOTSNESTEDMESSAGE_B213, _LOTSNESTEDMESSAGE_B214, _LOTSNESTEDMESSAGE_B215, _LOTSNESTEDMESSAGE_B216, _LOTSNESTEDMESSAGE_B217, _LOTSNESTEDMESSAGE_B218, _LOTSNESTEDMESSAGE_B219, _LOTSNESTEDMESSAGE_B220, _LOTSNESTEDMESSAGE_B221, _LOTSNESTEDMESSAGE_B222, _LOTSNESTEDMESSAGE_B223, _LOTSNESTEDMESSAGE_B224, _LOTSNESTEDMESSAGE_B225, _LOTSNESTEDMESSAGE_B226, _LOTSNESTEDMESSAGE_B227, _LOTSNESTEDMESSAGE_B228, _LOTSNESTEDMESSAGE_B229, _LOTSNESTEDMESSAGE_B230, _LOTSNESTEDMESSAGE_B231, _LOTSNESTEDMESSAGE_B232, _LOTSNESTEDMESSAGE_B233, _LOTSNESTEDMESSAGE_B234, _LOTSNESTEDMESSAGE_B235, _LOTSNESTEDMESSAGE_B236, _LOTSNESTEDMESSAGE_B237, _LOTSNESTEDMESSAGE_B238, _LOTSNESTEDMESSAGE_B239, _LOTSNESTEDMESSAGE_B240, _LOTSNESTEDMESSAGE_B241, _LOTSNESTEDMESSAGE_B242, _LOTSNESTEDMESSAGE_B243, _LOTSNESTEDMESSAGE_B244, _LOTSNESTEDMESSAGE_B245, _LOTSNESTEDMESSAGE_B246, _LOTSNESTEDMESSAGE_B247, _LOTSNESTEDMESSAGE_B248, _LOTSNESTEDMESSAGE_B249, _LOTSNESTEDMESSAGE_B250, _LOTSNESTEDMESSAGE_B251, _LOTSNESTEDMESSAGE_B252, _LOTSNESTEDMESSAGE_B253, _LOTSNESTEDMESSAGE_B254, _LOTSNESTEDMESSAGE_B255, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=700,
serialized_end=2657,
)
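# The assignments below wire up containing_type, enum_type and message_type
# cross-references after all descriptor objects have been constructed.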
_CLASS_TRY.containing_type = _CLASS
_CLASS.fields_by_name['as'].enum_type = _IS
_CLASS.fields_by_name['enum_field'].enum_type = _IS
_CLASS.fields_by_name['nested_enum_field'].enum_type = _CLASS_FOR
_CLASS.fields_by_name['nested_message'].message_type = _CLASS_TRY
_CLASS_FOR.containing_type = _CLASS
_TESTFULLKEYWORD.fields_by_name['field1'].message_type = _OUTOFORDERFIELDS
_TESTFULLKEYWORD.fields_by_name['field2'].message_type = _CLASS
_LOTSNESTEDMESSAGE_B0.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B1.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B2.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B3.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B4.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B5.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B6.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B7.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B8.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B9.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B10.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B11.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B12.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B13.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B14.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B15.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B16.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B17.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B18.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B19.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B20.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B21.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B22.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B23.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B24.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B25.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B26.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B27.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B28.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B29.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B30.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B31.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B32.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B33.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B34.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B35.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B36.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B37.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B38.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B39.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B40.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B41.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B42.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B43.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B44.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B45.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B46.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B47.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B48.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B49.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B50.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B51.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B52.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B53.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B54.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B55.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B56.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B57.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B58.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B59.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B60.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B61.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B62.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B63.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B64.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B65.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B66.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B67.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B68.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B69.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B70.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B71.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B72.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B73.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B74.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B75.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B76.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B77.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B78.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B79.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B80.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B81.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B82.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B83.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B84.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B85.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B86.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B87.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B88.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B89.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B90.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B91.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B92.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B93.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B94.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B95.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B96.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B97.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B98.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B99.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B100.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B101.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B102.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B103.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B104.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B105.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B106.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B107.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B108.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B109.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B110.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B111.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B112.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B113.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B114.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B115.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B116.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B117.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B118.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B119.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B120.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B121.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B122.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B123.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B124.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B125.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B126.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B127.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B128.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B129.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B130.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B131.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B132.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B133.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B134.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B135.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B136.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B137.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B138.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B139.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B140.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B141.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B142.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B143.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B144.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B145.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B146.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B147.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B148.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B149.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B150.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B151.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B152.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B153.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B154.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B155.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B156.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B157.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B158.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B159.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B160.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B161.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B162.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B163.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B164.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B165.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B166.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B167.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B168.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B169.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B170.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B171.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B172.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B173.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B174.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B175.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B176.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B177.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B178.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B179.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B180.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B181.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B182.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B183.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B184.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B185.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B186.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B187.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B188.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B189.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B190.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B191.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B192.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B193.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B194.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B195.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B196.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B197.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B198.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B199.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B200.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B201.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B202.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B203.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B204.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B205.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B206.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B207.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B208.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B209.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B210.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B211.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B212.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B213.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B214.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B215.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B216.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B217.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B218.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B219.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B220.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B221.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B222.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B223.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B224.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B225.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B226.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B227.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B228.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B229.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B230.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B231.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B232.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B233.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B234.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B235.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B236.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B237.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B238.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B239.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B240.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B241.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B242.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B243.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B244.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B245.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B246.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B247.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B248.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B249.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B250.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B251.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B252.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B253.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B254.containing_type = _LOTSNESTEDMESSAGE
_LOTSNESTEDMESSAGE_B255.containing_type = _LOTSNESTEDMESSAGE
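# Register the top-level messages, enums, and extensions on the file descriptor
# and add the descriptor to the default symbol database.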
DESCRIPTOR.message_types_by_name['OutOfOrderFields'] = _OUTOFORDERFIELDS
DESCRIPTOR.message_types_by_name['class'] = _CLASS
DESCRIPTOR.message_types_by_name['ExtendClass'] = _EXTENDCLASS
DESCRIPTOR.message_types_by_name['TestFullKeyword'] = _TESTFULLKEYWORD
DESCRIPTOR.message_types_by_name['LotsNestedMessage'] = _LOTSNESTEDMESSAGE
DESCRIPTOR.enum_types_by_name['is'] = _IS
DESCRIPTOR.extensions_by_name['optional_uint64'] = optional_uint64
DESCRIPTOR.extensions_by_name['optional_int64'] = optional_int64
DESCRIPTOR.extensions_by_name['continue'] = globals()['continue']
DESCRIPTOR.extensions_by_name['with'] = globals()['with']
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
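# Build the concrete message classes from their descriptors via the reflection
# machinery; keyword-named messages (e.g. 'class') are assigned through globals().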
OutOfOrderFields = _reflection.GeneratedProtocolMessageType('OutOfOrderFields', (_message.Message,), {
'DESCRIPTOR' : _OUTOFORDERFIELDS,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.OutOfOrderFields)
})
_sym_db.RegisterMessage(OutOfOrderFields)
globals()['class'] = _reflection.GeneratedProtocolMessageType('class', (_message.Message,), {
'try' : _reflection.GeneratedProtocolMessageType('try', (_message.Message,), {
'DESCRIPTOR' : _CLASS_TRY,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.class.try)
})
,
'DESCRIPTOR' : _CLASS,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.class)
})
_sym_db.RegisterMessage(globals()['class'])
_sym_db.RegisterMessage(getattr(globals()['class'], 'try'))
ExtendClass = _reflection.GeneratedProtocolMessageType('ExtendClass', (_message.Message,), {
'DESCRIPTOR' : _EXTENDCLASS,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.ExtendClass)
})
_sym_db.RegisterMessage(ExtendClass)
TestFullKeyword = _reflection.GeneratedProtocolMessageType('TestFullKeyword', (_message.Message,), {
'DESCRIPTOR' : _TESTFULLKEYWORD,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.TestFullKeyword)
})
_sym_db.RegisterMessage(TestFullKeyword)
LotsNestedMessage = _reflection.GeneratedProtocolMessageType('LotsNestedMessage', (_message.Message,), {
'B0' : _reflection.GeneratedProtocolMessageType('B0', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B0,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B0)
})
,
'B1' : _reflection.GeneratedProtocolMessageType('B1', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B1,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B1)
})
,
'B2' : _reflection.GeneratedProtocolMessageType('B2', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B2,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B2)
})
,
'B3' : _reflection.GeneratedProtocolMessageType('B3', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B3,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B3)
})
,
'B4' : _reflection.GeneratedProtocolMessageType('B4', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B4,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B4)
})
,
'B5' : _reflection.GeneratedProtocolMessageType('B5', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B5,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B5)
})
,
'B6' : _reflection.GeneratedProtocolMessageType('B6', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B6,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B6)
})
,
'B7' : _reflection.GeneratedProtocolMessageType('B7', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B7,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B7)
})
,
'B8' : _reflection.GeneratedProtocolMessageType('B8', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B8,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B8)
})
,
'B9' : _reflection.GeneratedProtocolMessageType('B9', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B9,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B9)
})
,
'B10' : _reflection.GeneratedProtocolMessageType('B10', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B10,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B10)
})
,
'B11' : _reflection.GeneratedProtocolMessageType('B11', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B11,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B11)
})
,
'B12' : _reflection.GeneratedProtocolMessageType('B12', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B12,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B12)
})
,
'B13' : _reflection.GeneratedProtocolMessageType('B13', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B13,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B13)
})
,
'B14' : _reflection.GeneratedProtocolMessageType('B14', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B14,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B14)
})
,
'B15' : _reflection.GeneratedProtocolMessageType('B15', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B15,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B15)
})
,
'B16' : _reflection.GeneratedProtocolMessageType('B16', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B16,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B16)
})
,
'B17' : _reflection.GeneratedProtocolMessageType('B17', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B17,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B17)
})
,
'B18' : _reflection.GeneratedProtocolMessageType('B18', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B18,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B18)
})
,
'B19' : _reflection.GeneratedProtocolMessageType('B19', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B19,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B19)
})
,
'B20' : _reflection.GeneratedProtocolMessageType('B20', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B20,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B20)
})
,
'B21' : _reflection.GeneratedProtocolMessageType('B21', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B21,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B21)
})
,
'B22' : _reflection.GeneratedProtocolMessageType('B22', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B22,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B22)
})
,
'B23' : _reflection.GeneratedProtocolMessageType('B23', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B23,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B23)
})
,
'B24' : _reflection.GeneratedProtocolMessageType('B24', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B24,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B24)
})
,
'B25' : _reflection.GeneratedProtocolMessageType('B25', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B25,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B25)
})
,
'B26' : _reflection.GeneratedProtocolMessageType('B26', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B26,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B26)
})
,
'B27' : _reflection.GeneratedProtocolMessageType('B27', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B27,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B27)
})
,
'B28' : _reflection.GeneratedProtocolMessageType('B28', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B28,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B28)
})
,
'B29' : _reflection.GeneratedProtocolMessageType('B29', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B29,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B29)
})
,
'B30' : _reflection.GeneratedProtocolMessageType('B30', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B30,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B30)
})
,
'B31' : _reflection.GeneratedProtocolMessageType('B31', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B31,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B31)
})
,
'B32' : _reflection.GeneratedProtocolMessageType('B32', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B32,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B32)
})
,
'B33' : _reflection.GeneratedProtocolMessageType('B33', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B33,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B33)
})
,
'B34' : _reflection.GeneratedProtocolMessageType('B34', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B34,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B34)
})
,
'B35' : _reflection.GeneratedProtocolMessageType('B35', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B35,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B35)
})
,
'B36' : _reflection.GeneratedProtocolMessageType('B36', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B36,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B36)
})
,
'B37' : _reflection.GeneratedProtocolMessageType('B37', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B37,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B37)
})
,
'B38' : _reflection.GeneratedProtocolMessageType('B38', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B38,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B38)
})
,
'B39' : _reflection.GeneratedProtocolMessageType('B39', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B39,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B39)
})
,
'B40' : _reflection.GeneratedProtocolMessageType('B40', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B40,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B40)
})
,
'B41' : _reflection.GeneratedProtocolMessageType('B41', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B41,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B41)
})
,
'B42' : _reflection.GeneratedProtocolMessageType('B42', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B42,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B42)
})
,
'B43' : _reflection.GeneratedProtocolMessageType('B43', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B43,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B43)
})
,
'B44' : _reflection.GeneratedProtocolMessageType('B44', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B44,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B44)
})
,
'B45' : _reflection.GeneratedProtocolMessageType('B45', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B45,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B45)
})
,
'B46' : _reflection.GeneratedProtocolMessageType('B46', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B46,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B46)
})
,
'B47' : _reflection.GeneratedProtocolMessageType('B47', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B47,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B47)
})
,
'B48' : _reflection.GeneratedProtocolMessageType('B48', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B48,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B48)
})
,
'B49' : _reflection.GeneratedProtocolMessageType('B49', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B49,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B49)
})
,
'B50' : _reflection.GeneratedProtocolMessageType('B50', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B50,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B50)
})
,
'B51' : _reflection.GeneratedProtocolMessageType('B51', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B51,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B51)
})
,
'B52' : _reflection.GeneratedProtocolMessageType('B52', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B52,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B52)
})
,
'B53' : _reflection.GeneratedProtocolMessageType('B53', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B53,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B53)
})
,
'B54' : _reflection.GeneratedProtocolMessageType('B54', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B54,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B54)
})
,
'B55' : _reflection.GeneratedProtocolMessageType('B55', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B55,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B55)
})
,
'B56' : _reflection.GeneratedProtocolMessageType('B56', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B56,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B56)
})
,
'B57' : _reflection.GeneratedProtocolMessageType('B57', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B57,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B57)
})
,
'B58' : _reflection.GeneratedProtocolMessageType('B58', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B58,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B58)
})
,
'B59' : _reflection.GeneratedProtocolMessageType('B59', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B59,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B59)
})
,
'B60' : _reflection.GeneratedProtocolMessageType('B60', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B60,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B60)
})
,
'B61' : _reflection.GeneratedProtocolMessageType('B61', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B61,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B61)
})
,
'B62' : _reflection.GeneratedProtocolMessageType('B62', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B62,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B62)
})
,
'B63' : _reflection.GeneratedProtocolMessageType('B63', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B63,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B63)
})
,
'B64' : _reflection.GeneratedProtocolMessageType('B64', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B64,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B64)
})
,
'B65' : _reflection.GeneratedProtocolMessageType('B65', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B65,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B65)
})
,
'B66' : _reflection.GeneratedProtocolMessageType('B66', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B66,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B66)
})
,
'B67' : _reflection.GeneratedProtocolMessageType('B67', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B67,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B67)
})
,
'B68' : _reflection.GeneratedProtocolMessageType('B68', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B68,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B68)
})
,
'B69' : _reflection.GeneratedProtocolMessageType('B69', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B69,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B69)
})
,
'B70' : _reflection.GeneratedProtocolMessageType('B70', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B70,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B70)
})
,
'B71' : _reflection.GeneratedProtocolMessageType('B71', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B71,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B71)
})
,
'B72' : _reflection.GeneratedProtocolMessageType('B72', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B72,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B72)
})
,
'B73' : _reflection.GeneratedProtocolMessageType('B73', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B73,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B73)
})
,
'B74' : _reflection.GeneratedProtocolMessageType('B74', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B74,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B74)
})
,
'B75' : _reflection.GeneratedProtocolMessageType('B75', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B75,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B75)
})
,
'B76' : _reflection.GeneratedProtocolMessageType('B76', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B76,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B76)
})
,
'B77' : _reflection.GeneratedProtocolMessageType('B77', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B77,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B77)
})
,
'B78' : _reflection.GeneratedProtocolMessageType('B78', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B78,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B78)
})
,
'B79' : _reflection.GeneratedProtocolMessageType('B79', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B79,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B79)
})
,
'B80' : _reflection.GeneratedProtocolMessageType('B80', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B80,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B80)
})
,
'B81' : _reflection.GeneratedProtocolMessageType('B81', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B81,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B81)
})
,
'B82' : _reflection.GeneratedProtocolMessageType('B82', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B82,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B82)
})
,
'B83' : _reflection.GeneratedProtocolMessageType('B83', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B83,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B83)
})
,
'B84' : _reflection.GeneratedProtocolMessageType('B84', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B84,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B84)
})
,
'B85' : _reflection.GeneratedProtocolMessageType('B85', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B85,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B85)
})
,
'B86' : _reflection.GeneratedProtocolMessageType('B86', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B86,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B86)
})
,
'B87' : _reflection.GeneratedProtocolMessageType('B87', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B87,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B87)
})
,
'B88' : _reflection.GeneratedProtocolMessageType('B88', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B88,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B88)
})
,
'B89' : _reflection.GeneratedProtocolMessageType('B89', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B89,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B89)
})
,
'B90' : _reflection.GeneratedProtocolMessageType('B90', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B90,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B90)
})
,
'B91' : _reflection.GeneratedProtocolMessageType('B91', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B91,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B91)
})
,
'B92' : _reflection.GeneratedProtocolMessageType('B92', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B92,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B92)
})
,
'B93' : _reflection.GeneratedProtocolMessageType('B93', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B93,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B93)
})
,
'B94' : _reflection.GeneratedProtocolMessageType('B94', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B94,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B94)
})
,
'B95' : _reflection.GeneratedProtocolMessageType('B95', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B95,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B95)
})
,
'B96' : _reflection.GeneratedProtocolMessageType('B96', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B96,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B96)
})
,
'B97' : _reflection.GeneratedProtocolMessageType('B97', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B97,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B97)
})
,
'B98' : _reflection.GeneratedProtocolMessageType('B98', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B98,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B98)
})
,
'B99' : _reflection.GeneratedProtocolMessageType('B99', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B99,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B99)
})
,
'B100' : _reflection.GeneratedProtocolMessageType('B100', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B100,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B100)
})
,
'B101' : _reflection.GeneratedProtocolMessageType('B101', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B101,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B101)
})
,
'B102' : _reflection.GeneratedProtocolMessageType('B102', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B102,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B102)
})
,
'B103' : _reflection.GeneratedProtocolMessageType('B103', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B103,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B103)
})
,
'B104' : _reflection.GeneratedProtocolMessageType('B104', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B104,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B104)
})
,
'B105' : _reflection.GeneratedProtocolMessageType('B105', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B105,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B105)
})
,
'B106' : _reflection.GeneratedProtocolMessageType('B106', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B106,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B106)
})
,
'B107' : _reflection.GeneratedProtocolMessageType('B107', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B107,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B107)
})
,
'B108' : _reflection.GeneratedProtocolMessageType('B108', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B108,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B108)
})
,
'B109' : _reflection.GeneratedProtocolMessageType('B109', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B109,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B109)
})
,
'B110' : _reflection.GeneratedProtocolMessageType('B110', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B110,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B110)
})
,
'B111' : _reflection.GeneratedProtocolMessageType('B111', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B111,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B111)
})
,
'B112' : _reflection.GeneratedProtocolMessageType('B112', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B112,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B112)
})
,
'B113' : _reflection.GeneratedProtocolMessageType('B113', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B113,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B113)
})
,
'B114' : _reflection.GeneratedProtocolMessageType('B114', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B114,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B114)
})
,
'B115' : _reflection.GeneratedProtocolMessageType('B115', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B115,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B115)
})
,
'B116' : _reflection.GeneratedProtocolMessageType('B116', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B116,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B116)
})
,
'B117' : _reflection.GeneratedProtocolMessageType('B117', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B117,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B117)
})
,
'B118' : _reflection.GeneratedProtocolMessageType('B118', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B118,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B118)
})
,
'B119' : _reflection.GeneratedProtocolMessageType('B119', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B119,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B119)
})
,
'B120' : _reflection.GeneratedProtocolMessageType('B120', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B120,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B120)
})
,
'B121' : _reflection.GeneratedProtocolMessageType('B121', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B121,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B121)
})
,
'B122' : _reflection.GeneratedProtocolMessageType('B122', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B122,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B122)
})
,
'B123' : _reflection.GeneratedProtocolMessageType('B123', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B123,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B123)
})
,
'B124' : _reflection.GeneratedProtocolMessageType('B124', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B124,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B124)
})
,
'B125' : _reflection.GeneratedProtocolMessageType('B125', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B125,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B125)
})
,
'B126' : _reflection.GeneratedProtocolMessageType('B126', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B126,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B126)
})
,
'B127' : _reflection.GeneratedProtocolMessageType('B127', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B127,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B127)
})
,
'B128' : _reflection.GeneratedProtocolMessageType('B128', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B128,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B128)
})
,
'B129' : _reflection.GeneratedProtocolMessageType('B129', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B129,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B129)
})
,
'B130' : _reflection.GeneratedProtocolMessageType('B130', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B130,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B130)
})
,
'B131' : _reflection.GeneratedProtocolMessageType('B131', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B131,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B131)
})
,
'B132' : _reflection.GeneratedProtocolMessageType('B132', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B132,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B132)
})
,
'B133' : _reflection.GeneratedProtocolMessageType('B133', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B133,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B133)
})
,
'B134' : _reflection.GeneratedProtocolMessageType('B134', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B134,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B134)
})
,
'B135' : _reflection.GeneratedProtocolMessageType('B135', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B135,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B135)
})
,
'B136' : _reflection.GeneratedProtocolMessageType('B136', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B136,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B136)
})
,
'B137' : _reflection.GeneratedProtocolMessageType('B137', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B137,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B137)
})
,
'B138' : _reflection.GeneratedProtocolMessageType('B138', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B138,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B138)
})
,
'B139' : _reflection.GeneratedProtocolMessageType('B139', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B139,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B139)
})
,
'B140' : _reflection.GeneratedProtocolMessageType('B140', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B140,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B140)
})
,
'B141' : _reflection.GeneratedProtocolMessageType('B141', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B141,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B141)
})
,
'B142' : _reflection.GeneratedProtocolMessageType('B142', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B142,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B142)
})
,
'B143' : _reflection.GeneratedProtocolMessageType('B143', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B143,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B143)
})
,
'B144' : _reflection.GeneratedProtocolMessageType('B144', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B144,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B144)
})
,
'B145' : _reflection.GeneratedProtocolMessageType('B145', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B145,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B145)
})
,
'B146' : _reflection.GeneratedProtocolMessageType('B146', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B146,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B146)
})
,
'B147' : _reflection.GeneratedProtocolMessageType('B147', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B147,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B147)
})
,
'B148' : _reflection.GeneratedProtocolMessageType('B148', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B148,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B148)
})
,
'B149' : _reflection.GeneratedProtocolMessageType('B149', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B149,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B149)
})
,
'B150' : _reflection.GeneratedProtocolMessageType('B150', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B150,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B150)
})
,
'B151' : _reflection.GeneratedProtocolMessageType('B151', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B151,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B151)
})
,
'B152' : _reflection.GeneratedProtocolMessageType('B152', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B152,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B152)
})
,
'B153' : _reflection.GeneratedProtocolMessageType('B153', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B153,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B153)
})
,
'B154' : _reflection.GeneratedProtocolMessageType('B154', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B154,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B154)
})
,
'B155' : _reflection.GeneratedProtocolMessageType('B155', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B155,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B155)
})
,
'B156' : _reflection.GeneratedProtocolMessageType('B156', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B156,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B156)
})
,
'B157' : _reflection.GeneratedProtocolMessageType('B157', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B157,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B157)
})
,
'B158' : _reflection.GeneratedProtocolMessageType('B158', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B158,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B158)
})
,
'B159' : _reflection.GeneratedProtocolMessageType('B159', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B159,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B159)
})
,
'B160' : _reflection.GeneratedProtocolMessageType('B160', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B160,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B160)
})
,
'B161' : _reflection.GeneratedProtocolMessageType('B161', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B161,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B161)
})
,
'B162' : _reflection.GeneratedProtocolMessageType('B162', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B162,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B162)
})
,
'B163' : _reflection.GeneratedProtocolMessageType('B163', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B163,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B163)
})
,
'B164' : _reflection.GeneratedProtocolMessageType('B164', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B164,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B164)
})
,
'B165' : _reflection.GeneratedProtocolMessageType('B165', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B165,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B165)
})
,
'B166' : _reflection.GeneratedProtocolMessageType('B166', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B166,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B166)
})
,
'B167' : _reflection.GeneratedProtocolMessageType('B167', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B167,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B167)
})
,
'B168' : _reflection.GeneratedProtocolMessageType('B168', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B168,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B168)
})
,
'B169' : _reflection.GeneratedProtocolMessageType('B169', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B169,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B169)
})
,
'B170' : _reflection.GeneratedProtocolMessageType('B170', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B170,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B170)
})
,
'B171' : _reflection.GeneratedProtocolMessageType('B171', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B171,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B171)
})
,
'B172' : _reflection.GeneratedProtocolMessageType('B172', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B172,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B172)
})
,
'B173' : _reflection.GeneratedProtocolMessageType('B173', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B173,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B173)
})
,
'B174' : _reflection.GeneratedProtocolMessageType('B174', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B174,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B174)
})
,
'B175' : _reflection.GeneratedProtocolMessageType('B175', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B175,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B175)
})
,
'B176' : _reflection.GeneratedProtocolMessageType('B176', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B176,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B176)
})
,
'B177' : _reflection.GeneratedProtocolMessageType('B177', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B177,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B177)
})
,
'B178' : _reflection.GeneratedProtocolMessageType('B178', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B178,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B178)
})
,
'B179' : _reflection.GeneratedProtocolMessageType('B179', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B179,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B179)
})
,
'B180' : _reflection.GeneratedProtocolMessageType('B180', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B180,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B180)
})
,
'B181' : _reflection.GeneratedProtocolMessageType('B181', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B181,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B181)
})
,
'B182' : _reflection.GeneratedProtocolMessageType('B182', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B182,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B182)
})
,
'B183' : _reflection.GeneratedProtocolMessageType('B183', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B183,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B183)
})
,
'B184' : _reflection.GeneratedProtocolMessageType('B184', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B184,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B184)
})
,
'B185' : _reflection.GeneratedProtocolMessageType('B185', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B185,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B185)
})
,
'B186' : _reflection.GeneratedProtocolMessageType('B186', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B186,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B186)
})
,
'B187' : _reflection.GeneratedProtocolMessageType('B187', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B187,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B187)
})
,
'B188' : _reflection.GeneratedProtocolMessageType('B188', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B188,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B188)
})
,
'B189' : _reflection.GeneratedProtocolMessageType('B189', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B189,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B189)
})
,
'B190' : _reflection.GeneratedProtocolMessageType('B190', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B190,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B190)
})
,
'B191' : _reflection.GeneratedProtocolMessageType('B191', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B191,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B191)
})
,
'B192' : _reflection.GeneratedProtocolMessageType('B192', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B192,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B192)
})
,
'B193' : _reflection.GeneratedProtocolMessageType('B193', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B193,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B193)
})
,
'B194' : _reflection.GeneratedProtocolMessageType('B194', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B194,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B194)
})
,
'B195' : _reflection.GeneratedProtocolMessageType('B195', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B195,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B195)
})
,
'B196' : _reflection.GeneratedProtocolMessageType('B196', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B196,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B196)
})
,
'B197' : _reflection.GeneratedProtocolMessageType('B197', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B197,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B197)
})
,
'B198' : _reflection.GeneratedProtocolMessageType('B198', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B198,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B198)
})
,
'B199' : _reflection.GeneratedProtocolMessageType('B199', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B199,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B199)
})
,
'B200' : _reflection.GeneratedProtocolMessageType('B200', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B200,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B200)
})
,
'B201' : _reflection.GeneratedProtocolMessageType('B201', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B201,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B201)
})
,
'B202' : _reflection.GeneratedProtocolMessageType('B202', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B202,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B202)
})
,
'B203' : _reflection.GeneratedProtocolMessageType('B203', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B203,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B203)
})
,
'B204' : _reflection.GeneratedProtocolMessageType('B204', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B204,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B204)
})
,
'B205' : _reflection.GeneratedProtocolMessageType('B205', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B205,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B205)
})
,
'B206' : _reflection.GeneratedProtocolMessageType('B206', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B206,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B206)
})
,
'B207' : _reflection.GeneratedProtocolMessageType('B207', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B207,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B207)
})
,
'B208' : _reflection.GeneratedProtocolMessageType('B208', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B208,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B208)
})
,
'B209' : _reflection.GeneratedProtocolMessageType('B209', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B209,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B209)
})
,
'B210' : _reflection.GeneratedProtocolMessageType('B210', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B210,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B210)
})
,
'B211' : _reflection.GeneratedProtocolMessageType('B211', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B211,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B211)
})
,
'B212' : _reflection.GeneratedProtocolMessageType('B212', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B212,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B212)
})
,
'B213' : _reflection.GeneratedProtocolMessageType('B213', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B213,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B213)
})
,
'B214' : _reflection.GeneratedProtocolMessageType('B214', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B214,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B214)
})
,
'B215' : _reflection.GeneratedProtocolMessageType('B215', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B215,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B215)
})
,
'B216' : _reflection.GeneratedProtocolMessageType('B216', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B216,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B216)
})
,
'B217' : _reflection.GeneratedProtocolMessageType('B217', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B217,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B217)
})
,
'B218' : _reflection.GeneratedProtocolMessageType('B218', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B218,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B218)
})
,
'B219' : _reflection.GeneratedProtocolMessageType('B219', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B219,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B219)
})
,
'B220' : _reflection.GeneratedProtocolMessageType('B220', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B220,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B220)
})
,
'B221' : _reflection.GeneratedProtocolMessageType('B221', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B221,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B221)
})
,
'B222' : _reflection.GeneratedProtocolMessageType('B222', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B222,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B222)
})
,
'B223' : _reflection.GeneratedProtocolMessageType('B223', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B223,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B223)
})
,
'B224' : _reflection.GeneratedProtocolMessageType('B224', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B224,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B224)
})
,
'B225' : _reflection.GeneratedProtocolMessageType('B225', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B225,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B225)
})
,
'B226' : _reflection.GeneratedProtocolMessageType('B226', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B226,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B226)
})
,
'B227' : _reflection.GeneratedProtocolMessageType('B227', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B227,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B227)
})
,
'B228' : _reflection.GeneratedProtocolMessageType('B228', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B228,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B228)
})
,
'B229' : _reflection.GeneratedProtocolMessageType('B229', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B229,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B229)
})
,
'B230' : _reflection.GeneratedProtocolMessageType('B230', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B230,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B230)
})
,
'B231' : _reflection.GeneratedProtocolMessageType('B231', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B231,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B231)
})
,
'B232' : _reflection.GeneratedProtocolMessageType('B232', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B232,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B232)
})
,
'B233' : _reflection.GeneratedProtocolMessageType('B233', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B233,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B233)
})
,
'B234' : _reflection.GeneratedProtocolMessageType('B234', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B234,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B234)
})
,
'B235' : _reflection.GeneratedProtocolMessageType('B235', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B235,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B235)
})
,
'B236' : _reflection.GeneratedProtocolMessageType('B236', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B236,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B236)
})
,
'B237' : _reflection.GeneratedProtocolMessageType('B237', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B237,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B237)
})
,
'B238' : _reflection.GeneratedProtocolMessageType('B238', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B238,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B238)
})
,
'B239' : _reflection.GeneratedProtocolMessageType('B239', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B239,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B239)
})
,
'B240' : _reflection.GeneratedProtocolMessageType('B240', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B240,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B240)
})
,
'B241' : _reflection.GeneratedProtocolMessageType('B241', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B241,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B241)
})
,
'B242' : _reflection.GeneratedProtocolMessageType('B242', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B242,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B242)
})
,
'B243' : _reflection.GeneratedProtocolMessageType('B243', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B243,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B243)
})
,
'B244' : _reflection.GeneratedProtocolMessageType('B244', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B244,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B244)
})
,
'B245' : _reflection.GeneratedProtocolMessageType('B245', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B245,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B245)
})
,
'B246' : _reflection.GeneratedProtocolMessageType('B246', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B246,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B246)
})
,
'B247' : _reflection.GeneratedProtocolMessageType('B247', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B247,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B247)
})
,
'B248' : _reflection.GeneratedProtocolMessageType('B248', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B248,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B248)
})
,
'B249' : _reflection.GeneratedProtocolMessageType('B249', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B249,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B249)
})
,
'B250' : _reflection.GeneratedProtocolMessageType('B250', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B250,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B250)
})
,
'B251' : _reflection.GeneratedProtocolMessageType('B251', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B251,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B251)
})
,
'B252' : _reflection.GeneratedProtocolMessageType('B252', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B252,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B252)
})
,
'B253' : _reflection.GeneratedProtocolMessageType('B253', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B253,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B253)
})
,
'B254' : _reflection.GeneratedProtocolMessageType('B254', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B254,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B254)
})
,
'B255' : _reflection.GeneratedProtocolMessageType('B255', (_message.Message,), {
'DESCRIPTOR' : _LOTSNESTEDMESSAGE_B255,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage.B255)
})
,
'DESCRIPTOR' : _LOTSNESTEDMESSAGE,
'__module__' : 'google.protobuf.internal.more_messages_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.internal.LotsNestedMessage)
})
_sym_db.RegisterMessage(LotsNestedMessage)
_sym_db.RegisterMessage(LotsNestedMessage.B0)
_sym_db.RegisterMessage(LotsNestedMessage.B1)
_sym_db.RegisterMessage(LotsNestedMessage.B2)
_sym_db.RegisterMessage(LotsNestedMessage.B3)
_sym_db.RegisterMessage(LotsNestedMessage.B4)
_sym_db.RegisterMessage(LotsNestedMessage.B5)
_sym_db.RegisterMessage(LotsNestedMessage.B6)
_sym_db.RegisterMessage(LotsNestedMessage.B7)
_sym_db.RegisterMessage(LotsNestedMessage.B8)
_sym_db.RegisterMessage(LotsNestedMessage.B9)
_sym_db.RegisterMessage(LotsNestedMessage.B10)
_sym_db.RegisterMessage(LotsNestedMessage.B11)
_sym_db.RegisterMessage(LotsNestedMessage.B12)
_sym_db.RegisterMessage(LotsNestedMessage.B13)
_sym_db.RegisterMessage(LotsNestedMessage.B14)
_sym_db.RegisterMessage(LotsNestedMessage.B15)
_sym_db.RegisterMessage(LotsNestedMessage.B16)
_sym_db.RegisterMessage(LotsNestedMessage.B17)
_sym_db.RegisterMessage(LotsNestedMessage.B18)
_sym_db.RegisterMessage(LotsNestedMessage.B19)
_sym_db.RegisterMessage(LotsNestedMessage.B20)
_sym_db.RegisterMessage(LotsNestedMessage.B21)
_sym_db.RegisterMessage(LotsNestedMessage.B22)
_sym_db.RegisterMessage(LotsNestedMessage.B23)
_sym_db.RegisterMessage(LotsNestedMessage.B24)
_sym_db.RegisterMessage(LotsNestedMessage.B25)
_sym_db.RegisterMessage(LotsNestedMessage.B26)
_sym_db.RegisterMessage(LotsNestedMessage.B27)
_sym_db.RegisterMessage(LotsNestedMessage.B28)
_sym_db.RegisterMessage(LotsNestedMessage.B29)
_sym_db.RegisterMessage(LotsNestedMessage.B30)
_sym_db.RegisterMessage(LotsNestedMessage.B31)
_sym_db.RegisterMessage(LotsNestedMessage.B32)
_sym_db.RegisterMessage(LotsNestedMessage.B33)
_sym_db.RegisterMessage(LotsNestedMessage.B34)
_sym_db.RegisterMessage(LotsNestedMessage.B35)
_sym_db.RegisterMessage(LotsNestedMessage.B36)
_sym_db.RegisterMessage(LotsNestedMessage.B37)
_sym_db.RegisterMessage(LotsNestedMessage.B38)
_sym_db.RegisterMessage(LotsNestedMessage.B39)
_sym_db.RegisterMessage(LotsNestedMessage.B40)
_sym_db.RegisterMessage(LotsNestedMessage.B41)
_sym_db.RegisterMessage(LotsNestedMessage.B42)
_sym_db.RegisterMessage(LotsNestedMessage.B43)
_sym_db.RegisterMessage(LotsNestedMessage.B44)
_sym_db.RegisterMessage(LotsNestedMessage.B45)
_sym_db.RegisterMessage(LotsNestedMessage.B46)
_sym_db.RegisterMessage(LotsNestedMessage.B47)
_sym_db.RegisterMessage(LotsNestedMessage.B48)
_sym_db.RegisterMessage(LotsNestedMessage.B49)
_sym_db.RegisterMessage(LotsNestedMessage.B50)
_sym_db.RegisterMessage(LotsNestedMessage.B51)
_sym_db.RegisterMessage(LotsNestedMessage.B52)
_sym_db.RegisterMessage(LotsNestedMessage.B53)
_sym_db.RegisterMessage(LotsNestedMessage.B54)
_sym_db.RegisterMessage(LotsNestedMessage.B55)
_sym_db.RegisterMessage(LotsNestedMessage.B56)
_sym_db.RegisterMessage(LotsNestedMessage.B57)
_sym_db.RegisterMessage(LotsNestedMessage.B58)
_sym_db.RegisterMessage(LotsNestedMessage.B59)
_sym_db.RegisterMessage(LotsNestedMessage.B60)
_sym_db.RegisterMessage(LotsNestedMessage.B61)
_sym_db.RegisterMessage(LotsNestedMessage.B62)
_sym_db.RegisterMessage(LotsNestedMessage.B63)
_sym_db.RegisterMessage(LotsNestedMessage.B64)
_sym_db.RegisterMessage(LotsNestedMessage.B65)
_sym_db.RegisterMessage(LotsNestedMessage.B66)
_sym_db.RegisterMessage(LotsNestedMessage.B67)
_sym_db.RegisterMessage(LotsNestedMessage.B68)
_sym_db.RegisterMessage(LotsNestedMessage.B69)
_sym_db.RegisterMessage(LotsNestedMessage.B70)
_sym_db.RegisterMessage(LotsNestedMessage.B71)
_sym_db.RegisterMessage(LotsNestedMessage.B72)
_sym_db.RegisterMessage(LotsNestedMessage.B73)
_sym_db.RegisterMessage(LotsNestedMessage.B74)
_sym_db.RegisterMessage(LotsNestedMessage.B75)
_sym_db.RegisterMessage(LotsNestedMessage.B76)
_sym_db.RegisterMessage(LotsNestedMessage.B77)
_sym_db.RegisterMessage(LotsNestedMessage.B78)
_sym_db.RegisterMessage(LotsNestedMessage.B79)
_sym_db.RegisterMessage(LotsNestedMessage.B80)
_sym_db.RegisterMessage(LotsNestedMessage.B81)
_sym_db.RegisterMessage(LotsNestedMessage.B82)
_sym_db.RegisterMessage(LotsNestedMessage.B83)
_sym_db.RegisterMessage(LotsNestedMessage.B84)
_sym_db.RegisterMessage(LotsNestedMessage.B85)
_sym_db.RegisterMessage(LotsNestedMessage.B86)
_sym_db.RegisterMessage(LotsNestedMessage.B87)
_sym_db.RegisterMessage(LotsNestedMessage.B88)
_sym_db.RegisterMessage(LotsNestedMessage.B89)
_sym_db.RegisterMessage(LotsNestedMessage.B90)
_sym_db.RegisterMessage(LotsNestedMessage.B91)
_sym_db.RegisterMessage(LotsNestedMessage.B92)
_sym_db.RegisterMessage(LotsNestedMessage.B93)
_sym_db.RegisterMessage(LotsNestedMessage.B94)
_sym_db.RegisterMessage(LotsNestedMessage.B95)
_sym_db.RegisterMessage(LotsNestedMessage.B96)
_sym_db.RegisterMessage(LotsNestedMessage.B97)
_sym_db.RegisterMessage(LotsNestedMessage.B98)
_sym_db.RegisterMessage(LotsNestedMessage.B99)
_sym_db.RegisterMessage(LotsNestedMessage.B100)
_sym_db.RegisterMessage(LotsNestedMessage.B101)
_sym_db.RegisterMessage(LotsNestedMessage.B102)
_sym_db.RegisterMessage(LotsNestedMessage.B103)
_sym_db.RegisterMessage(LotsNestedMessage.B104)
_sym_db.RegisterMessage(LotsNestedMessage.B105)
_sym_db.RegisterMessage(LotsNestedMessage.B106)
_sym_db.RegisterMessage(LotsNestedMessage.B107)
_sym_db.RegisterMessage(LotsNestedMessage.B108)
_sym_db.RegisterMessage(LotsNestedMessage.B109)
_sym_db.RegisterMessage(LotsNestedMessage.B110)
_sym_db.RegisterMessage(LotsNestedMessage.B111)
_sym_db.RegisterMessage(LotsNestedMessage.B112)
_sym_db.RegisterMessage(LotsNestedMessage.B113)
_sym_db.RegisterMessage(LotsNestedMessage.B114)
_sym_db.RegisterMessage(LotsNestedMessage.B115)
_sym_db.RegisterMessage(LotsNestedMessage.B116)
_sym_db.RegisterMessage(LotsNestedMessage.B117)
_sym_db.RegisterMessage(LotsNestedMessage.B118)
_sym_db.RegisterMessage(LotsNestedMessage.B119)
_sym_db.RegisterMessage(LotsNestedMessage.B120)
_sym_db.RegisterMessage(LotsNestedMessage.B121)
_sym_db.RegisterMessage(LotsNestedMessage.B122)
_sym_db.RegisterMessage(LotsNestedMessage.B123)
_sym_db.RegisterMessage(LotsNestedMessage.B124)
_sym_db.RegisterMessage(LotsNestedMessage.B125)
_sym_db.RegisterMessage(LotsNestedMessage.B126)
_sym_db.RegisterMessage(LotsNestedMessage.B127)
_sym_db.RegisterMessage(LotsNestedMessage.B128)
_sym_db.RegisterMessage(LotsNestedMessage.B129)
_sym_db.RegisterMessage(LotsNestedMessage.B130)
_sym_db.RegisterMessage(LotsNestedMessage.B131)
_sym_db.RegisterMessage(LotsNestedMessage.B132)
_sym_db.RegisterMessage(LotsNestedMessage.B133)
_sym_db.RegisterMessage(LotsNestedMessage.B134)
_sym_db.RegisterMessage(LotsNestedMessage.B135)
_sym_db.RegisterMessage(LotsNestedMessage.B136)
_sym_db.RegisterMessage(LotsNestedMessage.B137)
_sym_db.RegisterMessage(LotsNestedMessage.B138)
_sym_db.RegisterMessage(LotsNestedMessage.B139)
_sym_db.RegisterMessage(LotsNestedMessage.B140)
_sym_db.RegisterMessage(LotsNestedMessage.B141)
_sym_db.RegisterMessage(LotsNestedMessage.B142)
_sym_db.RegisterMessage(LotsNestedMessage.B143)
_sym_db.RegisterMessage(LotsNestedMessage.B144)
_sym_db.RegisterMessage(LotsNestedMessage.B145)
_sym_db.RegisterMessage(LotsNestedMessage.B146)
_sym_db.RegisterMessage(LotsNestedMessage.B147)
_sym_db.RegisterMessage(LotsNestedMessage.B148)
_sym_db.RegisterMessage(LotsNestedMessage.B149)
_sym_db.RegisterMessage(LotsNestedMessage.B150)
_sym_db.RegisterMessage(LotsNestedMessage.B151)
_sym_db.RegisterMessage(LotsNestedMessage.B152)
_sym_db.RegisterMessage(LotsNestedMessage.B153)
_sym_db.RegisterMessage(LotsNestedMessage.B154)
_sym_db.RegisterMessage(LotsNestedMessage.B155)
_sym_db.RegisterMessage(LotsNestedMessage.B156)
_sym_db.RegisterMessage(LotsNestedMessage.B157)
_sym_db.RegisterMessage(LotsNestedMessage.B158)
_sym_db.RegisterMessage(LotsNestedMessage.B159)
_sym_db.RegisterMessage(LotsNestedMessage.B160)
_sym_db.RegisterMessage(LotsNestedMessage.B161)
_sym_db.RegisterMessage(LotsNestedMessage.B162)
_sym_db.RegisterMessage(LotsNestedMessage.B163)
_sym_db.RegisterMessage(LotsNestedMessage.B164)
_sym_db.RegisterMessage(LotsNestedMessage.B165)
_sym_db.RegisterMessage(LotsNestedMessage.B166)
_sym_db.RegisterMessage(LotsNestedMessage.B167)
_sym_db.RegisterMessage(LotsNestedMessage.B168)
_sym_db.RegisterMessage(LotsNestedMessage.B169)
_sym_db.RegisterMessage(LotsNestedMessage.B170)
_sym_db.RegisterMessage(LotsNestedMessage.B171)
_sym_db.RegisterMessage(LotsNestedMessage.B172)
_sym_db.RegisterMessage(LotsNestedMessage.B173)
_sym_db.RegisterMessage(LotsNestedMessage.B174)
_sym_db.RegisterMessage(LotsNestedMessage.B175)
_sym_db.RegisterMessage(LotsNestedMessage.B176)
_sym_db.RegisterMessage(LotsNestedMessage.B177)
_sym_db.RegisterMessage(LotsNestedMessage.B178)
_sym_db.RegisterMessage(LotsNestedMessage.B179)
_sym_db.RegisterMessage(LotsNestedMessage.B180)
_sym_db.RegisterMessage(LotsNestedMessage.B181)
_sym_db.RegisterMessage(LotsNestedMessage.B182)
_sym_db.RegisterMessage(LotsNestedMessage.B183)
_sym_db.RegisterMessage(LotsNestedMessage.B184)
_sym_db.RegisterMessage(LotsNestedMessage.B185)
_sym_db.RegisterMessage(LotsNestedMessage.B186)
_sym_db.RegisterMessage(LotsNestedMessage.B187)
_sym_db.RegisterMessage(LotsNestedMessage.B188)
_sym_db.RegisterMessage(LotsNestedMessage.B189)
_sym_db.RegisterMessage(LotsNestedMessage.B190)
_sym_db.RegisterMessage(LotsNestedMessage.B191)
_sym_db.RegisterMessage(LotsNestedMessage.B192)
_sym_db.RegisterMessage(LotsNestedMessage.B193)
_sym_db.RegisterMessage(LotsNestedMessage.B194)
_sym_db.RegisterMessage(LotsNestedMessage.B195)
_sym_db.RegisterMessage(LotsNestedMessage.B196)
_sym_db.RegisterMessage(LotsNestedMessage.B197)
_sym_db.RegisterMessage(LotsNestedMessage.B198)
_sym_db.RegisterMessage(LotsNestedMessage.B199)
_sym_db.RegisterMessage(LotsNestedMessage.B200)
_sym_db.RegisterMessage(LotsNestedMessage.B201)
_sym_db.RegisterMessage(LotsNestedMessage.B202)
_sym_db.RegisterMessage(LotsNestedMessage.B203)
_sym_db.RegisterMessage(LotsNestedMessage.B204)
_sym_db.RegisterMessage(LotsNestedMessage.B205)
_sym_db.RegisterMessage(LotsNestedMessage.B206)
_sym_db.RegisterMessage(LotsNestedMessage.B207)
_sym_db.RegisterMessage(LotsNestedMessage.B208)
_sym_db.RegisterMessage(LotsNestedMessage.B209)
_sym_db.RegisterMessage(LotsNestedMessage.B210)
_sym_db.RegisterMessage(LotsNestedMessage.B211)
_sym_db.RegisterMessage(LotsNestedMessage.B212)
_sym_db.RegisterMessage(LotsNestedMessage.B213)
_sym_db.RegisterMessage(LotsNestedMessage.B214)
_sym_db.RegisterMessage(LotsNestedMessage.B215)
_sym_db.RegisterMessage(LotsNestedMessage.B216)
_sym_db.RegisterMessage(LotsNestedMessage.B217)
_sym_db.RegisterMessage(LotsNestedMessage.B218)
_sym_db.RegisterMessage(LotsNestedMessage.B219)
_sym_db.RegisterMessage(LotsNestedMessage.B220)
_sym_db.RegisterMessage(LotsNestedMessage.B221)
_sym_db.RegisterMessage(LotsNestedMessage.B222)
_sym_db.RegisterMessage(LotsNestedMessage.B223)
_sym_db.RegisterMessage(LotsNestedMessage.B224)
_sym_db.RegisterMessage(LotsNestedMessage.B225)
_sym_db.RegisterMessage(LotsNestedMessage.B226)
_sym_db.RegisterMessage(LotsNestedMessage.B227)
_sym_db.RegisterMessage(LotsNestedMessage.B228)
_sym_db.RegisterMessage(LotsNestedMessage.B229)
_sym_db.RegisterMessage(LotsNestedMessage.B230)
_sym_db.RegisterMessage(LotsNestedMessage.B231)
_sym_db.RegisterMessage(LotsNestedMessage.B232)
_sym_db.RegisterMessage(LotsNestedMessage.B233)
_sym_db.RegisterMessage(LotsNestedMessage.B234)
_sym_db.RegisterMessage(LotsNestedMessage.B235)
_sym_db.RegisterMessage(LotsNestedMessage.B236)
_sym_db.RegisterMessage(LotsNestedMessage.B237)
_sym_db.RegisterMessage(LotsNestedMessage.B238)
_sym_db.RegisterMessage(LotsNestedMessage.B239)
_sym_db.RegisterMessage(LotsNestedMessage.B240)
_sym_db.RegisterMessage(LotsNestedMessage.B241)
_sym_db.RegisterMessage(LotsNestedMessage.B242)
_sym_db.RegisterMessage(LotsNestedMessage.B243)
_sym_db.RegisterMessage(LotsNestedMessage.B244)
_sym_db.RegisterMessage(LotsNestedMessage.B245)
_sym_db.RegisterMessage(LotsNestedMessage.B246)
_sym_db.RegisterMessage(LotsNestedMessage.B247)
_sym_db.RegisterMessage(LotsNestedMessage.B248)
_sym_db.RegisterMessage(LotsNestedMessage.B249)
_sym_db.RegisterMessage(LotsNestedMessage.B250)
_sym_db.RegisterMessage(LotsNestedMessage.B251)
_sym_db.RegisterMessage(LotsNestedMessage.B252)
_sym_db.RegisterMessage(LotsNestedMessage.B253)
_sym_db.RegisterMessage(LotsNestedMessage.B254)
_sym_db.RegisterMessage(LotsNestedMessage.B255)
OutOfOrderFields.RegisterExtension(optional_uint64)
OutOfOrderFields.RegisterExtension(optional_int64)
globals()['class'].RegisterExtension(globals()['continue'])
getattr(globals()['class'], 'try').RegisterExtension(globals()['with'])
globals()['class'].RegisterExtension(_EXTENDCLASS.extensions_by_name['return'])
# @@protoc_insertion_point(module_scope)
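# --- Illustrative usage sketch (not part of the generated module) ---
# The nested B0..B255 classes registered above appear to exist mainly to
# exercise deeply nested message registration in the descriptor machinery;
# a hypothetical caller could still use them like any other generated
# message class, e.g.:
#
#   msg = LotsNestedMessage.B255()
#   payload = msg.SerializeToString()  # an empty message serializes to b''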
|
apache-2.0
|
slightstone/SickRage
|
lib/chardet/langthaimodel.py
|
2930
|
11275
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage Return / Line Feed
# 253: symbols (punctuation) that do not belong to words
# 252: 0 - 9
# The following result for Thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
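# --- Illustrative helper (not part of the original chardet module) ---
# A minimal sketch of how one raw TIS-620 byte is translated into its
# frequency-order code with the table above; the helper name is
# hypothetical and only documents the 255/254/253/252 convention.
def _byte_to_order(byte_value):
    """Return the order code for one byte (0-255): 255/254/253/252 are the
    special classes listed above; lower values mean a more frequent Thai
    character."""
    return TIS620CharToOrderMap[byte_value]

# Example values, read off the table above:
#   _byte_to_order(0x0D) -> 254  (carriage return)
#   _byte_to_order(0x20) -> 253  (space / punctuation class)
#   _byte_to_order(0x31) -> 252  (digit '1')
#   _byte_to_order(0xA1) -> 5    (a frequent Thai letter)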
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
# flake8: noqa
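# --- Illustrative usage sketch (not part of the original chardet module) ---
# A rough sketch of how a single-byte charset prober might consume
# TIS620ThaiModel: map each byte of TIS-620-encoded input to its order code
# and look up every pair of "frequent" orders in the precedence matrix.
# The sample size of 64 and the ratio-based score are assumptions made for
# illustration; the real consumer of this table is chardet's
# SingleByteCharSetProber, which is not shown here.
def _score_thai_sequences(data, model=TIS620ThaiModel, sample_size=64):
    # data: a byte string assumed to be TIS-620 encoded.
    order_map = model['charToOrderMap']
    matrix = model['precedenceMatrix']
    likely = total = 0
    last_order = 255  # sentinel: previous byte was not a frequent character
    for byte_value in bytearray(data):
        order = order_map[byte_value]
        if order < sample_size and last_order < sample_size:
            total += 1
            # Category 3 marks a frequently observed Thai character bigram.
            if matrix[last_order * sample_size + order] == 3:
                likely += 1
        last_order = order
    return (likely / float(total)) if total else 0.0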
|
gpl-3.0
|