# coding=utf-8
r"""
This code was generated by
\ / _    _  _|   _  _
 | (_)\/(_)(_|\/| |(/_  v1.0.0
      /       /
"""
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class PhoneNumberList(ListResource):
def __init__(self, version):
"""
Initialize the PhoneNumberList
:param Version version: Version that contains the resource
:returns: twilio.rest.lookups.v1.phone_number.PhoneNumberList
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberList
"""
super(PhoneNumberList, self).__init__(version)
# Path Solution
self._solution = {}
def get(self, phone_number):
"""
Constructs a PhoneNumberContext
:param phone_number: The phone number to fetch in E.164 format
:returns: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
"""
return PhoneNumberContext(self._version, phone_number=phone_number, )
def __call__(self, phone_number):
"""
Constructs a PhoneNumberContext
:param phone_number: The phone number to fetch in E.164 format
:returns: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
"""
return PhoneNumberContext(self._version, phone_number=phone_number, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Lookups.V1.PhoneNumberList>'
class PhoneNumberPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the PhoneNumberPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.lookups.v1.phone_number.PhoneNumberPage
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberPage
"""
super(PhoneNumberPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of PhoneNumberInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.lookups.v1.phone_number.PhoneNumberInstance
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberInstance
"""
return PhoneNumberInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Lookups.V1.PhoneNumberPage>'
class PhoneNumberContext(InstanceContext):
def __init__(self, version, phone_number):
"""
Initialize the PhoneNumberContext
:param Version version: Version that contains the resource
:param phone_number: The phone number to fetch in E.164 format
:returns: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
"""
super(PhoneNumberContext, self).__init__(version)
# Path Solution
self._solution = {'phone_number': phone_number, }
self._uri = '/PhoneNumbers/{phone_number}'.format(**self._solution)
def fetch(self, country_code=values.unset, type=values.unset,
add_ons=values.unset, add_ons_data=values.unset):
"""
Fetch the PhoneNumberInstance
:param unicode country_code: The ISO country code of the phone number
:param list[unicode] type: The type of information to return
:param list[unicode] add_ons: The unique_name of an Add-on you would like to invoke
:param dict add_ons_data: Data specific to the add-on you would like to invoke
:returns: The fetched PhoneNumberInstance
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberInstance
"""
data = values.of({
'CountryCode': country_code,
'Type': serialize.map(type, lambda e: e),
'AddOns': serialize.map(add_ons, lambda e: e),
})
data.update(serialize.prefixed_collapsible_map(add_ons_data, 'AddOns'))
payload = self._version.fetch(method='GET', uri=self._uri, params=data, )
return PhoneNumberInstance(self._version, payload, phone_number=self._solution['phone_number'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Lookups.V1.PhoneNumberContext {}>'.format(context)
class PhoneNumberInstance(InstanceResource):
class Type(object):
LANDLINE = "landline"
MOBILE = "mobile"
VOIP = "voip"
def __init__(self, version, payload, phone_number=None):
"""
Initialize the PhoneNumberInstance
:returns: twilio.rest.lookups.v1.phone_number.PhoneNumberInstance
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberInstance
"""
super(PhoneNumberInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'caller_name': payload.get('caller_name'),
'country_code': payload.get('country_code'),
'phone_number': payload.get('phone_number'),
'national_format': payload.get('national_format'),
'carrier': payload.get('carrier'),
'add_ons': payload.get('add_ons'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'phone_number': phone_number or self._properties['phone_number'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: PhoneNumberContext for this PhoneNumberInstance
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
"""
if self._context is None:
self._context = PhoneNumberContext(self._version, phone_number=self._solution['phone_number'], )
return self._context
@property
def caller_name(self):
"""
:returns: The name of the phone number's owner
:rtype: dict
"""
return self._properties['caller_name']
@property
def country_code(self):
"""
:returns: The ISO country code for the phone number
:rtype: unicode
"""
return self._properties['country_code']
@property
def phone_number(self):
"""
:returns: The phone number in E.164 format
:rtype: unicode
"""
return self._properties['phone_number']
@property
def national_format(self):
"""
:returns: The phone number, in national format
:rtype: unicode
"""
return self._properties['national_format']
@property
def carrier(self):
"""
:returns: The telecom company that provides the phone number
:rtype: dict
"""
return self._properties['carrier']
@property
def add_ons(self):
"""
:returns: A JSON string with the results of the Add-ons you specified
:rtype: dict
"""
return self._properties['add_ons']
@property
def url(self):
"""
:returns: The absolute URL of the resource
:rtype: unicode
"""
return self._properties['url']
def fetch(self, country_code=values.unset, type=values.unset,
add_ons=values.unset, add_ons_data=values.unset):
"""
Fetch the PhoneNumberInstance
:param unicode country_code: The ISO country code of the phone number
:param list[unicode] type: The type of information to return
:param list[unicode] add_ons: The unique_name of an Add-on you would like to invoke
:param dict add_ons_data: Data specific to the add-on you would like to invoke
:returns: The fetched PhoneNumberInstance
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberInstance
"""
return self._proxy.fetch(
country_code=country_code,
type=type,
add_ons=add_ons,
add_ons_data=add_ons_data,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Lookups.V1.PhoneNumberInstance {}>'.format(context)
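# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated resource file above): a minimal,
# hedged example of reaching this resource through the twilio-python client.
# The account SID, auth token, and phone number below are placeholders.
#
#     from twilio.rest import Client
#
#     client = Client('ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx', 'your_auth_token')
#     number = client.lookups.v1.phone_numbers('+15017122661').fetch(
#         type=['carrier'],
#     )
#     print(number.national_format, number.carrier)
# ---------------------------------------------------------------------------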
from django.db.models.aggregates import Count
from corehq.apps.accounting.filters import DateCreatedFilter
from corehq.apps.reports.datatables import (
DataTablesColumn,
DataTablesHeader,
)
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.sms.models import (
INCOMING,
OUTGOING,
)
from corehq.apps.smsbillables.dispatcher import SMSAdminInterfaceDispatcher
from corehq.apps.smsbillables.filters import (
CountryCodeFilter,
DateSentFilter,
DirectionFilter,
DomainFilter,
HasGatewayFeeFilter,
GatewayTypeFilter,
ShowBillablesFilter,
SpecificGateway,
)
from corehq.apps.smsbillables.models import (
SmsBillable,
SmsGatewayFee,
SmsGatewayFeeCriteria,
)
from couchexport.models import Format
class SMSBillablesInterface(GenericTabularReport):
base_template = "accounting/report_filter_actions.html"
section_name = "Accounting"
dispatcher = SMSAdminInterfaceDispatcher
name = "SMS Billables"
description = "List of all SMS Billables"
slug = "sms_billables"
ajax_pagination = True
exportable = True
exportable_all = True
export_format_override = Format.UNZIPPED_CSV
fields = [
'corehq.apps.smsbillables.interface.DateSentFilter',
'corehq.apps.accounting.interface.DateCreatedFilter',
'corehq.apps.smsbillables.interface.ShowBillablesFilter',
'corehq.apps.smsbillables.interface.DomainFilter',
'corehq.apps.smsbillables.interface.HasGatewayFeeFilter',
'corehq.apps.smsbillables.interface.GatewayTypeFilter',
]
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn("Date of Message"),
DataTablesColumn("Project Space"),
DataTablesColumn("Direction"),
DataTablesColumn("SMS parts"),
DataTablesColumn("Gateway", sortable=False),
DataTablesColumn("Gateway Charge", sortable=False),
DataTablesColumn("Usage Charge", sortable=False),
DataTablesColumn("Total Charge", sortable=False),
DataTablesColumn("Message Log ID", sortable=False),
DataTablesColumn("Is Valid?", sortable=False),
DataTablesColumn("Date Created"),
)
@property
def sort_field(self):
sort_fields = [
'date_sent',
'domain',
'direction',
'multipart_count',
None,  # Gateway
None,  # Gateway Charge
None,  # Usage Charge
None,  # Total Charge
None,  # Message Log ID
None,  # Is Valid?
'date_created',
]
sort_index = int(self.request.GET.get('iSortCol_0', 1))
field = sort_fields[sort_index]
sort_descending = self.request.GET.get('sSortDir_0', 'asc') == 'desc'
return field if not sort_descending else '-{0}'.format(field)
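# Illustrative note (restating the logic above, with placeholder request
# values): a DataTables request carrying iSortCol_0=0 and sSortDir_0=desc
# resolves through sort_field to the ordering string '-date_sent', i.e. the
# billables queryset is ordered as
#     SmsBillable.objects.order_by('-date_sent')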
@property
def shared_pagination_GET_params(self):
return DateSentFilter.shared_pagination_GET_params(self.request) + \
DateCreatedFilter.shared_pagination_GET_params(self.request) + [
{
'name': DateCreatedFilter.optional_filter_slug(),
'value': DateCreatedFilter.optional_filter_string_value(self.request)
},
{
'name': ShowBillablesFilter.slug,
'value': ShowBillablesFilter.get_value(self.request, self.domain)
},
{
'name': DomainFilter.slug,
'value': DomainFilter.get_value(self.request, self.domain)
},
{
'name': HasGatewayFeeFilter.slug,
'value': HasGatewayFeeFilter.get_value(self.request, self.domain)
},
{
'name': GatewayTypeFilter.slug,
'value': GatewayTypeFilter.get_value(self.request, self.domain)
},
]
@property
def get_all_rows(self):
query = self.sms_billables
query = query.order_by(self.sort_field)
return self._format_billables(query)
@property
def total_records(self):
query = self.sms_billables
return query.aggregate(Count('id'))['id__count']
@property
def rows(self):
query = self.sms_billables
query = query.order_by(self.sort_field)
sms_billables = query[self.pagination.start:(self.pagination.start + self.pagination.count)]
return self._format_billables(sms_billables)
def _format_billables(self, sms_billables):
return [
[
sms_billable.date_sent,
sms_billable.domain,
{
INCOMING: "Incoming",
OUTGOING: "Outgoing",
}.get(sms_billable.direction, ""),
sms_billable.multipart_count,
sms_billable.gateway_fee.criteria.backend_api_id if sms_billable.gateway_fee else "",
sms_billable.gateway_charge,
sms_billable.usage_charge,
sms_billable.gateway_charge + sms_billable.usage_charge,
sms_billable.log_id,
sms_billable.is_valid,
sms_billable.date_created,
]
for sms_billable in sms_billables
]
@property
def sms_billables(self):
selected_billables = SmsBillable.objects.filter(
date_sent__gte=DateSentFilter.get_start_date(self.request),
date_sent__lte=DateSentFilter.get_end_date(self.request),
)
if DateCreatedFilter.use_filter(self.request):
selected_billables = selected_billables.filter(
date_created__gte=DateCreatedFilter.get_start_date(
self.request),
date_created__lte=DateCreatedFilter.get_end_date(self.request),
)
show_billables = ShowBillablesFilter.get_value(
self.request, self.domain)
if show_billables:
selected_billables = selected_billables.filter(
is_valid=(show_billables == ShowBillablesFilter.VALID),
)
domain = DomainFilter.get_value(self.request, self.domain)
if domain:
selected_billables = selected_billables.filter(
domain=domain,
)
has_gateway_fee = HasGatewayFeeFilter.get_value(
self.request, self.domain
)
if has_gateway_fee:
if has_gateway_fee == HasGatewayFeeFilter.YES:
selected_billables = selected_billables.exclude(
gateway_fee=None
)
else:
selected_billables = selected_billables.filter(
gateway_fee=None
)
gateway_type = GatewayTypeFilter.get_value(self.request, self.domain)
if gateway_type:
selected_billables = selected_billables.filter(
gateway_fee__criteria__backend_api_id=gateway_type,
)
return selected_billables
class SMSGatewayFeeCriteriaInterface(GenericTabularReport):
base_template = "accounting/report_filter_actions.html"
section_name = "Accounting"
dispatcher = SMSAdminInterfaceDispatcher
name = "SMS Gateway Fee Criteria"
description = "List of all SMS Gateway Fee Criteria"
slug = "sms_gateway_fee_criteria"
exportable = True
exportable_all = True
fields = [
'corehq.apps.smsbillables.interface.GatewayTypeFilter',
'corehq.apps.smsbillables.interface.SpecificGateway',
'corehq.apps.smsbillables.interface.DirectionFilter',
'corehq.apps.smsbillables.interface.CountryCodeFilter',
]
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn("Gateway Type"),
DataTablesColumn("Specific Gateway"),
DataTablesColumn("Direction"),
DataTablesColumn("Country Code"),
DataTablesColumn("Prefix"),
DataTablesColumn("Fee (Amount, Currency)")
)
@property
def get_all_rows(self):
return self.rows
@property
def rows(self):
rows = []
for criteria in self.sms_gateway_fee_criteria:
gateway_fee = SmsGatewayFee.get_by_criteria_obj(criteria)
rows.append([
criteria.backend_api_id,
(criteria.backend_instance
if criteria.backend_instance is not None else "Any"),
criteria.direction,
(criteria.country_code
if criteria.country_code is not None else "Any"),
criteria.prefix or "Any",
"%(amount)s %(currency)s" % {
'amount': str(gateway_fee.amount),
'currency': gateway_fee.currency.code,
},
])
return rows
@property
def sms_gateway_fee_criteria(self):
selected_criteria = SmsGatewayFeeCriteria.objects.filter()
gateway_type = GatewayTypeFilter.get_value(self.request, self.domain)
if gateway_type:
selected_criteria = selected_criteria.filter(
backend_api_id=gateway_type,
)
specific_gateway = SpecificGateway.get_value(self.request, self.domain)
if specific_gateway:
selected_criteria = selected_criteria.filter(
backend_instance=specific_gateway,
)
direction = DirectionFilter.get_value(self.request, self.domain)
if direction:
selected_criteria = selected_criteria.filter(
direction=direction,
)
country_code = CountryCodeFilter.get_value(self.request, self.domain)
if country_code:
selected_criteria = selected_criteria.filter(
country_code=int(country_code),
)
return selected_criteria
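# Illustrative note (the filter values below are placeholders, not values
# taken from this module): choosing a gateway type and a direction in the
# report filters narrows the queryset above to, effectively,
#     SmsGatewayFeeCriteria.objects.filter(
#         backend_api_id='TWILIO',
#         direction='O',
#     )
# and rows() then pairs each remaining criteria object with its fee via
# SmsGatewayFee.get_by_criteria_obj().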
#############################################################################
# Code for managing and training a variational Iterative Refinement Model. #
#############################################################################
# basic python
import numpy as np
import numpy.random as npr
from collections import OrderedDict
# theano business
import theano
import theano.tensor as T
#from theano.tensor.shared_randomstreams import RandomStreams as RandStream
from theano.sandbox.cuda.rng_curand import CURAND_RandomStreams as RandStream
# phil's sweetness
from NetLayers import HiddenLayer, DiscLayer, relu_actfun, softplus_actfun, \
apply_mask
from InfNet import InfNet
from DKCode import get_adam_updates, get_adadelta_updates
from LogPDFs import log_prob_bernoulli, log_prob_gaussian2, gaussian_kld
from HelperFuncs import to_fX
#
# Important symbolic variables:
# Xd: Xd represents input at the "data variables" of the inferencer
#
class MultiStageModel(object):
"""
Controller for training a multi-step iterative refinement model.
Parameters:
rng: numpy.random.RandomState (for reproducibility)
x_in: symbolic "data" input to this MultiStageModel
x_out: symbolic "target" output for this MultiStageModel
x_mask: symbolic binary "mask" describing known/missing target values
p_s0_obs_given_z_obs: InfNet for s0 given z_obs
p_hi_given_si: InfNet for hi given si
p_sip1_given_si_hi: InfNet for sip1 given si and hi
p_x_given_si_hi: InfNet for x given si and hi
q_z_given_x: InfNet for z given x
q_hi_given_x_si: InfNet for hi given x and si
model_init_obs: whether to use a model-based initial obs state
obs_dim: dimension of the observations to generate
z_dim: dimension of the "initial" latent space
h_dim: dimension of the "primary" latent space
ir_steps: number of "iterative refinement" steps to perform
params: REQUIRED PARAMS SHOWN BELOW
x_type: can be "bernoulli" or "gaussian"
obs_transform: can be 'none' or 'sigmoid'
"""
def __init__(self, rng=None, x_in=None, \
p_s0_obs_given_z_obs=None, p_hi_given_si=None, p_sip1_given_si_hi=None, \
p_x_given_si_hi=None, q_z_given_x=None, q_hi_given_x_si=None, \
obs_dim=None, z_dim=None, h_dim=None, \
model_init_obs=True, ir_steps=2, \
params=None):
# setup a rng for this MultiStageModel
self.rng = RandStream(rng.randint(100000))
# TODO: implement functionality for working with "latent" si
assert(p_x_given_si_hi is None)
# decide whether to initialize from a model or from a "constant"
self.model_init_obs = model_init_obs
# grab the user-provided parameters
self.params = params
self.x_type = self.params['x_type']
assert((self.x_type == 'bernoulli') or (self.x_type == 'gaussian'))
if 'obs_transform' in self.params:
assert((self.params['obs_transform'] == 'sigmoid') or \
(self.params['obs_transform'] == 'none'))
if self.params['obs_transform'] == 'sigmoid':
self.obs_transform = lambda x: T.nnet.sigmoid(x)
else:
self.obs_transform = lambda x: x
else:
self.obs_transform = lambda x: T.nnet.sigmoid(x)
if self.x_type == 'bernoulli':
self.obs_transform = lambda x: T.nnet.sigmoid(x)
# record the dimensions of various spaces relevant to this model
self.obs_dim = obs_dim
self.z_dim = z_dim
self.h_dim = h_dim
self.ir_steps = ir_steps
# record the symbolic variables that will provide inputs to the
# computation graph created to describe this MultiStageModel
self.x = x_in
self.batch_reps = T.lscalar()
# setup switching variable for changing between sampling/training
zero_ary = np.zeros((1,)).astype(theano.config.floatX)
self.train_switch = theano.shared(value=zero_ary, name='msm_train_switch')
self.set_train_switch(1.0)
# setup a weight for pulling priors over hi given si towards a
# shared global prior -- e.g. zero mean and unit variance.
self.kzg_weight = theano.shared(value=zero_ary, name='msm_kzg_weight')
self.set_kzg_weight(0.1)
# this weight balances l1 vs. l2 penalty on posterior KLds
self.l1l2_weight = theano.shared(value=zero_ary, name='msm_l1l2_weight')
self.set_l1l2_weight(1.0)
# this parameter controls dropout rate in the generator read function
self.drop_rate = theano.shared(value=zero_ary, name='msm_drop_rate')
self.set_drop_rate(0.0)
#############################
# Setup self.z and self.s0. #
#############################
print("Building MSM step 0...")
obs_scale = 0.0
if self.model_init_obs: # initialize obs state from generative model
obs_scale = 1.0
self.q_z_given_x = q_z_given_x.shared_param_clone(rng=rng, Xd=self.x)
self.z = self.q_z_given_x.output
self.p_s0_obs_given_z_obs = p_s0_obs_given_z_obs.shared_param_clone( \
rng=rng, Xd=self.z)
_s0_obs_model = self.p_s0_obs_given_z_obs.output_mean
_s0_obs_const = self.p_s0_obs_given_z_obs.mu_layers[-1].b
self.s0_obs = (obs_scale * _s0_obs_model) + \
((1.0 - obs_scale) * _s0_obs_const)
self.output_logvar = self.p_s0_obs_given_z_obs.sigma_layers[-1].b
self.bounded_logvar = 8.0 * T.tanh((1.0/8.0) * self.output_logvar)
###############################################################
# Setup the iterative refinement loop, starting from self.s0. #
###############################################################
self.p_hi_given_si = [] # holds p_hi_given_si for each i
self.p_sip1_given_si_hi = [] # holds p_sip1_given_si_hi for each i
self.q_hi_given_x_si = [] # holds q_hi_given_x_si for each i
self.si = [self.s0_obs] # holds si for each i
self.hi = [] # holds hi for each i
for i in range(self.ir_steps):
print("Building MSM step {0:d}...".format(i+1))
si_obs = self.si[i]
# get samples of next hi, conditioned on current si
self.p_hi_given_si.append( \
p_hi_given_si.shared_param_clone(rng=rng, \
Xd=self.obs_transform(si_obs)))
hi_p = self.p_hi_given_si[i].output
# now we build the model for variational hi given si
grad_ll = self.x - self.obs_transform(si_obs)
self.q_hi_given_x_si.append(\
q_hi_given_x_si.shared_param_clone(rng=rng, \
Xd=T.horizontal_stack( \
grad_ll, self.obs_transform(si_obs))))
hi_q = self.q_hi_given_x_si[i].output
# make hi samples that can be switched between hi_p and hi_q
self.hi.append( ((self.train_switch[0] * hi_q) + \
((1.0 - self.train_switch[0]) * hi_p)) )
# p_sip1_given_si_hi is conditioned on hi.
self.p_sip1_given_si_hi.append( \
p_sip1_given_si_hi.shared_param_clone(rng=rng, \
Xd=self.hi[i]))
# construct the update from si_obs to sip1_obs
sip1_obs = si_obs + self.p_sip1_given_si_hi[i].output_mean
# record the updated state of the generative process
self.si.append(sip1_obs)
######################################################################
# ALL SYMBOLIC VARS NEEDED FOR THE OBJECTIVE SHOULD NOW BE AVAILABLE #
######################################################################
# shared var learning rate for generator and inferencer
zero_ary = np.zeros((1,)).astype(theano.config.floatX)
self.lr_1 = theano.shared(value=zero_ary, name='msm_lr_1')
self.lr_2 = theano.shared(value=zero_ary, name='msm_lr_2')
# shared var momentum parameters for generator and inferencer
self.mom_1 = theano.shared(value=zero_ary, name='msm_mom_1')
self.mom_2 = theano.shared(value=zero_ary, name='msm_mom_2')
# init parameters for controlling learning dynamics
self.set_sgd_params()
# init shared var for weighting nll of data given posterior sample
self.lam_nll = theano.shared(value=zero_ary, name='msm_lam_nll')
self.set_lam_nll(lam_nll=1.0)
# init shared var for weighting prior kld against reconstruction
self.lam_kld_1 = theano.shared(value=zero_ary, name='msm_lam_kld_1')
self.lam_kld_2 = theano.shared(value=zero_ary, name='msm_lam_kld_2')
self.set_lam_kld(lam_kld_1=1.0, lam_kld_2=1.0)
# init shared var for controlling l2 regularization on params
self.lam_l2w = theano.shared(value=zero_ary, name='msm_lam_l2w')
self.set_lam_l2w(1e-5)
# Grab all of the "optimizable" parameters in "group 1"
self.group_1_params = []
self.group_1_params.extend(self.q_z_given_x.mlp_params)
self.group_1_params.extend(self.p_s0_obs_given_z_obs.mlp_params)
# Grab all of the "optimizable" parameters in "group 2"
self.group_2_params = []
for i in range(self.ir_steps):
self.group_2_params.extend(self.q_hi_given_x_si[i].mlp_params)
self.group_2_params.extend(self.p_hi_given_si[i].mlp_params)
self.group_2_params.extend(self.p_sip1_given_si_hi[i].mlp_params)
# Make a joint list of parameters group 1/2
self.joint_params = self.group_1_params + self.group_2_params
#################################
# CONSTRUCT THE KLD-BASED COSTS #
#################################
self.kld_z, self.kld_hi_cond, self.kld_hi_glob = \
self._construct_kld_costs()
self.kld_cost = (self.lam_kld_1[0] * T.mean(self.kld_z)) + \
(self.lam_kld_2[0] * (T.mean(self.kld_hi_cond) + \
(self.kzg_weight[0] * T.mean(self.kld_hi_glob))))
#################################
# CONSTRUCT THE NLL-BASED COSTS #
#################################
self.nll_costs = self._construct_nll_costs()
self.nll_cost = self.lam_nll[0] * T.mean(self.nll_costs)
########################################
# CONSTRUCT THE REST OF THE JOINT COST #
########################################
param_reg_cost = self._construct_reg_costs()
self.reg_cost = self.lam_l2w[0] * param_reg_cost
self.joint_cost = self.nll_cost + self.kld_cost + self.reg_cost
# Get the gradient of the joint cost for all optimizable parameters
print("Computing gradients of self.joint_cost...")
self.joint_grads = OrderedDict()
grad_list = T.grad(self.joint_cost, self.joint_params)
for i, p in enumerate(self.joint_params):
self.joint_grads[p] = grad_list[i]
# Construct the updates for the generator and inferencer networks
self.group_1_updates = get_adam_updates(params=self.group_1_params, \
grads=self.joint_grads, alpha=self.lr_1, \
beta1=self.mom_1, beta2=self.mom_2, \
mom2_init=1e-3, smoothing=1e-5, max_grad_norm=10.0)
self.group_2_updates = get_adam_updates(params=self.group_2_params, \
grads=self.joint_grads, alpha=self.lr_2, \
beta1=self.mom_1, beta2=self.mom_2, \
mom2_init=1e-3, smoothing=1e-5, max_grad_norm=10.0)
self.joint_updates = OrderedDict()
for k in self.group_1_updates:
self.joint_updates[k] = self.group_1_updates[k]
for k in self.group_2_updates:
self.joint_updates[k] = self.group_2_updates[k]
# Construct a function for jointly training the generator/inferencer
print("Compiling training function...")
self.train_joint = self._construct_train_joint()
self.compute_post_klds = self._construct_compute_post_klds()
self.compute_fe_terms = self._construct_compute_fe_terms()
self.sample_from_prior = self._construct_sample_from_prior()
# make easy access points for some interesting parameters
self.inf_1_weights = self.q_z_given_x.shared_layers[0].W
self.gen_1_weights = self.p_s0_obs_given_z_obs.mu_layers[-1].W
self.inf_2_weights = self.q_hi_given_x_si[0].shared_layers[0].W
self.gen_2_weights = self.p_sip1_given_si_hi[0].mu_layers[-1].W
self.gen_inf_weights = self.p_hi_given_si[0].shared_layers[0].W
return
def set_sgd_params(self, lr_1=0.01, lr_2=0.01, \
mom_1=0.9, mom_2=0.999):
"""
Set learning rate and momentum parameter for all updates.
"""
zero_ary = np.zeros((1,))
# set learning rates
new_lr_1 = zero_ary + lr_1
self.lr_1.set_value(new_lr_1.astype(theano.config.floatX))
new_lr_2 = zero_ary + lr_2
self.lr_2.set_value(new_lr_2.astype(theano.config.floatX))
# set momentums
new_mom_1 = zero_ary + mom_1
self.mom_1.set_value(new_mom_1.astype(theano.config.floatX))
new_mom_2 = zero_ary + mom_2
self.mom_2.set_value(new_mom_2.astype(theano.config.floatX))
return
def set_lam_nll(self, lam_nll=1.0):
"""
Set weight for controlling the influence of the data likelihood.
"""
zero_ary = np.zeros((1,))
new_lam = zero_ary + lam_nll
self.lam_nll.set_value(new_lam.astype(theano.config.floatX))
return
def set_lam_kld(self, lam_kld_1=1.0, lam_kld_2=1.0):
"""
Set the relative weight of prior KL-divergence vs. data likelihood.
"""
zero_ary = np.zeros((1,))
new_lam = zero_ary + lam_kld_1
self.lam_kld_1.set_value(new_lam.astype(theano.config.floatX))
new_lam = zero_ary + lam_kld_2
self.lam_kld_2.set_value(new_lam.astype(theano.config.floatX))
return
def set_lam_l2w(self, lam_l2w=1e-3):
"""
Set the relative strength of l2 regularization on network params.
"""
zero_ary = np.zeros((1,))
new_lam = zero_ary + lam_l2w
self.lam_l2w.set_value(new_lam.astype(theano.config.floatX))
return
def set_train_switch(self, switch_val=0.0):
"""
Set the switch for changing between training and sampling behavior.
"""
if (switch_val < 0.5):
switch_val = 0.0
else:
switch_val = 1.0
zero_ary = np.zeros((1,))
new_val = zero_ary + switch_val
new_val = new_val.astype(theano.config.floatX)
self.train_switch.set_value(new_val)
return
def set_kzg_weight(self, kzg_weight=0.2):
"""
Set the weight for the shaping penalty on the conditional priors over hi.
"""
assert(kzg_weight >= 0.0)
zero_ary = np.zeros((1,))
new_val = zero_ary + kzg_weight
new_val = new_val.astype(theano.config.floatX)
self.kzg_weight.set_value(new_val)
return
def set_l1l2_weight(self, l1l2_weight=1.0):
"""
Set the weight for shaping penalty on posterior KLds.
"""
assert((l1l2_weight >= 0.0) and (l1l2_weight <= 1.0))
zero_ary = np.zeros((1,))
new_val = zero_ary + l1l2_weight
new_val = new_val.astype(theano.config.floatX)
self.l1l2_weight.set_value(new_val)
return
def set_drop_rate(self, drop_rate=0.0):
"""
Set the dropout rate for generator read function.
"""
assert((drop_rate >= 0.0) and (drop_rate <= 1.0))
zero_ary = np.zeros((1,))
new_val = zero_ary + drop_rate
new_val = new_val.astype(theano.config.floatX)
self.drop_rate.set_value(new_val)
return
def set_input_bias(self, new_bias=None):
"""
Set the bias on the input layer of the q(z | x) inference network.
"""
new_bias = new_bias.astype(theano.config.floatX)
self.q_z_given_x.shared_layers[0].b_in.set_value(new_bias)
return
def set_obs_bias(self, new_obs_bias=None):
"""
Set initial bias on the obs part of state.
"""
assert(new_obs_bias.shape[0] == self.obs_dim)
new_bias = np.zeros((self.obs_dim,)) + new_obs_bias
new_bias = new_bias.astype(theano.config.floatX)
self.p_s0_obs_given_z_obs.mu_layers[-1].b.set_value(new_bias)
return
def _construct_nll_costs(self):
"""
Construct the negative log-likelihood part of free energy.
"""
# compute log-likelihood of x under the final state of the refinement loop
xh = self.obs_transform(self.si[-1])
if self.x_type == 'bernoulli':
ll_costs = log_prob_bernoulli(self.x, xh)
else:
ll_costs = log_prob_gaussian2(self.x, xh, \
log_vars=self.bounded_logvar)
nll_costs = -ll_costs
return nll_costs
def _construct_kld_costs(self):
"""
Construct the posterior KL-divergence part of cost to minimize.
"""
# construct KLd cost for the distributions over hi. the prior over
# hi is given by a distribution conditioned on si, which we estimate
# using self.p_hi_given_si[i]. the conditionals produced by each
# self.p_hi_given_si[i] will also be regularized towards a shared
# prior, e.g. a Gaussian with zero mean and unit variance.
kld_hi_conds = []
kld_hi_globs = []
for i in range(self.ir_steps):
kld_hi_cond = gaussian_kld( \
self.q_hi_given_x_si[i].output_mean, \
self.q_hi_given_x_si[i].output_logvar, \
self.p_hi_given_si[i].output_mean, \
self.p_hi_given_si[i].output_logvar)
kld_hi_glob = gaussian_kld( \
self.p_hi_given_si[i].output_mean, \
self.p_hi_given_si[i].output_logvar, \
0.0, 0.0)
kld_hi_cond_l1l2 = (self.l1l2_weight[0] * kld_hi_cond) + \
((1.0 - self.l1l2_weight[0]) * kld_hi_cond**2.0)
kld_hi_conds.append(T.sum(kld_hi_cond_l1l2, \
axis=1, keepdims=True))
kld_hi_globs.append(T.sum(kld_hi_glob**2.0, \
axis=1, keepdims=True))
# compute the batch-wise costs
kld_hi_cond = sum(kld_hi_conds)
kld_hi_glob = sum(kld_hi_globs)
# construct KLd cost for the distributions over z
kld_z_all = gaussian_kld(self.q_z_given_x.output_mean, \
self.q_z_given_x.output_logvar, \
0.0, 0.0)
kld_z_l1l2 = (self.l1l2_weight[0] * kld_z_all) + \
((1.0 - self.l1l2_weight[0]) * kld_z_all**2.0)
kld_z = T.sum(kld_z_l1l2, \
axis=1, keepdims=True)
return [kld_z, kld_hi_cond, kld_hi_glob]
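# Note on gaussian_kld (imported from LogPDFs, not shown here): the usage
# above assumes it returns the per-dimension KL divergence between diagonal
# Gaussians parameterized by (mean, log-variance), i.e. for each dimension
#     KL = 0.5 * (logvar_2 - logvar_1
#                 + (exp(logvar_1) + (mu_1 - mu_2)**2) / exp(logvar_2) - 1.0)
# so the T.sum(..., axis=1) calls above yield one KLd value per input row.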
def _construct_reg_costs(self):
"""
Construct the cost for low-level basic regularization. E.g. for
applying l2 regularization to the network activations and parameters.
"""
param_reg_cost = sum([T.sum(p**2.0) for p in self.joint_params])
return param_reg_cost
def _construct_train_joint(self):
"""
Construct theano function to train all networks jointly.
"""
# setup some symbolic variables for theano to deal with
x = T.matrix()
# collect the outputs to return from this function
outputs = [self.joint_cost, self.nll_cost, self.kld_cost, \
self.reg_cost]
# compile the theano function
func = theano.function(inputs=[ x, self.batch_reps ], \
outputs=outputs, \
givens={ self.x: x.repeat(self.batch_reps, axis=0) }, \
updates=self.joint_updates)
return func
def _construct_compute_fe_terms(self):
"""
Construct a function for computing terms in variational free energy.
"""
# setup some symbolic variables for theano to deal with
x_in = T.matrix()
# construct values to output
nll = self._construct_nll_costs()
kld = self.kld_z + self.kld_hi_cond
# compile theano function for a one-sample free-energy estimate
fe_term_sample = theano.function(inputs=[x_in], \
outputs=[nll, kld], givens={self.x: x_in})
# construct a wrapper function for multi-sample free-energy estimate
def fe_term_estimator(X, sample_count):
nll_sum = np.zeros((X.shape[0],))
kld_sum = np.zeros((X.shape[0],))
for i in range(sample_count):
result = fe_term_sample(X)
nll_sum += result[0].ravel()
kld_sum += result[1].ravel()
mean_nll = nll_sum / float(sample_count)
mean_kld = kld_sum / float(sample_count)
return [mean_nll, mean_kld]
return fe_term_estimator
def _construct_compute_post_klds(self):
"""
Construct theano function to compute the info about the variational
approximate posteriors for some inputs.
"""
# setup some symbolic variables for theano to deal with
x = T.matrix()
# construct symbolic expressions for the desired KLds
cond_klds = []
glob_klds = []
for i in range(self.ir_steps):
kld_hi_cond = gaussian_kld(self.q_hi_given_x_si[i].output_mean, \
self.q_hi_given_x_si[i].output_logvar, \
self.p_hi_given_si[i].output_mean, \
self.p_hi_given_si[i].output_logvar)
kld_hi_glob = gaussian_kld(self.p_hi_given_si[i].output_mean, \
self.p_hi_given_si[i].output_logvar, 0.0, 0.0)
cond_klds.append(kld_hi_cond)
glob_klds.append(kld_hi_glob)
# gather conditional and global klds for all IR steps
all_klds = cond_klds + glob_klds
# gather kld for the initialization step
kld_z_all = gaussian_kld(self.q_z_given_x.output_mean, \
self.q_z_given_x.output_logvar, \
0.0, 0.0)
all_klds.append(kld_z_all)
# compile theano function for computing all of the above KLd terms
kld_func = theano.function(inputs=[x], outputs=all_klds, \
givens={ self.x: x })
def post_kld_computer(X):
f_all_klds = kld_func(X)
f_kld_z = f_all_klds[-1]
f_kld_hi_cond = np.zeros(f_all_klds[0].shape)
f_kld_hi_glob = np.zeros(f_all_klds[0].shape)
for j in range(self.ir_steps):
f_kld_hi_cond += f_all_klds[j]
f_kld_hi_glob += f_all_klds[j + self.ir_steps]
return [f_kld_z, f_kld_hi_cond, f_kld_hi_glob]
return post_kld_computer
def _construct_sample_from_prior(self):
"""
Construct a function for drawing independent samples from the
distribution generated by this MultiStageModel. This function returns
the full sequence of "partially completed" examples.
"""
z_sym = T.matrix()
x_sym = T.matrix()
oputs = [self.obs_transform(s) for s in self.si]
sample_func = theano.function(inputs=[z_sym, x_sym], outputs=oputs, \
givens={ self.z: z_sym, \
self.x: T.zeros_like(x_sym) })
def prior_sampler(samp_count):
x_samps = np.zeros((samp_count, self.obs_dim))
x_samps = x_samps.astype(theano.config.floatX)
old_switch = self.train_switch.get_value(borrow=False)
# set model to generation mode
self.set_train_switch(switch_val=0.0)
z_samps = npr.randn(samp_count, self.z_dim)
z_samps = z_samps.astype(theano.config.floatX)
model_samps = sample_func(z_samps, x_samps)
# set model back to either training or generation mode
self.set_train_switch(switch_val=old_switch)
return model_samps
return prior_sampler
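# Usage sketch (hedged; `msm` and `X_batch` are assumed names, with X_batch a
# float32 matrix of shape (batch_size, obs_dim)):
#
#     msm.set_sgd_params(lr_1=2e-4, lr_2=2e-4, mom_1=0.9, mom_2=0.999)
#     joint, nll, kld, reg = msm.train_joint(X_batch, 1)  # batch_reps = 1
#     step_samples = msm.sample_from_prior(25)  # one array per refinement state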
if __name__=="__main__":
print("Hello world!")
##############
# EYE BUFFER #
##############
"""
HTML Widget classes
"""
from __future__ import absolute_import, unicode_literals
import copy
from itertools import chain
import warnings
from django.conf import settings
from django.forms.util import flatatt, to_current_timezone
from django.utils.datastructures import MultiValueDict, MergeDict
from django.utils.html import conditional_escape, format_html
from django.utils.translation import ugettext_lazy
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils import datetime_safe, formats, six
from django.utils.six.moves.urllib.parse import urljoin
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput',
'EmailInput', 'URLInput', 'NumberInput', 'PasswordInput',
'HiddenInput', 'MultipleHiddenInput', 'ClearableFileInput',
'FileInput', 'DateInput', 'DateTimeInput', 'TimeInput', 'Textarea', 'CheckboxInput',
'Select', 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget',
'SplitDateTimeWidget',
)
MEDIA_TYPES = ('css','js')
@python_2_unicode_compatible
class Media(object):
def __init__(self, media=None, **kwargs):
if media:
media_attrs = media.__dict__
else:
media_attrs = kwargs
self._css = {}
self._js = []
for name in MEDIA_TYPES:
getattr(self, 'add_' + name)(media_attrs.get(name, None))
# Any leftover attributes must be invalid.
# if media_attrs != {}:
# raise TypeError("'class Media' has invalid attribute(s): %s" % ','.join(media_attrs.keys()))
def __str__(self):
return self.render()
def render(self):
return mark_safe('\n'.join(chain(*[getattr(self, 'render_' + name)() for name in MEDIA_TYPES])))
def render_js(self):
return [format_html('<script type="text/javascript" src="{0}"></script>', self.absolute_path(path)) for path in self._js]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over items().
# We need to sort the keys, and iterate over the sorted list.
media = sorted(self._css.keys())
return chain(*[
[format_html('<link href="{0}" type="text/css" media="{1}" rel="stylesheet" />', self.absolute_path(path), medium)
for path in self._css[medium]]
for medium in media])
def absolute_path(self, path, prefix=None):
if path.startswith(('http://', 'https://', '/')):
return path
if prefix is None:
if settings.STATIC_URL is None:
# backwards compatibility
prefix = settings.MEDIA_URL
else:
prefix = settings.STATIC_URL
return urljoin(prefix, path)
def __getitem__(self, name):
"Returns a Media object that only contains media of the given type"
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, '_' + name)})
raise KeyError('Unknown media type "%s"' % name)
def add_js(self, data):
if data:
for path in data:
if path not in self._js:
self._js.append(path)
def add_css(self, data):
if data:
for medium, paths in data.items():
for path in paths:
if not self._css.get(medium) or path not in self._css[medium]:
self._css.setdefault(medium, []).append(path)
def __add__(self, other):
combined = Media()
for name in MEDIA_TYPES:
getattr(combined, 'add_' + name)(getattr(self, '_' + name, None))
getattr(combined, 'add_' + name)(getattr(other, '_' + name, None))
return combined
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.media
except AttributeError:
base = Media()
# Get the media definition for this class
definition = getattr(cls, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend == True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
else:
return Media(definition)
else:
return base
return property(_media)
class MediaDefiningClass(type):
"Metaclass for classes that can have media definitions"
def __new__(cls, name, bases, attrs):
new_class = super(MediaDefiningClass, cls).__new__(cls, name, bases,
attrs)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
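# Illustrative sketch (not part of this module; the file names are
# placeholders): a widget declaring static assets through an inner Media
# class, which MediaDefiningClass and media_property above expose as a
# `media` property.
#
#     class CalendarWidget(TextInput):
#         class Media:
#             css = {'all': ('pretty.css',)}
#             js = ('animations.js', 'actions.js')
#
# str(CalendarWidget().media) then renders one <link> tag and two <script>
# tags, with relative paths resolved against STATIC_URL by absolute_path().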
@python_2_unicode_compatible
class SubWidget(object):
"""
Some widgets are made of multiple HTML elements -- namely, RadioSelect.
This is a class that represents the "inner" HTML element of a widget.
"""
def __init__(self, parent_widget, name, value, attrs, choices):
self.parent_widget = parent_widget
self.name, self.value = name, value
self.attrs, self.choices = attrs, choices
def __str__(self):
args = [self.name, self.value, self.attrs]
if self.choices:
args.append(self.choices)
return self.parent_widget.render(*args)
class Widget(six.with_metaclass(MediaDefiningClass)):
is_hidden = False # Determines whether this corresponds to an <input type="hidden">.
needs_multipart_form = False # Determines whether this widget needs a multipart form
is_localized = False
is_required = False
def __init__(self, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
memo[id(self)] = obj
return obj
def subwidgets(self, name, value, attrs=None, choices=()):
"""
Yields all "subwidgets" of this widget. Used only by RadioSelect to
allow template access to individual <input type="radio"> buttons.
Arguments are the same as for render().
"""
yield SubWidget(self, name, value, attrs, choices)
def render(self, name, value, attrs=None):
"""
Returns this Widget rendered as HTML, as a Unicode string.
The 'value' given is not guaranteed to be valid input, so subclass
implementations should program defensively.
"""
raise NotImplementedError
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
attrs = dict(self.attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, returns the value
of this widget. Returns None if it's not provided.
"""
return data.get(name, None)
def id_for_label(self, id_):
"""
Returns the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Returns None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
class Input(Widget):
"""
Base class for all <input> widgets (except type='checkbox' and
type='radio', which are special).
"""
input_type = None # Subclasses must define this.
def _format_value(self, value):
if self.is_localized:
return formats.localize_input(value)
return value
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '':
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(self._format_value(value))
return format_html('<input{0} />', flatatt(final_attrs))
class TextInput(Input):
input_type = 'text'
def __init__(self, attrs=None):
if attrs is not None:
self.input_type = attrs.pop('type', self.input_type)
super(TextInput, self).__init__(attrs)
class NumberInput(TextInput):
input_type = 'number'
class EmailInput(TextInput):
input_type = 'email'
class URLInput(TextInput):
input_type = 'url'
class PasswordInput(TextInput):
input_type = 'password'
def __init__(self, attrs=None, render_value=False):
super(PasswordInput, self).__init__(attrs)
self.render_value = render_value
def render(self, name, value, attrs=None):
if not self.render_value: value=None
return super(PasswordInput, self).render(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
is_hidden = True
class MultipleHiddenInput(HiddenInput):
"""
A widget that handles <input type="hidden"> for fields that have a list
of values.
"""
def __init__(self, attrs=None, choices=()):
super(MultipleHiddenInput, self).__init__(attrs)
# choices can be any iterable
self.choices = choices
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
id_ = final_attrs.get('id', None)
inputs = []
for i, v in enumerate(value):
input_attrs = dict(value=force_text(v), **final_attrs)
if id_:
# An ID attribute was given. Add a numeric index as a suffix
# so that the inputs don't all have the same ID attribute.
input_attrs['id'] = '%s_%s' % (id_, i)
inputs.append(format_html('<input{0} />', flatatt(input_attrs)))
return mark_safe('\n'.join(inputs))
def value_from_datadict(self, data, files, name):
if isinstance(data, (MultiValueDict, MergeDict)):
return data.getlist(name)
return data.get(name, None)
class FileInput(Input):
input_type = 'file'
needs_multipart_form = True
def render(self, name, value, attrs=None):
return super(FileInput, self).render(name, None, attrs=attrs)
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
return files.get(name, None)
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
initial_text = ugettext_lazy('Currently')
input_text = ugettext_lazy('Change')
clear_checkbox_label = ugettext_lazy('Clear')
template_with_initial = '%(initial_text)s: %(initial)s %(clear_template)s<br />%(input_text)s: %(input)s'
template_with_clear = '%(clear)s <label for="%(clear_checkbox_id)s">%(clear_checkbox_label)s</label>'
url_markup_template = '<a href="{0}">{1}</a>'
def clear_checkbox_name(self, name):
"""
Given the name of the file input, return the name of the clear checkbox
input.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, return the HTML id for it.
"""
return name + '_id'
def render(self, name, value, attrs=None):
substitutions = {
'initial_text': self.initial_text,
'input_text': self.input_text,
'clear_template': '',
'clear_checkbox_label': self.clear_checkbox_label,
}
template = '%(input)s'
substitutions['input'] = super(ClearableFileInput, self).render(name, value, attrs)
if value and hasattr(value, "url"):
template = self.template_with_initial
substitutions['initial'] = format_html(self.url_markup_template,
value.url,
force_text(value))
if not self.is_required:
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
substitutions['clear_checkbox_name'] = conditional_escape(checkbox_name)
substitutions['clear_checkbox_id'] = conditional_escape(checkbox_id)
substitutions['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id})
substitutions['clear_template'] = self.template_with_clear % substitutions
return mark_safe(template % substitutions)
def value_from_datadict(self, data, files, name):
upload = super(ClearableFileInput, self).value_from_datadict(data, files, name)
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
class Textarea(Widget):
def __init__(self, attrs=None):
# The 'rows' and 'cols' attributes are required for HTML correctness.
default_attrs = {'cols': '40', 'rows': '10'}
if attrs:
default_attrs.update(attrs)
super(Textarea, self).__init__(default_attrs)
def render(self, name, value, attrs=None):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, name=name)
return format_html('<textarea{0}>\r\n{1}</textarea>',
flatatt(final_attrs),
force_text(value))
class DateInput(TextInput):
def __init__(self, attrs=None, format=None):
super(DateInput, self).__init__(attrs)
if format:
self.format = format
self.manual_format = True
else:
self.format = formats.get_format('DATE_INPUT_FORMATS')[0]
self.manual_format = False
def _format_value(self, value):
if self.is_localized and not self.manual_format:
return formats.localize_input(value)
elif hasattr(value, 'strftime'):
value = datetime_safe.new_date(value)
return value.strftime(self.format)
return value
class DateTimeInput(TextInput):
def __init__(self, attrs=None, format=None):
super(DateTimeInput, self).__init__(attrs)
if format:
self.format = format
self.manual_format = True
else:
self.format = formats.get_format('DATETIME_INPUT_FORMATS')[0]
self.manual_format = False
def _format_value(self, value):
if self.is_localized and not self.manual_format:
return formats.localize_input(value)
elif hasattr(value, 'strftime'):
value = datetime_safe.new_datetime(value)
return value.strftime(self.format)
return value
class TimeInput(TextInput):
def __init__(self, attrs=None, format=None):
super(TimeInput, self).__init__(attrs)
if format:
self.format = format
self.manual_format = True
else:
self.format = formats.get_format('TIME_INPUT_FORMATS')[0]
self.manual_format = False
def _format_value(self, value):
if self.is_localized and not self.manual_format:
return formats.localize_input(value)
elif hasattr(value, 'strftime'):
return value.strftime(self.format)
return value
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == '')
class CheckboxInput(Widget):
def __init__(self, attrs=None, check_test=None):
super(CheckboxInput, self).__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def render(self, name, value, attrs=None):
final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
if self.check_test(value):
final_attrs['checked'] = 'checked'
if not (value is True or value is False or value is None or value == ''):
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(value)
return format_html('<input{0} />', flatatt(final_attrs))
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {'true': True, 'false': False}
if isinstance(value, six.string_types):
value = values.get(value.lower(), value)
return bool(value)
class Select(Widget):
allow_multiple_selected = False
def __init__(self, attrs=None, choices=()):
super(Select, self).__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def render(self, name, value, attrs=None, choices=()):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select{0}>', flatatt(final_attrs))]
options = self.render_options(choices, [value])
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def render_option(self, selected_choices, option_value, option_label):
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
return format_html('<option value="{0}"{1}>{2}</option>',
option_value,
selected_html,
force_text(option_label))
def render_options(self, choices, selected_choices):
# Normalize to strings.
selected_choices = set(force_text(v) for v in selected_choices)
output = []
for option_value, option_label in chain(self.choices, choices):
if isinstance(option_label, (list, tuple)):
output.append(format_html('<optgroup label="{0}">', force_text(option_value)))
for option in option_label:
output.append(self.render_option(selected_choices, *option))
output.append('</optgroup>')
else:
output.append(self.render_option(selected_choices, option_value, option_label))
return '\n'.join(output)
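# Illustrative sketch (hedged; the choices and field name are placeholders):
# rendering a Select yields one <option> per choice, with the selected value
# marked by render_option() above.
#
#     w = Select(choices=[('1', 'One'), ('2', 'Two')])
#     w.render('number', '2')
#     # '<select name="number">\n<option value="1">One</option>\n'
#     # '<option value="2" selected="selected">Two</option>\n</select>'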
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = (('1', ugettext_lazy('Unknown')),
('2', ugettext_lazy('Yes')),
('3', ugettext_lazy('No')))
super(NullBooleanSelect, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
try:
value = {True: '2', False: '3', '2': '2', '3': '3'}[value]
except KeyError:
value = '1'
return super(NullBooleanSelect, self).render(name, value, attrs, choices)
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
return {'2': True,
True: True,
'True': True,
'3': False,
'False': False,
False: False}.get(value, None)
class SelectMultiple(Select):
allow_multiple_selected = True
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select multiple="multiple"{0}>', flatatt(final_attrs))]
options = self.render_options(choices, value)
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def value_from_datadict(self, data, files, name):
if isinstance(data, (MultiValueDict, MergeDict)):
return data.getlist(name)
return data.get(name, None)
@python_2_unicode_compatible
class ChoiceInput(SubWidget):
"""
An object used by ChoiceFieldRenderer that represents a single
<input type='$input_type'>.
"""
input_type = None # Subclasses must define this
def __init__(self, name, value, attrs, choice, index):
self.name = name
self.value = value
self.attrs = attrs
self.choice_value = force_text(choice[0])
self.choice_label = force_text(choice[1])
self.index = index
def __str__(self):
return self.render()
def render(self, name=None, value=None, attrs=None, choices=()):
name = name or self.name
value = value or self.value
attrs = attrs or self.attrs
if 'id' in self.attrs:
label_for = format_html(' for="{0}_{1}"', self.attrs['id'], self.index)
else:
label_for = ''
return format_html('<label{0}>{1} {2}</label>', label_for, self.tag(), self.choice_label)
def is_checked(self):
return self.value == self.choice_value
def tag(self):
if 'id' in self.attrs:
self.attrs['id'] = '%s_%s' % (self.attrs['id'], self.index)
final_attrs = dict(self.attrs, type=self.input_type, name=self.name, value=self.choice_value)
if self.is_checked():
final_attrs['checked'] = 'checked'
return format_html('<input{0} />', flatatt(final_attrs))
class RadioChoiceInput(ChoiceInput):
input_type = 'radio'
def __init__(self, *args, **kwargs):
super(RadioChoiceInput, self).__init__(*args, **kwargs)
self.value = force_text(self.value)
class RadioInput(RadioChoiceInput):
def __init__(self, *args, **kwargs):
msg = "RadioInput has been deprecated. Use RadioChoiceInput instead."
warnings.warn(msg, PendingDeprecationWarning, stacklevel=2)
super(RadioInput, self).__init__(*args, **kwargs)
class CheckboxChoiceInput(ChoiceInput):
input_type = 'checkbox'
def __init__(self, *args, **kwargs):
super(CheckboxChoiceInput, self).__init__(*args, **kwargs)
self.value = set(force_text(v) for v in self.value)
def is_checked(self):
return self.choice_value in self.value
@python_2_unicode_compatible
class ChoiceFieldRenderer(object):
"""
An object used by RadioSelect to enable customization of radio widgets.
"""
choice_input_class = None
def __init__(self, name, value, attrs, choices):
self.name = name
self.value = value
self.attrs = attrs
self.choices = choices
def __iter__(self):
for i, choice in enumerate(self.choices):
yield self.choice_input_class(self.name, self.value, self.attrs.copy(), choice, i)
def __getitem__(self, idx):
choice = self.choices[idx] # Let the IndexError propagate
return self.choice_input_class(self.name, self.value, self.attrs.copy(), choice, idx)
def __str__(self):
return self.render()
def render(self):
"""
Outputs a <ul> for this set of choice fields.
If an id was given to the field, it is applied to the <ul> (each
item in the list will get an id of `$id_$i`).
"""
id_ = self.attrs.get('id', None)
start_tag = format_html('<ul id="{0}">', id_) if id_ else '<ul>'
output = [start_tag]
for widget in self:
output.append(format_html('<li>{0}</li>', force_text(widget)))
output.append('</ul>')
return mark_safe('\n'.join(output))
class RadioFieldRenderer(ChoiceFieldRenderer):
choice_input_class = RadioChoiceInput
class CheckboxFieldRenderer(ChoiceFieldRenderer):
choice_input_class = CheckboxChoiceInput
class RendererMixin(object):
renderer = None # subclasses must define this
_empty_value = None
def __init__(self, *args, **kwargs):
# Override the default renderer if we were passed one.
renderer = kwargs.pop('renderer', None)
if renderer:
self.renderer = renderer
super(RendererMixin, self).__init__(*args, **kwargs)
def subwidgets(self, name, value, attrs=None, choices=()):
for widget in self.get_renderer(name, value, attrs, choices):
yield widget
def get_renderer(self, name, value, attrs=None, choices=()):
"""Returns an instance of the renderer."""
if value is None:
value = self._empty_value
final_attrs = self.build_attrs(attrs)
choices = list(chain(self.choices, choices))
return self.renderer(name, value, final_attrs, choices)
def render(self, name, value, attrs=None, choices=()):
return self.get_renderer(name, value, attrs, choices).render()
def id_for_label(self, id_):
# Widgets using this RendererMixin are made of a collection of
# subwidgets, each with their own <label>, and distinct ID.
# The IDs are made distinct by a "_X" suffix, where X is the zero-based
# index of the choice field. Thus, the label for the main widget should
# reference the first subwidget, hence the "_0" suffix.
if id_:
id_ += '_0'
return id_
class RadioSelect(RendererMixin, Select):
renderer = RadioFieldRenderer
_empty_value = ''
class CheckboxSelectMultiple(RendererMixin, SelectMultiple):
renderer = CheckboxFieldRenderer
_empty_value = []
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
Its render() method is different than other widgets', because it has to
figure out how to split a single value for display in multiple widgets.
The ``value`` argument can be one of two things:
* A list.
* A normal value (e.g., a string) that has been "compressed" from
a list of values.
In the second case -- i.e., if the value is NOT a list -- render() will
first "decompress" the value into a list before rendering it. It does so by
calling the decompress() method, which MultiWidget subclasses must
implement. This method takes a single "compressed" value and returns a
list.
When render() does its HTML rendering, each value in the list is rendered
with the corresponding widget -- the first value is rendered in the first
widget, the second value is rendered in the second widget, etc.
Subclasses may implement format_output(), which takes the list of rendered
widgets and returns a string of HTML that formats them any way you'd like.
You'll probably want to use this class with MultiValueField.
"""
def __init__(self, widgets, attrs=None):
self.widgets = [w() if isinstance(w, type) else w for w in widgets]
super(MultiWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
return mark_safe(self.format_output(output))
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += '_0'
return id_
def value_from_datadict(self, data, files, name):
return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def format_output(self, rendered_widgets):
"""
Given a list of rendered widgets (as strings), returns a Unicode string
representing the HTML for the whole lot.
This hook allows you to format the HTML design of the widgets, if
needed.
"""
return ''.join(rendered_widgets)
def decompress(self, value):
"""
Returns a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"Media for a multiwidget is the combination of all media of the subwidgets"
media = Media()
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super(MultiWidget, self).__deepcopy__(memo)
obj.widgets = copy.deepcopy(self.widgets)
return obj
@property
def needs_multipart_form(self):
return any(w.needs_multipart_form for w in self.widgets)
class SplitDateTimeWidget(MultiWidget):
"""
A Widget that splits datetime input into two <input type="text"> boxes.
"""
def __init__(self, attrs=None, date_format=None, time_format=None):
widgets = (DateInput(attrs=attrs, format=date_format),
TimeInput(attrs=attrs, format=time_format))
super(SplitDateTimeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
value = to_current_timezone(value)
return [value.date(), value.time().replace(microsecond=0)]
return [None, None]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
"""
A Widget that splits datetime input into two <input type="hidden"> inputs.
"""
is_hidden = True
def __init__(self, attrs=None, date_format=None, time_format=None):
super(SplitHiddenDateTimeWidget, self).__init__(attrs, date_format, time_format)
for widget in self.widgets:
widget.input_type = 'hidden'
widget.is_hidden = True
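# Illustrative sketch (not part of the original module): a minimal MultiWidget
# subclass that edits a "first last" name string in two text inputs, showing how
# decompress() splits a compressed value and how format_output() joins the
# rendered subwidgets. Assumes TextInput is defined earlier in this module, as
# it is for the date/time widgets above.
class _ExampleSplitNameWidget(MultiWidget):
    def __init__(self, attrs=None):
        widgets = (TextInput(attrs=attrs), TextInput(attrs=attrs))
        super(_ExampleSplitNameWidget, self).__init__(widgets, attrs)

    def decompress(self, value):
        # "Ada Lovelace" -> ['Ada', 'Lovelace']; an empty value becomes [None, None].
        if value:
            parts = value.split(' ', 1)
            return parts + [None] * (2 - len(parts))
        return [None, None]

    def format_output(self, rendered_widgets):
        return ' '.join(rendered_widgets)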
|
|
import numpy as np
import scipy.optimize as opt
import scipy.sparse as sps
import numpy.linalg as nla
import scipy.linalg as sla
import time
def nnlsm_blockpivot(A, B, is_input_prod=False, init=None):
""" Nonnegativity-constrained least squares with block principal pivoting method and column grouping
Solves min ||AX-B||_2^2 s.t. X >= 0 element-wise.
J. Kim and H. Park, Fast nonnegative matrix factorization: An active-set-like method and comparisons,
SIAM Journal on Scientific Computing,
vol. 33, no. 6, pp. 3261-3281, 2011.
Parameters
----------
A : numpy.array, shape (m,n)
B : numpy.array or scipy.sparse matrix, shape (m,k)
Optional Parameters
-------------------
is_input_prod : True/False. - If True, the A and B arguments are interpreted as
AtA and AtB, respectively. Default is False.
init: numpy.array, shape (n,k). - If provided, init is used as an initial value for the algorithm.
Default is None.
Returns
-------
X, (success, Y, num_cholesky, num_eq, num_backup)
X : numpy.array, shape (n,k) - solution
success : True/False - True if the solution is found. False if the algorithm did not terminate
due to numerical errors.
Y : numpy.array, shape (n,k) - Y = A.T * A * X - A.T * B
num_cholesky : int - the number of Cholesky factorizations needed
num_eq : int - the number of linear systems of equations needed to be solved
num_backup: int - the number of appearances of the back-up rule. See SISC paper for details.
"""
if is_input_prod:
AtA = A
AtB = B
else:
AtA = A.T.dot(A)
if sps.issparse(B):
AtB = B.T.dot(A)
AtB = AtB.T
else:
AtB = A.T.dot(B)
(n, k) = AtB.shape
MAX_ITER = n * 5
if init is not None:
PassSet = init > 0
X, num_cholesky, num_eq = normal_eq_comb(AtA, AtB, PassSet)
Y = AtA.dot(X) - AtB
else:
X = np.zeros([n, k])
Y = -AtB
PassSet = np.zeros([n, k], dtype=bool)
num_cholesky = 0
num_eq = 0
p_bar = 3
p_vec = np.zeros([k])
p_vec[:] = p_bar
ninf_vec = np.zeros([k])
ninf_vec[:] = n + 1
not_opt_set = np.logical_and(Y < 0, ~PassSet)
infea_set = np.logical_and(X < 0, PassSet)
not_good = np.sum(not_opt_set, axis=0) + np.sum(infea_set, axis=0)
not_opt_colset = not_good > 0
not_opt_cols = not_opt_colset.nonzero()[0]
big_iter = 0
num_backup = 0
success = True
while not_opt_cols.size > 0:
big_iter += 1
if MAX_ITER > 0 and big_iter > MAX_ITER:
success = False
break
cols_set1 = np.logical_and(not_opt_colset, not_good < ninf_vec)
temp1 = np.logical_and(not_opt_colset, not_good >= ninf_vec)
temp2 = p_vec >= 1
cols_set2 = np.logical_and(temp1, temp2)
cols_set3 = np.logical_and(temp1, ~temp2)
cols1 = cols_set1.nonzero()[0]
cols2 = cols_set2.nonzero()[0]
cols3 = cols_set3.nonzero()[0]
if cols1.size > 0:
p_vec[cols1] = p_bar
ninf_vec[cols1] = not_good[cols1]
true_set = np.logical_and(not_opt_set, np.tile(cols_set1, (n, 1)))
false_set = np.logical_and(infea_set, np.tile(cols_set1, (n, 1)))
PassSet[true_set] = True
PassSet[false_set] = False
if cols2.size > 0:
p_vec[cols2] = p_vec[cols2] - 1
temp_tile = np.tile(cols_set2, (n, 1))
true_set = np.logical_and(not_opt_set, temp_tile)
false_set = np.logical_and(infea_set, temp_tile)
PassSet[true_set] = True
PassSet[false_set] = False
if cols3.size > 0:
for col in cols3:
candi_set = np.logical_or(
not_opt_set[:, col], infea_set[:, col])
to_change = np.max(candi_set.nonzero()[0])
PassSet[to_change, col] = ~PassSet[to_change, col]
num_backup += 1
(X[:, not_opt_cols], temp_cholesky, temp_eq) = normal_eq_comb(
AtA, AtB[:, not_opt_cols], PassSet[:, not_opt_cols])
num_cholesky += temp_cholesky
num_eq += temp_eq
X[abs(X) < 1e-12] = 0
Y[:, not_opt_cols] = AtA.dot(X[:, not_opt_cols]) - AtB[:, not_opt_cols]
Y[abs(Y) < 1e-12] = 0
not_opt_mask = np.tile(not_opt_colset, (n, 1))
not_opt_set = np.logical_and(
np.logical_and(not_opt_mask, Y < 0), ~PassSet)
infea_set = np.logical_and(
np.logical_and(not_opt_mask, X < 0), PassSet)
not_good = np.sum(not_opt_set, axis=0) + np.sum(infea_set, axis=0)
not_opt_colset = not_good > 0
not_opt_cols = not_opt_colset.nonzero()[0]
return X, (success, Y, num_cholesky, num_eq, num_backup)
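# Illustrative usage sketch (not part of the original module). It builds a small
# random problem with a known sparse nonnegative solution and solves it with the
# block pivoting routine above; see _test_nnlsm() below for a fuller comparison.
# The sizes m, n, k are arbitrary.
def _example_nnlsm_blockpivot(m=50, n=8, k=12):
    A = np.random.rand(m, n)
    X_true = np.random.rand(n, k)
    X_true[np.random.rand(n, k) < 0.5] = 0  # sparse nonnegative target
    B = A.dot(X_true)
    X, (success, Y, num_cholesky, num_eq, num_backup) = nnlsm_blockpivot(A, B)
    # success is True when the iteration terminated normally; X >= 0 elementwise.
    return success, nla.norm(X - X_true) / nla.norm(X_true)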
def nnlsm_activeset(A, B, overwrite=False, is_input_prod=False, init=None):
""" Nonnegativity-constrained least squares with active-set method and column grouping
Solves min ||AX-B||_2^2 s.t. X >= 0 element-wise.
The algorithm of this routine is close to the one presented in the following paper,
but differs in how the inner and outer loops are organised:
M. H. Van Benthem and M. R. Keenan, J. Chemometrics 2004; 18: 441-450
Parameters
----------
A : numpy.array, shape (m,n)
B : numpy.array or scipy.sparse matrix, shape (m,k)
Optional Parameters
-------------------
overwrite : True/False. - If True, the unconstrained solution of the normal equations is
computed first and its positive entries are used to initialize the active set. Default is False.
is_input_prod : True/False. - If True, the A and B arguments are interpreted as
AtA and AtB, respectively. Default is False.
init: numpy.array, shape (n,k). - If provided, init is used as an initial value for the algorithm.
Default is None.
Returns
-------
X, (success, Y, num_cholesky, num_eq)
X : numpy.array, shape (n,k) - solution
success : True/False - True if the solution is found. False if the algorithm did not terminate
due to numerical errors.
Y : numpy.array, shape (n,k) - Y = A.T * A * X - A.T * B
num_cholesky : int - the number of Cholesky factorizations needed
num_eq : int - the number of linear systems of equations needed to be solved
"""
if is_input_prod:
AtA = A
AtB = B
else:
AtA = A.T.dot(A)
if sps.issparse(B):
AtB = B.T.dot(A)
AtB = AtB.T
else:
AtB = A.T.dot(B)
(n, k) = AtB.shape
MAX_ITER = n * 5
num_cholesky = 0
num_eq = 0
not_opt_set = np.ones([k], dtype=bool)
if overwrite:
X, num_cholesky, num_eq = normal_eq_comb(AtA, AtB)
PassSet = X > 0
not_opt_set = np.any(X < 0, axis=0)
elif init is not None:
X = init
X[X < 0] = 0
PassSet = X > 0
else:
X = np.zeros([n, k])
PassSet = np.zeros([n, k], dtype=bool)
Y = np.zeros([n, k])
opt_cols = (~not_opt_set).nonzero()[0]
not_opt_cols = not_opt_set.nonzero()[0]
Y[:, opt_cols] = AtA.dot(X[:, opt_cols]) - AtB[:, opt_cols]
big_iter = 0
success = True
while not_opt_cols.size > 0:
big_iter += 1
if MAX_ITER > 0 and big_iter > MAX_ITER:
success = False
break
(Z, temp_cholesky, temp_eq) = normal_eq_comb(
AtA, AtB[:, not_opt_cols], PassSet[:, not_opt_cols])
num_cholesky += temp_cholesky
num_eq += temp_eq
Z[abs(Z) < 1e-12] = 0
infea_subset = Z < 0
temp = np.any(infea_subset, axis=0)
infea_subcols = temp.nonzero()[0]
fea_subcols = (~temp).nonzero()[0]
if infea_subcols.size > 0:
infea_cols = not_opt_cols[infea_subcols]
(ix0, ix1_subsub) = infea_subset[:, infea_subcols].nonzero()
ix1_sub = infea_subcols[ix1_subsub]
ix1 = not_opt_cols[ix1_sub]
X_infea = X[(ix0, ix1)]
alpha = np.zeros([n, len(infea_subcols)])
alpha[:] = np.inf
alpha[(ix0, ix1_subsub)] = X_infea / (X_infea - Z[(ix0, ix1_sub)])
min_ix = np.argmin(alpha, axis=0)
min_vals = alpha[(min_ix, xrange(0, alpha.shape[1]))]
X[:, infea_cols] = X[:, infea_cols] + \
(Z[:, infea_subcols] - X[:, infea_cols]) * min_vals
X[(min_ix, infea_cols)] = 0
PassSet[(min_ix, infea_cols)] = False
elif fea_subcols.size > 0:
fea_cols = not_opt_cols[fea_subcols]
X[:, fea_cols] = Z[:, fea_subcols]
Y[:, fea_cols] = AtA.dot(X[:, fea_cols]) - AtB[:, fea_cols]
Y[abs(Y) < 1e-12] = 0
not_opt_subset = np.logical_and(
Y[:, fea_cols] < 0, ~PassSet[:, fea_cols])
new_opt_cols = fea_cols[np.all(~not_opt_subset, axis=0)]
update_cols = fea_cols[np.any(not_opt_subset, axis=0)]
if update_cols.size > 0:
val = Y[:, update_cols] * ~PassSet[:, update_cols]
min_ix = np.argmin(val, axis=0)
PassSet[(min_ix, update_cols)] = True
not_opt_set[new_opt_cols] = False
not_opt_cols = not_opt_set.nonzero()[0]
return X, (success, Y, num_cholesky, num_eq)
def normal_eq_comb(AtA, AtB, PassSet=None):
""" Solve many systems of linear equations using combinatorial grouping.
M. H. Van Benthem and M. R. Keenan, J. Chemometrics 2004; 18: 441-450
Parameters
----------
AtA : numpy.array, shape (n,n)
AtB : numpy.array, shape (n,k)
PassSet : numpy.array of True/False, shape (n,k), optional
For each column, marks the passive set of variables that are solved for; variables
outside the set are fixed at zero. If None or all True, the unconstrained normal
equations are solved for every column.
Returns
-------
(Z,num_cholesky,num_eq)
Z : numpy.array, shape (n,k) - solution
num_cholesky : int - the number of unique cholesky decompositions done
num_eq: int - the number of systems of linear equations solved
"""
num_cholesky = 0
num_eq = 0
if AtB.size == 0:
Z = np.zeros([])
elif PassSet is None or np.all(PassSet):
Z = nla.solve(AtA, AtB)
num_cholesky = 1
num_eq = AtB.shape[1]
else:
Z = np.zeros(AtB.shape)
if PassSet.shape[1] == 1:
if np.any(PassSet):
cols = PassSet.nonzero()[0]
Z[cols] = nla.solve(AtA[np.ix_(cols, cols)], AtB[cols])
num_cholesky = 1
num_eq = 1
else:
#
# Both _column_group_loop() and _column_group_recursive() work well.
# Based on preliminary testing,
# _column_group_loop() is slightly faster for tiny k(<10), but
# _column_group_recursive() is faster for large k's.
#
grps = _column_group_recursive(PassSet)
for gr in grps:
cols = PassSet[:, gr[0]].nonzero()[0]
if cols.size > 0:
ix1 = np.ix_(cols, gr)
ix2 = np.ix_(cols, cols)
#
# scipy.linalg.cho_solve can be used instead of numpy.linalg.solve.
# For small n(<200), numpy.linalg.solve appears faster, whereas
# for large n(>500), scipy.linalg.cho_solve appears faster.
# Usage example of scipy.linalg.cho_solve:
# Z[ix1] = sla.cho_solve(sla.cho_factor(AtA[ix2]),AtB[ix1])
#
Z[ix1] = nla.solve(AtA[ix2], AtB[ix1])
num_cholesky += 1
num_eq += len(gr)
return Z, num_cholesky, num_eq
def _column_group_loop(B):
""" Given a binary matrix, find groups of the same columns
with a looping strategy
Parameters
----------
B : numpy.array, True/False in each element
Returns
-------
A list of arrays - each array contains indices of columns that are the same.
"""
initial = [np.arange(0, B.shape[1])]
before = initial
after = []
for i in xrange(0, B.shape[0]):
all_ones = True
vec = B[i]
for cols in before:
if len(cols) == 1:
after.append(cols)
else:
all_ones = False
subvec = vec[cols]
trues = subvec.nonzero()[0]
falses = (~subvec).nonzero()[0]
if trues.size > 0:
after.append(cols[trues])
if falses.size > 0:
after.append(cols[falses])
before = after
after = []
if all_ones:
break
return before
def _column_group_recursive(B):
""" Given a binary matrix, find groups of the same columns
with a recursive strategy
Parameters
----------
B : numpy.array, True/False in each element
Returns
-------
A list of arrays - each array contains indices of columns that are the same.
"""
initial = np.arange(0, B.shape[1])
return [a for a in column_group_sub(B, 0, initial) if len(a) > 0]
def column_group_sub(B, i, cols):
vec = B[i][cols]
if len(cols) <= 1:
return [cols]
if i == (B.shape[0] - 1):
col_trues = cols[vec.nonzero()[0]]
col_falses = cols[(~vec).nonzero()[0]]
return [col_trues, col_falses]
else:
col_trues = cols[vec.nonzero()[0]]
col_falses = cols[(~vec).nonzero()[0]]
after = column_group_sub(B, i + 1, col_trues)
after.extend(column_group_sub(B, i + 1, col_falses))
return after
def _test_column_grouping(m=10, n=5000, num_repeat=5, verbose=False):
print '\nTesting column_grouping ...'
A = np.array([[True, False, False, False, False],
[True, True, False, True, True]])
grps1 = _column_group_loop(A)
grps2 = _column_group_recursive(A)
grps3 = [np.array([0]),
np.array([1, 3, 4]),
np.array([2])]
print 'OK' if all([np.array_equal(a, b) for (a, b) in zip(grps1, grps2)]) else 'Fail'
print 'OK' if all([np.array_equal(a, b) for (a, b) in zip(grps1, grps3)]) else 'Fail'
for i in xrange(0, num_repeat):
A = np.random.rand(m, n)
B = A > 0.5
start = time.time()
grps1 = _column_group_loop(B)
elapsed_loop = time.time() - start
start = time.time()
grps2 = _column_group_recursive(B)
elapsed_recursive = time.time() - start
if verbose:
print 'Loop :', elapsed_loop
print 'Recursive:', elapsed_recursive
print 'OK' if all([np.array_equal(a, b) for (a, b) in zip(grps1, grps2)]) else 'Fail'
# sorted_idx = np.concatenate(grps)
# print B
# print sorted_idx
# print B[:,sorted_idx]
return
def _test_normal_eq_comb(m=10, k=3, num_repeat=5):
print '\nTesting normal_eq_comb() ...'
for i in xrange(0, num_repeat):
A = np.random.rand(2 * m, m)
X = np.random.rand(m, k)
C = (np.random.rand(m, k) > 0.5)
X[~C] = 0
B = A.dot(X)
B = A.T.dot(B)
A = A.T.dot(A)
Sol, a, b = normal_eq_comb(A, B, C)
print 'OK' if np.allclose(X, Sol) else 'Fail'
return
def _test_nnlsm():
print '\nTesting nnls routines ...'
m = 100
n = 10
k = 200
rep = 5
for r in xrange(0, rep):
A = np.random.rand(m, n)
X_org = np.random.rand(n, k)
X_org[np.random.rand(n, k) < 0.5] = 0
B = A.dot(X_org)
# B = np.random.rand(m,k)
# A = np.random.rand(m,n/2)
# A = np.concatenate((A,A),axis=1)
# A = A + np.random.rand(m,n)*0.01
# B = np.random.rand(m,k)
import time
start = time.time()
C1, info = nnlsm_blockpivot(A, B)
elapsed2 = time.time() - start
rel_norm2 = nla.norm(C1 - X_org) / nla.norm(X_org)
print 'nnlsm_blockpivot: ', 'OK ' if info[0] else 'Fail',\
'elapsed:{0:.4f} error:{1:.4e}'.format(elapsed2, rel_norm2)
start = time.time()
C2, info = nnlsm_activeset(A, B)
num_backup = 0
elapsed1 = time.time() - start
rel_norm1 = nla.norm(C2 - X_org) / nla.norm(X_org)
print 'nnlsm_activeset: ', 'OK ' if info[0] else 'Fail',\
'elapsed:{0:.4f} error:{1:.4e}'.format(elapsed1, rel_norm1)
import scipy.optimize as opt
start = time.time()
C3 = np.zeros([n, k])
for i in xrange(0, k):
res = opt.nnls(A, B[:, i])
C3[:, i] = res[0]
elapsed3 = time.time() - start
rel_norm3 = nla.norm(C3 - X_org) / nla.norm(X_org)
print 'scipy.optimize.nnls: ', 'OK ',\
'elapsed:{0:.4f} error:{1:.4e}'.format(elapsed3, rel_norm3)
if num_backup > 0:
break
if rel_norm1 > 10e-5 or rel_norm2 > 10e-5 or rel_norm3 > 10e-5:
break
print ''
if __name__ == '__main__':
_test_column_grouping()
_test_normal_eq_comb()
_test_nnlsm()
|
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle.proto.ParameterConfig_pb2 import ParameterConfig
from collections import OrderedDict
import paddle.trainer.config_parser as cp
import struct
import tarfile
import cStringIO
from topology import Topology
__all__ = ['Parameters', 'create']
def create(layers):
"""
Create parameter pool by topology.
:param layers:
:return:
"""
topology = Topology(layers)
pool = Parameters()
initializers = cp.g_parameter_initializer_map
for param in topology.proto().parameters:
pool.__append_config__(param)
if param.name in initializers:
pool[param.name] = initializers[param.name](param.name)
return pool
class Parameters(object):
"""
`Parameters` manages all the learnable parameters in a neural network.
It stores parameters' information in an OrderedDict. The key is
the name of a parameter, and the value is a parameter's configuration (in
protobuf format), such as initialization mean and std, its size, whether it
is a static parameter, and so on.
:param __param_conf__: store the configurations of learnable parameters in
the network in an OrderedDict. Parameter is added one by one into the
dict by following their created order in the network: parameters of
the previous layers in a network are created first. You can visit the
parameters from bottom to top by iterating over this dict.
:type __param_conf__: OrderedDict
:param __gradient_machines__: all of the parameters in a neural network are
appended to a PaddlePaddle gradient machine, which is used internally to
copy parameter values between C++ and Python end.
:type __gradient_machines__: list
:param __tmp_params__: a dict to store dummy parameters if no
__gradient_machines__ is appended to `Parameters`.
:type __tmp_params__: dict
Basic usage is
.. code-block:: python
data = paddle.layers.data(...)
...
out = paddle.layers.fc(...)
parameters = paddle.parameters.create(out)
parameter_names = parameters.names()
fc_mat = parameters.get('fc')
print fc_mat
"""
def __init__(self):
self.__param_conf__ = OrderedDict()
self.__gradient_machines__ = []
self.__tmp_params__ = dict()
def __append_config__(self, param_conf):
"""
Append a parameter configuration. It is used to initialize Parameters and
should be invoked only in paddle.parameters.create
:param param_conf: The parameter configuration in protobuf
:type param_conf: ParameterConfig
:return: Nothing
"""
if not isinstance(param_conf, ParameterConfig):
raise ValueError("param_conf must be paddle.proto.ParameterConfig")
if param_conf.name in self.__param_conf__:
raise ValueError("duplicated parameter %s" % param_conf.name)
self.__param_conf__[param_conf.name] = param_conf
def update_param_conf(self, model_config):
for p in model_config.parameters:
self.__param_conf__[p.name] = p
def keys(self):
"""
keys are the names of each parameter.
:return: list of parameter name
:rtype: list
"""
return self.__param_conf__.keys()
def names(self):
"""
names of each parameter.
:return: list of parameter name
:rtype: list
"""
return self.keys()
def has_key(self, key):
"""
has_key returns True if there is a parameter whose name equals ``key``.
:param key: Parameter name
:type key: basestring
:return: True if contains such key
"""
return key in self.__param_conf__.keys()
def __iter__(self):
"""
Return an iterator of parameter name. It is used by `for loop`
or `in` operator.
.. code-block:: python
parameters = paddle.parameters.create(...)
if "fc_param" in parameters:
print 'OK'
:return: an iterator of parameter name
:rtype: iterator
"""
return iter(self.__param_conf__)
def __getter_inner(self, key, param_type):
import py_paddle.swig_paddle as api
shape = self.get_shape(key)
if len(self.__gradient_machines__) == 0:
# create new parameter in python numpy.
if key in self.__tmp_params__:
return self.__tmp_params__[key]
else:
return np.ndarray(shape=shape, dtype=np.float32)
else:
for each_gradient_machine in self.__gradient_machines__:
param = __get_parameter_in_gradient_machine__(
each_gradient_machine, key)
# to simplify the implementation for now, we always copy from C++
assert isinstance(param, api.Parameter)
val = param.getBuf(param_type)
assert isinstance(val, api.Vector)
val = val.copyToNumpyArray()
return val
# else continue
raise RuntimeError("Unexpected branch")
def __getitem__(self, key):
"""
Get parameter by parameter name. It uses Python dict syntax.
:note: It will always copy the parameter from C++ side.
:param key: Parameter name
:type key: basestring
:return: parameter value
:rtype: np.ndarray
"""
import py_paddle.swig_paddle as api
return self.__getter_inner(key, api.PARAMETER_VALUE)
def get_shape(self, key):
"""
get shape of the parameter.
:param key: parameter name
:type key: basestring
:return: parameter's shape
:rtype: tuple
"""
if not isinstance(key, basestring):
raise ValueError("parameter name should be string")
if not self.has_key(key):
raise ValueError("No such parameter %s" % key)
conf = self.__param_conf__[key]
dims = conf.dims if conf.dims else (1, conf.size)
return tuple(map(int, dims))
def __setitem__(self, key, value):
"""
Set parameter by parameter name & value. It uses Python dict syntax.
:note: It will always copy the parameter to C++ side.
:param key: Parameter name
:type key: basestring
:param value: Parameter matrix.
:type value: np.ndarray
:return: Nothing
"""
if not isinstance(value, np.ndarray):
raise ValueError("Must return ndarray")
value = value.astype(dtype=np.float32)
shape = self.get_shape(key)
if value.shape != shape:
raise ValueError("Value shape mismatch, expect %s, should %s" %
(shape, value.shape))
if len(self.__gradient_machines__) == 0:
self.__tmp_params__[key] = value
else:
for each_gradient_machine in self.__gradient_machines__:
__copy_parameter_to_gradient_machine__(each_gradient_machine,
key, value)
def get(self, parameter_name):
"""
Get parameter by parameter name.
:note: It will always copy the parameter from C++ side.
:param parameter_name: parameter name
:type parameter_name: basestring
:return: The parameter matrix.
:rtype: np.ndarray
"""
return self.__getitem__(key=parameter_name)
def get_grad(self, key):
"""
Get gradient by parameter name.
:note: It will always copy the parameter from C++ side.
:param key: parameter name
:type key: basestring
:return: The gradient matrix.
:rtype: np.ndarray
"""
import py_paddle.swig_paddle as api
if self.__param_conf__[key].is_static:
return np.zeros(self.__param_conf__[key].size, dtype=np.float32)
return self.__getter_inner(key, api.PARAMETER_GRADIENT)
def set(self, parameter_name, value):
"""
Set parameter by parameter name & matrix.
:param parameter_name: parameter name
:type parameter_name: basestring
:param value: parameter matrix
:type value: np.ndarray
:return: Nothing.
"""
self.__setitem__(key=parameter_name, value=value)
def append_gradient_machine(self, gradient_machine):
"""
append gradient machine to parameters. This method is used internally in
Trainer.train.
:param gradient_machine: PaddlePaddle C++ GradientMachine object.
:type gradient_machine: api.GradientMachine
:return:
"""
import py_paddle.swig_paddle as api
if not isinstance(gradient_machine, api.GradientMachine):
raise ValueError("gradient_machine should be api.GradientMachine")
if len(self.__tmp_params__) != 0:
for name, val in self.__tmp_params__.iteritems():
try:
__copy_parameter_to_gradient_machine__(gradient_machine,
name, val)
except ValueError:
# If no such parameter in gradient machine, then don't copy
pass
self.__gradient_machines__.append(gradient_machine)
def serialize(self, name, f):
"""
:param name:
:param f:
:type f: file
:return:
"""
param = self.get(name)
size = reduce(lambda a, b: a * b, param.shape)
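# Header layout, as assumed from the read side in deserialize(): a 32-bit
# version (0), a 32-bit value width in bytes (4 for float32) and a 64-bit
# element count, i.e. the 16 bytes skipped by f.read(16) when loading.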
f.write(struct.pack("IIQ", 0, 4, size))
param = param.astype(np.float32)
s = param.tostring()
wrote_size = 0
buf = buffer(s, wrote_size, 65535)
while buf: # f.write crashes with a big data blob.
f.write(buf)
wrote_size += 65535
buf = buffer(s, wrote_size, 65535)
def deserialize(self, name, f):
"""
:param name:
:param f:
:type f: file
:return:
"""
f.read(16) # header
arr = np.frombuffer(f.read(), dtype=np.float32)
self.set(name, arr.reshape(self.get_shape(name)))
def to_tar(self, f):
"""
Save parameters to a tar file.
WARNING: You should use `paddle.v2.trainer.SGD.save_parameter_to_tar(f)`
to save parameters most of the time. Otherwise, some settings such
as model average will not take effect.
:param f:
:type f: file
:return:
"""
tar = tarfile.TarFile(fileobj=f, mode='w')
for nm in self.names():
buf = cStringIO.StringIO()
self.serialize(nm, buf)
tarinfo = tarfile.TarInfo(name=nm)
buf.seek(0)
tarinfo.size = len(buf.getvalue())
tar.addfile(tarinfo, buf)
conf = self.__param_conf__[nm]
confStr = conf.SerializeToString()
tarinfo = tarfile.TarInfo(name="%s.protobuf" % nm)
tarinfo.size = len(confStr)
buf = cStringIO.StringIO(confStr)
buf.seek(0)
tar.addfile(tarinfo, fileobj=buf)
@staticmethod
def from_tar(f):
"""
Create a `Parameters` object from the given file. The returned
`Parameters` only contains the parameters stored in this file.
It assumes the parameters in the file match those of the defined
network, so it can be used, for example, for inference.
:param f: the initialized model file.
:type f: tar file
:return: A Parameters object.
:rtype: Parameters.
"""
params = Parameters()
tar = tarfile.TarFile(fileobj=f, mode='r')
for finfo in tar:
assert isinstance(finfo, tarfile.TarInfo)
if finfo.name.endswith('.protobuf'):
f = tar.extractfile(finfo)
conf = ParameterConfig()
conf.ParseFromString(f.read())
params.__append_config__(conf)
for param_name in params.names():
f = tar.extractfile(param_name)
params.deserialize(param_name, f)
return params
def init_from_tar(self, f, exclude_params=[]):
"""
Different from `from_tar`, this interface can be used to
init partial network parameters from another saved model.
:param f: the initialized model file.
:type f: tar file
:param exclude_params: the names of parameters that should
not be initialized from the model file.
:type exclude_params: list of strings
:return: Nothing.
"""
tar_param = Parameters.from_tar(f)
for pname in tar_param.names():
if pname in self.names() and pname not in exclude_params:
self.set(pname, tar_param.get(pname))
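# Illustrative sketch (not part of the original module); the file name and the
# parameter name in exclude_params below are hypothetical:
#   with open('pretrained.tar') as f:
#       parameters.init_from_tar(f, exclude_params=['fc_final.w0'])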
def __get_parameter_in_gradient_machine__(gradient_machine, name):
"""
:param gradient_machine:
:type gradient_machine: api.GradientMachine
:param name:
:return:
:rtype: api.Parameter
"""
params = filter(lambda p: p.getName() == name,
gradient_machine.getParameters())
if len(params) == 0:
raise ValueError("No such parameter")
elif len(params) > 1:
raise ValueError("Unexpected branch")
else:
return params[0]
def __copy_parameter_to_gradient_machine__(gradient_machine, name, arr):
"""
Copy a python ndarray into the gradient machine.
:param gradient_machine:
:type gradient_machine: api.GradientMachine
:param name:
:param arr:
:type arr: np.ndarray
:return:
:rtype: api.Parameter
"""
import py_paddle.swig_paddle as api
param = __get_parameter_in_gradient_machine__(gradient_machine, name)
vec = param.getBuf(api.PARAMETER_VALUE)
assert isinstance(vec, api.Vector)
vec.copyFromNumpyArray(arr.flatten())
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Registry for the applications
"""
import glob
import logging
import os
try:
import json
except ImportError:
import simplejson as json
import common
LOG = logging.getLogger(__name__)
class AppRegistry(object):
"""
Represents a registry.
"""
def __init__(self):
"""Open the existing registry"""
self._reg_path = os.path.join(common.INSTALL_ROOT, 'app.reg')
self._initialized = False
self._apps = { } # Map of name -> HueApp
self._open()
def _open(self):
"""Open the registry file. May raise OSError"""
if os.path.exists(self._reg_path):
reg_file = file(self._reg_path)
app_list = json.load(reg_file)
reg_file.close()
for app_json in app_list:
app_json.setdefault('author', 'Unknown') # Added after 0.9
app = HueApp.create(app_json)
self._apps[app.name] = app
self._initialized = True
def _write(self, path):
"""Write out the registry to the given path"""
outfile = file(path, 'w')
json.dump(self._apps.values(), outfile, cls=AppJsonEncoder, indent=2)
outfile.close()
def contains(self, app):
"""Returns whether the app (of the same version) is in the registry"""
try:
existing = self._apps[app.name]
return existing.version == app.version
except KeyError:
return False
def register(self, app):
"""register(app) -> True/False"""
assert self._initialized, "Registry not yet initialized"
try:
existing = self._apps[app.name]
version_diff = common.cmp_version(existing.version, app.version)
if version_diff == 0:
LOG.warn('%s is already registered' % (app,))
return False
elif version_diff < 0:
LOG.info('Upgrading %s from version %s' % (app, existing.version))
elif version_diff > 0:
LOG.error('A newer version (%s) of %s is already installed' % (existing.version, app))
return False
except KeyError:
pass
LOG.info('Updating registry with %s' % (app,))
self._apps[app.name] = app
return True
def unregister(self, app_name):
"""unregister(app_Name) -> HueApp. May raise KeyError"""
assert self._initialized, "Registry not yet initialized"
app = self._apps[app_name]
del self._apps[app_name]
return app
def get_all_apps(self):
"""get_all_apps() -> List of HueApp"""
return self._apps.values()
def save(self):
"""Save and write out the registry"""
assert self._initialized, "Registry not yet initialized"
tmp_path = self._reg_path + '.new'
self._write(tmp_path)
os.rename(tmp_path, self._reg_path)
LOG.info('=== Saved registry at %s' % (self._reg_path,))
class HueApp(object):
"""
Represents an app.
"""
@staticmethod
def create(json):
return HueApp(json['name'], json['version'], json['path'], json['desc'], json['author'])
def __init__(self, name, version, path, desc, author):
self.name = name
self.version = version
self.path = path
self.desc = desc
self.author = author
def __str__(self):
return "%s v.%s" % (self.name, self.version)
def __cmp__(self, other):
if not isinstance(other, HueApp):
raise TypeError
return cmp((self.name, self.version), (other.name, other.version))
def jsonable(self):
return dict(name=self.name, version=self.version, path=self.path,
desc=self.desc, author=self.author)
def find_ext_pys(self):
"""find_ext_pys() -> A list of paths for all ext-py packages"""
return glob.glob(os.path.join(self.path, 'ext-py', '*'))
def get_conffiles(self):
"""get_conffiles() -> A list of config (.ini) files"""
ini_files = glob.glob(os.path.join(self.path, 'conf', '*.ini'))
return [ os.path.abspath(ini) for ini in ini_files ]
def install_conf(self):
"""
install_conf() -> True/False
Symlink the app's conf/*.ini files into the conf directory.
"""
installed = [ ]
for target in self.get_conffiles():
link_name = os.path.join(common.HUE_CONF_DIR, os.path.basename(target))
# Does the link already exist?
if os.path.islink(link_name):
try:
cur = os.readlink(link_name)
if cur == target:
LOG.warn("Symlink for configuration already exists: %s" % (link_name,))
installed.append(link_name)
continue
# Remove broken link
if not os.path.exists(cur):
os.unlink(link_name)
LOG.warn("Removing broken link: %s" % (link_name,))
except OSError, ex:
LOG.warn("Error checking for existing link %s: %s" % (link_name, ex))
# Actually install the link
try:
os.symlink(target, link_name)
LOG.info('Symlink config %s -> %s' % (link_name, target))
installed.append(link_name)
except OSError, ex:
LOG.error("Failed to symlink %s to %s: %s" % (target, link_name, ex))
for lnk in installed:
try:
os.unlink(lnk)
except OSError, ex2:
LOG.error("Failed to cleanup link %s: %s" % (link_name, ex2))
return False
return True
def uninstall_conf(self):
"""uninstall_conf() -> True/False"""
app_conf_dir = os.path.abspath(os.path.join(self.path, 'conf'))
if not os.path.isdir(app_conf_dir):
return True
# Check all symlink in the conf dir and remove any that point to this app
for name in os.listdir(common.HUE_CONF_DIR):
path = os.path.join(common.HUE_CONF_DIR, name)
if not os.path.islink(path):
continue
target = os.readlink(path)
target_dir = os.path.abspath(os.path.dirname(target))
if target_dir == app_conf_dir:
try:
os.unlink(path)
LOG.info('Remove config symlink %s -> %s' % (path, target))
except OSError, ex:
LOG.error("Failed to remove configuration link %s: %s" % (path, ex))
return False
return True
class AppJsonEncoder(json.JSONEncoder):
def __init__(self, **kwargs):
json.JSONEncoder.__init__(self, **kwargs)
def default(self, obj):
if isinstance(obj, HueApp):
return obj.jsonable()
return json.JSONEncoder.default(self, obj)
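# Illustrative usage sketch (not part of the original module); the app path and
# metadata below are hypothetical:
#   registry = AppRegistry()
#   app = HueApp('myapp', '0.1', '/usr/share/hue/apps/myapp', 'Example app', 'Someone')
#   if registry.register(app):
#       app.install_conf()
#       registry.save()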
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.resourcemanager_v3.services.tag_values import pagers
from google.cloud.resourcemanager_v3.types import tag_values
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import TagValuesTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import TagValuesGrpcAsyncIOTransport
from .client import TagValuesClient
class TagValuesAsyncClient:
"""Allow users to create and manage tag values."""
_client: TagValuesClient
DEFAULT_ENDPOINT = TagValuesClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = TagValuesClient.DEFAULT_MTLS_ENDPOINT
tag_value_path = staticmethod(TagValuesClient.tag_value_path)
parse_tag_value_path = staticmethod(TagValuesClient.parse_tag_value_path)
common_billing_account_path = staticmethod(
TagValuesClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
TagValuesClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(TagValuesClient.common_folder_path)
parse_common_folder_path = staticmethod(TagValuesClient.parse_common_folder_path)
common_organization_path = staticmethod(TagValuesClient.common_organization_path)
parse_common_organization_path = staticmethod(
TagValuesClient.parse_common_organization_path
)
common_project_path = staticmethod(TagValuesClient.common_project_path)
parse_common_project_path = staticmethod(TagValuesClient.parse_common_project_path)
common_location_path = staticmethod(TagValuesClient.common_location_path)
parse_common_location_path = staticmethod(
TagValuesClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TagValuesAsyncClient: The constructed client.
"""
return TagValuesClient.from_service_account_info.__func__(TagValuesAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TagValuesAsyncClient: The constructed client.
"""
return TagValuesClient.from_service_account_file.__func__(TagValuesAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return TagValuesClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> TagValuesTransport:
"""Returns the transport used by the client instance.
Returns:
TagValuesTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(TagValuesClient).get_transport_class, type(TagValuesClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, TagValuesTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the tag values client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.TagValuesTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = TagValuesClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def list_tag_values(
self,
request: Union[tag_values.ListTagValuesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTagValuesAsyncPager:
r"""Lists all TagValues for a specific TagKey.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_list_tag_values():
# Create a client
client = resourcemanager_v3.TagValuesClient()
# Initialize request argument(s)
request = resourcemanager_v3.ListTagValuesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_tag_values(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.resourcemanager_v3.types.ListTagValuesRequest, dict]):
The request object. The request message for listing
TagValues for the specified TagKey.
parent (:class:`str`):
Required. Resource name for TagKey, parent of the
TagValues to be listed, in the format ``tagKeys/123``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.resourcemanager_v3.services.tag_values.pagers.ListTagValuesAsyncPager:
The ListTagValues response.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = tag_values.ListTagValuesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_tag_values,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListTagValuesAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_tag_value(
self,
request: Union[tag_values.GetTagValueRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> tag_values.TagValue:
r"""Retrieves TagValue. If the TagValue or namespaced name does not
exist, or if the user does not have permission to view it, this
method will return ``PERMISSION_DENIED``.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_get_tag_value():
# Create a client
client = resourcemanager_v3.TagValuesClient()
# Initialize request argument(s)
request = resourcemanager_v3.GetTagValueRequest(
name="name_value",
)
# Make the request
response = client.get_tag_value(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.resourcemanager_v3.types.GetTagValueRequest, dict]):
The request object. The request message for getting a
TagValue.
name (:class:`str`):
Required. Resource name for TagValue to be fetched in
the format ``tagValues/456``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.resourcemanager_v3.types.TagValue:
A TagValue is a child of a particular
TagKey. This is used to group cloud
resources for the purpose of controlling
them using policies.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = tag_values.GetTagValueRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_tag_value,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def create_tag_value(
self,
request: Union[tag_values.CreateTagValueRequest, dict] = None,
*,
tag_value: tag_values.TagValue = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a TagValue as a child of the specified
TagKey. If another request with the same parameters is
sent while the original request is in process, the second
request will receive an error. A maximum of 300
TagValues can exist under a TagKey at any given time.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_create_tag_value():
# Create a client
client = resourcemanager_v3.TagValuesClient()
# Initialize request argument(s)
tag_value = resourcemanager_v3.TagValue()
tag_value.short_name = "short_name_value"
request = resourcemanager_v3.CreateTagValueRequest(
tag_value=tag_value,
)
# Make the request
operation = client.create_tag_value(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.resourcemanager_v3.types.CreateTagValueRequest, dict]):
The request object. The request message for creating a
TagValue.
tag_value (:class:`google.cloud.resourcemanager_v3.types.TagValue`):
Required. The TagValue to be created. Only fields
``short_name``, ``description``, and ``parent`` are
considered during the creation request.
This corresponds to the ``tag_value`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.resourcemanager_v3.types.TagValue` A TagValue is a child of a particular TagKey. This is used to group
cloud resources for the purpose of controlling them
using policies.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tag_value])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = tag_values.CreateTagValueRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tag_value is not None:
request.tag_value = tag_value
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_tag_value,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
tag_values.TagValue,
metadata_type=tag_values.CreateTagValueMetadata,
)
# Done; return the response.
return response
async def update_tag_value(
self,
request: Union[tag_values.UpdateTagValueRequest, dict] = None,
*,
tag_value: tag_values.TagValue = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Updates the attributes of the TagValue resource.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_update_tag_value():
# Create a client
client = resourcemanager_v3.TagValuesClient()
# Initialize request argument(s)
tag_value = resourcemanager_v3.TagValue()
tag_value.short_name = "short_name_value"
request = resourcemanager_v3.UpdateTagValueRequest(
tag_value=tag_value,
)
# Make the request
operation = client.update_tag_value(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.resourcemanager_v3.types.UpdateTagValueRequest, dict]):
The request object. The request message for updating a
TagValue.
tag_value (:class:`google.cloud.resourcemanager_v3.types.TagValue`):
Required. The new definition of the TagValue. Only
the ``description`` and ``etag`` fields can be
updated by this request. If the ``etag`` field is
nonempty, it must match the ``etag`` field of the
existing ControlGroup. Otherwise,
``FAILED_PRECONDITION`` will be returned.
This corresponds to the ``tag_value`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Optional. Fields to be updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.resourcemanager_v3.types.TagValue` A TagValue is a child of a particular TagKey. This is used to group
cloud resources for the purpose of controlling them
using policies.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tag_value, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = tag_values.UpdateTagValueRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tag_value is not None:
request.tag_value = tag_value
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_tag_value,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("tag_value.name", request.tag_value.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
tag_values.TagValue,
metadata_type=tag_values.UpdateTagValueMetadata,
)
# Done; return the response.
return response
async def delete_tag_value(
self,
request: Union[tag_values.DeleteTagValueRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a TagValue. The TagValue cannot have any
bindings when it is deleted.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_delete_tag_value():
# Create a client
client = resourcemanager_v3.TagValuesClient()
# Initialize request argument(s)
request = resourcemanager_v3.DeleteTagValueRequest(
name="name_value",
)
# Make the request
operation = client.delete_tag_value(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.resourcemanager_v3.types.DeleteTagValueRequest, dict]):
The request object. The request message for deleting a
TagValue.
name (:class:`str`):
Required. Resource name for TagValue
to be deleted in the format
tagValues/456.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.resourcemanager_v3.types.TagValue` A TagValue is a child of a particular TagKey. This is used to group
cloud resources for the purpose of controlling them
using policies.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = tag_values.DeleteTagValueRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_tag_value,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
tag_values.TagValue,
metadata_type=tag_values.DeleteTagValueMetadata,
)
# Done; return the response.
return response
async def get_iam_policy(
self,
request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None,
*,
resource: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Gets the access control policy for a TagValue. The returned
policy may be empty if no such policy or resource exists. The
``resource`` field should be the TagValue's resource name. For
example: ``tagValues/1234``. The caller must have the
``cloudresourcemanager.googleapis.com/tagValues.getIamPolicy``
permission on the identified TagValue to get the access control
policy.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_get_iam_policy():
# Create a client
client = resourcemanager_v3.TagValuesClient()
# Initialize request argument(s)
request = resourcemanager_v3.GetIamPolicyRequest(
resource="resource_value",
)
# Make the request
response = client.get_iam_policy(request=request)
# Handle the response
print(response)
Args:
request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]):
The request object. Request message for `GetIamPolicy`
method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy is being requested. See the
operation documentation for the
appropriate value for this field.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.policy_pb2.Policy:
Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform
resources.
A Policy is a collection of bindings. A binding binds
one or more members to a single role. Members can be
user accounts, service accounts, Google groups, and
domains (such as G Suite). A role is a named list of
permissions (defined by IAM or configured by users).
A binding can optionally specify a condition, which
is a logic expression that further constrains the
role binding based on attributes about the request
and/or target resource.
**JSON Example**
{
"bindings": [
{
"role":
"roles/resourcemanager.organizationAdmin",
"members": [ "user:[email protected]",
"group:[email protected]",
"domain:google.com",
"serviceAccount:[email protected]"
]
}, { "role":
"roles/resourcemanager.organizationViewer",
"members": ["user:[email protected]"],
"condition": { "title": "expirable access",
"description": "Does not grant access after
Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } }
]
}
**YAML Example**
bindings: - members: - user:\ [email protected] -
group:\ [email protected] - domain:google.com -
serviceAccount:\ [email protected]
role: roles/resourcemanager.organizationAdmin -
members: - user:\ [email protected] role:
roles/resourcemanager.organizationViewer
condition: title: expirable access description:
Does not grant access after Sep 2020 expression:
request.time <
timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the
[IAM developer's
guide](\ https://cloud.google.com/iam/docs).
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.GetIamPolicyRequest(**request)
elif not request:
request = iam_policy_pb2.GetIamPolicyRequest(resource=resource,)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_iam_policy,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def set_iam_policy(
self,
request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None,
*,
resource: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Sets the access control policy on a TagValue, replacing any
existing policy. The ``resource`` field should be the TagValue's
resource name. For example: ``tagValues/1234``. The caller must
have ``resourcemanager.tagValues.setIamPolicy`` permission on
the identified tagValue.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_set_iam_policy():
# Create a client
client = resourcemanager_v3.TagValuesClient()
# Initialize request argument(s)
request = resourcemanager_v3.SetIamPolicyRequest(
resource="resource_value",
)
# Make the request
response = client.set_iam_policy(request=request)
# Handle the response
print(response)
Args:
request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]):
The request object. Request message for `SetIamPolicy`
method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy is being specified. See the
operation documentation for the
appropriate value for this field.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.policy_pb2.Policy:
Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform
resources.
A Policy is a collection of bindings. A binding binds
one or more members to a single role. Members can be
user accounts, service accounts, Google groups, and
domains (such as G Suite). A role is a named list of
permissions (defined by IAM or configured by users).
A binding can optionally specify a condition, which
is a logic expression that further constrains the
role binding based on attributes about the request
and/or target resource.
**JSON Example**
{
"bindings": [
{
"role":
"roles/resourcemanager.organizationAdmin",
"members": [ "user:[email protected]",
"group:[email protected]",
"domain:google.com",
"serviceAccount:[email protected]"
]
}, { "role":
"roles/resourcemanager.organizationViewer",
"members": ["user:[email protected]"],
"condition": { "title": "expirable access",
"description": "Does not grant access after
Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } }
]
}
**YAML Example**
bindings: - members: - user:\ [email protected] -
group:\ [email protected] - domain:google.com -
serviceAccount:\ [email protected]
role: roles/resourcemanager.organizationAdmin -
members: - user:\ [email protected] role:
roles/resourcemanager.organizationViewer
condition: title: expirable access description:
Does not grant access after Sep 2020 expression:
request.time <
timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the
[IAM developer's
guide](\ https://cloud.google.com/iam/docs).
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.SetIamPolicyRequest(**request)
elif not request:
request = iam_policy_pb2.SetIamPolicyRequest(resource=resource,)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.set_iam_policy,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def test_iam_permissions(
self,
request: Union[iam_policy_pb2.TestIamPermissionsRequest, dict] = None,
*,
resource: str = None,
permissions: Sequence[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Returns permissions that a caller has on the specified TagValue.
The ``resource`` field should be the TagValue's resource name.
For example: ``tagValues/1234``.
There are no permissions required for making this API call.
.. code-block:: python
from google.cloud import resourcemanager_v3
def sample_test_iam_permissions():
# Create a client
client = resourcemanager_v3.TagValuesClient()
# Initialize request argument(s)
request = resourcemanager_v3.TestIamPermissionsRequest(
resource="resource_value",
permissions=['permissions_value_1', 'permissions_value_2'],
)
# Make the request
response = client.test_iam_permissions(request=request)
# Handle the response
print(response)
Args:
request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]):
The request object. Request message for
`TestIamPermissions` method.
resource (:class:`str`):
REQUIRED: The resource for which the
policy detail is being requested. See
the operation documentation for the
appropriate value for this field.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
permissions (:class:`Sequence[str]`):
The set of permissions to check for the ``resource``.
Permissions with wildcards (such as '*' or 'storage.*')
are not allowed. For more information see `IAM
Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
This corresponds to the ``permissions`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([resource, permissions])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.TestIamPermissionsRequest(**request)
elif not request:
request = iam_policy_pb2.TestIamPermissionsRequest(
resource=resource, permissions=permissions,
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.test_iam_permissions,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-resourcemanager",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("TagValuesAsyncClient",)
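# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated client). A minimal
# example of an etag-guarded update through the async client; the resource
# name, etag and description below are hypothetical placeholders. If the etag
# does not match the one stored on the existing TagValue, the service rejects
# the call with FAILED_PRECONDITION (see the update_tag_value docstring above).
async def _example_update_tag_value_description():
    from google.protobuf import field_mask_pb2

    client = TagValuesAsyncClient()
    tag_value = tag_values.TagValue(
        name="tagValues/456",  # hypothetical TagValue resource name
        description="updated description",
        etag="example-etag",  # must match the etag of the existing TagValue
    )
    # Only `description` and `etag` may be changed, so restrict the update mask.
    operation = await client.update_tag_value(
        tag_value=tag_value,
        update_mask=field_mask_pb2.FieldMask(paths=["description"]),
    )
    # update_tag_value returns a long-running operation; await its result.
    return await operation.result()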
|
|
#! /usr/bin/env python3
import sys
import os
import math
import glob
from mule.InfoError import *
from mule.postprocessing.JobData import *
from mule.postprocessing.JobsData import *
from mule_local.postprocessing.PlaneDataPhysicalDiff import *
class pickle_PlaneDataPhysicalDiff(InfoError):
def __init__(
self,
ref_file_ending = None,
jobdir_pattern = None,
job_dirs = None,
params = [],
):
"""
Generate the .pickle files in each job directory based on simulation output files given by 'ref_file_ending'
Parameters
----------
ref_file_ending: str
string with ending of reference files
jobdir_pattern: str
string with matching pattern for job directories
job_dirs: list of str
list of job directories; if given, these are used instead of 'jobdir_pattern'
params: list of strings
list with optional parameters
'ignore_missing_file':
Don't throw an error if a file is missing
other parameters:
see PlaneDataPhysicalDiff.py
"""
InfoError.__init__(self, "pickle_PlaneDataPhysicalDiff")
self._setup(ref_file_ending=ref_file_ending, jobdir_pattern=jobdir_pattern, job_dirs=job_dirs, params=params)
def _setup(
self,
ref_file_ending = None,
jobdir_pattern = None,
job_dirs = None,
params = [],
):
if job_dirs != None:
j = JobsData(job_dirs=job_dirs, verbosity=0)
else:
if jobdir_pattern == None:
jobdir_pattern = './job_bench*'
j = JobsData(jobdir_pattern, verbosity=0)
jobs = j.get_flattened_data()
no_reference_job_unique_id_found = True
if len(jobs) == 0:
raise Exception("No jobs found!")
for key, job in jobs.items():
print("Processing "+key)
# Sort out jobs which don't have a reference job id
# These jobs are likely the reference jobs themselves
if 'jobgeneration.reference_job_unique_id' not in job:
continue
no_reference_job_unique_id_found = False
reference_job_unique_id = job['jobgeneration.reference_job_unique_id']
print(" + ref job id: "+reference_job_unique_id)
ref_key = None
for skey, sjob in jobs.items():
if sjob['jobgeneration.job_unique_id'] == reference_job_unique_id:
if ref_key != None:
raise Exception("FATAL: Reference job already found and now there's another one? This is probably not what you wanted, there might be 2 reference jobs")
ref_key = skey
if ref_key == None:
print("")
print("FATAL: missing reference job with id "+reference_job_unique_id)
print("")
print("FATAL: reference job was intended for job with dirpath: "+job['jobgeneration.job_dirpath'])
print("")
print("FATAL: Hint: If specifying job directories manually, reference job *MUST* be included in the provided job directories!")
print("")
raise Exception("Reference job not found!")
# Load reference job
ref_job = jobs[ref_key]
#
# Load
# ref_files: list of reference files
# use_ref_file_ending: file ending for pickle output file
#
# Were the reference filenames provided?
# ...
if 'output.reference_filenames' in job:
# ... then we use the reference files
# They are available in 'output.reference_filenames' and separated by ';'
ref_files = job['output.reference_filenames'].split(";")
#
# Now we have to find the file ending without the time stamp
# We do this to generate a unique pickle file which is independent of the time
#
# We guess that this starts at the last '_' character in the filename
# E.g. '_t00000864000.00000000.csv'
pos = ref_files[0].rfind('_')
if pos < 0:
raise Exception("File ending not found for reference file '"+ref_files[0]+"'")
use_ref_file_ending = ref_files[0][pos:]
print("use_ref_file_ending: "+use_ref_file_ending)
if len(ref_files) == 0:
print("Meta tags with name of reference files not found!")
print("*"*80)
print("Reference directory: "+ref_job['jobgeneration.job_dirpath'])
print("Job directory: "+job['jobgeneration.job_dirpath'])
print("Reference file endings: "+use_ref_file_ending)
print("*"*80)
print("* Skipping this job data")
print("*"*80)
#raise Exception("Meta tags with names of reference files not found!")
continue
else:
if ref_file_ending != None:
use_ref_file_ending = ref_file_ending
else:
print("*"*80)
# "output_%s_t%020.8f.csv"
use_ref_file_ending = "_t{:020.8f}.csv".format(float(ref_job['runtime.max_simulation_time'])/(60*60))
if use_ref_file_ending == "":
raise Exception("No reference file ending provided / found")
# Load reference files
ref_files = []
files = os.listdir(ref_job['jobgeneration.job_dirpath'])
for f in files:
if use_ref_file_ending in f:
ref_files.append(f)
if len(ref_files) == 0:
print("No reference files found or meta tag with reference file names not detected!")
print("*"*80)
print("Reference directory: "+ref_job['jobgeneration.job_dirpath'])
print("Job directory: "+job['jobgeneration.job_dirpath'])
print("Reference file endings: "+use_ref_file_ending)
print("*"*80)
# Search for tag which indicates that simulation was successfully finished
if not 'output.simulation_successfully_finished' in job:
print("(ignoring error, since simulation was not successfully finished)")
else:
raise Exception("No reference files not found or meta tag with reference file names not detected!")
for ref_file in ref_files:
print("")
print("Reference file: "+ref_file)
if '_spec_' in ref_file:
self.info("WARNING: Skipping '"+ref_file+"', since this is spectral data")
else:
s = None
try:
s = PlaneDataPhysicalDiff(
ref_job['jobgeneration.job_dirpath']+'/'+ref_file,
job['jobgeneration.job_dirpath']+'/'+ref_file,
params
)
except FileNotFoundError as e:
# Ignoring missing files should be configured via "ignore_missing_file" parameter, see above
if "ignore_missing_file" in params:
self.info("Ignoring Error:")
self.info(str(e))
continue
raise Exception(e)
except IOError as e:
# Ignoring missing files should be configured via "ignore_missing_file" parameter, see above
if "ignore_missing_file" in params:
self.info("Ignoring Error:")
self.info(str(e))
continue
raise Exception(e)
s.print()
pickle_filename = 'plane_data_diff_'+ref_file.replace('output_', '').replace(use_ref_file_ending, '')+'.pickle'
print("Writing file "+pickle_filename)
s.write_file(job['jobgeneration.job_dirpath']+'/'+pickle_filename)
print(ref_key)
print("")
if no_reference_job_unique_id_found:
print("*"*80)
print("Warning: No data generated")
print("No job with a reference_job_unique_id found!")
print("*"*80)
if __name__ == '__main__':
p = pickle_PlaneDataPhysicalDiff()
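# Illustrative invocation sketch (hypothetical file ending and parameters;
# adjust to the actual benchmark output):
#
#     pickle_PlaneDataPhysicalDiff(
#         ref_file_ending="_t00000864000.00000000.csv",
#         jobdir_pattern="./job_bench*",
#         params=["ignore_missing_file"],
#     )
#
# This walks all matching job directories, compares each job's output files
# against those of its reference job and writes one plane_data_diff_*.pickle
# file per compared output file into the job directory.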
|
|
#
# CatalogsBase.py -- Catalogs plugin for Ginga fits viewer
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.misc import Bunch, Future
from ginga import GingaPlugin
from ginga import cmap, imap
from ginga import wcs
class CatalogsBase(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
super(CatalogsBase, self).__init__(fv, fitsimage)
self.mycolor = 'skyblue'
self.color_cursor = 'red'
self.limit_stars_to_area = False
self.pan_to_selected = False
self.use_dss_channel = False
self.plot_max = 500
self.plot_limit = 100
self.plot_start = 0
# star list
self.starlist = []
# catalog listing
self.table = None
self.layertag = 'catalog-canvas'
self.areatag = None
self.curstar = None
prefs = self.fv.get_preferences()
self.settings = prefs.createCategory('plugin_Catalogs')
self.settings.load(onError='silent')
self.image_server_options = []
self.image_server_params = None
self.catalog_server_options = []
self.catalog_server_params = None
self.dc = fv.getDrawClasses()
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(True)
canvas.set_drawtype('rectangle', color='cyan', linestyle='dash',
drawdims=True)
canvas.set_callback('cursor-down', self.btndown)
canvas.set_callback('cursor-up', self.btnup)
canvas.set_callback('draw-event', self.getarea)
canvas.setSurface(self.fitsimage)
self.canvas = canvas
def ok(self):
return self.close()
def cancel(self):
return self.close()
def update_gui(self):
self.fv.update_pending()
def close(self):
chname = self.fv.get_channelName(self.fitsimage)
self.fv.stop_operation_channel(chname, str(self))
return True
def start(self, future=None):
self.instructions()
# start catalog operation
try:
obj = self.fitsimage.getObjectByTag(self.layertag)
except KeyError:
# Add canvas layer
self.fitsimage.add(self.canvas, tag=self.layertag)
# Raise the params tab
self._raise_tab(self.w.params)
self.setfromimage()
self.resume()
def pause(self):
self.canvas.ui_setActive(False)
def resume(self):
self.canvas.ui_setActive(True)
#self.fv.showStatus("Draw a rectangle with the right mouse button")
def stop(self):
# stop catalog operation
self.clearAll()
# remove the canvas from the image
self.canvas.ui_setActive(False)
try:
self.fitsimage.deleteObjectByTag(self.layertag)
except:
pass
try:
self.table.close()
except:
pass
self.fv.showStatus("")
def redo(self):
obj = self.canvas.getObjectByTag(self.areatag)
if obj.kind != 'rectangle':
self.stop()
return True
try:
image = self.fitsimage.get_image()
# calculate center of bbox
wd = obj.x2 - obj.x1
dw = wd // 2
ht = obj.y2 - obj.y1
dh = ht // 2
ctr_x, ctr_y = obj.x1 + dw, obj.y1 + dh
ra_ctr, dec_ctr = image.pixtoradec(ctr_x, ctr_y, format='str')
# Calculate RA and DEC for the three points
# origination point
ra_org, dec_org = image.pixtoradec(obj.x1, obj.y1)
# destination point
ra_dst, dec_dst = image.pixtoradec(obj.x2, obj.y2)
# "heel" point making a right triangle
ra_heel, dec_heel = image.pixtoradec(obj.x1, obj.y2)
ht_deg = image.deltaStarsRaDecDeg(ra_org, dec_org, ra_heel, dec_heel)
wd_deg = image.deltaStarsRaDecDeg(ra_heel, dec_heel, ra_dst, dec_dst)
radius_deg = image.deltaStarsRaDecDeg(ra_heel, dec_heel, ra_dst, dec_dst)
# width and height are specified in arcmin
sgn, deg, mn, sec = wcs.degToDms(wd_deg)
wd = deg*60.0 + float(mn) + sec/60.0
sgn, deg, mn, sec = wcs.degToDms(ht_deg)
ht = deg*60.0 + float(mn) + sec/60.0
sgn, deg, mn, sec = wcs.degToDms(radius_deg)
radius = deg*60.0 + float(mn) + sec/60.0
#wd, ht, radius = wd_deg, ht_deg, radius_deg
except Exception as e:
self.fv.showStatus('BAD WCS: %s' % str(e))
return True
# Copy the image parameters out to the widget
d = { 'ra': ra_ctr, 'dec': dec_ctr, 'width': str(wd),
'height': ht, 'r': radius, 'r2': radius,
'r1': 0.0,
}
self._update_widgets(d)
return True
def btndown(self, canvas, button, data_x, data_y):
return True
def btnup(self, canvas, button, data_x, data_y):
objs = self.canvas.getItemsAt(data_x, data_y)
for obj in objs:
if (obj.tag != None) and obj.tag.startswith('star'):
info = obj.get_data()
self.table.show_selection(info.star)
return True
return True
def highlight_object(self, obj, tag, color, redraw=True):
x = obj.objects[0].x
y = obj.objects[0].y
delta = 10
radius = obj.objects[0].radius + delta
hilite = self.dc.Circle(x, y, radius, linewidth=4, color=color)
obj.add(hilite, tag=tag, redraw=redraw)
def highlight_objects(self, objs, tag, color, redraw=True):
for obj in objs:
self.highlight_object(obj, tag, color, redraw=False)
if redraw:
self.canvas.redraw()
def unhighlight_object(self, obj, tag):
# delete the highlight ring of the former cursor object
try:
#hilite = obj.objects[2]
obj.deleteObjectByTag(tag)
except:
pass
def highlight_cursor(self, obj):
if self.curstar:
bnch = self.curstar
if bnch.obj == obj:
# <-- we are already highlighting this object
return True
# delete the highlight ring of the former cursor object
self.unhighlight_object(bnch.obj, 'cursor')
self.highlight_object(obj, 'cursor', self.color_cursor)
self.curstar = Bunch.Bunch(obj=obj)
self.canvas.redraw()
def setfromimage(self):
x1, y1 = 0, 0
x2, y2 = self.fitsimage.get_data_size()
Rectangle = self.canvas.getDrawClass('Rectangle')
tag = self.canvas.add(Rectangle(x1, y1, x2, y2,
color=self.mycolor))
self.getarea(self.canvas, tag)
def getarea(self, canvas, tag):
obj = canvas.getObjectByTag(tag)
if obj.kind != 'rectangle':
return True
if self.areatag:
try:
canvas.deleteObjectByTag(self.areatag)
except:
pass
obj.color = self.mycolor
obj.linestyle = 'solid'
canvas.redraw(whence=3)
self.areatag = tag
# Raise the params tab
self._raise_tab(self.w.params)
return self.redo()
def getimage_cb(self):
params = self.get_params(self.image_server_params)
index = self._get_cbidx(self.w.server)
server = self.image_server_options[index]
self.clearAll()
if self.use_dss_channel:
chname = 'DSS'
if not self.fv.has_channel(chname):
self.fv.add_channel(chname)
else:
chname = self.fv.get_channelName(self.fitsimage)
self.fitsimage.onscreen_message("Querying image db...",
delay=1.0)
# Offload this network task to a non-gui thread
self.fv.nongui_do(self.getimage, server, params, chname)
def getimage(self, server, params, chname):
try:
fitspath = self.fv.get_sky_image(server, params)
except Exception as e:
errmsg = "Query exception: %s" % (str(e))
self.logger.error(errmsg)
# pop up the error in the GUI under "Errors" tab
self.fv.gui_do(self.fv.show_error, errmsg)
return
self.fv.load_file(fitspath, chname=chname)
# Update the GUI
def getimage_update():
    self.setfromimage()
    self.redo()
self.fv.gui_do(getimage_update)
def getcatalog_cb(self):
params = self.get_params(self.catalog_server_params)
index = self._get_cbidx(self.w2.server)
server = self.catalog_server_options[index]
obj = None
if self.limit_stars_to_area:
# Look for the defining object to filter stars
# If none, then use the visible image area
try:
obj = self.canvas.getObjectByTag(self.areatag)
except KeyError:
pass
self.reset()
self.fitsimage.onscreen_message("Querying catalog db...",
delay=1.0)
# Offload this network task to a non-gui thread
self.fv.nongui_do(self.getcatalog, server, params, obj)
def getcatalog(self, server, params, obj):
try:
starlist, info = self.fv.get_catalog(server, params)
self.logger.debug("starlist=%s" % str(starlist))
starlist = self.filter_results(starlist, obj)
# Update the GUI
self.fv.gui_do(self.update_catalog, starlist, info)
except Exception as e:
errmsg = "Query exception: %s" % (str(e))
self.logger.error(errmsg)
# pop up the error in the GUI under "Errors" tab
self.fv.gui_do(self.fv.show_error, errmsg)
def update_catalog(self, starlist, info):
self.starlist = starlist
self.table.show_table(self, info, starlist)
# Raise the listing tab
self._raise_tab(self.w.listing)
self._update_plotscroll()
def filter_results(self, starlist, filter_obj):
image = self.fitsimage.get_image()
# Filter stars by a containing object, if provided
if filter_obj:
stars = []
for star in starlist:
x, y = image.radectopix(star['ra_deg'], star['dec_deg'])
if filter_obj.contains(x, y):
stars.append(star)
starlist = stars
return starlist
def clear(self):
objects = self.canvas.getObjectsByTagpfx('star')
self.canvas.deleteObjects(objects)
def clearAll(self):
self.canvas.deleteAllObjects()
def reset(self):
self.clear()
#self.clearAll()
self.table.clear()
def plot_star(self, obj, image=None):
if not image:
image = self.fitsimage.get_image()
x, y = image.radectopix(obj['ra_deg'], obj['dec_deg'])
#print "STAR at %d,%d" % (x, y)
# TODO: auto-pick a decent radius
radius = 10
color = self.table.get_color(obj)
#print "color is %s" % str(color)
circle = self.dc.Circle(x, y, radius, color=color)
point = self.dc.Point(x, y, radius, color=color)
## What is this from?
if 'pick' in obj:
# Some objects returned from the star catalog are marked
# with the attribute 'pick'. If present then we show the
# star with or without the cross, otherwise we always show the
# cross
if not obj['pick']:
star = self.dc.Canvas(circle, point)
else:
star = self.dc.Canvas(circle)
else:
star = self.dc.Canvas(circle, point)
star.set_data(star=obj)
obj.canvobj = star
self.canvas.add(star, tagpfx='star', redraw=False)
def pan_to_star(self, star):
# Set pan position to star
image = self.fitsimage.get_image()
x, y = image.radectopix(star['ra_deg'], star['dec_deg'])
self.fitsimage.panset_xy(x, y)
def get_plot_range(self):
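        # Returns (start index, count) for the slice of the star list to plot:
        # at most plot_limit stars, starting at plot_start clamped so the
        # window still fits inside the list.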
length = len(self.starlist)
if length <= self.plot_limit:
i = 0
else:
i = self.plot_start
i = int(min(i, length - self.plot_limit))
length = self.plot_limit
return (i, length)
def replot_stars(self, selected=[]):
self.clear()
image = self.fitsimage.get_image()
canvas = self.canvas
# Set the color bar and plot color range based on the stars
# we are plotting
i, length = self.get_plot_range()
self.table.set_minmax(i, length)
# remove references to old plot objects from starlist
for j in range(len(self.starlist)):
obj = self.starlist[j]
obj.canvobj = None
# plot stars in range
subset = self.table.get_subset_from_starlist(i, i+length)
for obj in subset:
self.plot_star(obj, image=image)
# plot stars in selected list even if they are not in the range
#for obj in selected:
selected = self.table.get_selected()
for obj in selected:
if ('canvobj' not in obj) or (obj.canvobj is None):
self.plot_star(obj, image=image)
self.highlight_object(obj.canvobj, 'selected', 'skyblue')
canvas.redraw(whence=3)
class CatalogListingBase(object):
def __init__(self, logger, container):
super(CatalogListingBase, self).__init__()
self.logger = logger
self.tag = None
self.mycolor = 'skyblue'
self.cmap_names = cmap.get_names()
self.imap_names = imap.get_names()
self.magcmap = 'stairs8'
self.magimap = 'ramp'
self.mag_field = 'mag'
self.mag_max = 25.0
self.mag_min = 0.0
# keys: are name, ra, dec, mag, flag, b_r, preference, priority, dst
# TODO: automate this generation
self.columns = [('Name', 'name'),
('RA', 'ra'),
('DEC', 'dec'),
('Mag', 'mag'),
('Preference', 'preference'),
('Priority', 'priority'),
('Description', 'description'),
]
self.catalog = None
self.cursor = 0
self.color_cursor = 'red'
self.color_selected = 'skyblue'
self.selection_mode = 'single'
self.selected = []
self.moving_cursor = False
self.btn = Bunch.Bunch()
self.cmap = cmap.get_cmap(self.magcmap)
self.imap = imap.get_imap('ramp')
self._build_gui(container)
def get_color(self, obj):
try:
mag = obj[self.mag_field]
except:
return self.mycolor
# calculate range of values
rng = float(self.mag_max - self.mag_min)
# clip magnitude to the range we have defined
mag = max(self.mag_min, mag)
mag = min(self.mag_max, mag)
if rng != 0.0:
point = float(mag - self.mag_min) / rng
else:
point = 1.0
# sanity check: clip to 0-1 range
point = max(0.0, point)
point = min(1.0, point)
# map to a 8-bit color range
point = int(point * 255.0)
# Apply colormap.
rgbmap = self.cbar.get_rgbmap()
(r, g, b) = rgbmap.get_rgbval(point)
r = float(r) / 255.0
g = float(g) / 255.0
b = float(b) / 255.0
return (r, g, b)
def mark_selection(self, star, fromtable=False):
"""Mark or unmark a star as selected. (fromtable)==True if the
selection action came from the table (instead of the star plot).
"""
self.logger.debug("star selected name=%s ra=%s dec=%s" % (
star['name'], star['ra'], star['dec']))
if star in self.selected:
# Item is already selected--so unselect it
self.selected.remove(star)
try:
# remove selection from table
self._unselect_tv(star, fromtable=fromtable)
# unhighlight star in plot
self.catalog.unhighlight_object(star.canvobj, 'selected')
except Exception as e:
self.logger.warn("Error unhilighting star: %s" % (str(e)))
return False
else:
if self.selection_mode == 'single':
# if selection mode is 'single' unselect any existing selections
for star2 in self.selected:
self.selected.remove(star2)
try:
self._unselect_tv(star2, fromtable=fromtable)
self.catalog.unhighlight_object(star2.canvobj, 'selected')
except Exception as e:
self.logger.warn("Error unhilighting star: %s" % (str(e)))
self.selected.append(star)
try:
# If this star is not plotted, then plot it
if ('canvobj' not in star) or (star.canvobj is None):
self.catalog.plot_star(star)
# highlight line in table
self._select_tv(star, fromtable=fromtable)
# highlight the plot object
self.catalog.highlight_object(star.canvobj, 'selected', 'skyblue')
if self.catalog.pan_to_selected:
self.catalog.pan_to_star(star)
except Exception as e:
self.logger.warn("Error hilighting star: %s" % (str(e)))
return True
def show_selection(self, star):
"""This method is called when the user clicks on a plotted star in the
fitsviewer.
"""
self.mark_selection(star)
def clear(self):
try:
self.catalog.clear()
except Exception as e:
# may not have generated a catalog yet
self.logger.warn("Error clearing star table: %s" % (str(e)))
def get_selected(self):
return self.selected
def replot_stars(self):
self.catalog.replot_stars()
canvobjs = [star.canvobj for star in self.selected]
self.catalog.highlight_objects(canvobjs, 'selected', 'skyblue')
def set_cmap_byname(self, name):
# Get colormap
cm = cmap.get_cmap(name)
self.cbar.set_cmap(cm)
self.replot_stars()
def set_imap_byname(self, name):
# Get intensity map
im = imap.get_imap(name)
self.cbar.set_imap(im)
self.replot_stars()
def set_minmax(self, i, length):
subset = self.get_subset_from_starlist(i, i+length)
values = [float(star[self.mag_field]) for star in subset]
self.mag_max = max(values)
self.mag_min = min(values)
self.cbar.set_range(self.mag_min, self.mag_max)
def _set_field(self, name):
# select new field to use for color plotting
self.mag_field = name
# determine the range of the values
if self.catalog != None:
i, length = self.catalog.get_plot_range()
self.set_minmax(i, length)
def set_field(self, name):
self._set_field(name)
self.replot_stars()
# END
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# from functools import reduce
class ConditionalAccumulatorTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.ConditionalAccumulator(dtypes_lib.float32, name="Q")
self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'ConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'shape' value { shape { unknown_rank: true} } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.accumulator_ref.op.node_def)
def testConstructorWithShape(self):
with ops.Graph().as_default():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1, 5, 2, 8]))
self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'ConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'shape' value { shape { dim {size: 1 }
dim {size: 5 }
dim {size: 2 }
dim {size: 8 }
} } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.accumulator_ref.op.node_def)
def testAccumulatorSizeEmpty(self):
with self.test_session():
q = data_flow_ops.ConditionalAccumulator(dtypes_lib.float32, name="Q")
self.assertEqual(q.num_accumulated().eval(), 0)
def testAccumulatorSetGlobalStep(self):
with self.test_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
set_global_step_op = q.set_global_step(1)
set_global_step_op.run()
def testAccumulatorApplyGradFloat32(self):
with self.test_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
accum_op.run()
def testDtypes(self):
with self.test_session() as sess:
dtypes = [dtypes_lib.float16, dtypes_lib.float32, dtypes_lib.float64]
for i in range(len(dtypes)):
dtype = dtypes[i]
q = data_flow_ops.ConditionalAccumulator(
dtype, shape=tensor_shape.TensorShape([1]))
elems = np.arange(10).astype(dtype.as_numpy_dtype)
for e in elems:
q.apply_grad((e,)).run()
result = sess.run(q.take_grad(1))
self.assertEqual(sum(elems) / len(elems), result)
def testAccumulatorMultipleAccumulators(self):
with self.test_session():
q_f32_0 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
q_f32_1 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
q_f16_0 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([1]))
q_f16_1 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([1]))
accums = [q_f16_0, q_f16_1, q_f32_0, q_f32_1]
for i in range(len(accums)):
accums[i].apply_grad((i + 10.0,)).run()
for i in range(len(accums)):
result = accums[i].take_grad(1).eval()
self.assertEqual(result, i + 10.0)
def testAccumulatorApplyAndTakeGradWithShape(self):
with self.test_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=(3, 2))
elems = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]]]
elems_ave = [[(a + b) / len(elems) for a, b in zip(x, y)]
for x, y in zip(elems[0], elems[1])]
accum_ops = [q.apply_grad(x) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
is_all_equal = True
val = takeg_t.eval()
for i in range(len(val)):
for j in range(len(val[i])):
is_all_equal &= (val[i][j] == elems_ave[i][j])
self.assertTrue(is_all_equal)
def testAccumulatorApplyGradWithWrongShape(self):
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=(3, 2))
with self.assertRaises(ValueError):
q.apply_grad([[1.0, 2.0], [3.0, 4.0]])
with self.assertRaises(ValueError):
q.apply_grad([[1.0], [2.0], [3.0]])
def testAccumulatorDynamicShape(self):
with self.test_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=None)
x = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(x)
elems = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]]]
elems_ave = [[(a + b) / len(elems) for a, b in zip(c, d)]
for c, d in zip(elems[0], elems[1])]
takeg_t = q.take_grad(1)
for elem in elems:
sess.run(accum_op, feed_dict={x: elem})
is_all_equal = True
val = takeg_t.eval()
for i in range(len(val)):
for j in range(len(val[i])):
is_all_equal &= (val[i][j] == elems_ave[i][j])
self.assertTrue(is_all_equal)
def testAccumulatorWrongDynamicShape(self):
with self.test_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=None)
x = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(x)
# First successful apply_grad determines shape
sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]})
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0]]})
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(accum_op, feed_dict={x: [[1.0], [2.0], [3.0]]})
def testAccumulatorSizeAfterApplyGrad(self):
with self.test_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
self.assertEqual(q.num_accumulated().eval(), 0)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 1)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 2)
def testAccumulatorSizeAfterApplyGradAndTakeGrad(self):
with self.test_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
extract_t = q.take_grad(2)
# Applying gradient multiple times to increase size from 0 to 2.
self.assertEqual(q.num_accumulated().eval(), 0)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 1)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 2)
# Extract will reduce size to 0
extract_t.op.run()
self.assertEqual(q.num_accumulated().eval(), 0)
# Taking gradients always sets the size back to 0 if successful.
accum_op = q.apply_grad((10.0,), local_step=1)
accum_op.run()
accum_op.run()
accum_op.run()
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 4)
extract_t.op.run()
self.assertEqual(q.num_accumulated().eval(), 0)
def testAccumulatorTakeGrad(self):
with self.test_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0]
elems_ave = sum(elems) / len(elems)
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
self.assertEqual(elems_ave, val)
accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
takeg_t = q.take_grad(constant_op.constant(1))
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
self.assertEqual(elems_ave, val)
def testAccumulatorInvalidTakeGrad(self):
with self.test_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0]
accum_ops = [q.apply_grad((x,)) for x in elems]
takeg_t = q.take_grad(-1)
for accum_op in accum_ops:
accum_op.run()
with self.assertRaises(errors_impl.InvalidArgumentError):
takeg_t.eval()
def testAccumulatorRepeatedTakeGrad(self):
with self.test_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0]
elems_ave = sum(elems) / len(elems)
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
self.assertEqual(elems_ave, val)
elems = [20.0, 30.0]
elems_ave = sum(elems) / len(elems)
accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
self.assertEqual(elems_ave + 0.0, val)
def testAccumulatorIncrementGlobalStep(self):
with self.test_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
global_step = variables.Variable(0, name="global_step")
new_global_step = math_ops.add(global_step, 1)
inc_global_step = state_ops.assign(global_step, new_global_step)
set_global_step_op = q.set_global_step(new_global_step)
variables.global_variables_initializer().run()
for _ in range(3):
set_global_step_op.run()
inc_global_step.eval()
def testAccumulatorSetGlobalStepPreventsAccumulation(self):
with self.test_session():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
local_steps = range(1000, 1005)
accum_ops = [q.apply_grad((0.0 + x,), local_step=x) for x in local_steps]
for ls in local_steps:
set_global_step_op = q.set_global_step(ls)
set_global_step_op.run()
for accum_op in accum_ops:
accum_op.run()
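        # Gradients whose local_step is older than the accumulator's global step
        # are dropped, so only entries with x >= ls contribute to the average below.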
takeg_t = q.take_grad(1)
val = takeg_t.eval()
self.assertEqual(0.0 + sum(x for x in local_steps
if x >= ls) / sum(1 for x in local_steps
if x >= ls), val)
def testParallelApplyGrad(self):
with self.test_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
def apply_grad(accum_op):
sess.run(accum_op)
threads = [
self.checkedThread(
target=apply_grad, args=(o,)) for o in accum_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
val = takeg_t.eval()
self.assertEqual(val, sum(elems) / len(elems))
def testParallelTakeGrad(self):
with self.test_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [e for e in range(10)]
accum_ops = [q.apply_grad((np.float32(e),), local_step=e) for e in elems]
takeg_t = q.take_grad(1)
def apply_grad():
for accum_op in accum_ops:
time.sleep(1.0)
sess.run(accum_op)
apply_grad_thread = self.checkedThread(target=apply_grad)
results = []
def take_grad():
results.append(sess.run(takeg_t))
threads = [self.checkedThread(target=take_grad) for _ in range(10)]
for thread in threads:
thread.start()
apply_grad_thread.start()
for thread in threads:
thread.join()
apply_grad_thread.join()
self.assertItemsEqual(elems, results)
def testAccumulatorApplyAndBlockingTake(self):
with self.test_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0, 30.0]
elems_ave = sum(elems) / len(elems)
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(3)
def apply_grad():
time.sleep(1.0)
for accum_op in accum_ops:
sess.run(accum_op)
return_array = []
def take_grad():
return_array.append(sess.run(takeg_t))
accum_thread = self.checkedThread(target=apply_grad)
takeg_thread = self.checkedThread(target=take_grad)
accum_thread.start()
takeg_thread.start()
accum_thread.join()
takeg_thread.join()
self.assertEqual([elems_ave], return_array)
def _blocking_takeg(self, sess, takeg_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(takeg_op)
def testAccumulatorCancel(self):
with self.test_session() as sess:
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
takeg_t = q.take_grad(1)
takeg_thread = self.checkedThread(
self._blocking_takeg, args=(sess, takeg_t))
takeg_thread.start()
time.sleep(1.0)
sess.close() # Will cancel blocked operation
takeg_thread.join()
if __name__ == "__main__":
test.main()
|
|
"""Support for Xiaomi Philips Lights."""
import asyncio
import datetime
from datetime import timedelta
from functools import partial
import logging
from math import ceil
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
ATTR_COLOR_TEMP,
ATTR_ENTITY_ID,
DOMAIN,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
Light,
)
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.util import color, dt
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Philips Light"
DATA_KEY = "light.xiaomi_miio"
CONF_MODEL = "model"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MODEL): vol.In(
[
"philips.light.sread1",
"philips.light.ceiling",
"philips.light.zyceiling",
"philips.light.moonlight",
"philips.light.bulb",
"philips.light.candle",
"philips.light.candle2",
"philips.light.mono1",
"philips.light.downlight",
]
),
}
)
# The light does not accept cct values < 1
CCT_MIN = 1
CCT_MAX = 100
DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS = 4
DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES = 1
SUCCESS = ["ok"]
ATTR_MODEL = "model"
ATTR_SCENE = "scene"
ATTR_DELAYED_TURN_OFF = "delayed_turn_off"
ATTR_TIME_PERIOD = "time_period"
ATTR_NIGHT_LIGHT_MODE = "night_light_mode"
ATTR_AUTOMATIC_COLOR_TEMPERATURE = "automatic_color_temperature"
ATTR_REMINDER = "reminder"
ATTR_EYECARE_MODE = "eyecare_mode"
# Moonlight
ATTR_SLEEP_ASSISTANT = "sleep_assistant"
ATTR_SLEEP_OFF_TIME = "sleep_off_time"
ATTR_TOTAL_ASSISTANT_SLEEP_TIME = "total_assistant_sleep_time"
ATTR_BRAND_SLEEP = "brand_sleep"
ATTR_BRAND = "brand"
SERVICE_SET_SCENE = "xiaomi_miio_set_scene"
SERVICE_SET_DELAYED_TURN_OFF = "xiaomi_miio_set_delayed_turn_off"
SERVICE_REMINDER_ON = "xiaomi_miio_reminder_on"
SERVICE_REMINDER_OFF = "xiaomi_miio_reminder_off"
SERVICE_NIGHT_LIGHT_MODE_ON = "xiaomi_miio_night_light_mode_on"
SERVICE_NIGHT_LIGHT_MODE_OFF = "xiaomi_miio_night_light_mode_off"
SERVICE_EYECARE_MODE_ON = "xiaomi_miio_eyecare_mode_on"
SERVICE_EYECARE_MODE_OFF = "xiaomi_miio_eyecare_mode_off"
XIAOMI_MIIO_SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
SERVICE_SCHEMA_SET_SCENE = XIAOMI_MIIO_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_SCENE): vol.All(vol.Coerce(int), vol.Clamp(min=1, max=6))}
)
SERVICE_SCHEMA_SET_DELAYED_TURN_OFF = XIAOMI_MIIO_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_TIME_PERIOD): vol.All(cv.time_period, cv.positive_timedelta)}
)
SERVICE_TO_METHOD = {
SERVICE_SET_DELAYED_TURN_OFF: {
"method": "async_set_delayed_turn_off",
"schema": SERVICE_SCHEMA_SET_DELAYED_TURN_OFF,
},
SERVICE_SET_SCENE: {
"method": "async_set_scene",
"schema": SERVICE_SCHEMA_SET_SCENE,
},
SERVICE_REMINDER_ON: {"method": "async_reminder_on"},
SERVICE_REMINDER_OFF: {"method": "async_reminder_off"},
SERVICE_NIGHT_LIGHT_MODE_ON: {"method": "async_night_light_mode_on"},
SERVICE_NIGHT_LIGHT_MODE_OFF: {"method": "async_night_light_mode_off"},
SERVICE_EYECARE_MODE_ON: {"method": "async_eyecare_mode_on"},
SERVICE_EYECARE_MODE_OFF: {"method": "async_eyecare_mode_off"},
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the light from config."""
from miio import Device, DeviceException
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
token = config.get(CONF_TOKEN)
model = config.get(CONF_MODEL)
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
devices = []
unique_id = None
if model is None:
try:
miio_device = Device(host, token)
device_info = miio_device.info()
model = device_info.model
unique_id = "{}-{}".format(model, device_info.mac_address)
_LOGGER.info(
"%s %s %s detected",
model,
device_info.firmware_version,
device_info.hardware_version,
)
except DeviceException:
raise PlatformNotReady
if model == "philips.light.sread1":
from miio import PhilipsEyecare
light = PhilipsEyecare(host, token)
primary_device = XiaomiPhilipsEyecareLamp(name, light, model, unique_id)
devices.append(primary_device)
hass.data[DATA_KEY][host] = primary_device
secondary_device = XiaomiPhilipsEyecareLampAmbientLight(
name, light, model, unique_id
)
devices.append(secondary_device)
# The ambient light doesn't expose additional services.
# A hass.data[DATA_KEY] entry isn't needed.
elif model in ["philips.light.ceiling", "philips.light.zyceiling"]:
from miio import Ceil
light = Ceil(host, token)
device = XiaomiPhilipsCeilingLamp(name, light, model, unique_id)
devices.append(device)
hass.data[DATA_KEY][host] = device
elif model == "philips.light.moonlight":
from miio import PhilipsMoonlight
light = PhilipsMoonlight(host, token)
device = XiaomiPhilipsMoonlightLamp(name, light, model, unique_id)
devices.append(device)
hass.data[DATA_KEY][host] = device
elif model in [
"philips.light.bulb",
"philips.light.candle",
"philips.light.candle2",
"philips.light.downlight",
]:
from miio import PhilipsBulb
light = PhilipsBulb(host, token)
device = XiaomiPhilipsBulb(name, light, model, unique_id)
devices.append(device)
hass.data[DATA_KEY][host] = device
elif model == "philips.light.mono1":
from miio import PhilipsBulb
light = PhilipsBulb(host, token)
device = XiaomiPhilipsGenericLight(name, light, model, unique_id)
devices.append(device)
hass.data[DATA_KEY][host] = device
else:
_LOGGER.error(
"Unsupported device found! Please create an issue at "
"https://github.com/syssi/philipslight/issues "
"and provide the following data: %s",
model,
)
return False
async_add_entities(devices, update_before_add=True)
async def async_service_handler(service):
"""Map services to methods on Xiaomi Philips Lights."""
method = SERVICE_TO_METHOD.get(service.service)
params = {
key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID
}
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
target_devices = [
dev
for dev in hass.data[DATA_KEY].values()
if dev.entity_id in entity_ids
]
else:
target_devices = hass.data[DATA_KEY].values()
update_tasks = []
for target_device in target_devices:
if not hasattr(target_device, method["method"]):
continue
await getattr(target_device, method["method"])(**params)
update_tasks.append(target_device.async_update_ha_state(True))
if update_tasks:
await asyncio.wait(update_tasks)
for xiaomi_miio_service in SERVICE_TO_METHOD:
schema = SERVICE_TO_METHOD[xiaomi_miio_service].get(
"schema", XIAOMI_MIIO_SERVICE_SCHEMA
)
hass.services.async_register(
DOMAIN, xiaomi_miio_service, async_service_handler, schema=schema
)
class XiaomiPhilipsAbstractLight(Light):
"""Representation of a Abstract Xiaomi Philips Light."""
def __init__(self, name, light, model, unique_id):
"""Initialize the light device."""
self._name = name
self._light = light
self._model = model
self._unique_id = unique_id
self._brightness = None
self._available = False
self._state = None
self._state_attrs = {ATTR_MODEL: self._model}
@property
def should_poll(self):
"""Poll the light."""
return True
@property
def unique_id(self):
"""Return an unique ID."""
return self._unique_id
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._state_attrs
@property
def is_on(self):
"""Return true if light is on."""
return self._state
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS
async def _try_command(self, mask_error, func, *args, **kwargs):
"""Call a light command handling error messages."""
from miio import DeviceException
try:
result = await self.hass.async_add_executor_job(
partial(func, *args, **kwargs)
)
_LOGGER.debug("Response received from light: %s", result)
return result == SUCCESS
except DeviceException as exc:
_LOGGER.error(mask_error, exc)
self._available = False
return False
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug("Setting brightness: %s %s%%", brightness, percent_brightness)
result = await self._try_command(
"Setting brightness failed: %s",
self._light.set_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command("Turning the light on failed.", self._light.on)
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self._try_command("Turning the light off failed.", self._light.off)
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
try:
state = await self.hass.async_add_executor_job(self._light.status)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
class XiaomiPhilipsGenericLight(XiaomiPhilipsAbstractLight):
"""Representation of a Generic Xiaomi Philips Light."""
def __init__(self, name, light, model, unique_id):
"""Initialize the light device."""
super().__init__(name, light, model, unique_id)
self._state_attrs.update({ATTR_SCENE: None, ATTR_DELAYED_TURN_OFF: None})
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
try:
state = await self.hass.async_add_executor_job(self._light.status)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{ATTR_SCENE: state.scene, ATTR_DELAYED_TURN_OFF: delayed_turn_off}
)
async def async_set_scene(self, scene: int = 1):
"""Set the fixed scene."""
await self._try_command(
"Setting a fixed scene failed.", self._light.set_scene, scene
)
async def async_set_delayed_turn_off(self, time_period: timedelta):
"""Set delayed turn off."""
await self._try_command(
"Setting the turn off delay failed.",
self._light.delay_off,
time_period.total_seconds(),
)
@staticmethod
def delayed_turn_off_timestamp(
countdown: int, current: datetime, previous: datetime
):
"""Update the turn off timestamp only if necessary."""
if countdown is not None and countdown > 0:
new = current.replace(microsecond=0) + timedelta(seconds=countdown)
if previous is None:
return new
lower = timedelta(seconds=-DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS)
upper = timedelta(seconds=DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS)
diff = previous - new
if lower < diff < upper:
return previous
return new
return None
class XiaomiPhilipsBulb(XiaomiPhilipsGenericLight):
"""Representation of a Xiaomi Philips Bulb."""
def __init__(self, name, light, model, unique_id):
"""Initialize the light device."""
super().__init__(name, light, model, unique_id)
self._color_temp = None
@property
def color_temp(self):
"""Return the color temperature."""
return self._color_temp
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 175
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 333
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_COLOR_TEMP in kwargs:
color_temp = kwargs[ATTR_COLOR_TEMP]
percent_color_temp = self.translate(
color_temp, self.max_mireds, self.min_mireds, CCT_MIN, CCT_MAX
)
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
if ATTR_BRIGHTNESS in kwargs and ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting brightness and color temperature: "
"%s %s%%, %s mireds, %s%% cct",
brightness,
percent_brightness,
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting brightness and color temperature failed: " "%s bri, %s cct",
self._light.set_brightness_and_color_temperature,
percent_brightness,
percent_color_temp,
)
if result:
self._color_temp = color_temp
self._brightness = brightness
elif ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting color temperature: " "%s mireds, %s%% cct",
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting color temperature failed: %s cct",
self._light.set_color_temperature,
percent_color_temp,
)
if result:
self._color_temp = color_temp
elif ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug("Setting brightness: %s %s%%", brightness, percent_brightness)
result = await self._try_command(
"Setting brightness failed: %s",
self._light.set_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command("Turning the light on failed.", self._light.on)
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
try:
state = await self.hass.async_add_executor_job(self._light.status)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
self._color_temp = self.translate(
state.color_temperature, CCT_MIN, CCT_MAX, self.max_mireds, self.min_mireds
)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{ATTR_SCENE: state.scene, ATTR_DELAYED_TURN_OFF: delayed_turn_off}
)
@staticmethod
def translate(value, left_min, left_max, right_min, right_max):
"""Map a value from left span to right span."""
left_span = left_max - left_min
right_span = right_max - right_min
value_scaled = float(value - left_min) / float(left_span)
return int(right_min + (value_scaled * right_span))
class XiaomiPhilipsCeilingLamp(XiaomiPhilipsBulb):
"""Representation of a Xiaomi Philips Ceiling Lamp."""
def __init__(self, name, light, model, unique_id):
"""Initialize the light device."""
super().__init__(name, light, model, unique_id)
self._state_attrs.update(
{ATTR_NIGHT_LIGHT_MODE: None, ATTR_AUTOMATIC_COLOR_TEMPERATURE: None}
)
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 175
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 370
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
try:
state = await self.hass.async_add_executor_job(self._light.status)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
self._color_temp = self.translate(
state.color_temperature, CCT_MIN, CCT_MAX, self.max_mireds, self.min_mireds
)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{
ATTR_SCENE: state.scene,
ATTR_DELAYED_TURN_OFF: delayed_turn_off,
ATTR_NIGHT_LIGHT_MODE: state.smart_night_light,
ATTR_AUTOMATIC_COLOR_TEMPERATURE: state.automatic_color_temperature,
}
)
class XiaomiPhilipsEyecareLamp(XiaomiPhilipsGenericLight):
"""Representation of a Xiaomi Philips Eyecare Lamp 2."""
def __init__(self, name, light, model, unique_id):
"""Initialize the light device."""
super().__init__(name, light, model, unique_id)
self._state_attrs.update(
{ATTR_REMINDER: None, ATTR_NIGHT_LIGHT_MODE: None, ATTR_EYECARE_MODE: None}
)
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
try:
state = await self.hass.async_add_executor_job(self._light.status)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{
ATTR_SCENE: state.scene,
ATTR_DELAYED_TURN_OFF: delayed_turn_off,
ATTR_REMINDER: state.reminder,
ATTR_NIGHT_LIGHT_MODE: state.smart_night_light,
ATTR_EYECARE_MODE: state.eyecare,
}
)
async def async_set_delayed_turn_off(self, time_period: timedelta):
"""Set delayed turn off."""
await self._try_command(
"Setting the turn off delay failed.",
self._light.delay_off,
round(time_period.total_seconds() / 60),
)
async def async_reminder_on(self):
"""Enable the eye fatigue notification."""
await self._try_command(
"Turning on the reminder failed.", self._light.reminder_on
)
async def async_reminder_off(self):
"""Disable the eye fatigue notification."""
await self._try_command(
"Turning off the reminder failed.", self._light.reminder_off
)
async def async_night_light_mode_on(self):
"""Turn the smart night light mode on."""
await self._try_command(
"Turning on the smart night light mode failed.",
self._light.smart_night_light_on,
)
async def async_night_light_mode_off(self):
"""Turn the smart night light mode off."""
await self._try_command(
"Turning off the smart night light mode failed.",
self._light.smart_night_light_off,
)
async def async_eyecare_mode_on(self):
"""Turn the eyecare mode on."""
await self._try_command(
"Turning on the eyecare mode failed.", self._light.eyecare_on
)
async def async_eyecare_mode_off(self):
"""Turn the eyecare mode off."""
await self._try_command(
"Turning off the eyecare mode failed.", self._light.eyecare_off
)
@staticmethod
def delayed_turn_off_timestamp(
countdown: int, current: datetime, previous: datetime
):
"""Update the turn off timestamp only if necessary."""
if countdown is not None and countdown > 0:
new = current.replace(second=0, microsecond=0) + timedelta(
minutes=countdown
)
if previous is None:
return new
lower = timedelta(minutes=-DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES)
upper = timedelta(minutes=DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES)
diff = previous - new
if lower < diff < upper:
return previous
return new
return None
class XiaomiPhilipsEyecareLampAmbientLight(XiaomiPhilipsAbstractLight):
"""Representation of a Xiaomi Philips Eyecare Lamp Ambient Light."""
def __init__(self, name, light, model, unique_id):
"""Initialize the light device."""
name = "{} Ambient Light".format(name)
if unique_id is not None:
unique_id = "{}-{}".format(unique_id, "ambient")
super().__init__(name, light, model, unique_id)
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug(
"Setting brightness of the ambient light: %s %s%%",
brightness,
percent_brightness,
)
result = await self._try_command(
"Setting brightness of the ambient failed: %s",
self._light.set_ambient_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command(
"Turning the ambient light on failed.", self._light.ambient_on
)
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self._try_command(
"Turning the ambient light off failed.", self._light.ambient_off
)
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
try:
state = await self.hass.async_add_executor_job(self._light.status)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.ambient
self._brightness = ceil((255 / 100.0) * state.ambient_brightness)
class XiaomiPhilipsMoonlightLamp(XiaomiPhilipsBulb):
"""Representation of a Xiaomi Philips Zhirui Bedside Lamp."""
def __init__(self, name, light, model, unique_id):
"""Initialize the light device."""
super().__init__(name, light, model, unique_id)
self._hs_color = None
self._state_attrs.pop(ATTR_DELAYED_TURN_OFF)
self._state_attrs.update(
{
ATTR_SLEEP_ASSISTANT: None,
ATTR_SLEEP_OFF_TIME: None,
ATTR_TOTAL_ASSISTANT_SLEEP_TIME: None,
ATTR_BRAND_SLEEP: None,
ATTR_BRAND: None,
}
)
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 153
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 588
@property
def hs_color(self) -> tuple:
"""Return the hs color value."""
return self._hs_color
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_COLOR_TEMP
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_COLOR_TEMP in kwargs:
color_temp = kwargs[ATTR_COLOR_TEMP]
percent_color_temp = self.translate(
color_temp, self.max_mireds, self.min_mireds, CCT_MIN, CCT_MAX
)
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
if ATTR_HS_COLOR in kwargs:
hs_color = kwargs[ATTR_HS_COLOR]
rgb = color.color_hs_to_RGB(*hs_color)
if ATTR_BRIGHTNESS in kwargs and ATTR_HS_COLOR in kwargs:
_LOGGER.debug(
"Setting brightness and color: " "%s %s%%, %s",
brightness,
percent_brightness,
rgb,
)
result = await self._try_command(
"Setting brightness and color failed: " "%s bri, %s color",
self._light.set_brightness_and_rgb,
percent_brightness,
rgb,
)
if result:
self._hs_color = hs_color
self._brightness = brightness
elif ATTR_BRIGHTNESS in kwargs and ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting brightness and color temperature: "
"%s %s%%, %s mireds, %s%% cct",
brightness,
percent_brightness,
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting brightness and color temperature failed: " "%s bri, %s cct",
self._light.set_brightness_and_color_temperature,
percent_brightness,
percent_color_temp,
)
if result:
self._color_temp = color_temp
self._brightness = brightness
elif ATTR_HS_COLOR in kwargs:
_LOGGER.debug("Setting color: %s", rgb)
result = await self._try_command(
"Setting color failed: %s", self._light.set_rgb, rgb
)
if result:
self._hs_color = hs_color
elif ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting color temperature: " "%s mireds, %s%% cct",
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting color temperature failed: %s cct",
self._light.set_color_temperature,
percent_color_temp,
)
if result:
self._color_temp = color_temp
elif ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug("Setting brightness: %s %s%%", brightness, percent_brightness)
result = await self._try_command(
"Setting brightness failed: %s",
self._light.set_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command("Turning the light on failed.", self._light.on)
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
try:
state = await self.hass.async_add_executor_job(self._light.status)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
self._color_temp = self.translate(
state.color_temperature, CCT_MIN, CCT_MAX, self.max_mireds, self.min_mireds
)
self._hs_color = color.color_RGB_to_hs(*state.rgb)
self._state_attrs.update(
{
ATTR_SCENE: state.scene,
ATTR_SLEEP_ASSISTANT: state.sleep_assistant,
ATTR_SLEEP_OFF_TIME: state.sleep_off_time,
ATTR_TOTAL_ASSISTANT_SLEEP_TIME: state.total_assistant_sleep_time,
ATTR_BRAND_SLEEP: state.brand_sleep,
ATTR_BRAND: state.brand,
}
)
async def async_set_delayed_turn_off(self, time_period: timedelta):
"""Set delayed turn off. Unsupported."""
return
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import types
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"ReparameterizationType",
"FULLY_REPARAMETERIZED",
"NOT_REPARAMETERIZED",
"Distribution",
]
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape",
"batch_shape_tensor",
"cdf",
"covariance",
"cross_entropy",
"entropy",
"event_shape",
"event_shape_tensor",
"kl_divergence",
"log_cdf",
"log_prob",
"log_survival_function",
"mean",
"mode",
"prob",
"sample",
"stddev",
"survival_function",
"variance",
]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str = old_str or ""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = tf_inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = tf_inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
@tf_export("distributions.ReparameterizationType")
class ReparameterizationType(object):
"""Instances of this class represent how sampling is reparameterized.
Two static instances exist in the distributions library, signifying
one of two possible properties for samples from a distribution:
`FULLY_REPARAMETERIZED`: Samples from the distribution are fully
reparameterized, and straight-through gradients are supported.
`NOT_REPARAMETERIZED`: Samples from the distribution are not fully
reparameterized, and straight-through gradients are either partially
unsupported or are not supported at all. In this case, for purposes of
e.g. RL or variational inference, it is generally safest to wrap the
    sample results in a `stop_gradient` call and use policy
gradients / surrogate loss instead.
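
  For example, downstream code can branch on this property before relying on
  pathwise gradients (a minimal sketch; `make_loss` and `make_surrogate_loss`
  are hypothetical helpers, not part of this module):

  ```python
  if dist.reparameterization_type == tf.distributions.FULLY_REPARAMETERIZED:
    loss = make_loss(dist.sample())    # gradients flow through the sample
  else:
    loss = make_surrogate_loss(dist)   # e.g. a score-function estimator
  ```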
"""
def __init__(self, rep_type):
self._rep_type = rep_type
def __repr__(self):
return "<Reparameteriation Type: %s>" % self._rep_type
def __eq__(self, other):
"""Determine if this `ReparameterizationType` is equal to another.
    Since ReparameterizationType instances are constant static global
instances, equality checks if two instances' id() values are equal.
Args:
other: Object to compare against.
Returns:
`self is other`.
"""
return self is other
# Fully reparameterized distribution: samples from a fully
# reparameterized distribution support straight-through gradients with
# respect to all parameters.
FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED")
tf_export("distributions.FULLY_REPARAMETERIZED").export_constant(
__name__, "FULLY_REPARAMETERIZED")
# Not reparameterized distribution: samples from a non-
# reparameterized distribution do not support straight-through gradients for
# at least some of the parameters.
NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED")
tf_export("distributions.NOT_REPARAMETERIZED").export_constant(
__name__, "NOT_REPARAMETERIZED")
@six.add_metaclass(_DistributionMeta)
@tf_export("distributions.Distribution")
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g, Bernoulli, Gaussian).
#### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
#### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
  The shapes of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
  `log_prob` reflect this broadcasting, as do the return values of `sample` and
  `sample_n`.
`sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.event_shape
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape_tensor()
# Sampling returns a sample per distribution. `samples` has shape
# [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5,
# batch_shape=[2, 2], and event_shape=[].
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
  # `cum_prob_broadcast` has shape [2, 2], as the `value` argument was
  # broadcasted to the shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
  # `cum_prob_per_dist` has shape [2, 2], one per distribution. No broadcasting
  # occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
#### Shapes
There are three important concepts associated with TensorFlow Distributions
shapes:
- Event shape describes the shape of a single draw from the distribution;
it may be dependent across dimensions. For scalar distributions, the event
shape is `[]`. For a 5-dimensional MultivariateNormal, the event shape is
`[5]`.
- Batch shape describes independent, not identically distributed draws, aka a
"collection" or "bunch" of distributions.
- Sample shape describes independent, identically distributed draws of batches
from the distribution family.
The event shape and the batch shape are properties of a Distribution object,
whereas the sample shape is associated with a specific call to `sample` or
`log_prob`.
For detailed usage examples of TensorFlow Distributions shapes, see
[this tutorial](
https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb)
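
  For instance (a minimal sketch, assuming the scalar `Normal` distribution), a
  single `Normal` has `batch_shape == []` and `event_shape == []`, and drawing
  five samples adds a sample shape of `[5]`:

  ```python
  n = Normal(loc=0., scale=1.)   # batch_shape=[], event_shape=[]
  x = n.sample([5])              # sample shape [5], so x.shape == [5]
  lp = n.log_prob(x)             # lp.shape == [5]
  ```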
#### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `concentration1` and `concentration0`, and does not have
well-defined mode if `concentration1 < 1` or `concentration0 < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
reparameterization_type,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
reparameterization_type: Instance of `ReparameterizationType`.
If `distributions.FULLY_REPARAMETERIZED`, this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution. If `distributions.NOT_REPARAMETERIZED`,
then no such reparameterization is available.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
parameters: Python `dict` of parameters used to instantiate this
`Distribution`.
graph_parents: Python `list` of graph prerequisites of this
`Distribution`.
name: Python `str` name prefixed to Ops created by this class. Default:
subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
if not name or name[-1] != "/": # `name` is not a name scope
non_unique_name = name or type(self).__name__
with ops.name_scope(non_unique_name) as name:
pass
self._dtype = dtype
self._reparameterization_type = reparameterization_type
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters or {}
self._graph_parents = graph_parents
self._name = name
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`.
Subclasses should override class method `_param_shapes`.
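
    For example (a sketch, assuming the `Normal` distribution), requesting
    samples of shape `[5, 2]` yields one shape `Tensor` per parameter:

    ```python
    shapes = Normal.param_shapes([5, 2])
    # shapes["loc"] and shapes["scale"] each evaluate to [5, 2].
    ```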
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. `TensorShape`) shapes.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`. Assumes that the sample's
shape is known statically.
Subclasses should override class method `_param_shapes` to return
constant-valued tensors when constant values are fed.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
# Remove "self", "__class__", or other special variables. These can appear
# if the subclass used:
# `parameters = dict(locals())`.
return {k: v for k, v in self._parameters.items()
if not k.startswith("__") and k != "self"}
@property
def reparameterization_type(self):
"""Describes how samples from the distribution are reparameterized.
Currently this is one of the static instances
`distributions.FULLY_REPARAMETERIZED`
or `distributions.NOT_REPARAMETERIZED`.
Returns:
An instance of `ReparameterizationType`.
"""
return self._reparameterization_type
@property
def allow_nan_stats(self):
"""Python `bool` describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance of a
Cauchy distribution is infinity. However, sometimes the statistic is
undefined, e.g., if a distribution's pdf does not achieve a maximum within
the support of the distribution, the mode is undefined. If the mean is
undefined, then by definition the variance is undefined. E.g. the mean for
Student's T for df = 1 is undefined (no clear way to say it is either + or -
infinity), so the variance = E[(X - mean)**2] is also undefined.
Returns:
allow_nan_stats: Python `bool`.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python `bool` indicating possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
    Note: the copied distribution may continue to depend on the original
initialization arguments.
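
    For example (a sketch, assuming the `Normal` distribution):

    ```python
    n = Normal(loc=0., scale=1.)
    n2 = n.copy(scale=2.)   # same loc, overridden scale
    ```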
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
return type(self)(**parameters)
def _batch_shape_tensor(self):
raise NotImplementedError("batch_shape_tensor is not implemented")
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
if self.batch_shape.is_fully_defined():
return ops.convert_to_tensor(self.batch_shape.as_list(),
dtype=dtypes.int32,
name="batch_shape")
return self._batch_shape_tensor()
def _batch_shape(self):
return tensor_shape.TensorShape(None)
@property
def batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
May be partially defined or unknown.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return tensor_shape.as_shape(self._batch_shape())
def _event_shape_tensor(self):
raise NotImplementedError("event_shape_tensor is not implemented")
def event_shape_tensor(self, name="event_shape_tensor"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
if self.event_shape.is_fully_defined():
return ops.convert_to_tensor(self.event_shape.as_list(),
dtype=dtypes.int32,
name="event_shape")
return self._event_shape_tensor()
def _event_shape(self):
return tensor_shape.TensorShape(None)
@property
def event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return tensor_shape.as_shape(self._event_shape())
def is_scalar_event(self, name="is_scalar_event"):
"""Indicates that `event_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_event: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
name="is_scalar_event")
def is_scalar_batch(self, name="is_scalar_batch"):
"""Indicates that `batch_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_batch: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor),
name="is_scalar_batch")
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented")
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
samples = self._sample_n(n, seed, **kwargs)
batch_event_shape = array_ops.shape(samples)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
samples = array_ops.reshape(samples, final_shape)
samples = self._set_sample_static_shape(samples, sample_shape)
return samples
def sample(self, sample_shape=(), seed=None, name="sample"):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
return self._call_sample_n(sample_shape, seed, name)
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented")
def _call_log_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_prob(value, **kwargs)
except NotImplementedError:
return math_ops.log(self._prob(value, **kwargs))
def log_prob(self, value, name="log_prob"):
"""Log probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_prob(value, name)
def _prob(self, value):
raise NotImplementedError("prob is not implemented")
def _call_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._prob(value, **kwargs)
except NotImplementedError:
return math_ops.exp(self._log_prob(value, **kwargs))
def prob(self, value, name="prob"):
"""Probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_prob(value, name)
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented")
def _call_log_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_cdf(value, **kwargs)
except NotImplementedError:
return math_ops.log(self._cdf(value, **kwargs))
def log_cdf(self, value, name="log_cdf"):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_cdf(value, name)
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented")
def _call_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._cdf(value, **kwargs)
except NotImplementedError:
return math_ops.exp(self._log_cdf(value, **kwargs))
def cdf(self, value, name="cdf"):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_cdf(value, name)
def _log_survival_function(self, value):
raise NotImplementedError("log_survival_function is not implemented")
def _call_log_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_survival_function(value, **kwargs)
except NotImplementedError:
return math_ops.log1p(-self.cdf(value, **kwargs))
def log_survival_function(self, value, name="log_survival_function"):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```none
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_log_survival_function(value, name)
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented")
def _call_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._survival_function(value, **kwargs)
except NotImplementedError:
return 1. - self.cdf(value, **kwargs)
def survival_function(self, value, name="survival_function"):
"""Survival function.
Given random variable `X`, the survival function is defined:
```none
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_survival_function(value, name)
def _entropy(self):
raise NotImplementedError("entropy is not implemented")
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented")
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _quantile(self, value):
raise NotImplementedError("quantile is not implemented")
def _call_quantile(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
return self._quantile(value, **kwargs)
def quantile(self, value, name="quantile"):
"""Quantile function. Aka "inverse cdf" or "percent point function".
Given random variable `X` and `p in [0, 1]`, the `quantile` is:
```none
quantile(p) := x such that P[X <= x] == p
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_quantile(value, name)
def _variance(self):
raise NotImplementedError("variance is not implemented")
def variance(self, name="variance"):
"""Variance.
Variance is defined as,
```none
Var = E[(X - E[X])**2]
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `Var.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
variance: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._variance()
except NotImplementedError:
return math_ops.square(self._stddev())
def _stddev(self):
raise NotImplementedError("stddev is not implemented")
def stddev(self, name="stddev"):
"""Standard deviation.
Standard deviation is defined as,
```none
stddev = E[(X - E[X])**2]**0.5
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `stddev.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
stddev: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._stddev()
except NotImplementedError:
return math_ops.sqrt(self._variance())
def _covariance(self):
raise NotImplementedError("covariance is not implemented")
def covariance(self, name="covariance"):
"""Covariance.
Covariance is (possibly) defined only for non-scalar-event distributions.
For example, for a length-`k`, vector-valued distribution, it is calculated
as,
```none
Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
```
where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
denotes expectation.
Alternatively, for non-vector, multivariate distributions (e.g.,
matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
under some vectorization of the events, i.e.,
```none
Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
```
where `Cov` is a (batch of) `k' x k'` matrices,
`0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
mapping indices of this distribution's event dimensions to indices of a
length-`k'` vector.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
where the first `n` dimensions are batch coordinates and
`k' = reduce_prod(self.event_shape)`.
"""
with self._name_scope(name):
return self._covariance()
def _mode(self):
raise NotImplementedError("mode is not implemented")
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
def _cross_entropy(self, other):
return kullback_leibler.cross_entropy(
self, other, allow_nan_stats=self.allow_nan_stats)
def cross_entropy(self, other, name="cross_entropy"):
"""Computes the (Shannon) cross entropy.
Denote this distribution (`self`) by `P` and the `other` distribution by
`Q`. Assuming `P, Q` are absolutely continuous with respect to
    one another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shannon)
cross entropy is defined as:
```none
H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
```
where `F` denotes the support of the random variable `X ~ P`.
Args:
other: `tf.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
cross_entropy: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
        representing `n` different calculations of (Shannon) cross entropy.
"""
with self._name_scope(name):
return self._cross_entropy(other)
def _kl_divergence(self, other):
return kullback_leibler.kl_divergence(
self, other, allow_nan_stats=self.allow_nan_stats)
def kl_divergence(self, other, name="kl_divergence"):
"""Computes the Kullback--Leibler divergence.
Denote this distribution (`self`) by `p` and the `other` distribution by
`q`. Assuming `p, q` are absolutely continuous with respect to reference
measure `r`, the KL divergence is defined as:
```none
KL[p, q] = E_p[log(p(X)/q(X))]
= -int_F p(x) log q(x) dr(x) + int_F p(x) log p(x) dr(x)
= H[p, q] - H[p]
```
where `F` denotes the support of the random variable `X ~ p`, `H[., .]`
    denotes (Shannon) cross entropy, and `H[.]` denotes (Shannon) entropy.
Args:
other: `tf.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
kl_divergence: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
representing `n` different calculations of the Kullback-Leibler
divergence.
"""
with self._name_scope(name):
return self._kl_divergence(other)
def __str__(self):
return ("tf.distributions.{type_name}("
"\"{self_name}\""
"{maybe_batch_shape}"
"{maybe_event_shape}"
", dtype={dtype})".format(
type_name=type(self).__name__,
self_name=self.name,
maybe_batch_shape=(", batch_shape={}".format(self.batch_shape)
if self.batch_shape.ndims is not None
else ""),
maybe_event_shape=(", event_shape={}".format(self.event_shape)
if self.event_shape.ndims is not None
else ""),
dtype=self.dtype.name))
def __repr__(self):
return ("<tf.distributions.{type_name} "
"'{self_name}'"
" batch_shape={batch_shape}"
" event_shape={event_shape}"
" dtype={dtype}>".format(
type_name=type(self).__name__,
self_name=self.name,
batch_shape=self.batch_shape,
event_shape=self.event_shape,
dtype=self.dtype.name))
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
([] if values is None else values) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape_to_vector(self, x, name):
"""Helper to `sample` which ensures input is 1D."""
x_static_val = tensor_util.constant_value(x)
if x_static_val is None:
prod = math_ops.reduce_prod(x)
else:
prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())
ndims = x.get_shape().ndims # != sample_ndims
if ndims is None:
# Maybe expand_dims.
ndims = array_ops.rank(x)
expanded_shape = util.pick_vector(
math_ops.equal(ndims, 0),
np.array([1], dtype=np.int32), array_ops.shape(x))
x = array_ops.reshape(x, expanded_shape)
elif ndims == 0:
# Definitely expand_dims.
if x_static_val is not None:
x = ops.convert_to_tensor(
np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
name=name)
else:
x = array_ops.reshape(x, [1])
elif ndims != 1:
raise ValueError("Input is neither scalar nor vector.")
return x, prod
def _set_sample_static_shape(self, x, sample_shape):
"""Helper to `sample`; sets static shape info."""
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(sample_shape))
ndims = x.get_shape().ndims
sample_ndims = sample_shape.ndims
batch_ndims = self.batch_shape.ndims
event_ndims = self.event_shape.ndims
# Infer rank(x).
if (ndims is None and
sample_ndims is not None and
batch_ndims is not None and
event_ndims is not None):
ndims = sample_ndims + batch_ndims + event_ndims
x.set_shape([None] * ndims)
# Infer sample shape.
if ndims is not None and sample_ndims is not None:
shape = sample_shape.concatenate([None]*(ndims - sample_ndims))
x.set_shape(x.get_shape().merge_with(shape))
# Infer event shape.
if ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape(
[None]*(ndims - event_ndims)).concatenate(self.event_shape)
x.set_shape(x.get_shape().merge_with(shape))
# Infer batch shape.
if batch_ndims is not None:
if ndims is not None:
if sample_ndims is None and event_ndims is not None:
sample_ndims = ndims - batch_ndims - event_ndims
elif event_ndims is None and sample_ndims is not None:
event_ndims = ndims - batch_ndims - sample_ndims
if sample_ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate(
self.batch_shape).concatenate([None]*event_ndims)
x.set_shape(x.get_shape().merge_with(shape))
return x
def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if static_shape.ndims is not None:
return static_shape.ndims == 0
shape = dynamic_shape_fn()
if (shape.get_shape().ndims is not None and
shape.get_shape()[0].value is not None):
# If the static_shape is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return shape.get_shape().as_list() == [0]
return math_ops.equal(array_ops.shape(shape)[0], 0)
|
|
# Copyright (c) 2014 Matthias Klumpp <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import shutil
import glob
import apt_pkg
import yaml
from argparse import ArgumentParser
from datetime import datetime, timedelta
from apt_pkg import version_compare
from sqlalchemy.sql import exists
from debile.utils.deb822 import Dsc
from debile.master.utils import init_master, session, emit
from debile.master.orm import (Person, Suite, Component, Arch, Check, Group,
GroupSuite, Source, Binary, Deb, Job, Result,
create_source, create_jobs)
from rapidumolib.pkginfo import PackageBuildInfoRetriever
from rapidumolib.config import RapidumoConfig
from rapidumolib.buildcheck import BuildCheck
NEEDSBUILD_EXPORT_DIR = "/srv/dak/export/needsbuild"
class ArchiveDebileBridge:
def __init__(self, config):
self._conf = RapidumoConfig()
self._affinity_preference = config["affinity_preference"]
self._archive_path = "%s/%s" % (self._conf.archive_config['path'], self._conf.distro_name)
self._pkginfo = PackageBuildInfoRetriever(self._conf)
self._bcheck = BuildCheck(self._conf)
def _create_debile_source(self, session, pkg):
user = session.query(Person).filter_by(email="[email protected]").one()
group_suite = session.query(GroupSuite).join(GroupSuite.group).join(GroupSuite.suite).filter(
Group.name == "default",
Suite.name == pkg.suite,
).one()
component = session.query(Component).filter(
Component.name == pkg.component
).one()
dsc_fname = "{root}/{directory}/{filename}".format(
root=self._archive_path,
directory=pkg.directory,
filename=pkg.dsc,
)
dsc = Dsc(open(dsc_fname))
if 'Build-Architecture-Indep' in dsc:
valid_affinities = dsc['Build-Architecture-Indep']
elif 'X-Build-Architecture-Indep' in dsc:
valid_affinities = dsc['X-Build-Architecture-Indep']
elif 'X-Arch-Indep-Build-Arch' in dsc:
valid_affinities = dsc['X-Arch-Indep-Build-Arch']
else:
valid_affinities = "any"
source = create_source(dsc, group_suite, component, user,
self._affinity_preference, valid_affinities)
source.directory = pkg.directory
source.dsc_filename = pkg.dsc
session.add(source)
for aname in pkg.installed_archs:
arch = session.query(Arch).filter_by(name=aname).one()
binary = Binary(source=source, arch=arch, uploaded_at=source.uploaded_at)
session.add(binary)
for name, arch, filename in pkg.binaries:
if arch == binary.arch.name:
directory, _, filename = filename.rpartition('/')
deb = Deb(binary=binary, directory=directory, filename=filename)
session.add(deb)
create_jobs(source, dose_report="No dose-builddebcheck report available yet.")
oldsources = session.query(Source).filter(
Source.group_suite == source.group_suite,
Source.name == source.name,
)
for oldsource in oldsources:
if version_compare(oldsource.version, source.version) >= 0:
continue
# Drop any old jobs that are still pending.
for job in oldsource.jobs:
if (job.check.build and not any(job.built_binaries)) or not any(job.results):
session.delete(job)
elif job.failed is None:
job.failed = any(result.failed for result in job.results)
job.builder = None
job.assigned_at = None
job.finished_at = None
# Actually remove jobs marked for deletion above.
session.commit()
            # If after cleanup there are no build jobs left, remove the source completely
if not any(job.check.build for job in oldsource.jobs):
session.delete(oldsource)
print("Created source for %s %s" % (source.name, source.version))
emit('accept', 'source', source.debilize())
def _create_debile_binaries(self, session, source, pkg):
arch_all = session.query(Arch).filter(Arch.name == "all").one()
arches = session.query(Arch).filter(Arch.name.in_(pkg.installed_archs)).all()
if arch_all in source.arches and arch_all not in arches and source.affinity in arches:
if not session.query(exists().where((Job.source == source) &
(Job.arch == arch_all) &
Job.check.has(Check.build == True))).scalar():
                # We have the arch:affinity binary but are still missing the arch:all binary
# Make sure debile builds the arch:all binary separately
check = session.query(Check).filter(Check.build == True).one()
job = Job(check=check, arch=arch_all,
source=source, binary=None)
session.add(job)
for arch in arches:
if session.query(exists().where((Binary.source == source) & (Binary.arch == arch))).scalar():
continue
# Find the job for this binary
job = session.query(Job).join(Job.check).filter(
Job.source == source,
Job.arch == arch,
Check.build == True,
).first()
if not job and arch == arch_all and source.affinity in arches:
# The arch:all binary might have been created by the arch:affinity build job.
job = session.query(Job).join(Job.check).filter(
Job.source == source,
Job.arch == source.affinity,
Check.build == True,
).first()
if job and (not job.finished_at or job.failed is True):
# Dak accepted a binary upload that debile-master didn't ask for
if arch != arch_all and not any(job.built_binaries):
session.delete(job)
job = None
if job:
binary = job.new_binary(arch)
else:
binary = Binary(source=source, arch=arch, uploaded_at=datetime.utcnow())
session.add(binary)
for name, arch, filename in pkg.binaries:
if arch == binary.arch.name:
directory, _, filename = filename.rpartition('/')
deb = Deb(binary=binary, directory=directory, filename=filename)
session.add(deb)
print("Created binary for %s %s on %s" % (binary.name, binary.version, binary.arch))
emit('accept', 'binary', binary.debilize())
def _create_depwait_report(self, suite):
base_suite = self._conf.get_base_suite(suite)
components = self._conf.get_supported_components(base_suite).split(" ")
supported_archs = self._conf.get_supported_archs(base_suite).split(" ")
bcheck_data = {}
for component in components:
bcheck_data[component] = {}
for arch in supported_archs:
yaml_data = self._bcheck.get_package_states_yaml(suite, component, arch)
yaml_data = yaml_data.replace("%3a", ":") # Support for wheezy version of dose-builddebcheck
report_data = yaml.safe_load(yaml_data)['report']
if not report_data:
report_data = list()
bcheck_data[component][arch] = report_data
yaml_file = open("%s/depwait-%s-%s_%s.yml" % (NEEDSBUILD_EXPORT_DIR, suite, component, arch), "w")
yaml_file.write(yaml_data)
yaml_file.close()
return bcheck_data
def _get_package_depwait_report(self, bcheck_data, job):
arch = job.source.affinity if job.arch.name == "all" else job.arch
for nbpkg in bcheck_data[job.component.name][arch.name]:
if (nbpkg['package'] == ("src:" + job.source.name) and (nbpkg['version'] == job.source.version)):
return nbpkg
return None
def import_pkgs(self, suite):
pkg_dict = self._pkginfo.get_packages_dict(suite)
for pkg in pkg_dict.values():
try:
with session() as s:
source = s.query(Source).join(Source.group_suite).join(GroupSuite.group).join(GroupSuite.suite).filter(
Source.name == pkg.pkgname,
Source.version == pkg.version,
Group.name == "default",
Suite.name == pkg.suite,
).first()
if not source:
self._create_debile_source(s, pkg)
elif pkg.installed_archs:
self._create_debile_binaries(s, source, pkg)
except Exception as ex:
print("Skipping %s (%s) in %s due to error: %s" % (pkg.pkgname, pkg.version, pkg.suite, str(ex)))
def unblock_jobs(self, suite):
bcheck_data = self._create_depwait_report(suite)
with session() as s:
jobs = s.query(Job).join(Job.check).join(Job.source).join(Source.group_suite).join(GroupSuite.group).join(GroupSuite.suite).filter(
Group.name == "default",
Suite.name == suite,
Check.build == True,
(Job.dose_report != None) | ~Job.built_binaries.any()
)
for job in jobs:
try:
report = self._get_package_depwait_report(bcheck_data, job)
if report and report['status'] != "ok":
dose_report = "Unknown problem"
for reason in report["reasons"]:
if "missing" in reason:
dose_report = ("Unsat dependency %s" %
(reason["missing"]["pkg"]["unsat-dependency"]))
break
elif "conflict" in reason:
dose_report = ("Conflict between %s and %s" %
(reason["conflict"]["pkg1"]["package"],
reason["conflict"]["pkg2"]["package"]))
break
if job.dose_report != dose_report:
job.dose_report = dose_report
                    elif job.dose_report is not None:
job.dose_report = None
print("Unblocked job %s (%s) %s" %
(job.source.name, job.source.version, job.name))
except Exception as ex:
print("Skipping %s (%s) %s due to error: %s" %
(job.source.name, job.source.version, job.name, str(ex)))
def prune_pkgs(self, suite):
base_suite = self._conf.get_base_suite(suite)
suites = [suite, base_suite] if suite != base_suite else [suite]
components = self._conf.get_supported_components(base_suite).split(" ")
pkg_list = []
for s in suites:
for c in components:
pkg_list += self._pkginfo._get_package_list(s, c)
pkgs = set()
pkgs.update(pkg.pkgname + " " + pkg.version for pkg in pkg_list)
with session() as s:
sources = s.query(Source).join(Source.group_suite).join(GroupSuite.group).join(GroupSuite.suite).filter(
Group.name == "default",
Suite.name == suite,
)
for source in sources:
if not (source.name + " " + source.version) in pkgs and not os.path.exists(source.dsc_path):
print("Removed obsolete source %s %s" % (source.name, source.version))
# Package no longer in the archive (neither in the index nor the pool)
s.delete(source)
def reschedule_jobs(self):
with session() as s:
cutoff = datetime.utcnow() - timedelta(days=1)
jobs = s.query(Job).filter(
Job.failed.is_(None),
Job.finished_at != None,
Job.finished_at < cutoff,
)
for job in jobs:
# Still missing the .dud a day after the builder told debile-master it had finished the job
print("Rescheduling %s in %s due to missing *.dud upload" % (str(job), str(job.group_suite)))
job.failed = None
job.builder = None
job.assigned_at = None
job.finished_at = None
cutoff = datetime.utcnow() - timedelta(days=7)
jobs = s.query(Job).join(Job.check).filter(
Check.build == True,
Job.failed.is_(False),
~Job.built_binaries.any(),
Job.finished_at != None,
Job.finished_at < cutoff,
)
for job in jobs:
# Still missing the .changes a week after the builder told debile-master it had finished the build job
print("Rescheduling %s in %s due to missing *.changes upload" % (str(job), str(job.group_suite)))
job.failed = None
job.builder = None
job.assigned_at = None
job.finished_at = None
def clean_results(self):
path = None
dirs = set()
with session() as s:
group = s.query(Group).filter_by(name="default").one()
path = group.files_path
dirs.update(x.directory for x in s.query(Result).join(Result.job).join(Job.source).join(Source.group_suite).filter(GroupSuite.group == group))
old_cwd = os.getcwd()
try:
os.chdir(path)
for dir in glob.iglob("*/*/*"):
if os.path.isdir(dir) and dir not in dirs:
# An orphaned results path, remove it
shutil.rmtree(dir)
print("Removed orphaned result dir %s" % dir)
finally:
os.chdir(old_cwd)
def main():
# init Apt, we need it later
apt_pkg.init()
parser = ArgumentParser(description="Debile Tanglu integration script")
actions = parser.add_argument_group("Actions")
actions.add_argument("--import", action="store_true", dest="import_pkgs",
help="Import new packages from Dak to Debile")
actions.add_argument("--unblock", action="store_true", dest="unblock_jobs",
help="Run dose and unblock jobs that are now buildable")
actions.add_argument("--prune", action="store_true", dest="prune_pkgs",
help="Prune packages no longer in Dak from Debile")
actions.add_argument("--reschedule", action="store_true", dest="reschedule_jobs",
help="Reschedule jobs where debile is still waiting for an upload")
actions.add_argument("--clean", action="store_true", dest="clean_results",
help="Remove unreferenced result directories")
parser.add_argument("--config", action="store", dest="config", default=None,
help="Path to the master.yaml config file.")
parser.add_argument("suites", action="store", nargs='*',
help="Suites to process.")
args = parser.parse_args()
config = init_master(args.config)
bridge = ArchiveDebileBridge(config)
if args.import_pkgs:
for suite in args.suites:
bridge.import_pkgs(suite)
if args.unblock_jobs:
for suite in args.suites:
bridge.unblock_jobs(suite)
if args.prune_pkgs:
for suite in args.suites:
bridge.prune_pkgs(suite)
if args.reschedule_jobs:
bridge.reschedule_jobs()
if args.clean_results:
bridge.clean_results()
if __name__ == '__main__':
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
main()
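# Illustrative invocations (script name and paths are hypothetical, the flags are
# the ones defined above):
#   debile-tanglu-bridge --config /etc/debile/master.yaml --import staging
#   debile-tanglu-bridge --config /etc/debile/master.yaml --unblock staging
#   debile-tanglu-bridge --reschedule --clean
# --import/--unblock/--prune loop over the listed suites; --reschedule and --clean
# act globally and ignore the positional suite arguments.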
|
|
import datetime
import json
import operator
from functools import reduce
from pathlib import Path
from django.conf import settings
from django.core.cache import cache
from django.db import models, transaction
from django.utils.functional import cached_property
from django.utils.html import strip_tags
from django_hosts.resolvers import reverse
from django.utils.text import unescape_entities
from releases.models import Release
from . import utils
class DocumentReleaseManager(models.Manager):
def current(self, lang='en'):
current = self.get(is_default=True)
if lang != 'en':
try:
return self.get(lang=lang, release=current.release)
except DocumentRelease.DoesNotExist:
pass
return current
def current_version(self):
current_version = cache.get(DocumentRelease.DEFAULT_CACHE_KEY)
if not current_version:
try:
current_version = self.current().version
except DocumentRelease.DoesNotExist:
current_version = 'dev'
cache.set(
DocumentRelease.DEFAULT_CACHE_KEY,
current_version,
settings.CACHE_MIDDLEWARE_SECONDS,
)
return current_version
def get_by_version_and_lang(self, version, lang):
return self.get(lang=lang, **{'release__isnull': True} if version == 'dev' else {'release': version})
class DocumentRelease(models.Model):
"""
A "release" of documentation -- i.e. English for v1.2.
"""
DEFAULT_CACHE_KEY = "%s_docs_version" % settings.CACHE_MIDDLEWARE_KEY_PREFIX
lang = models.CharField(max_length=2, choices=settings.LANGUAGES, default='en')
release = models.ForeignKey(Release, null=True, limit_choices_to={'status': 'f'})
is_default = models.BooleanField(default=False)
objects = DocumentReleaseManager()
class Meta:
unique_together = ('lang', 'release')
def __str__(self):
return "%s/%s" % (self.lang, self.version)
def get_absolute_url(self):
kwargs = {
'lang': self.lang,
'version': self.version,
}
return reverse('document-index', host='docs', kwargs=kwargs)
def save(self, *args, **kwargs):
# There can be only one. Default, that is.
if self.is_default:
DocumentRelease.objects.update(is_default=False)
cache.set(
self.DEFAULT_CACHE_KEY,
self.version,
settings.CACHE_MIDDLEWARE_SECONDS,
)
super(DocumentRelease, self).save(*args, **kwargs)
@property
def version(self):
return 'dev' if self.release is None else self.release.version
@property
def human_version(self):
"""
Return a "human readable" version of the version.
"""
return "development" if self.release is None else self.release.version
@property
def is_dev(self):
return self.release is None
@property
def is_supported(self):
if self.release is None:
return True
latest_release = (Release.objects
.filter(major=self.release.major,
minor=self.release.minor,
status='f')
.order_by('-micro')
.first())
if latest_release is None:
return True
eol_date = latest_release.eol_date
return eol_date is None or eol_date > datetime.date.today()
@property
def scm_url(self):
url = 'git://github.com/django/django.git'
if not self.is_dev:
url += '@stable/' + self.version + '.x'
return url
@transaction.atomic
def sync_to_db(self, decoded_documents):
"""
Sync the given list of documents (decoded fjson files from sphinx) to
the database. Deletes all the release's documents first then
reinserts them as needed.
"""
self.documents.all().delete()
for document in decoded_documents:
if 'body' not in document or 'title' not in document:
# We don't care about indexing documents with no body or title
continue
Document.objects.create(
release=self,
path=_clean_document_path(document['current_page_name']),
title=unescape_entities(strip_tags(document['title'])),
)
def _clean_document_path(path):
# We have to be a bit careful to reverse-engineer the correct
# relative path component, especially for "index" documents,
# otherwise the search results will be incorrect.
if path.endswith('/index'):
path = path[:-6]
return path
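# Worked example of _clean_document_path (paths are illustrative):
#   'intro/index'    -> 'intro'           ('/index' is six characters and is stripped)
#   'intro/tutorial' -> 'intro/tutorial'  (unchanged)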
def document_url(doc):
if doc.path:
kwargs = {
'lang': doc.release.lang,
'version': doc.release.version,
'url': doc.path,
}
return reverse('document-detail', host='docs', kwargs=kwargs)
else:
kwargs = {
'lang': doc.release.lang,
'version': doc.release.version,
}
return reverse('document-index', host='docs', kwargs=kwargs)
class DocumentManager(models.Manager):
def breadcrumbs(self, document):
# get an ascending list of parent paths except the root path ('.')
parent_paths = list(Path(document.path).parents)[:-1]
if parent_paths:
or_queries = [models.Q(path=str(path)) for path in parent_paths]
return (self.filter(reduce(operator.or_, or_queries))
.filter(release=document.release)
.exclude(pk=document.pk)
.order_by('path'))
else:
return self.none()
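# Illustrative sketch of the breadcrumb lookup (path is hypothetical): for a
# document with path 'topics/db/queries',
#   list(Path('topics/db/queries').parents)[:-1] == [Path('topics/db'), Path('topics')]
# so the returned queryset matches the documents at 'topics/db' and 'topics' in
# the same release, ordered by path.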
class Document(models.Model):
"""
An individual document. Used mainly as a hook point for the search.
"""
release = models.ForeignKey(DocumentRelease, related_name='documents')
path = models.CharField(max_length=500)
title = models.CharField(max_length=500)
objects = DocumentManager()
class Meta:
unique_together = ('release', 'path')
def __str__(self):
return "/".join([self.release.lang, self.release.version, self.path])
def get_absolute_url(self):
return document_url(self)
@cached_property
def root(self):
return utils.get_doc_root(self.release.lang, self.release.version)
@cached_property
def full_path(self):
return utils.get_doc_path(self.root, self.path)
@cached_property
def body(self):
"""The document's body"""
with open(str(self.full_path)) as fp:
doc = json.load(fp)
return doc['body']
|
|
import pp, os, sys, time, math, dill, operator  # math is needed by parallel_rLoopTopD (math.floor)
import readFromFile
from SA_LCP import SA_LCP
import Utils
from ArrRef import ArrRef
from ArrRef import eBits
#from transpose import transpose
#from transpose import blockTrans
from compS import compS
dill.settings['recurse'] = True
_BSIZE = 2048
_SCAN_LOG_BSIZE = 10
_SCAN_BSIZE = (1 << _SCAN_LOG_BSIZE)
_INT_MAX = sys.maxint
MAX_RADIX = 8
BUCKETS = 256
_TRANS_THRESHHOLD = 64
_F_BSIZE = (2*_SCAN_BSIZE)
_MERGE_BSIZE = 8192
def mod3iss1(i):
return (i%3 == 1)
def radixBlock(A, B, Tmp, counts, offsets, Boffset, n, m, extract):
for i in range(0,m):
counts[i] = 0
for j in range(0,n):
k = Tmp[j] = ArrRef(A[j]).eBitsExec(extract)
counts[k] += 1
s = Boffset
for i in range(0,m):
s += counts[i]
offsets[i] = s
for j in range(n-1,-1,-1):
offsets[Tmp[j]] = offsets[Tmp[j]] - 1
x = offsets[Tmp[j]]
B[x] = A[j]
def radixStepSerial(A, B, Tmp, buckets, n, m, extract):
radixBlock(A, B, Tmp, buckets, buckets, 0, n, m, extract)
for i in range(0,n):
A[i] = B[i]
return
###################
def parallel_rBlock(A, B, Tmp, m, extract, cnts, nn, oB, n, i):  # n (total length) was referenced below but never passed in
od = i*nn
nni = min(max(n-od,0), nn)
radixBlock(A+od, B, Tmp+od, cnts + m*i, oB + m*i, od, nni, m, extract)
##################
# It was only possible to do this up to a smaller size
def radixStep(A, B, Tmp, BK, numBK, n, m, top, extract, js):
expand = 32
blocks = min(numBK/3,(1+n/(BUCKETS*expand)))
if (blocks < 2):
radixStepSerial(A, B, Tmp, BK[0], n, m, extract)
return
'''
nn = (n + blocks - 1)/ blocks
cnts = BK
oA = (BK + blocks)
oB = (BK + 2*blocks)
jobs = [(i, js.submit(pBlocked_for, (_ss, i, _bsize, _ee, body, f, g,), (reduceSerial,))) for i in range(0,blocks)]
for i, job in jobs:
sums[i] = job()
return sums
'''
def radixLoopBottomUp(A, B, Tmp, BK, numBK, n, bits, top, f, js):
rounds = 1 + (bits - 1) / MAX_RADIX
rbits = 1+(bits-1)/rounds
bitOffset = 0
while(bitOffset < bits):
if (bitOffset+rbits > bits):
rbits = bits-bitOffset
radixStep(A, B, Tmp, BK, numBK, n, 1 << rbits, top, eBits(rbits,bitOffset,f), js)
bitOffset += rbits
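# Worked example of the round/width computation above (Python 2 integer division,
# numbers are illustrative): with bits = 18 and MAX_RADIX = 8,
#   rounds = 1 + (18 - 1)/8 = 3
#   rbits  = 1 + (18 - 1)/3 = 6
# so the loop performs three radix passes of 6 bits each (bitOffset = 0, 6, 12).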
###################
def parallel_rLoopTopD(n, offsets, y, i, A, B, Tmp, BK, bits, f, js):
segOffset = offsets[i]
segNextOffset = n if (i == BUCKETS-1) else offsets[i+1]
segLen = segNextOffset - segOffset
blocksOffset = (math.floor(segOffset * y)) + i + 1
blocksNextOffset = (math.floor(segNextOffset * y)) + i + 2
blockLen = blocksNextOffset - blocksOffset
radixLoopTopDown(A + segOffset, B + segOffset, Tmp + segOffset, BK + blocksOffset, blockLen, segLen, bits - MAX_RADIX, f, js)
#################
def radixLoopTopDown(A, B, Tmp, BK, numBK, n, bits, f, js):
if (n == 0):
return
if (bits <= MAX_RADIX):
radixStep(A, B, Tmp, BK, numBK, n, 1 << bits, True, eBits(bits,0,f), js)
elif(numBK >= BUCKETS+1):
radixStep(A, B, Tmp, BK, numBK, n, BUCKETS, True, eBits(MAX_RADIX,bits-MAX_RADIX,f), js)
offsets = BK[0]
remain = numBK - BUCKETS - 1
y = remain / n
jobs = [(i, js.submit(parallel_rLoopTopD, (n, offsets, y, i, A, B, Tmp, BK, bits, f, js), (radixStep,))) for i in range(0,BUCKETS)]
for i, job in jobs:
job()
else:
radixLoopBottomUp(A, B, Tmp, BK, numBK, n, bits, False, f, js)
def iSort(A, bucketOffsets, n, m, bottomUp, f, js):
bits = Utils.log2Up(m)
B = [0]*n
Tmp = [0]*n
numBK = 1 + n/(BUCKETS*8)
BK = [[0]*BUCKETS]*numBK
if (bottomUp):
radixLoopBottomUp(A, B, Tmp, BK, numBK, n, bits, True, f, js)
else:
radixLoopTopDown(A, B, Tmp, BK, numBK, n, bits, f, js)
    # case where the bucket-offsets array is not empty
if (bucketOffsets != None):
        # TODO: parallelize
for i in range(0,m):
bucketOffsets[i] = n
for i in range(0, n-1):
v = f(ArrRef(A[i]))
vn = f(ArrRef(A[i+1]))
if (v != vn):
bucketOffsets[vn] = i + 1
bucketOffsets[f(ArrRef(A[0]))] = 0
        scanIBack(bucketOffsets, bucketOffsets, m, min, n)
del B
del Tmp
del BK
#def iSort(A, bucketOffsets, n, m, f, js):
# iSort(A, bucketOffsets, n, m, False, f, js)
def iSortInic(A, n, m, f, js):
iSort(A, None, n, m, False, f, js)
def iSortBottomUp(A, n, m, f, js):
iSort(A, None, n, m, True, f, js)
def radixSortPair(A, n, m, js):
iSortInic(A, n, m, ArrRef.getF, js)
def reduceSerial(s, e, f, g):
r = g(s)
for j in range(s+1,e):
r = f(r,g(j))
return r
#########
def pBlocked_for(_ss, _i, _bsize, _ee, body, f, g):
_s = _ss + _i * (_bsize)
_e = min(_s + (_bsize), _ee)
return body(_s, _e, f, g)
########
def nblocks(n, bsize):
return (1 + ((n) - 1)/(bsize))
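# Worked example (Python 2 integer division): nblocks(10, 4) == 1 + (10 - 1)/4 == 3,
# i.e. ten elements split into blocks of four need three blocks.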
def blocked_for(_s, _e, _bsize, body, f, g, js):
_ss = _s
_ee = _e
_n = _ee - _ss
_l = nblocks(_n, _bsize)
sums = [0]*_l
    # TODO: parallelize
jobs = [(i, js.submit(pBlocked_for, (_ss, i, _bsize, _ee, body, f, g,), (reduceSerial,))) for i in range(0,_l)]
for i, job in jobs:
sums[i] = job()
return sums
def reduce(s, e, f, g, js):
l = nblocks(e-s, _SCAN_BSIZE)
if (l <= 1):
return reduceSerial(s, e, f, g)
Sums = blocked_for(s, e, _SCAN_BSIZE, reduceSerial, f, g, js)
r = reduce(0, l, f, ArrRef(Sums).get, js)
del Sums
return r
def reduceInit(A, n, f, js):
return reduce(0, n, f, ArrRef(A).get, js)
############
def fillSS(A,i):
return (ord(A[i]) + 1)
############
def fillCBig(s, j, bits):
return [(s[j] << 2*bits) + (s[j+1] << bits) + s[j+2], j]
############
def fillC(s, j):
return [s[j + 2], j]
############
def fillCFirst(s, i):
return s[i]
##################
# i = i, j = i - 1
def fillName12(s, i, j):
if ((s[i] != s[j]) or (s[i + 1] != s[j + 1]) or (s[i+2] != s[j + 2])):
return 1
else:
return 0
##################
def scanSerial(Out, s, e, f, g, zero, inclusive, back, js):
r = zero
if (inclusive):
if (back):
for i in range(e-1,s-1, -1):
                Out[i] = r = f(r,g(i))
else:
for i in range(s, e):
Out[i] = r = f(r,g(i))
else:
if (back):
for i in range(e-1, s-1, -1):
t = g(i)
Out[i] = r
r = f(r,t)
else:
for i in range(s, e):
t = g(i)
Out[i] = r
r = f(r,t)
return r
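# Illustrative example (values are hypothetical, assuming ArrRef(...).get maps an
# index to the stored value, as in reduceInit/scanI): with f = operator.add,
# g = ArrRef([1, 2, 3]).get, zero = 0, s = 0, e = 3 and back = False:
#   exclusive scan (inclusive=False): Out becomes [0, 1, 3] and 6 is returned
#   inclusive scan (inclusive=True):  Out becomes [1, 3, 6] and 6 is returned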
def scan(Out, s, e, f, g, zero, inclusive, back, js):
n = e - s
l = nblocks(n, _SCAN_BSIZE)
if (l <= 2):
return scanSerial(Out, s, e, f, g, zero, inclusive, back, js)
    # for more than two blocks (the parallel path below is still unfinished)
# Sums = [0]*l
# Sums = blocked_for(s, e, _SCAN_BSIZE, reduceSerial, f, g, js)
# total = scan(Sums, 0, l, f, ArrRef(Sums).get, zero, False, back, js)
def scanI(In, Out, n, f, zero, js):
return scan(Out, 0, n, f, ArrRef(In).get, zero, True, False, js)
##############################
def fillS12(name12, i):
return name12[i]
######################
def fillSA12(i, SA12, n1):
l = SA12[i]
if ((l < n1)):
return 3 * l + 1
else:
return 3 * (l - n1) + 2
#####################
def fillRank(i):
return i + 2
###################
def fillFl(p, In, i):
return p(In[i])
####################
def packSerial(Out, Fl, s, e, f, js):
k = 0
for i in range(s, e):
if (Fl[i]):
Out[k] = f(i)
k = k + 1
return k
def pack(Out, Fl, s, e, f, js):
l = nblocks(e - s, _F_BSIZE)
if ( l <= 1):
return packSerial(Out, Fl, s, e, f, js)
def packInic(In, Out, Fl, n, js):
return pack(Out, Fl, 0, n, ArrRef(In).get, js)
def filterI(In, Out, n, p, js):
Fl = [False]*n
jobs = [(i, js.submit(fillFl,(p, In, i),)) for i in range(0,n)]
for i, job in jobs:
Fl[i] = bool(job())
m = packInic(In, Out, Fl, n, js)
del Fl
return m
########################
def fillD(i, s, s0):
return [s[s0[i] - 1], s0[i] - 1]
######################
def fillSA0(D, i):
return D[i][1]
#######################
def binSearch(S, n, v, f):
    T = S  # points to the beginning of S
pT = 0
while (n > 0):
mid = n/2
        if (f(v, T[pT + mid])):  # index relative to pT, since T itself is never advanced
n = mid
else:
n = (n - mid) - 1
pT = pT + mid + 1
return pT
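# Illustrative example (with the pT-relative indexing above, and assuming f is a
# strict less-than comparator such as operator.lt):
#   binSearch([1, 3, 5, 7], 4, 4, operator.lt) -> 2
# i.e. the index at which 4 would be inserted to keep the run sorted.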
#merge(SA0+o, n0-o, SA12+1-o, n12+o-1, SA, comp);
#merge(SA0, n0-o, SA12, n12+o-1, SA, comp.comp,o, js)
def merge(S1, l1, S2, l2, R, f, o, js):
lr = l1 + l2
if ( lr > _MERGE_BSIZE):
        if (l2 > l1):
            # Swap the two runs; the start offsets o and 1-o swap with them.
            merge(S2, l2, S1, l1, R, f, 1 - o, js)
        else:
            m1 = l1/2
            m2 = binSearch(S2, l2, S1[m1], f)
            # TODO: parallelize later! The recursive split below is still a rough
            # sketch; o and js are passed through so the calls match the signature.
            merge(S1, m1, S2, m2, R, f, o, js)
            merge(S1, l1 - m1, S2, l2 - m2, R, f, o, js)
else:
        # These are pointers; in Python they are simply indices into the lists
        pR = 0  # start of R
        pS1 = 0 + o  # start of S1
        pS2 = 0 + 1 - o  # start of S2
        eS1 = pS1 + l1  # end of the S1 run
        eS2 = pS2 + l2  # end of the S2 run
while (True):
            if (pS1 == eS1):
                # copy the remaining tail of S2 in place (overwrite, do not insert)
                R[pR:pR + (eS2 - pS2)] = S2[pS2:eS2]
                break
            if (pS2 == eS2):
                R[pR:pR + (eS1 - pS1)] = S1[pS1:eS1]
                break
if f(S2[pS2],S1[pS1]):
R[pR] = S2[pS2]
pS2 = pS2 + 1
pR = pR + 1
else:
R[pR] = S1[pS1]
pS1 = pS1 + 1
pR = pR + 1
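# suffixArrayRec below follows the DC3 / skew construction: suffixes starting at
# positions 1 or 2 mod 3 (the "12" set) are sorted recursively, the 0 mod 3
# suffixes are then sorted with a radix pass, and the two sorted sets are merged.
# Worked example of the size bookkeeping (illustrative, Python 2 division): for an
# input of length 10 the function first sets n = 11, so
#   n0  = (11 + 2)/3 = 4   suffixes at positions 0 mod 3
#   n1  = (11 + 1)/3 = 4   suffixes at positions 1 mod 3
#   n12 = 11 - 4     = 7   size of the recursive "12" subproblem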
def suffixArrayRec(s, n, K, js):
n = n + 1
n0 = (n + 2)/3
n1 = (n + 1)/3
n12 = n - n0
C = [0]*n12
bits = Utils.log2Up(K)
if (bits < 11):
jobs = [(i, js.submit(fillCBig,(s, 1 + (i + i + i)/2,bits),)) for i in range(0,n12)]
for i, job in jobs:
C[i] = job()
        # start the radix sort
radixSortPair(C, n12, 1 << 3*bits, js)
else:
jobs = [(i, js.submit(fillC,(s,1 + (i + i + i)/2))) for i in range(0,n12)]
for i, job in jobs:
C[i] = job()
radixSortPair(C, n12, K, js)
jobs = [(i, js.submit(fillCFirst,(s, C[i][1] + 1),)) for i in range(0,n12)]
for i, job in jobs:
C[i][0] = job()
radixSortPair(C, n12, K, js)
jobs = [(i, js.submit(fillCFirst,(s, C[i][1]),)) for i in range(0,n12)]
for i, job in jobs:
C[i][0] = job()
radixSortPair(C, n12, K, js)
sorted12 = [0]*n12
for i in range(0,n12):
sorted12[i] = C[i][1]
del C
name12 = [0]*n12
jobs = [(i, js.submit(fillName12,(s, sorted12[i], sorted12[i-1]),)) for i in range(1,n12)]
for i, job in jobs:
name12[i] = job()
name12[0] = 1
    ### everything above this point is done
scanI(name12, name12, n12, operator.__add__, 0, js)
names = name12[n12-1]
LCP12 = None
SA12_LCP = None
SA12 = None
if (names < n12):
s12 = [0]*(n12 + 3)
s12[n12] = s12[n12 + 1] = s12[n12 + 2] = 0
jobs = [(i, js.submit(fillS12,(name12, i),)) for i in range(0,n12)]
for i, job in jobs:
if (sorted12[i] % 3 == 1):
s12[sorted12[i] / 3] = job()
else:
s12[sorted12[i] / 3 + n1] = job()
del name12
del sorted12
SA12_LCP = suffixArrayRec(s12, n12, names+1, js)
SA12 = SA12_LCP
del s12
jobs = [(i, js.submit(fillSA12,(i, SA12, n1),)) for i in range(0,n12)]
for i, job in jobs:
SA12[i] = job()
else:
del name12
SA12 = sorted12
rank = [0]*(n + 2)
rank[n] = 1
rank[n + 1] = 0
jobs = [(i, js.submit(fillRank,(i,),)) for i in range(0,n12)]
for i, job in jobs:
rank[SA12[i]] = job()
s0 = [0]*n0
x = filterI(SA12, s0, n12, mod3iss1, js)
D = [0]*n0
D[0] = [s[n - 1], n - 1]
jobs = [(i, js.submit(fillD,(i, s, s0),)) for i in range(0,x)]
for i, job in jobs:
D[i + n0 - x] = job()
radixSortPair(D, n0, K, js)
SA0 = s0
jobs = [(i, js.submit(fillSA0,(D, i),)) for i in range(0,n0)]
for i, job in jobs:
SA0[i] = job()
del D
comp = compS(s, rank)
SA = [0]*n
o = 1 if (n%3 == 1) else 0
SA12.extend([-1])
SA0.extend([-1])
merge(SA0, n0-o, SA12, n12+o-1, SA, comp.comp,o, js)
SA12 = SA12[:-1]
SA0 = SA0[:-1]
del SA0
del SA12
del rank
return SA
def suffixArray(sa_lcp, js):
n = sa_lcp.N
sa_lcp.SS = [0]*(n + 3)
sa_lcp.SS[n] = sa_lcp.SS[n+1] = sa_lcp.SS[n+2] = 0
jobs = [(inp, js.submit(fillSS,(sa_lcp.S,inp), )) for inp in range(0,n)]
for i, job in jobs:
sa_lcp.SS[i] = job()
    # Reduce to obtain k (largest symbol value + 1)
k = 1 + reduceInit(sa_lcp.SS, sa_lcp.N, max, js)
SA_LCP = suffixArrayRec(sa_lcp.SS, sa_lcp.N, k, js)
return SA_LCP
fileName = ""
if len(sys.argv) < 3:
sys.exit('Usage: %s input-file processors' % sys.argv[0])
if not os.path.exists(sys.argv[1]):
sys.exit('ERROR: input-file %s was not found!' % sys.argv[1])
else:
processors = 1
if len(sys.argv) == 3:
processors = int(sys.argv[2])
fileName = str(sys.argv[1])
ppservers = ()
job_server = pp.Server(processors, ppservers=ppservers)
print (job_server.get_ncpus(), " workers\n")
(S,n) = readFromFile.read(fileName)
# repeat 10 times
times = []
N = 10
for rep in range(0,N):
time1 = time.clock()
sa_lcp = SA_LCP(S,n)
SA = suffixArray(sa_lcp, job_server)
print("repetition # ", rep+1)
print("time: ",time.clock()-time1)
print("SA: ", SA)
times.append(time.clock()-time1)
prom = sum(times)/N
print("average: ", prom)
|
|
"""setuptools.command.bdist_egg
Build .egg distributions"""
from distutils.dir_util import remove_tree, mkpath
from distutils import log
from types import CodeType
import sys
import os
import re
import textwrap
import marshal
from pkg_resources import get_build_platform, Distribution
from setuptools.extension import Library
from setuptools import Command
from .._path import ensure_directory
from sysconfig import get_path, get_python_version
def _get_purelib():
return get_path("purelib")
def strip_module(filename):
if '.' in filename:
filename = os.path.splitext(filename)[0]
if filename.endswith('module'):
filename = filename[:-6]
return filename
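# Illustrative examples (filenames are hypothetical):
#   strip_module('foomodule.so') -> splitext gives 'foomodule' -> trailing 'module' stripped -> 'foo'
#   strip_module('bar.pyd')      -> 'bar'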
def sorted_walk(dir):
"""Do os.walk in a reproducible way,
independent of indeterministic filesystem readdir order
"""
for base, dirs, files in os.walk(dir):
dirs.sort()
files.sort()
yield base, dirs, files
def write_stub(resource, pyfile):
_stub_template = textwrap.dedent("""
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, importlib.util
__file__ = pkg_resources.resource_filename(__name__, %r)
__loader__ = None; del __bootstrap__, __loader__
spec = importlib.util.spec_from_file_location(__name__,__file__)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
__bootstrap__()
""").lstrip()
with open(pyfile, 'w') as f:
f.write(_stub_template % resource)
class bdist_egg(Command):
description = "create an \"egg\" distribution"
user_options = [
('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p', "platform name to embed in generated filenames "
"(default: %s)" % get_build_platform()),
('exclude-source-files', None,
"remove all .py files from the generated egg"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = [
'keep-temp', 'skip-build', 'exclude-source-files'
]
def initialize_options(self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.egg_output = None
self.exclude_source_files = None
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'egg')
if self.plat_name is None:
self.plat_name = get_build_platform()
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
if self.egg_output is None:
# Compute filename of the output egg
basename = Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
self.distribution.has_ext_modules() and self.plat_name
).egg_name()
self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
def do_install_data(self):
# Hack for packages that install data to install's --install-lib
self.get_finalized_command('install').install_lib = self.bdist_dir
site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
old, self.distribution.data_files = self.distribution.data_files, []
for item in old:
if isinstance(item, tuple) and len(item) == 2:
if os.path.isabs(item[0]):
realpath = os.path.realpath(item[0])
normalized = os.path.normcase(realpath)
if normalized == site_packages or normalized.startswith(
site_packages + os.sep
):
item = realpath[len(site_packages) + 1:], item[1]
# XXX else: raise ???
self.distribution.data_files.append(item)
try:
log.info("installing package data to %s", self.bdist_dir)
self.call_command('install_data', force=0, root=None)
finally:
self.distribution.data_files = old
def get_outputs(self):
return [self.egg_output]
def call_command(self, cmdname, **kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname, self.bdist_dir)
kw.setdefault('skip_build', self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
def run(self): # noqa: C901 # is too complex (14) # FIXME
# Generate metadata first
self.run_command("egg_info")
# We run install_lib before install_data, because some data hacks
# pull their data path from the install_lib command.
log.info("installing library code to %s", self.bdist_dir)
instcmd = self.get_finalized_command('install')
old_root = instcmd.root
instcmd.root = None
if self.distribution.has_c_libraries() and not self.skip_build:
self.run_command('build_clib')
cmd = self.call_command('install_lib', warn_dir=0)
instcmd.root = old_root
all_outputs, ext_outputs = self.get_ext_outputs()
self.stubs = []
to_compile = []
for (p, ext_name) in enumerate(ext_outputs):
filename, ext = os.path.splitext(ext_name)
pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
'.py')
self.stubs.append(pyfile)
log.info("creating stub loader for %s", ext_name)
if not self.dry_run:
write_stub(os.path.basename(ext_name), pyfile)
to_compile.append(pyfile)
ext_outputs[p] = ext_name.replace(os.sep, '/')
if to_compile:
cmd.byte_compile(to_compile)
if self.distribution.data_files:
self.do_install_data()
# Make the EGG-INFO directory
archive_root = self.bdist_dir
egg_info = os.path.join(archive_root, 'EGG-INFO')
self.mkpath(egg_info)
if self.distribution.scripts:
script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s", script_dir)
self.call_command('install_scripts', install_dir=script_dir,
no_ep=1)
self.copy_metadata_to(egg_info)
native_libs = os.path.join(egg_info, "native_libs.txt")
if all_outputs:
log.info("writing %s", native_libs)
if not self.dry_run:
ensure_directory(native_libs)
libs_file = open(native_libs, 'wt')
libs_file.write('\n'.join(all_outputs))
libs_file.write('\n')
libs_file.close()
elif os.path.isfile(native_libs):
log.info("removing %s", native_libs)
if not self.dry_run:
os.unlink(native_libs)
write_safety_flag(
os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
)
if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
log.warn(
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
if self.exclude_source_files:
self.zap_pyfiles()
# Make the archive
make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
dry_run=self.dry_run, mode=self.gen_header())
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution, 'dist_files', []).append(
('bdist_egg', get_python_version(), self.egg_output))
def zap_pyfiles(self):
log.info("Removing .py files from temporary directory")
for base, dirs, files in walk_egg(self.bdist_dir):
for name in files:
path = os.path.join(base, name)
if name.endswith('.py'):
log.debug("Deleting %s", path)
os.unlink(path)
if base.endswith('__pycache__'):
path_old = path
pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc'
m = re.match(pattern, name)
path_new = os.path.join(
base, os.pardir, m.group('name') + '.pyc')
log.info(
"Renaming file from [%s] to [%s]"
% (path_old, path_new))
try:
os.remove(path_new)
except OSError:
pass
os.rename(path_old, path_new)
def zip_safe(self):
safe = getattr(self.distribution, 'zip_safe', None)
if safe is not None:
return safe
log.warn("zip_safe flag not set; analyzing archive contents...")
return analyze_egg(self.bdist_dir, self.stubs)
def gen_header(self):
return 'w'
def copy_metadata_to(self, target_dir):
"Copy metadata (egg info) to the target_dir"
# normalize the path (so that a forward-slash in egg_info will
# match using startswith below)
norm_egg_info = os.path.normpath(self.egg_info)
prefix = os.path.join(norm_egg_info, '')
for path in self.ei_cmd.filelist.files:
if path.startswith(prefix):
target = os.path.join(target_dir, path[len(prefix):])
ensure_directory(target)
self.copy_file(path, target)
def get_ext_outputs(self):
"""Get a list of relative paths to C extensions in the output distro"""
all_outputs = []
ext_outputs = []
paths = {self.bdist_dir: ''}
for base, dirs, files in sorted_walk(self.bdist_dir):
for filename in files:
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
all_outputs.append(paths[base] + filename)
for filename in dirs:
paths[os.path.join(base, filename)] = (paths[base] +
filename + '/')
if self.distribution.has_ext_modules():
build_cmd = self.get_finalized_command('build_ext')
for ext in build_cmd.extensions:
if isinstance(ext, Library):
continue
fullname = build_cmd.get_ext_fullname(ext.name)
filename = build_cmd.get_ext_filename(fullname)
if not os.path.basename(filename).startswith('dl-'):
if os.path.exists(os.path.join(self.bdist_dir, filename)):
ext_outputs.append(filename)
return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
"""Walk an unpacked egg's contents, skipping the metadata directory"""
walker = sorted_walk(egg_dir)
base, dirs, files = next(walker)
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base, dirs, files
for bdf in walker:
yield bdf
def analyze_egg(egg_dir, stubs):
# check for existing flag in EGG-INFO
for flag, fn in safety_flags.items():
if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
return flag
if not can_scan():
return False
safe = True
for base, dirs, files in walk_egg(egg_dir):
for name in files:
if name.endswith('.py') or name.endswith('.pyw'):
continue
elif name.endswith('.pyc') or name.endswith('.pyo'):
# always scan, even if we already know we're not safe
safe = scan_module(egg_dir, base, name, stubs) and safe
return safe
def write_safety_flag(egg_dir, safe):
# Write or remove zip safety flag file(s)
for flag, fn in safety_flags.items():
fn = os.path.join(egg_dir, fn)
if os.path.exists(fn):
if safe is None or bool(safe) != flag:
os.unlink(fn)
elif safe is not None and bool(safe) == flag:
f = open(fn, 'wt')
f.write('\n')
f.close()
safety_flags = {
True: 'zip-safe',
False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
"""Check whether module possibly uses unsafe-for-zipfile stuff"""
filename = os.path.join(base, name)
if filename[:-1] in stubs:
return True # Extension module
pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
if sys.version_info < (3, 7):
skip = 12 # skip magic & date & file size
else:
skip = 16 # skip magic & reserved? & date & file size
f = open(filename, 'rb')
f.read(skip)
code = marshal.load(f)
f.close()
safe = True
symbols = dict.fromkeys(iter_symbols(code))
for bad in ['__file__', '__path__']:
if bad in symbols:
log.warn("%s: module references %s", module, bad)
safe = False
if 'inspect' in symbols:
for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
'getinnerframes', 'getouterframes', 'stack', 'trace'
]:
if bad in symbols:
log.warn("%s: module MAY be using inspect.%s", module, bad)
safe = False
return safe
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names:
yield name
for const in code.co_consts:
if isinstance(const, str):
yield const
elif isinstance(const, CodeType):
for name in iter_symbols(const):
yield name
def can_scan():
if not sys.platform.startswith('java') and sys.platform != 'cli':
# CPython, PyPy, etc.
return True
log.warn("Unable to analyze compiled code on this platform.")
log.warn("Please ask the author to include a 'zip_safe'"
" setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
mode='w'):
"""Create a zip file from all the files under 'base_dir'. The output
zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
Python module (if available) or the InfoZIP "zip" utility (if installed
and found on the default search path). If neither tool is available,
raises DistutilsExecError. Returns the name of the output zip file.
"""
import zipfile
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
def visit(z, dirname, names):
for name in names:
path = os.path.normpath(os.path.join(dirname, name))
if os.path.isfile(path):
p = path[len(base_dir) + 1:]
if not dry_run:
z.write(path, p)
log.debug("adding '%s'", p)
compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
if not dry_run:
z = zipfile.ZipFile(zip_filename, mode, compression=compression)
for dirname, dirs, files in sorted_walk(base_dir):
visit(z, dirname, files)
z.close()
else:
for dirname, dirs, files in sorted_walk(base_dir):
visit(None, dirname, files)
return zip_filename
|
|
# LXC Python Library
# for compatibility with LXC 0.8 and 0.9
# on Ubuntu 12.04/12.10/13.04
# Author: Elie Deloumeau
# Contact: [email protected]
# The MIT License (MIT)
# Copyright (c) 2013 Elie Deloumeau
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
sys.path.append('../')
from lxclite import exists, stopped, ContainerDoesntExists
import os
import platform
import re
import subprocess
import time
from io import StringIO
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
import configparser
except ImportError:
import ConfigParser as configparser
class CalledProcessError(Exception):
pass
cgroup = {}
cgroup['type'] = 'lxc.network.type'
cgroup['link'] = 'lxc.network.link'
cgroup['flags'] = 'lxc.network.flags'
cgroup['hwaddr'] = 'lxc.network.hwaddr'
cgroup['rootfs'] = 'lxc.rootfs'
cgroup['utsname'] = 'lxc.utsname'
cgroup['arch'] = 'lxc.arch'
cgroup['ipv4'] = 'lxc.network.ipv4'
cgroup['memlimit'] = 'lxc.cgroup.memory.limit_in_bytes'
cgroup['swlimit'] = 'lxc.cgroup.memory.memsw.limit_in_bytes'
cgroup['cpus'] = 'lxc.cgroup.cpuset.cpus'
cgroup['shares'] = 'lxc.cgroup.cpu.shares'
cgroup['deny'] = 'lxc.cgroup.devices.deny'
cgroup['allow'] = 'lxc.cgroup.devices.allow'
def FakeSection(fp):
content = u"[DEFAULT]\n%s" % fp.read()
return StringIO(content)
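# Illustrative note: configparser needs at least one section header, while the lxc
# config files are flat "key = value" lists. FakeSection prepends a synthetic
# [DEFAULT] header so that a line such as
#   lxc.utsname = mycontainer
# is parsed as a [DEFAULT] option; DelSection() below strips that header from the
# rewritten file afterwards.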
def DelSection(filename=None):
if filename:
load = open(filename, 'r')
read = load.readlines()
load.close()
i = 0
while i < len(read):
if '[DEFAULT]' in read[i]:
del read[i]
break
load = open(filename, 'w')
load.writelines(read)
load.close()
def file_exist(filename):
'''
    checks if a given file exists or not
'''
try:
with open(filename) as f:
f.close()
return True
except IOError:
return False
def ls_auto():
'''
returns a list of autostart containers
'''
try:
auto_list = os.listdir('/etc/lxc/auto/')
except OSError:
auto_list = []
return auto_list
def memory_usage(name):
'''
returns memory usage in MB
'''
if not exists(name):
raise ContainerDoesntExists(
"The container (%s) does not exist!" % name)
if name in stopped():
return 0
cmd = ['lxc-cgroup -n %s memory.usage_in_bytes' % name]
try:
out = subprocess.check_output(cmd, shell=True,
universal_newlines=True).splitlines()
except:
return 0
return int(out[0])/1024/1024
def host_memory_usage():
'''
returns a dict of host memory usage values
{'percent': int((used/total)*100),
'percent_cached':int((cached/total)*100),
'used': int(used/1024),
'total': int(total/1024)}
'''
out = open('/proc/meminfo')
for line in out:
if 'MemTotal:' == line.split()[0]:
split = line.split()
total = float(split[1])
if 'MemFree:' == line.split()[0]:
split = line.split()
free = float(split[1])
if 'Buffers:' == line.split()[0]:
split = line.split()
buffers = float(split[1])
if 'Cached:' == line.split()[0]:
split = line.split()
cached = float(split[1])
out.close()
used = (total - (free + buffers + cached))
return {'percent': int((used/total)*100),
'percent_cached': int(((cached)/total)*100),
'used': int(used/1024),
'total': int(total/1024)}
def host_cpu_percent():
'''
returns CPU usage in percent
'''
f = open('/proc/stat', 'r')
line = f.readlines()[0]
data = line.split()
previdle = float(data[4])
prevtotal = float(data[1]) + float(data[2]) + \
float(data[3]) + float(data[4])
f.close()
time.sleep(0.1)
f = open('/proc/stat', 'r')
line = f.readlines()[0]
data = line.split()
idle = float(data[4])
total = float(data[1]) + float(data[2]) + float(data[3]) + float(data[4])
f.close()
intervaltotal = total - prevtotal
percent = 100 * (intervaltotal - (idle - previdle)) / intervaltotal
return str('%.1f' % percent)
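# Worked example of the calculation above (numbers are illustrative): if the first
# /proc/stat sample gives prevtotal = 1000 and previdle = 800, and 0.1 s later
# total = 1100 and idle = 880, then
#   intervaltotal = 100, idle delta = 80
#   percent = 100 * (100 - 80) / 100 = 20.0  -> returned as the string '20.0'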
def host_disk_usage(partition=None):
'''
returns a dict of disk usage values
{'total': usage[1],
'used': usage[2],
'free': usage[3],
'percent': usage[4]}
'''
if not partition:
partition = '/'
usage = subprocess.check_output(['df -h %s' % partition],
universal_newlines=True,
shell=True).split('\n')[1].split()
return {'total': usage[1],
'used': usage[2],
'free': usage[3],
'percent': usage[4]}
def host_uptime():
'''
returns a dict of the system uptime
{'day': days,
'time': '%d:%02d' % (hours,minutes)}
'''
f = open('/proc/uptime')
uptime = int(f.readlines()[0].split('.')[0])
minutes = uptime / 60 % 60
hours = uptime / 60 / 60 % 24
days = uptime / 60 / 60 / 24
f.close()
return {'day': days,
'time': '%d:%02d' % (hours, minutes)}
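# Worked example (Python 2 integer division, numbers are illustrative): an uptime
# of 93784 seconds (1 day, 2 h, 3 min, 4 s) gives
#   minutes = 93784/60 % 60    = 3
#   hours   = 93784/60/60 % 24 = 2
#   days    = 93784/60/60/24   = 1
# so the function returns {'day': 1, 'time': '2:03'}.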
def check_ubuntu():
'''
return the System version
'''
dist = '%s %s' % (platform.linux_distribution()[0],
platform.linux_distribution()[1])
return dist
def get_templates_list():
'''
returns a sorted lxc templates list
'''
templates = []
path = None
try:
path = os.listdir('/usr/share/lxc/templates')
except:
path = os.listdir('/usr/lib/lxc/templates')
if path:
for line in path:
templates.append(line.replace('lxc-', ''))
return sorted(templates)
def check_version():
'''
returns latest LWP version (dict with current and latest)
'''
f = open('version')
current = float(f.read())
f.close()
latest = float(urlopen('http://lxc-webpanel.github.com/version').read())
return {'current': current,
'latest': latest}
def get_net_settings():
'''
returns a dict of all known settings for LXC networking
'''
filename = '/etc/default/lxc-net'
if not file_exist(filename):
filename = '/etc/default/lxc'
if not file_exist(filename):
return False
config = configparser.SafeConfigParser()
cfg = {}
config.readfp(FakeSection(open(filename)))
cfg['use'] = config.get('DEFAULT', 'USE_LXC_BRIDGE').strip('"')
cfg['bridge'] = config.get('DEFAULT', 'LXC_BRIDGE').strip('"')
cfg['address'] = config.get('DEFAULT', 'LXC_ADDR').strip('"')
cfg['netmask'] = config.get('DEFAULT', 'LXC_NETMASK').strip('"')
cfg['network'] = config.get('DEFAULT', 'LXC_NETWORK').strip('"')
cfg['range'] = config.get('DEFAULT', 'LXC_DHCP_RANGE').strip('"')
cfg['max'] = config.get('DEFAULT', 'LXC_DHCP_MAX').strip('"')
return cfg
def get_container_settings(name):
'''
returns a dict of all utils settings for a container
'''
if os.geteuid():
filename = os.path.expanduser('~/.local/share/lxc/%s/config' % name)
else:
filename = '/var/lib/lxc/%s/config' % name
if not file_exist(filename):
return False
config = configparser.SafeConfigParser()
cfg = {}
config.readfp(FakeSection(open(filename)))
try:
cfg['type'] = config.get('DEFAULT', cgroup['type'])
except configparser.NoOptionError:
cfg['type'] = ''
try:
cfg['link'] = config.get('DEFAULT', cgroup['link'])
except configparser.NoOptionError:
cfg['link'] = ''
try:
cfg['flags'] = config.get('DEFAULT', cgroup['flags'])
except configparser.NoOptionError:
cfg['flags'] = ''
try:
cfg['hwaddr'] = config.get('DEFAULT', cgroup['hwaddr'])
except configparser.NoOptionError:
cfg['hwaddr'] = ''
try:
cfg['rootfs'] = config.get('DEFAULT', cgroup['rootfs'])
except configparser.NoOptionError:
cfg['rootfs'] = ''
try:
cfg['utsname'] = config.get('DEFAULT', cgroup['utsname'])
except configparser.NoOptionError:
cfg['utsname'] = ''
try:
cfg['arch'] = config.get('DEFAULT', cgroup['arch'])
except configparser.NoOptionError:
cfg['arch'] = ''
try:
cfg['ipv4'] = config.get('DEFAULT', cgroup['ipv4'])
except configparser.NoOptionError:
cfg['ipv4'] = ''
try:
cfg['memlimit'] = re.sub(r'[a-zA-Z]', '',
config.get('DEFAULT', cgroup['memlimit']))
except configparser.NoOptionError:
cfg['memlimit'] = ''
try:
cfg['swlimit'] = re.sub(r'[a-zA-Z]', '',
config.get('DEFAULT', cgroup['swlimit']))
except configparser.NoOptionError:
cfg['swlimit'] = ''
try:
cfg['cpus'] = config.get('DEFAULT', cgroup['cpus'])
except configparser.NoOptionError:
cfg['cpus'] = ''
try:
cfg['shares'] = config.get('DEFAULT', cgroup['shares'])
except configparser.NoOptionError:
cfg['shares'] = ''
if '%s.conf' % name in ls_auto():
cfg['auto'] = True
else:
cfg['auto'] = False
return cfg
def push_net_value(key, value, filename='/etc/default/lxc'):
'''
replace a var in the lxc-net config file
'''
if filename:
config = configparser.RawConfigParser()
config.readfp(FakeSection(open(filename)))
if not value:
config.remove_option('DEFAULT', key)
else:
config.set('DEFAULT', key, value)
with open(filename, 'wb') as configfile:
config.write(configfile)
DelSection(filename=filename)
load = open(filename, 'r')
read = load.readlines()
load.close()
i = 0
while i < len(read):
if ' = ' in read[i]:
split = read[i].split(' = ')
split[1] = split[1].strip('\n')
if '\"' in split[1]:
read[i] = '%s=%s\n' % (split[0].upper(), split[1])
else:
read[i] = '%s=\"%s\"\n' % (split[0].upper(), split[1])
i += 1
load = open(filename, 'w')
load.writelines(read)
load.close()
def push_config_value(key, value, container=None):
'''
replace a var in a container config file
'''
def save_cgroup_devices(filename=None):
'''
returns multiple values (lxc.cgroup.devices.deny and
        lxc.cgroup.devices.allow) in a list, because configparser cannot
        handle duplicate keys
'''
if filename:
values = []
i = 0
load = open(filename, 'r')
read = load.readlines()
load.close()
while i < len(read):
if not read[i].startswith('#') and \
re.match('lxc.cgroup.devices.deny|'
'lxc.cgroup.devices.allow', read[i]):
values.append(read[i])
i += 1
return values
if container:
if os.geteuid():
filename = os.path.expanduser('~/.local/share/lxc/%s/config' %
container)
else:
filename = '/var/lib/lxc/%s/config' % container
save = save_cgroup_devices(filename=filename)
config = configparser.RawConfigParser()
config.readfp(FakeSection(open(filename)))
if not value:
config.remove_option('DEFAULT', key)
        elif (key == cgroup['memlimit'] or key == cgroup['swlimit']) \
                and value is not False:
config.set('DEFAULT', key, '%sM' % value)
else:
config.set('DEFAULT', key, value)
# Bugfix (can't duplicate keys with config parser)
if config.has_option('DEFAULT', cgroup['deny']) or \
config.has_option('DEFAULT', cgroup['allow']):
config.remove_option('DEFAULT', cgroup['deny'])
config.remove_option('DEFAULT', cgroup['allow'])
with open(filename, 'wb') as configfile:
config.write(configfile)
DelSection(filename=filename)
with open(filename, "a") as configfile:
configfile.writelines(save)
def net_restart():
'''
restarts LXC networking
'''
cmd = ['/usr/sbin/service lxc-net restart']
try:
subprocess.check_call(cmd, shell=True)
return 0
except CalledProcessError:
return 1
|
|
from group import Group
import itertools
def start(force_algo, debug=False):
create_groups(force_algo, debug)
if debug:
for group in force_algo.groups:
print group
find_special_groups(force_algo, debug)
force_algo.groups = sorted(force_algo.groups, cmp=group_compare)
find_special_pins(force_algo, debug)
if debug:
for group in force_algo.groups:
print group
find_neighbors(force_algo, debug)
if debug:
for group in force_algo.groups:
print group
for group in force_algo.groups:
group.is_bias = is_bias_group(group, force_algo)
def group_compare(x, y):
'''
Sort groups by their group_id
    groups on the low level with long IDs come first
'''
return len(y.group_id) - len(x.group_id)
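# Illustrative example (IDs are hypothetical): used as the cmp function for
# sorted(), group_compare orders a group with group_id [0, 1, 2] before [0, 1] and
# [0, 1] before [0], because len(y.group_id) - len(x.group_id) is negative when x
# has the longer (deeper) ID.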
def find_special_groups(forceOptimizer, debug):
new_groups = []
new_level = 0
for group in forceOptimizer.groups:
if len(group.blocks):
bias_blocks = []
in_blocks = []
for block in group.blocks:
for pin in block.pins.values():
if pin.net.startswith("vbias"):
bias_blocks.append(block)
break
if pin.net.startswith("in"):
in_blocks.append(block)
'''
if len(bias_blocks) < len(group.blocks) and len(bias_blocks):
new_group_id = group.group_id[:]
new_group_id.append(len(group.childs))
new_group = Group(new_group_id)
if new_group not in new_groups:
new_level = len(new_group_id)
new_groups.append(new_group)
new_group.parent = group
group.childs.append(new_group)
new_group.is_bias_connected = True
for block in bias_blocks:
block.groups = new_group_id
group.blocks.remove(block)
new_group.blocks.add(block)
if len(in_blocks) and len(in_blocks)< len(group.blocks):
new_group_id = group.group_id[:]
new_group_id.append(len(group.childs))
new_group = Group(new_group_id)
if new_group not in new_groups:
new_level = len(new_group_id)
new_groups.append(new_group)
new_group.parent = group
group.childs.append(new_group)
for block in in_blocks:
block.groups = new_group_id
group.blocks.remove(block)
new_group.blocks.add(block)
'''
if len(bias_blocks) < len(group.blocks) and len(bias_blocks):
new_group_id = group.parent.group_id[:]
new_group_id.append(len(group.parent.childs))
new_group = Group(new_group_id)
if new_group not in new_groups:
new_groups.append(new_group)
new_group.parent = group.parent
group.parent.childs.append(new_group)
new_group.is_bias_connected = True
for block in bias_blocks:
block.groups = new_group_id
group.blocks.remove(block)
new_group.blocks.add(block)
if len(in_blocks) and len(in_blocks)< len(group.blocks):
new_group_id = group.parent.group_id[:]
new_group_id.append(len(group.parent.childs))
new_group = Group(new_group_id)
if new_group not in new_groups:
new_groups.append(new_group)
new_group.parent = group.parent
group.parent.childs.append(new_group)
new_group.is_bias_connected = True
for block in in_blocks:
block.groups = new_group_id
group.blocks.remove(block)
new_group.blocks.add(block)
if new_level:
for group in forceOptimizer.groups:
if len(group.group_id) == new_level - 1:
if len(group.blocks):
new_group_id = group.group_id[:]
new_group_id.append(len(group.childs))
new_group = Group(new_group_id)
if new_group not in new_groups:
new_groups.append(new_group)
new_group.parent = group
group.childs.append(new_group)
blocks = list(group.blocks)
for block in blocks:
print block.name
block.groups = new_group_id[:]
group.blocks.remove(block)
new_group.blocks.add(block)
for group in new_groups:
forceOptimizer.groups.append(group)
def is_bias_group(group, forceOptimizer):
if len(group.blocks):
vbias_net = set()
for block in group.blocks:
for pin in block.pins.values():
if pin.net.lower() in forceOptimizer.bias_nets:
vbias_net.add(pin.net.lower())
if vbias_net == forceOptimizer.bias_nets:
return True
else:
return False
else:
return False
def find_special_pins (forceOptimizer, debug):
for group in forceOptimizer.groups:
group.connected_gnd = 0
group.connected_vcc = 0
group.connected_out = 0
group.connected_inp = 0
for block in group.blocks:
#check the connection to important pins
for p in block.pins.values():
for pin in forceOptimizer.pins_south:
if p.net.lower().startswith(pin):
group.connected_gnd += 1
group.block_south.add(block)
for pin in forceOptimizer.pins_north:
if p.net.lower().startswith(pin):
group.connected_vcc += 1
group.block_north.add(block)
for pin in forceOptimizer.pins_east:
if p.net.lower().startswith(pin):
group.connected_out += 1
group.block_east.add(block)
for pin in forceOptimizer.pins_west:
if p.net.lower().startswith(pin):
group.block_west.add(block)
if p.net.lower().startswith("in"):
group.connected_inp #+= 1
if group.connected_out > 0:
group.connected_parent_east += group.connected_out
if group.connected_gnd > 0:
group.connected_parent_south += group.connected_gnd
if group.connected_vcc > 0:
group.connected_parent_north += group.connected_vcc
for child in group.childs:
group.connected_gnd += child.connected_gnd
group.connected_vcc += child.connected_vcc
group.connected_out += child.connected_out
group.connected_inp += child.connected_inp
group.connected_parent_east = group.connected_out
group.connected_parent_south = group.connected_gnd
group.connected_parent_north = group.connected_vcc
def create_groups(forceOptimizer, debug=False):
'''
DESCRIPTION: Create the groups and add them to the list: groups
STATE: finish
'''
if debug:
print ""
print "============="
print "Create Groups"
print "============="
print ""
#go through all blocks in circuit
for block in forceOptimizer.blocks:
group_id = block.groups # the lowest group get all IDs
if debug:
pins = ""
for p in block.pins.values():
pins += " " + p.net
print "Block: ", block.name, " Group ID", group_id, "Pins:", pins
        group = search_group(group_id, forceOptimizer) # check if the group already exists
if group is None: # create a new group if needed
if debug:
print ("Create a new Group with ID", group_id)
group = Group(group_id)
forceOptimizer.groups.append(group)
if block.name.startswith('i'):
group.block_north.add(block);
group.block_west.add(block)
#add the block to the low level group
group.add_block(block)
#if group has parents create them
if len(group.group_id) > 1:
create_parent(group, forceOptimizer, debug)
#else add group to main group
else:
forceOptimizer.group_main.add_child(group)
group.parent = forceOptimizer.group_main
def search_group(group_id, forceOptimizer):
'''
    PARAMETER: group_id is an array with the IDs of the parent groups and the ID of the searched group
return the group if it exists, else None
STATE: not finish
'''
for group in forceOptimizer.groups:
if group.group_id == group_id:
return group
return None
def create_parent(child, forceOptimizer, debug):
'''
    DESCRIPTION: recursively builds the parents of the group which contains the block;
                 when the algorithm reaches the last parent, it adds them to the main group
PARAMETER: child The group which need a parent
STATE: finish
'''
if debug:
print "create parent for child:", child.group_id
group_id = child.group_id[:len(child.group_id) - 1] # remove the last ID
if debug:
print "parents ID:", group_id
    group = search_group(group_id, forceOptimizer) # check if the group already exists
if group is None: # create a new group if needed
group = Group(group_id)
forceOptimizer.groups.append(group)
if debug:
print "Parent not exist, create a new Group"
group.add_child(child)
child.parent = group
#check the connection to important pins
group.connected_gnd = 0
group.connected_vcc = 0
group.connected_out = 0
group.connected_inp = 0
for c in group.childs:
group.connected_gnd += c.connected_gnd
group.connected_vcc += c.connected_vcc
group.connected_out += c.connected_out
group.connected_inp += c.connected_inp
group.connected_parent_east = group.connected_out
group.connected_parent_south = group.connected_gnd
group.connected_parent_north = group.connected_vcc
#if group has parents create them
if len(group_id) > 1:
create_parent(group, forceOptimizer, debug)
#else add group to main group
else:
forceOptimizer.group_main.add_child(group)
group.parent = forceOptimizer.group_main
def find_neighbors(forceOptimizer, debug):
'''
    DESCRIPTION: Looking for the neighbors of the groups via the pin information of the blocks
STATE: not finish
'''
if debug:
print ""
print "=============="
print "Find Neighbors"
print "=============="
print ""
print "------"
print "Step 1"
print "------"
# go through all blocks in the circuit
for block in forceOptimizer.blocks:
# check all pins in the block
for pin in block.pins.values():
# if pin is not connected to a special pin
if pin.net not in (forceOptimizer.pins_east+forceOptimizer.pins_north+forceOptimizer.pins_south+forceOptimizer.pins_west) and not pin.net.startswith("inp"):
# add the block to block list in the dictionary
if pin.net in forceOptimizer.dictionary_net_blocks:
if block not in forceOptimizer.dictionary_net_blocks[pin.net]:
forceOptimizer.dictionary_net_blocks[pin.net].append(block)
# if the pin.net_name key does not exists in the dictionary,
# create a block list with one element
else:
forceOptimizer.dictionary_net_blocks[pin.net] = [block]
if debug:
print "------"
print "Step 2"
print "------"
# go over all collected nets
for key in forceOptimizer.dictionary_net_blocks.keys():
if key.lower().startswith("vbias"):
forceOptimizer.bias_nets.add(key.lower())
# get the list with the blocks connected to the net
block_list = forceOptimizer.dictionary_net_blocks[key]
if debug:
print key, "Count Blocks:", len(block_list)
# compare the blocks in the list
for block_1, block_2 in itertools.combinations(block_list, 2):
if debug:
print "Block1:", block_1.name, "Block2:", block_2.name
group_1_id = []
group_2_id = []
# start with the high level groups
for i in range(len(block_1.groups)):
group_1_id.append(block_1.groups[i])
group_2_id.append(block_2.groups[i])
if debug:
print "Group1ID:", group_1_id, "Group2ID", group_2_id
# compare the group IDs and when they are different
# then connect the groups with each other
if group_1_id != group_2_id:
group_1 = search_group(group_1_id, forceOptimizer)
group_2 = search_group(group_2_id, forceOptimizer)
# if the groups are already connected, increment the connection number
group_1.add_neighbor(group_2, block_1)
group_2.add_neighbor(group_1, block_2)
|
|
#!/usr/bin/env python3
import locale
locale.setlocale(locale.LC_ALL,'en_US.UTF-8')
import prompt_lmfst
import math
import functools
#NOTE: See the if __name__ == "__main__": block below for a description
# The weights dict defines relative probabilities of transitions.
# They are normalised at each state so that the probabilites sum to one.
# Not all transitions are possible at every step, and this way we can
# ensure that the FST stays stochastic.
# It may also be more intuitive to define the weights as 10:1, instead of
# 0.9091:0.0909 (the true probabilities).
# In the resulting FST these are converted to OpenFst weights, in the log semiring.
# In that semiring the weight (==the cost) of a transition is a
# negative logarithm of the probability.
weights = {
"Correct": 1000., #The correct next word
"Rubbish": 5., #Speech-like noises, hesitations, etc.
"Skip": 10., #Jump forward one word
"Repeat": 30., #Jump backward one word
"JumpForward": 2., #Jump forward multiple words
"JumpBackward": 5., #Jump backward multiple words
"LongJumpDecay":0.9, #A decay term applied to the relative probability for jumps:
#P(jump) = d^n * <jump_relative_probability>, n == number of words jumped over
"Truncation": 5., #An incomplete pronounciation
"PrematureEnd": 3., #Unexpected end of utterance
"FinalState": 1000. #The probability that the utterance ends at the correct point.
#There could also be repetition, rubbish, etc.
}
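# A minimal worked example (illustrative only) of the conversion that
# convertRelativeProbs below performs at each state: if the arcs leaving a
# state have relative weights Correct=1000 and Rubbish=5, their probabilities
# are 1000/1005 and 5/1005, and the log-semiring costs are the negative logs:
#   -math.log(1000./1005.) ~= 0.005
#   -math.log(   5./1005.) ~= 5.303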
# The special_labels dict defines non-word labels which are used for
# the miscue paths. Not all need specific labels, but some do.
special_labels = {
"Epsilon": "<eps>",
"Rubbish": "[RUB]",
"Truncation": "[TRUNC]:",
}
## The next functions define recipes for adding the different types of paths
## to the FST.
## The idea is to always consume a label and to design the recipes so that for the truly
## ambiguous cases, e.g. where in a line of repetitions to put the repetitions, the
## choice is made in a unified way. The FST will not end up 100% deterministic for all
## phone sequences, but the weights of the paths should end up different and thus the
## desired path is found (path of least weight).
def addCorrectPaths(p_fst, weights):
for word in p_fst.words:
p_fst.addArc(word.start, word.final, word.label, word.label, weights["Correct"])
p_fst.addFinalState(p_fst.words[-1].final, weights["FinalState"])
def addRubbishPaths(p_fst, weights, special_labels):
    # Rubbish means speech-like sounds here. This can model e.g. hesitation sounds ("umm") or
# a failed pronunciation
# We add a path for rubbish both to be inserted before a word and to be substituted for the word.
if len(p_fst.words) > 1:
for word, next_word in zip(p_fst.words, p_fst.words[1:]):
rubbish_state = p_fst.newState()
p_fst.addArc(word.start, rubbish_state,
special_labels["Rubbish"], special_labels["Rubbish"],
weights["Rubbish"])
p_fst.addArc(rubbish_state, word.start,
special_labels["Epsilon"], special_labels["Epsilon"],
weights["Correct"])
p_fst.addArc(rubbish_state, next_word.final,
next_word.label, next_word.label,
weights["Correct"])
# Deal with the last word separately. Don't allow substitution,
# this is notated as premature end instead.
p_fst.addArc(p_fst.words[-1].start, p_fst.words[-1].start,
special_labels["Rubbish"], special_labels["Rubbish"],
weights["Rubbish"])
p_fst.addArc(p_fst.words[-1].final, p_fst.words[-1].final,
special_labels["Rubbish"], special_labels["Rubbish"],
weights["Rubbish"])
def addSkipPaths(p_fst, weights):
# This will loop over all but the last word.
# It makes no sense to skip the last word; that should always mean a
# premature end.
if len(p_fst.words) > 1:
for word, next_word in zip(p_fst.words, p_fst.words[1:]):
p_fst.addArc(word.start, next_word.final,
next_word.label, next_word.label,
weights["Skip"])
def addRepeatPaths(p_fst, weights):
if len(p_fst.words) > 1:
# This will loop over all but the last word:
for word, next_word in zip(p_fst.words, p_fst.words[1:]):
p_fst.addArc(word.final, word.final,
word.label, word.label,
weights["Repeat"])
# Then the last word:
p_fst.addArc(p_fst.words[-1].final, p_fst.words[-1].final,
p_fst.words[-1].label, p_fst.words[-1].label,
weights["Repeat"])
def addPrematureEnds(p_fst, weights):
# Add a premature end at the beginning of every word.
# This way it won't be added to the correct end state.
for word in p_fst.words:
p_fst.addFinalState(word.start, weights["PrematureEnd"])
def addJumpsBackward(p_fst, weights):
    # This will take care of all but the last word
# (it's simpler code to add jumps from the start state, so we know the next correct word)
for i, word, in enumerate(p_fst.words):
for n, prev_word in enumerate(reversed(p_fst.words[:i])):
# n = number of words jumped over
if n==0:
continue # The latest word is special; we add a repeat arc, not a jump back arc.
decayed_weight = weights["LongJumpDecay"] ** n * weights["JumpBackward"]
p_fst.addArc(word.start, prev_word.final,
prev_word.label, prev_word.label,
decayed_weight)
# We have to deal with the last word separately
for n, prev_word in enumerate(reversed(p_fst.words[:-1])):
decayed_weight = weights["LongJumpDecay"] ** n * weights["JumpBackward"]
p_fst.addArc(p_fst.words[-1].final, prev_word.final,
prev_word.label, prev_word.label,
decayed_weight)
def addJumpsForward(p_fst, weights):
# It's again simpler to add jumps from the start states, so we know the next correct word)
for i, word, in enumerate(p_fst.words):
for n, later_word in enumerate(p_fst.words[i:]):
# n = number of words jumped over
if n == 0:
continue # The next word is special; we add a skip arc, not a jump forward arc.
decayed_weight = weights["LongJumpDecay"] ** n * weights["JumpForward"]
p_fst.addArc(word.start, later_word.final,
later_word.label, later_word.label,
decayed_weight)
def addTruncations(p_fst, weights, special_labels, truncations):
for word in p_fst.words:
truncation_entry = special_labels["Truncation"]+word.label #we must build the entry manually here.
if truncation_entry in truncations: #Some words may not have truncations. For example, words of just one phoneme.
p_fst.addArc(word.start, word.start,
truncation_entry,
truncation_entry,
weights["Truncation"])
def convertRelativeProbs(p_fst):
# First normalises the weights in relative probabilities into true
# probabilities, then converts to negative logarithms.
for state_num, leaves in p_fst.states.items():
#leaves has Arcs and FinalStates, both of which have a property called weight
total_weight = functools.reduce(lambda x, y: x + y.weight, leaves, 0.)
normalised_leaves = []
for leaf in leaves:
if total_weight == 0. or leaf.weight == 0.:
raise ValueError("Relative probability was zero at: " + repr(leaf))
else:
new_weight = -math.log(leaf.weight / total_weight)
normalised_leaves.append(leaf._replace(weight = new_weight))
p_fst.states[state_num] = normalised_leaves
def readTruncations(truncationsfile):
""" Reads truncations from the given file and returns them as a set """
with open(truncationsfile) as fi:
truncationslist = fi.read().split()
return set(truncationslist)
if __name__ == "__main__":
import argparse
import fileinput
parser = argparse.ArgumentParser(description="""
This script creates a reading miscue tolerant language model,
which is suitable for decoding read prompts.
It reads a line of text from the standard input and writes a
text format WFST that models reading errors into the standard
output, in the openfst format. If multiple lines are given in the input,
this script outputs multiple text format FSTs, separated by an empty line.
That format in particular was chosen because of the Kaldi program
compile-training-graphs-fsts.
The script will accept a list of homophones, which in this context also
includes the above mentioned uniquefied words. This will be used for some
miscue types to keep the result deterministic and make sane inferences about
the reading miscues.
""")
parser.add_argument('--correct-word-boost', type=float,
help="""Amount to multiply the correct words probability by.
Lower correct word probability will probably spot more miscues,
but also have more false positives.""")
parser.add_argument('--homophones', help=
"""File that contains a list of homophones.
In each line, words are considered homophones.
e.g.
too two
carat carrot""")
parser.add_argument('--truncations', help=
"""File that contains a list of words that have truncations in the dictionary.
On each line is one word""")
parser.add_argument('--rubbish-label', help=
"""File to read the label to use for rubbish, i.e. spoken noise""")
parser.add_argument('--truncation-label', help=
"""File to read label to use for Truncation,
concatenated with the word, like [TRUNC]:label""")
parser.add_argument('--kaldi-style', action='store_true', help=
"""For kaldi style inputs, but may be useful otherwise as well.
With this option, the first column in the input is treated as an id,
which should be output as is.""")
parser.add_argument("input", help="""Input as a filepath or - for stdin.
Prompts are read line by line.""")
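    # Illustrative invocation (script and file names are hypothetical):
    #   echo "utt1 the cat sat on the mat" | python3 prompt_fst.py --kaldi-style - > graphs.txt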
args = parser.parse_args()
if args.rubbish_label:
with open(args.rubbish_label) as fi:
special_labels["Rubbish"] = fi.read().strip()
if args.truncation_label:
with open(args.truncation_label) as fi:
special_labels["Truncation"] = fi.read().strip()
if args.correct_word_boost:
weights["Correct"] = weights["Correct"] * args.correct_boost
if args.truncations:
truncated_words = readTruncations(args.truncations)
homophones = prompt_lmfst.readHomophones(args.homophones)
#Process each line in input:
for line in fileinput.input(args.input):
if args.kaldi_style:
ID, *prompt_tokenised = line.strip().split()
fst = prompt_lmfst.PromptLMFST(homophones = homophones, ID=ID)
else:
prompt_tokenised = line.strip().split()
fst = prompt_lmfst.PromptLMFST(homophones = homophones, ID=None)
fst.addWordSequence(prompt_tokenised)
addCorrectPaths(fst, weights)
addRubbishPaths(fst, weights, special_labels)
addSkipPaths(fst, weights)
addRepeatPaths(fst, weights)
addPrematureEnds(fst, weights)
addJumpsBackward(fst, weights)
addJumpsForward(fst, weights)
if args.truncations is not None:
addTruncations(fst, weights, special_labels, truncated_words)
convertRelativeProbs(fst)
print(fst.inText())
print() #Empty line means end of FST
|
|
#!/usr/bin/env python3
"""Extracts text samples from UDHR translations.
Translations exist for myriad languages at varying levels of support. The tool
focuses on Stage 4+ translations for which the UDHR translation has a reliable
amount of content and structure for scraping text samples.
See more at https://www.unicode.org/udhr.
"""
import enum
import os
import ssl
import tempfile
from urllib import request
from lxml import etree
import zipfile
UHDR_URL_TEMPLATE = 'https://www.un.org/{language_code}/universal-declaration-human-rights/'
UDHR_TRANSLATIONS_ZIP_URL = 'https://www.unicode.org/udhr/assemblies/udhr_xml.zip'
INDEX_XML = 'index.xml'
class UdhrTranslations():
def __init__(self):
self._zip_dir = tempfile.TemporaryDirectory()
self._DownloadUdhrTranslationsZip()
self._udhrs = self._ParseUdhrs()
self._udhr_map = {}
for udhr in self._udhrs:
udhr.Parse(self._LoadUdhrTranslation(udhr))
self._udhr_map[udhr.key] = udhr
self._udhr_map[udhr.iso639_3] = udhr
self._udhr_map[udhr.iso15924] = udhr
self._udhr_map[udhr.bcp47] = udhr
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._zip_dir.cleanup()
def _DownloadUdhrTranslationsZip(self):
with tempfile.NamedTemporaryFile(suffix='.zip') as zip_file:
# Disable SSL verification
ssl._create_default_https_context = ssl._create_unverified_context
request.urlretrieve(UDHR_TRANSLATIONS_ZIP_URL, zip_file.name)
with zipfile.ZipFile(zip_file.name, 'r') as zip_ref:
zip_ref.extractall(self._zip_dir.name)
def _ParseUdhrs(self):
root = etree.parse(os.path.join(self._zip_dir.name, INDEX_XML))
return [self.Udhr(udhr_data, self._zip_dir) for udhr_data in root.xpath('*')]
def _LoadUdhrTranslation(self, udhr):
filename = 'udhr_{key}.xml'.format(key=udhr.key)
path = os.path.join(self._zip_dir.name, filename)
if os.path.exists(path):
return etree.parse(path)
return None
def GetUdhrs(self, min_stage=0):
return [udhr for udhr in self._udhrs if udhr.stage >= min_stage]
def GetUdhr(self, lang_code, min_stage=0):
if lang_code not in self._udhr_map or self._udhr_map[lang_code].stage < min_stage:
return None
return self._udhr_map[lang_code]
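  # Illustrative usage sketch (assumes network access to fetch the UDHR
  # translations zip; 'eng' is an example key from the index):
  #
  #   with UdhrTranslations() as translations:
  #     udhr = translations.GetUdhr('eng', min_stage=4)
  #     if udhr is not None:
  #       print(udhr.name, udhr.title)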
class Udhr():
def __init__(self, udhr_data, zip_dir):
self.key = udhr_data.get('f')
self.iso639_3 = udhr_data.get('iso639-3')
self.iso15924 = udhr_data.get('iso15924')
self.bcp47 = udhr_data.get('bcp47')
self.direction = udhr_data.get('dir')
self.ohchr = udhr_data.get('ohchr')
self.stage = int(udhr_data.get('stage'))
self.loc = udhr_data.get('loc')
self.name = udhr_data.get('n')
def Parse(self, translation_data):
if translation_data is None or self.stage < 2:
return
self.title = None
if translation_data.find('./{*}title') is not None:
self.title = translation_data.find('./{*}title').text
preamble_data = translation_data.find('./{*}preamble')
self.preamble = None
if preamble_data is not None:
if preamble_data.find('./{*}title') is not None:
self.preamble = {
'title':
preamble_data.find('./{*}title').text,
'content': [
para.text for para in preamble_data.findall('./{*}para')
],
}
articles_data = translation_data.findall('./{*}article')
self.articles = []
for article_data in articles_data:
article = {
'id':
int(article_data.get('number')),
'title':
article_data.find('./{*}title').text,
'content': [
para.text for para in article_data.findall('./{*}para')
],
}
self.articles.append(article)
def GetSampleTexts(self):
      extractor = SampleTextExtractor(self)
return extractor.GetSampleTexts()
class TextType(enum.Enum):
GLYPHS = 1
WORD = 2
PHRASE = 3
SENTENCE = 4
PARAGRAPH = 5
PASSAGE = 6
class Size(enum.Enum):
SMALL = 1
MEDIUM = 2
LARGE = 3
def Switch(options, size):
if size not in options:
    raise ValueError('Size {size} not in options: {options}'.format(
options=options, size=size))
return options[size]
class SampleTextExtractor():
def __init__(self, udhr):
self._udhr = udhr
self._glyphs = {}
self._words = {}
self._phrases = {}
self._sentences = {}
self._paragraphs = {}
self._passages = {}
def _ExtractGlyphs(self, size):
if size == Size.SMALL:
# single glyph
pass
elif size == Size.MEDIUM:
# upper and lower of single glyph
pass
elif size == Size.LARGE:
# upper and lower of two glyphs
pass
else:
self._UnsupportedSize(size)
pass
def _ExtractWord(self, size):
options = {
Size.SMALL: (3, 5),
Size.MEDIUM: (6, 8),
Size.LARGE: (8, 11),
}
min_length, max_length = Size.Switch(options, size)
pass
def _ExtractPhrase(self, size):
options = {
Size.SMALL: (3, 7),
Size.MEDIUM: (8, 14),
Size.LARGE: (12, 20),
}
min_length, max_length = Size.Switch(options, size)
pass
def _ExtractSentence(self, size):
options = {
Size.SMALL: (6, 10),
Size.MEDIUM: (12, 20),
Size.LARGE: (23, 35),
}
min_length, max_length = Size.Switch(options, size)
pass
def _ExtractParagraph(self, size):
options = {
Size.SMALL: (10, 20),
Size.MEDIUM: (30, 50),
Size.LARGE: (70, 100),
}
length = Size.Switch(options, size)
pass
def _ExtractPassage(self, size):
options = {
Size.SMALL: 2,
Size.MEDIUM: 3,
Size.LARGE: 5,
}
length = Size.Switch(options, size)
pass
def _UnsupportedSize(self, size):
    raise ValueError('Unsupported size: {size}'.format(size=size))
def _Get(self, text_type, size):
pass
def GetSampleTexts(self):
return {
'hero': [
self._Get(TextType.GLYPHS, Size.LARGE), # Single glyphs (eg AbZx)
],
'type_tester': [
self._Get(TextType.PHRASE, Size.LARGE), # 12-20 word phrase
],
'poster': [
self._Get(TextType.WORD, Size.MEDIUM), # Single word
self._Get(TextType.WORD, Size.LARGE), # Single longer word
self._Get(TextType.SENTENCE, Size.SMALL), # 6-10 word sentence
self._Get(TextType.SENTENCE, Size.MEDIUM), # 12-20 word sentence
],
'specimen': [
self._Get(TextType.SENTENCE, Size.SMALL), # 6-10 word sentence
self._Get(TextType.PARAGRAPH, Size.SMALL), # Paragraph
self._Get(TextType.PARAGRAPH, Size.MEDIUM), # Medium paragraph
self._Get(TextType.PARAGRAPH, Size.LARGE), # Large paragraph
self._Get(TextType.PASSAGE,
Size.SMALL), # Multiple paragraphs split over 2 columns
self._Get(TextType.PASSAGE,
Size.MEDIUM), # Multiple paragraphs split over 3 columns
],
}
|
|
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from numpy.testing import assert_raises
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from scipy.spatial import cKDTree
from sklearn import neighbors, datasets
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = np.random.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5,
random_state=0):
"""Test unsupervised neighbors methods"""
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test, return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
"""test the types of valid input into NearestNeighbors"""
X = np.random.random((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), cKDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
"""Test unsupervised radius-based query"""
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                  np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
"""Test k-neighbors classification"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
weight_func = lambda d: d ** -2
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
"""Test radius-based classification"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
weight_func = lambda d: d ** -2
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
"""Test k-NN classifier on sparse matrices"""
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
"""Test k-neighbors regression"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = lambda d: d ** -2
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert np.all(abs(y_pred - y_target) < 0.3)
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
"""Test radius-based neighbors regression"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = lambda d: d ** -2
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert np.all(abs(y_pred - y_target) < radius / 2)
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
"""Test radius-based regression on sparse matrices"""
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert (np.mean(knn.predict(X2).round() == y)
> 0.95)
def test_neighbors_iris():
"""Sanity checks on the iris dataset
Puts three points of each label in the plane and performs a
nearest neighbor query on points near the decision boundary.
"""
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert np.mean(clf.predict(iris.data) == iris.target) > 0.95
rgs = neighbors.KNeighborsRegressor(n_neighbors=5,
algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert np.mean(
rgs.predict(iris.data).round() == iris.target) > 0.95
def test_kneighbors_graph():
"""Test kneighbors_graph to build the k-Nearest Neighbor graph."""
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
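    # For reference, the pairwise Euclidean distances for X are:
    #   d([0, 1], [1.01, 1.]) = 1.01
    #   d([1.01, 1.], [2, 0]) = sqrt(0.99**2 + 1) ~= 1.40716026
    #   d([0, 1], [2, 0])     = sqrt(5)           ~= 2.23606798
    # which is where the expected 'distance' matrices below come from.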
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.todense(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.todense(),
[[ 0. , 1.01 , 0. ],
[ 1.01 , 0. , 0. ],
[ 0. , 1.40716026, 0. ]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.todense(),
[[ 1., 1., 0.],
[ 1., 1., 0.],
[ 0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.todense(),
[[ 0. , 1.01 , 2.23606798],
[ 1.01 , 0. , 1.40716026],
[ 2.23606798, 1.40716026, 0. ]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.todense(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_radius_neighbors_graph():
"""Test radius_neighbors_graph to build the Nearest Neighbor graph."""
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.todense(),
[[ 1., 1., 0.],
[ 1., 1., 1.],
[ 0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.todense(),
[[ 0. , 1.01 , 0. ],
[ 1.01 , 0. , 1.40716026],
[ 0. , 1.40716026, 0. ]])
def test_neighbors_badargs():
"""Test bad argument values: these should all raise ValueErrors"""
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = np.random.random((10, 2))
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
nbrs = cls()
assert_raises(ValueError,
nbrs.predict,
X)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError,
nbrs.kneighbors_graph,
X, mode='blah')
assert_raises(ValueError,
nbrs.radius_neighbors_graph,
X, mode='blah')
if __name__ == '__main__':
import nose
nose.runmodule()
|
|
"""Implement configuration file parsing."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
try:
import config_parser_util
from acl import ACL
from dp import DP
from meter import Meter
from port import Port
from router import Router
from vlan import VLAN
from watcher_conf import WatcherConf
except ImportError:
from faucet import config_parser_util
from faucet.acl import ACL
from faucet.dp import DP
from faucet.meter import Meter
from faucet.port import Port
from faucet.router import Router
from faucet.vlan import VLAN
from faucet.watcher_conf import WatcherConf
V2_TOP_CONFS = (
'acls',
'dps',
'meters',
'routers',
'vlans')
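# Illustrative (hypothetical) minimal v2 config using the top-level sections
# named in V2_TOP_CONFS; the exact keys accepted inside each section are
# defined by the DP/Port/VLAN/ACL classes, not by this sketch:
#
#   vlans:
#     office:
#       vid: 100
#   dps:
#     sw1:
#       dp_id: 0x1
#       interfaces:
#         1:
#           native_vlan: office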
def dp_parser(config_file, logname):
logger = config_parser_util.get_logger(logname)
conf = config_parser_util.read_config(config_file, logname)
config_hashes = None
dps = None
if conf is not None:
version = conf.pop('version', 2)
if version != 2:
logger.fatal('Only config version 2 is supported')
config_hashes, dps = _config_parser_v2(config_file, logname)
return config_hashes, dps
def _dp_parser_v2(logger, acls_conf, dps_conf, meters_conf,
routers_conf, vlans_conf):
dps = []
vid_dp = collections.defaultdict(set)
def _get_vlan_by_identifier(dp_id, vlan_ident, vlans):
if vlan_ident in vlans:
return vlans[vlan_ident]
for vlan in list(vlans.values()):
if int(vlan_ident) == vlan.vid:
return vlan
try:
vid = int(vlan_ident, 0)
except ValueError:
assert False, 'VLAN VID value (%s) is invalid' % vlan_ident
return vlans.setdefault(vlan_ident, VLAN(vid, dp_id))
def _dp_add_vlan(dp, vlan):
if vlan not in dp.vlans:
dp.add_vlan(vlan)
vid_dp[vlan.vid].add(dp.name)
if len(vid_dp[vlan.vid]) > 1:
assert not vlan.bgp_routerid, (
'DPs %s sharing a BGP speaker VLAN is unsupported' % (
str.join(', ', vid_dp[vlan.vid])))
def _dp_parse_port(dp_id, p_identifier, port_conf, vlans):
port = Port(p_identifier, port_conf)
if port.native_vlan is not None:
v_identifier = port.native_vlan
vlan = _get_vlan_by_identifier(dp_id, v_identifier, vlans)
port.native_vlan = vlan
vlan.add_untagged(port)
port_tagged_vlans = []
for v_identifier in port.tagged_vlans:
vlan = _get_vlan_by_identifier(dp_id, v_identifier, vlans)
port_tagged_vlans.append(vlan)
vlan.add_tagged(port)
port.tagged_vlans = port_tagged_vlans
for vlan in port.vlans():
_dp_add_vlan(dp, vlan)
return port
def _dp_add_ports(dp, dp_conf, dp_id, vlans):
ports_conf = dp_conf.pop('interfaces', {})
# as users can config port vlan by using vlan name, we store vid in
# Port instance instead of vlan name for data consistency
for port_num, port_conf in list(ports_conf.items()):
port = _dp_parse_port(dp_id, port_num, port_conf, vlans)
dp.add_port(port)
try:
for identifier, dp_conf in list(dps_conf.items()):
dp = DP(identifier, dp_conf)
dp.sanity_check()
dp_id = dp.dp_id
vlans = {}
for vlan_ident, vlan_conf in list(vlans_conf.items()):
vlans[vlan_ident] = VLAN(vlan_ident, dp_id, vlan_conf)
acls = []
for acl_ident, acl_conf in list(acls_conf.items()):
acls.append((acl_ident, ACL(acl_ident, acl_conf)))
for router_ident, router_conf in list(routers_conf.items()):
router = Router(router_ident, router_conf)
dp.add_router(router_ident, router)
for meter_ident, meter_conf in list(meters_conf.items()):
dp.meters[meter_ident] = Meter(meter_ident, meter_conf)
_dp_add_ports(dp, dp_conf, dp_id, vlans)
for acl_ident, acl in acls:
dp.add_acl(acl_ident, acl)
dps.append(dp)
for dp in dps:
dp.finalize_config(dps)
for dp in dps:
dp.resolve_stack_topology(dps)
except AssertionError as err:
logger.exception('Error in config file: %s', err)
return None
return dps
def _config_parser_v2(config_file, logname):
logger = config_parser_util.get_logger(logname)
config_path = config_parser_util.dp_config_path(config_file)
top_confs = {}
config_hashes = {}
dps = None
for top_conf in V2_TOP_CONFS:
top_confs[top_conf] = {}
if not config_parser_util.dp_include(
config_hashes, config_path, logname, top_confs):
logger.critical('error found while loading config file: %s', config_path)
elif not top_confs['dps']:
logger.critical('DPs not configured in file: %s', config_path)
else:
dps = _dp_parser_v2(
logger,
top_confs['acls'],
top_confs['dps'],
top_confs['meters'],
top_confs['routers'],
top_confs['vlans'])
return (config_hashes, dps)
def get_config_for_api(valves):
config = {}
for i in V2_TOP_CONFS:
config[i] = {}
for valve in list(valves.values()):
valve_conf = valve.get_config_dict()
for i in V2_TOP_CONFS:
if i in valve_conf:
config[i].update(valve_conf[i])
return config
def watcher_parser(config_file, logname, prom_client):
conf = config_parser_util.read_config(config_file, logname)
return _watcher_parser_v2(conf, logname, prom_client)
def _watcher_parser_v2(conf, logname, prom_client):
logger = config_parser_util.get_logger(logname)
result = []
dps = {}
for faucet_file in conf['faucet_configs']:
_, dp_list = dp_parser(faucet_file, logname)
if dp_list:
for dp in dp_list:
dps[dp.name] = dp
dbs = conf.pop('dbs')
for name, dictionary in list(conf['watchers'].items()):
for dp_name in dictionary['dps']:
if dp_name not in dps:
logger.error('dp %s metered but not configured', dp_name)
continue
dp = dps[dp_name]
watcher = WatcherConf(name, dictionary, prom_client)
watcher.add_db(dbs[watcher.db])
watcher.add_dp(dp)
result.append(watcher)
return result
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Masked language task with progressive training."""
# Lint as: python3
import copy
import dataclasses
import os
from typing import List, Optional
from absl import logging
import numpy
import orbit
import tensorflow as tf
import yaml
from grow_bert.lowcost.config import config_encoder as ecfg
from grow_bert.lowcost.models import bert_pretrain_model as small_pretrainer
from grow_bert.lowcost.models import pretrain_dataloader as small_dataloader
from grow_bert.lowcost.models import transformer_encoder as small_encoder_lib
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import optimization
from official.modeling import tf_utils
from official.modeling.fast_training.progressive import policies
from official.modeling.hyperparams import base_config
from official.nlp.configs import bert
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.modeling import layers
from official.nlp.tasks import masked_lm
@dataclasses.dataclass
class MaskedLMConfig(cfg.TaskConfig):
"""The model config."""
init_checkpoint: str = ''
model: bert.PretrainerConfig = bert.PretrainerConfig(
cls_heads=[
bert.ClsHeadConfig(
inner_dim=768,
num_classes=2,
dropout_rate=0.1,
name='next_sentence')
],
encoder=encoders.EncoderConfig(bert=encoders.BertEncoderConfig()))
scale_loss: bool = False
train_data: pretrain_dataloader.BertPretrainDataConfig = pretrain_dataloader.BertPretrainDataConfig(
)
small_train_data: pretrain_dataloader.BertPretrainDataConfig = pretrain_dataloader.BertPretrainDataConfig(
)
validation_data: pretrain_dataloader.BertPretrainDataConfig = pretrain_dataloader.BertPretrainDataConfig(
)
@dataclasses.dataclass
class StackingStageConfig(base_config.Config):
num_steps: int = 0
warmup_steps: int = 10000
initial_learning_rate: float = 1e-4
end_learning_rate: float = 0.0
decay_steps: int = 1000000
override_num_layers: Optional[int] = None
small_encoder_config: Optional[
ecfg.SmallEncoderConfig] = ecfg.SmallEncoderConfig()
override_train_data: Optional[
pretrain_dataloader
.BertPretrainDataConfig] = pretrain_dataloader.BertPretrainDataConfig()
override_valid_data: Optional[
pretrain_dataloader
.BertPretrainDataConfig] = pretrain_dataloader.BertPretrainDataConfig()
@dataclasses.dataclass
class ProgStackingConfig(policies.ProgressiveConfig):
stage_list: List[StackingStageConfig] = dataclasses.field(
default_factory=lambda: [ # pylint: disable=g-long-lambda
StackingStageConfig(
num_steps=3000,
warmup_steps=10000,
initial_learning_rate=1e-4,
end_learning_rate=1e-4,
decay_steps=1000000),
StackingStageConfig(
num_steps=3000,
warmup_steps=10000,
initial_learning_rate=1e-4,
end_learning_rate=1e-4,
decay_steps=1000000)
])
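# An illustrative (hypothetical) `trainer.progressive` fragment of a params.yaml
# that would populate ProgStackingConfig; field names follow StackingStageConfig:
#
#   trainer:
#     progressive:
#       stage_list:
#         - num_steps: 3000
#           warmup_steps: 10000
#           initial_learning_rate: 1.0e-4
#           end_learning_rate: 1.0e-4
#           decay_steps: 1000000
#         - num_steps: 3000
#           warmup_steps: 10000
#           initial_learning_rate: 1.0e-4
#           end_learning_rate: 1.0e-4
#           decay_steps: 1000000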
@task_factory.register_task_cls(MaskedLMConfig)
class ProgressiveMaskedLM(policies.ProgressivePolicy, masked_lm.MaskedLMTask):
"""Mask language modeling with progressive policy."""
def __init__(self,
strategy,
progressive_config,
optimizer_config,
train_data_config,
small_train_data_config,
task_config,
logging_dir=None):
"""Initialize progressive training manager before the training loop starts.
Arguments:
strategy: A distribution strategy.
progressive_config: ProgressiveConfig. Configuration for this class.
optimizer_config: optimization_config.OptimizerConfig. Configuration for
building the optimizer.
train_data_config: config_definitions.DataConfig. Configuration for
        building the training dataset.
      small_train_data_config: config_definitions.DataConfig. Configuration for
        building the training dataset used by stages that train a small encoder.
task_config: TaskConfig. This is used in base_task.Task.
logging_dir: a string pointing to where the model, summaries etc. will be
saved. This is used in base_task.Task.
"""
self._strategy = strategy
self._progressive_config = progressive_config
self._optimizer_config = optimizer_config
self._train_data_config = train_data_config
self._small_train_data_config = small_train_data_config
self._model_config: bert.PretrainerConfig = task_config.model
masked_lm.MaskedLMTask.__init__(
self, params=task_config, logging_dir=logging_dir)
policies.ProgressivePolicy.__init__(self)
# Overrides policies.ProgressivePolicy
def get_model(self, stage_id, old_model=None):
"""Build model for each stage."""
stage_config: StackingStageConfig = self._progressive_config.stage_list[
stage_id]
if stage_config.small_encoder_config is not None:
encoder_cfg: ecfg.TransformerEncoderConfig = ecfg.from_bert_encoder_config(
self._model_config.encoder.bert, stage_config.small_encoder_config)
model_cfg = copy.deepcopy(self._model_config)
model_cfg.encoder = encoders.EncoderConfig(bert=encoder_cfg)
model = self.build_small_model(model_cfg.as_dict())
else:
model_config = copy.deepcopy(self._model_config)
if stage_config.override_num_layers is not None:
model_config.encoder.bert.num_layers = stage_config.override_num_layers
model = self.build_model(model_config)
_ = model(model.inputs)
if stage_id == 0:
self.initialize(model)
if stage_id > 0 and old_model is not None:
logging.info('Stage %d copying weights.', stage_id)
self.transform_model(small_model=old_model, model=model)
return model
# overrides policies.ProgressivePolicy
def get_train_dataset(self, stage_id):
stage_config = self._progressive_config.stage_list[stage_id]
if stage_config.small_encoder_config is not None:
train_data_config = self._small_train_data_config
if stage_config.override_train_data is not None:
logging.info('stage %d: override small train data to %s', stage_id,
stage_config.override_train_data)
train_data_config = stage_config.override_train_data
return orbit.utils.make_distributed_dataset(self._strategy,
self.build_small_inputs,
train_data_config)
train_data_config = self._train_data_config
if stage_config.override_train_data is not None:
train_data_config = stage_config.override_train_data
logging.info('stage %d: override full train data to %s', stage_id,
stage_config.override_train_data)
return orbit.utils.make_distributed_dataset(self._strategy,
self.build_inputs,
train_data_config)
# overrides policies.ProgressivePolicy
def get_eval_dataset(self, stage_id):
build_func = self.build_inputs
stage_config = self._progressive_config.stage_list[stage_id]
if stage_config.small_encoder_config is not None:
build_func = self.build_small_inputs
valid_data_config = self.task_config.validation_data
if stage_config.override_valid_data is not None:
valid_data_config = stage_config.override_valid_data
logging.info('stage %d: override full valid data to %s', stage_id,
stage_config.override_valid_data)
return orbit.utils.make_distributed_dataset(self._strategy, build_func,
valid_data_config)
def build_small_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for pretraining."""
return small_dataloader.PretrainDataLoader(params).load(input_context)
# Overrides policies.ProgressivePolicy
def num_stages(self):
return len(self._progressive_config.stage_list)
# Overrides policies.ProgressivePolicy
def num_steps(self, stage_id):
return self._progressive_config.stage_list[stage_id].num_steps
# Overrides policies.ProgressivePolicy
def get_optimizer(self, stage_id):
"""Build optimizer for each stage."""
params = self._optimizer_config.replace(
learning_rate={
'polynomial': {
'decay_steps':
self._progressive_config.stage_list[stage_id].decay_steps,
'initial_learning_rate':
self._progressive_config.stage_list[stage_id]
.initial_learning_rate,
'end_learning_rate':
self._progressive_config.stage_list[stage_id]
.end_learning_rate,
}
},
warmup={
'polynomial': {
'warmup_steps':
self._progressive_config.stage_list[stage_id].warmup_steps,
}
})
opt_factory = optimization.OptimizerFactory(params)
optimizer = opt_factory.build_optimizer(opt_factory.build_learning_rate())
return optimizer
def build_small_model(self, model_cfg):
encoder_cfg = model_cfg['encoder']['bert']
dataconf = self.task_config.train_data
encoder_network = small_encoder_lib.TransformerEncoder(
vocab_size=encoder_cfg['vocab_size'],
hidden_size=encoder_cfg['hidden_size'],
num_layers=encoder_cfg['num_layers'],
num_attention_heads=encoder_cfg['num_attention_heads'],
intermediate_size=encoder_cfg['intermediate_size'],
activation=tf_utils.get_activation(encoder_cfg['hidden_activation']),
dropout_rate=encoder_cfg['dropout_rate'],
attention_dropout_rate=encoder_cfg['attention_dropout_rate'],
max_sequence_length=encoder_cfg['max_position_embeddings'],
type_vocab_size=encoder_cfg['type_vocab_size'],
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg['initializer_range']),
net2net_ratio=encoder_cfg['net2net_ratio'],
net2net_layers=encoder_cfg['net2net_layers'],
lightatt_layers=encoder_cfg['lightatt_layers'],
input_pool_name=encoder_cfg['input_pool_name'],
input_pool_size=encoder_cfg['input_pool_size'])
sequence_length = dataconf.seq_length
predict_length = dataconf.max_predictions_per_seq
dummy_inputs = dict(
input_mask=tf.zeros((1, sequence_length), dtype=tf.int32),
input_positions=tf.zeros((1, sequence_length), dtype=tf.int32),
input_type_ids=tf.zeros((1, sequence_length), dtype=tf.int32),
input_word_ids=tf.zeros((1, sequence_length), dtype=tf.int32),
masked_lm_positions=tf.zeros((1, predict_length), dtype=tf.int32),
masked_input_ids=tf.zeros((1, predict_length), dtype=tf.int32),
masked_segment_ids=tf.zeros((1, predict_length), dtype=tf.int32),
masked_lm_weights=tf.zeros((1, predict_length), dtype=tf.float32))
_ = encoder_network(dummy_inputs)
if 'cls_heads' in model_cfg:
classification_heads = [
layers.ClassificationHead(**cfg) for cfg in model_cfg['cls_heads']
]
else:
classification_heads = []
model = small_pretrainer.BertPretrainModel(
mlm_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg['initializer_range']),
mlm_activation=tf_utils.get_activation(
encoder_cfg['hidden_activation']),
encoder_network=encoder_network,
classification_heads=classification_heads)
_ = model(dummy_inputs)
return model
@staticmethod
def transform_model(small_model, model):
# copy variable weights
# pylint: disable=protected-access
encoder = model.encoder_network
small_encoder = small_model.encoder_network
model_embed_width = encoder.get_config()['embedding_width']
model_hidden_size = encoder.get_config()['hidden_size']
if model_embed_width is not None and model_embed_width != model_hidden_size:
encoder._embedding_projection.set_weights(
small_encoder._embedding_projection_layer.get_weights())
encoder._embedding_layer.set_weights(
small_encoder._embedding_layer.get_weights())
encoder._type_embedding_layer.set_weights(
small_encoder._type_embedding_layer.get_weights())
encoder._position_embedding_layer.set_weights(
small_encoder._position_embedding_layer.get_weights())
encoder._embedding_norm_layer.set_weights(
small_encoder._embedding_norm_layer.get_weights())
encoder._pooler_layer.set_weights(small_encoder._pooler_layer.get_weights())
model.masked_lm.bias.assign(small_model.masked_lm.bias)
model.masked_lm.dense.set_weights(small_model.masked_lm.dense.get_weights())
model.masked_lm.layer_norm.set_weights(
small_model.masked_lm.layer_norm.get_weights())
for i, cls_head in enumerate(model.classification_heads):
cls_head.set_weights(small_model.classification_heads[i].get_weights())
small_layers = small_encoder.transformer_layers
small_num_layers = len(small_layers)
num_layers = len(encoder.transformer_layers)
logging.info('num_layers: %d, num_small_layers: %d', num_layers,
small_num_layers)
if small_num_layers != num_layers:
for i, layer in enumerate(encoder.transformer_layers):
small_idx = i % small_num_layers
logging.info('stack: %d -> %d', i, small_idx)
small_layer = small_layers[small_idx]
layer.set_weights(small_layer.get_weights())
else:
for i, layer in enumerate(encoder.transformer_layers):
logging.info('!!! recover layer %d', i)
small_layer = small_layers[i]
# init attention layer
attention_layer = layer._attention_layer
small_attention_layer = small_layer._attention_layer
attention_layer._value_dense.set_weights(
small_attention_layer._value_dense.get_weights())
attention_layer._output_dense.set_weights(
small_attention_layer._output_dense.get_weights())
if hasattr(
small_layer, 'use_lightatt') and small_layer.use_lightatt and (
not hasattr(layer, 'use_lightatt') or not layer.use_lightatt):
logging.info('!!! recover lightatt')
attention_layer._key_dense.set_weights(encoder.transformer_layers[
i - 1]._attention_layer._key_dense.get_weights())
attention_layer._query_dense.set_weights(encoder.transformer_layers[
i - 1]._attention_layer._query_dense.get_weights())
else:
attention_layer._key_dense.set_weights(
small_attention_layer._key_dense.get_weights())
attention_layer._query_dense.set_weights(
small_attention_layer._query_dense.get_weights())
if hasattr(small_layer,
'net2net_ratio') and small_layer.net2net_ratio is not None:
if hasattr(layer,
'net2net_ratio') and layer.net2net_ratio is not None:
layer._output_dense_small.set_weights(
small_layer._output_dense_small.get_weights())
layer._intermediate_dense_small.set_weights(
small_layer._intermediate_dense_small.get_weights())
else:
k = int(1 // small_layer.net2net_ratio)
logging.info('!!! recover net2net %d', k)
output_kernel, output_bias = layer._output_dense.get_weights()
interm_kernel, interm_bias = layer._intermediate_dense.get_weights()
output_small_kernel, output_small_bias = small_layer._output_dense_small.get_weights(
)
interm_small_kernel, interm_small_bias = small_layer._intermediate_dense_small.get_weights(
)
# check size
small_interm_size = interm_small_kernel.shape[1]
assert interm_kernel.shape[0] == output_kernel.shape[
1] == output_bias.shape[0] == model_hidden_size
error_message = (
f'interm_kernel.shape1={interm_kernel.shape[1]}, '
f'output_kernel.shape[0]={output_kernel.shape[0]}, '
f'small_interm_size={small_interm_size}, k={k}')
assert interm_kernel.shape[1] == output_kernel.shape[
0] == interm_bias.shape[
0] == small_interm_size * k, error_message
# restore
new_output_bias = output_small_bias
new_interm_bias = numpy.tile(interm_small_bias, k)
new_interm_kernel = numpy.tile(interm_small_kernel, [1, k])
new_output_kernel = numpy.tile(output_small_kernel, [k, 1]) / k
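            # Tiling reproduces each small intermediate unit k times, and dividing
            # the tiled output kernel by k averages the k identical copies, so the
            # widened feed-forward block computes the same function as the small
            # one (net2net-style function-preserving expansion).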
layer._output_dense.set_weights(
[new_output_kernel, new_output_bias])
layer._intermediate_dense.set_weights(
[new_interm_kernel, new_interm_bias])
else:
layer._output_dense.set_weights(
small_layer._output_dense.get_weights())
layer._intermediate_dense.set_weights(
small_layer._intermediate_dense.get_weights())
layer._output_layer_norm.set_weights(
small_layer._output_layer_norm.get_weights())
layer._attention_layer_norm.set_weights(
small_layer._attention_layer_norm.get_weights())
# pylint: enable=protected-access
def initialize(self, model):
init_dir_or_path = self.task_config.init_checkpoint
logging.info('init dir_or_path: %s', init_dir_or_path)
if not init_dir_or_path:
return
if tf.io.gfile.isdir(init_dir_or_path):
init_dir = init_dir_or_path
init_path = tf.train.latest_checkpoint(init_dir_or_path)
else:
init_path = init_dir_or_path
init_dir = os.path.dirname(init_path)
logging.info('init dir: %s', init_dir)
logging.info('init path: %s', init_path)
# restore from small model
init_yaml_path = os.path.join(init_dir, 'params.yaml')
if not tf.io.gfile.exists(init_yaml_path):
init_yaml_path = os.path.join(os.path.dirname(init_dir), 'params.yaml')
with tf.io.gfile.GFile(init_yaml_path, 'r') as rf:
init_yaml_config = yaml.safe_load(rf)
init_model_config = init_yaml_config['task']['model']
if 'progressive' in init_yaml_config['trainer']:
stage_list = init_yaml_config['trainer']['progressive']['stage_list']
if stage_list:
small_encoder_config = stage_list[-1]['small_encoder_config']
if small_encoder_config is not None:
small_encoder_config = ecfg.from_bert_encoder_config(
init_model_config['encoder']['bert'], small_encoder_config)
init_model_config['encoder']['bert'] = small_encoder_config.as_dict()
# check if model size matches
assert init_model_config['encoder']['bert'][
'hidden_size'] == model.encoder_network.get_config()['hidden_size']
# build small model
small_model = self.build_small_model(init_model_config)
ckpt = tf.train.Checkpoint(model=small_model)
ckpt.restore(init_path).assert_existing_objects_matched()
self.transform_model(small_model, model)
|
|
import copy
import random
import math
PARTICLES = 108
TEMPERATURE = 2.0
TIME_STEP = 0.003
ANDERSEN_FREQUENCY = 0.01
CUTOFF = 2.5
class Particle:
#Particle properties
position = [0, 0, 0]
previous_position = None
velocity = [0, 0, 0]
force = [0, 0, 0]
potential = 0
virial = 0
def __init__(self, position, velocity, dt):
"""Initialize the particle with given position and velocity."""
self.position = position
self.velocity = velocity
x = self.position[0] - self.velocity[0]*dt
y = self.position[1] - self.velocity[1]*dt
z = self.position[2] - self.velocity[2]*dt
self.previous_position = [x, y, z]
def get_squared_velocity(self):
"""Get the square of the velocity."""
sumv2 = 0
for n in range(len(self.velocity)):
sumv2 += self.velocity[n]**2
return sumv2
def get_force(self):
"""Get the resultant force on the particle."""
force = math.sqrt(self.force[0]**2 + self.force[1]**2 + self.force[2]**2)
return force
class LJContainer:
particles = []
temperature = 0
density = 0
length = 0
nu = ANDERSEN_FREQUENCY
rc = CUTOFF
    # Tail corrections for the truncated potential. They depend on the density,
    # so they are computed per instance in __init__; at class-definition time
    # the density above is still 0.
    Ptail = 0.0
    Utail = 0.0
data = {"t" : [],
"K" : [],
"V" : [],
"T" : [],
"P" : [],
"temp" : []}
def __init__(self, density):
"""Set up the container."""
#Calculate container dimensions from the density
#Particle radius is set to 1
self.length = (PARTICLES / density)**(1.0/3)
self.temperature = TEMPERATURE
        self.density = density
        self.timestep = TIME_STEP
        #Tail corrections to energy and pressure for the truncated potential
        self.Utail = 8.0/3.0 * math.pi * density * (1.0/3.0 * (1/self.rc)**9 - (1/self.rc)**3)
        self.Ptail = 16.0/3.0 * math.pi * density**2 * (2.0/3.0 * (1/self.rc)**9 - (1/self.rc)**3)
#Initialize particles
self.initialize(PARTICLES, TIME_STEP)
def initialize(self, number_of_particles, dt):
"""Initialize the container with particles."""
#Calculate the spacing between particles
spacing = self.length / 6.0
#Generate starting velocities
velocities = self.generate_velocities(number_of_particles)
for n in range(number_of_particles):
#Start with 3 layers of 6x6 particles
            #Use floor division so the lattice indices stay integral on Python 3 as well
            x = (n%6 + 1) * spacing
            y = ((n - (n//36)*36)//6 + 1) * spacing
            z = (n//36 + 1) * spacing
position = [x, y, z,]
velocity = velocities.pop()
particle = Particle(position, velocity, dt)
self.particles.append(particle)
def generate_velocities(self, number_of_particles):
"""Generate an initial velocity distribution of number_of_particles velocities."""
dimensions = 3
velocities = []
vtot = [0] * dimensions
v2tot = [0] * dimensions
fs = [0] * dimensions
#Generate velocities from a uniform distribution
for i in range(number_of_particles):
v = [0] * dimensions
v2 = [0] * dimensions
for d in range(dimensions):
vn = random.uniform(-0.5, 0.5)
v[d] = vn
v2[d] = vn**2
vtot[d] += vn
v2tot[d] += vn**2
velocities.append(v)
#Calculate total velocity and scaling factor
for d in range(dimensions):
vtot[d] = vtot[d] / number_of_particles
v2tot[d] = v2tot[d] / number_of_particles
fs[d] = math.sqrt(self.temperature / v2tot[d])
#Scale and shift velocities
for i in range(number_of_particles):
vn = [0] * dimensions
for d in range(dimensions):
vn[d] = (velocities[i][d] - vtot[d]) * fs[d]
velocities[i] = vn
return velocities
def update_forces(self):
"""Update forces on all particles."""
#Reset before generating new forces
self.reset_particles()
#Calculate cutoff potential
rc = 2.5
ecut = 4 * ((1/rc)**12 - (1/rc)**6)
for i in range(0, len(self.particles) - 1):
for j in range(i, len(self.particles)):
if i != j:
dn = [self.particles[i].position[0] - self.particles[j].position[0],
self.particles[i].position[1] - self.particles[j].position[1],
self.particles[i].position[2] - self.particles[j].position[2]]
#Enforce PBC
dn[0] = dn[0] - self.length*round(dn[0]/self.length)
dn[1] = dn[1] - self.length*round(dn[1]/self.length)
dn[2] = dn[2] - self.length*round(dn[2]/self.length)
r2 = dn[0]**2 + dn[1]**2 + dn[2]**2
if r2 < rc**2:
r2i = 1/r2
r6i = r2i**3
u = 4 * r6i * (r6i - 1) - ecut
f = 48 * r2i * r6i * (r6i - 0.5)
self.particles[i].potential += u
self.particles[j].potential += u
for d in range(3):
self.particles[i].force[d] += f*dn[d]
self.particles[j].force[d] -= f*dn[d]
self.particles[i].virial += f*dn[d]**2
def tick(self, rescale=False):
"""Perform one time step of the system."""
#Andersen thermostat step 1
self.update_positions(self.timestep, Andersen_phase=1)
#Update forces
self.update_forces()
#Andersen thermostat step 2
self.update_positions(self.timestep, Andersen_phase=2)
#Rescale velocities
if rescale:
self.rescale()
def rescale(self):
"""Rescale the velocities to match set temperature."""
scale_factor = [0] * 3
v2tot = [0] * 3
for particle in self.particles:
for dim in range(3):
v2tot[dim] += particle.velocity[dim]**2
for dim in range(3):
v2tot[dim] = v2tot[dim] / len(self.particles)
scale_factor[dim] = math.sqrt(self.temperature / v2tot[dim])
for particle in self.particles:
for dim in range(3):
particle.velocity[dim] = particle.velocity[dim] * scale_factor[dim]
def update_positions(self, dt, Andersen_phase=1):
"""Update the position of the particle according to the velocity Verlet algorithm."""
if Andersen_phase == 1:
for p in self.particles:
for n in range(len(p.position)):
p.position[n] = p.position[n] + dt*p.velocity[n] + 0.5*p.force[n]*dt**2
p.position[n] = p.position[n] % self.length
p.velocity[n] = p.velocity[n] + 0.5*dt*p.force[n]
elif Andersen_phase == 2:
insttemp = 0
for p in self.particles:
for n in range(len(p.position)):
p.velocity[n] = p.velocity[n] + 0.5*dt*p.force[n]
insttemp += p.velocity[n]**2
insttemp = insttemp/(3.0 * len(self.particles))
sigma = math.sqrt(self.temperature)
for p in self.particles:
if random.uniform(0, 1) < self.nu*dt:
for n in range(len(p.position)):
p.velocity[n] = random.gauss(0, sigma)
def reset_particles(self):
"""Reset forces and potential energy on all particles."""
tail = 0 + self.Utail
for i in range(len(self.particles)):
force = [0] * 3
#Reset forces
self.particles[i].force = force
#Reset potential energy to tail correction
self.particles[i].potential = tail
self.particles[i].virial = 0
def get_current_temperature(self):
"""Calculate instantaneous temperature."""
K = 0
#Calculate kinetic energy
for particle in self.particles:
K += particle.get_squared_velocity()
#Get temperature from kinetic energy and degrees of freedom
T = K / (3.0*len(self.particles))
return T
def sample(self, time):
"""Sample ensemble properties."""
self.data["t"].append(time)
K = 0
V = 0
Fr = 0
for particle in self.particles:
K += particle.get_squared_velocity()
V += particle.potential
Fr += particle.virial
#Calculate per-particle energies
K = K/len(self.particles)
V = V/len(self.particles)
#Calculate pressure
P = self.density * self.get_current_temperature() + 1/(3*self.length**3) * Fr + self.Ptail
self.data["K"].append(K)
self.data["V"].append(V)
self.data["T"].append(K + V)
self.data["P"].append(P)
self.data["temp"].append(self.get_current_temperature())
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import mock
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.identity.v3 import trust
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
class TestTrust(identity_fakes.TestIdentityv3):
def setUp(self):
super(TestTrust, self).setUp()
self.trusts_mock = self.app.client_manager.identity.trusts
self.trusts_mock.reset_mock()
self.projects_mock = self.app.client_manager.identity.projects
self.projects_mock.reset_mock()
self.users_mock = self.app.client_manager.identity.users
self.users_mock.reset_mock()
self.roles_mock = self.app.client_manager.identity.roles
self.roles_mock.reset_mock()
class TestTrustCreate(TestTrust):
def setUp(self):
super(TestTrustCreate, self).setUp()
self.projects_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
self.users_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.USER),
loaded=True,
)
self.roles_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.ROLE),
loaded=True,
)
self.trusts_mock.create.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.TRUST),
loaded=True,
)
# Get the command object to test
self.cmd = trust.CreateTrust(self.app, None)
def test_trust_create_basic(self):
arglist = [
'--project', identity_fakes.project_id,
'--role', identity_fakes.role_id,
identity_fakes.user_id,
identity_fakes.user_id
]
verifylist = [
('project', identity_fakes.project_id),
('impersonate', False),
('role', [identity_fakes.role_id]),
('trustor', identity_fakes.user_id),
('trustee', identity_fakes.user_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class ShowOne in cliff, abstract method take_action()
# returns a two-part tuple with a tuple of column names and a tuple of
# data to be shown.
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'impersonation': False,
'project': identity_fakes.project_id,
'role_ids': [identity_fakes.role_id],
'expires_at': None,
}
# TrustManager.create(trustee_id, trustor_id, impersonation=,
# project=, role_names=, expires_at=)
self.trusts_mock.create.assert_called_with(
identity_fakes.user_id,
identity_fakes.user_id,
**kwargs
)
collist = ('expires_at', 'id', 'impersonation', 'project_id',
'roles', 'trustee_user_id', 'trustor_user_id')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.trust_expires,
identity_fakes.trust_id,
identity_fakes.trust_impersonation,
identity_fakes.project_id,
identity_fakes.role_name,
identity_fakes.user_id,
identity_fakes.user_id
)
self.assertEqual(datalist, data)
class TestTrustDelete(TestTrust):
def setUp(self):
super(TestTrustDelete, self).setUp()
# This is the return value for utils.find_resource()
self.trusts_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.TRUST),
loaded=True,
)
self.trusts_mock.delete.return_value = None
# Get the command object to test
self.cmd = trust.DeleteTrust(self.app, None)
def test_trust_delete(self):
arglist = [
identity_fakes.trust_id,
]
verifylist = [
('trust', [identity_fakes.trust_id])
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.trusts_mock.delete.assert_called_with(
identity_fakes.trust_id,
)
self.assertIsNone(result)
@mock.patch.object(utils, 'find_resource')
def test_delete_multi_trusts_with_exception(self, find_mock):
find_mock.side_effect = [self.trusts_mock.get.return_value,
exceptions.CommandError]
arglist = [
identity_fakes.trust_id,
'unexist_trust',
]
verifylist = [
('trust', arglist),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
try:
self.cmd.take_action(parsed_args)
self.fail('CommandError should be raised.')
except exceptions.CommandError as e:
self.assertEqual('1 of 2 trusts failed to delete.',
str(e))
find_mock.assert_any_call(self.trusts_mock, identity_fakes.trust_id)
find_mock.assert_any_call(self.trusts_mock, 'unexist_trust')
self.assertEqual(2, find_mock.call_count)
self.trusts_mock.delete.assert_called_once_with(
identity_fakes.trust_id)
class TestTrustList(TestTrust):
def setUp(self):
super(TestTrustList, self).setUp()
self.trusts_mock.list.return_value = [
fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.TRUST),
loaded=True,
),
]
# Get the command object to test
self.cmd = trust.ListTrust(self.app, None)
def test_trust_list_no_options(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class Lister in cliff, abstract method take_action()
# returns a tuple containing the column names and an iterable
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
self.trusts_mock.list.assert_called_with()
collist = ('ID', 'Expires At', 'Impersonation', 'Project ID',
'Trustee User ID', 'Trustor User ID')
self.assertEqual(collist, columns)
datalist = ((
identity_fakes.trust_id,
identity_fakes.trust_expires,
identity_fakes.trust_impersonation,
identity_fakes.project_id,
identity_fakes.user_id,
identity_fakes.user_id
), )
self.assertEqual(datalist, tuple(data))
class TestTrustShow(TestTrust):
def setUp(self):
super(TestTrustShow, self).setUp()
self.trusts_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.TRUST),
loaded=True,
)
# Get the command object to test
self.cmd = trust.ShowTrust(self.app, None)
def test_trust_show(self):
arglist = [
identity_fakes.trust_id,
]
verifylist = [
('trust', identity_fakes.trust_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class ShowOne in cliff, abstract method take_action()
# returns a two-part tuple with a tuple of column names and a tuple of
# data to be shown.
columns, data = self.cmd.take_action(parsed_args)
self.trusts_mock.get.assert_called_with(identity_fakes.trust_id)
collist = ('expires_at', 'id', 'impersonation', 'project_id',
'roles', 'trustee_user_id', 'trustor_user_id')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.trust_expires,
identity_fakes.trust_id,
identity_fakes.trust_impersonation,
identity_fakes.project_id,
identity_fakes.role_name,
identity_fakes.user_id,
identity_fakes.user_id
)
self.assertEqual(datalist, data)
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from abc import abstractmethod, abstractproperty
from collections import defaultdict, deque
from pants.backend.core.tasks.task import Task, TaskBase
from pants.base.build_graph import invert_dependencies
from pants.base.workunit import WorkUnit
from pants.goal.goal import Goal
from pants.option.scope import ScopeInfo
class GroupMember(TaskBase):
@classmethod
def name(cls):
"""Returns a name for this group for display purposes.
By default returns the GroupMember subtype's class name.
"""
return cls.__name__
@abstractmethod
def select(self, target):
"""Return ``True`` to claim the target for processing.
Group members are consulted in the order registered in their ``GroupTask`` and the 1st group
member to select a target claims it. Only that group member will be asked to prepare execution
and later execute over a chunk containing the target.
"""
def pre_execute(self):
"""Called before preparing chunks for execution
Always called; even if no chunks are selected by this group member.
"""
def prepare_execute(self, chunks):
"""Prepare to execute the group action across the given target chunks
Only called if chunks have been selected by this group member.
Chunks are guaranteed to be presented in least dependent to most dependent order and to contain
only directly or indirectly invalidated targets.
:param list chunks: A list of chunks, each chunk being a list of targets that should be
processed together.
"""
@abstractmethod
def execute_chunk(self, targets):
"""Process the targets in this chunk.
Only called if chunks have been selected by this group member.
This chunk or targets' dependencies are guaranteed to have been processed in a prior
``execute_chunk`` round by some group member - possibly this one.
:param list targets: A list of targets that should be processed together (ie: 1 chunk)
"""
def post_execute(self):
"""Called when all invalid targets claimed by the group have been processed.
Always called; even if no chunks are selected by this group member.
"""
class GroupIterator(object):
"""Iterates the goals in a group over the chunks they own."""
@staticmethod
def coalesce_targets(targets, discriminator):
"""Returns a list of Targets that `targets` depend on sorted from least dependent to most.
The targets are grouped where possible by target type as categorized by the given discriminator.
This algorithm was historically known as the "bang" algorithm from a time when it was
optionally enabled by appending a '!' (bang) to the command line target.
"""
# We want to sort targets topologically, grouping targets of the same type if possible.
# Algorithm: BFS on the dependency graph with a separate queue per each type.
# First, enqueue the least dependent targets (roots). Choose a type with a non-empty queue,
# and process nodes from this queue until it's exhausted, then move on to the next non-empty
# queue. "To process" means to add the node to the resulting list, and to increment
# the number of "satisfied" dependencies for all its direct dependees. For every dependee
# that has all its dependencies satisfied, enqueue it in the corresponding queue.
# Since it's a directed acyclic graph, eventually all targets will be processed and added
# to the resulting list.
#
# This linear-complexity algorithm replaces the worst-case-quadratic-complexity algorithm
# that used DFS for topological sort, then trying to rearrange the targets in the resulting
# list without breaking the sorting order, repeatedly computing full dependency closure
# for the targets in the list.
#
# For benchmarking, "./pants compile" command was executed on a large target with about 1K nodes
    # in the dependency graph. The machine was a 2013 MBP with an SSD.
# The quadratic implementation took on average about 18 seconds. The linear implementation
# took on average about 1 second.
roots, inverted_deps = invert_dependencies(targets)
queues = defaultdict(deque)
queues_total_size = 0
# Enqueue roots.
for root in roots:
root_type = discriminator(root)
queues[root_type].append(root)
queues_total_size += 1
sorted_targets = []
satisfied_deps = defaultdict(int)
current_type = None
# Is there anything left to process?
while queues_total_size > 0:
# Choose a type with a non-empty queue.
for potential_type in queues.keys():
if queues[potential_type]:
current_type = potential_type
break
# Process targets of this type while possible - they will form a single chunk.
while queues[current_type]:
target = queues[current_type].popleft()
queues_total_size -= 1
sorted_targets.append(target)
# Let the dependees know that one more dependency is satisfied.
if target in inverted_deps:
for dependee in inverted_deps[target]:
satisfied_deps[dependee] += 1
# Does the dependee have all its dependencies satisfied now?
if satisfied_deps[dependee] == len(dependee.dependencies):
dependee_type = discriminator(dependee)
queues[dependee_type].append(dependee)
queues_total_size += 1
# Remove targets that are not claimed by any member.
    sorted_targets = list(filter(discriminator, sorted_targets))
return sorted_targets
def __init__(self, targets, group_members):
"""Creates an iterator that yields tuples of ``(GroupMember, [chunk Targets])``.
Chunks will be returned least dependent to most dependent such that a group member processing a
chunk can be assured that any dependencies of the chunk have been processed already.
:param list targets: The universe of targets to divide up amongst group members.
:param list group_members: A list of group members that forms the group to iterate.
"""
self._targets = targets
self._group_members = group_members
def __iter__(self):
for group_member, chunk in self._create_chunks():
yield group_member, chunk
def _create_chunks(self):
# memoized mapping from target to its type (i.e. member)
target_to_member = dict()
def discriminator(tgt):
if tgt in target_to_member:
return target_to_member[tgt]
for member in self._group_members:
if member.select(tgt):
target_to_member[tgt] = member
return member
target_to_member[tgt] = None
return None
coalesced = self.coalesce_targets(self._targets, discriminator)
chunks = []
def add_chunk(member, chunk):
if member is not None:
chunks.append((member, chunk))
group_member = None
chunk_start = 0
for chunk_num, target in enumerate(coalesced):
target_group_member = discriminator(target)
if target_group_member != group_member and chunk_num > chunk_start:
add_chunk(group_member, coalesced[chunk_start:chunk_num])
chunk_start = chunk_num
group_member = target_group_member
if chunk_start < len(coalesced):
add_chunk(group_member, coalesced[chunk_start:])
return chunks
class GroupTask(Task):
"""A task that coordinates group members who all produce a single product type.
The canonical example is a group of different compilers targeting the same output format; for
  example: javac, groovyc, scalac and clojure aot all produce classfiles for the jvm and may depend
  on each other's outputs for linkage.
  Since group members may depend on other group members' outputs (a grouped task is only useful if
  they do!), a group task ensures that each member is executed in the proper order with the proper
  input targets such that its product dependencies are met. Group members only need to claim the
  targets they own in their `select` implementation and the group task will figure out the rest
  from the dependency relationships between the targets selected by the group's members.
"""
_GROUPS = dict()
@classmethod
def named(cls, name, product_type, flag_namespace=None):
"""Returns ``GroupTask`` for the given name.
The logical group embodied by a task is identified with its name and only 1 GroupTask will be
created for a given name. If the task has already been created, it will just be returned.
:param string name: The logical name of the group.
:param list product_type: The names of the product types this group cooperatively produces.
:param list flag_namespace: The parent namespace for flags registered by member tasks.
"""
group_task = cls._GROUPS.get(name)
if not group_task:
class SingletonGroupTask(GroupTask):
_MEMBER_TYPES = []
@classmethod
def global_subsystems(cls):
return (super(SingletonGroupTask, cls).global_subsystems() +
tuple(s for mt in cls._member_types() for s in mt.global_subsystems()))
@classmethod
def task_subsystems(cls):
return (super(SingletonGroupTask, cls).task_subsystems() +
tuple(s for mt in cls._member_types() for s in mt.task_subsystems()))
@classmethod
def product_types(cls):
return product_type
# We'd prefer to get the options_scope from cls.options_scope,
# but unfortunately that hasn't been set yet.
options_scope = '.'.join(flag_namespace)
@classmethod
def known_scope_infos(cls):
"""Yields ScopeInfos for all known scopes for this task, in no particular order."""
# We need this because task.py initializes a cache factory for every task type,
# even if it's never used. This is slightly icky, but is better than forcing tasks
# to explicitly call a cache setup method. And we want to kill GroupTask anyway.
yield ScopeInfo(cls.options_scope, ScopeInfo.TASK)
for subsystem in cls.task_subsystems():
yield ScopeInfo(subsystem.subscope(cls.options_scope), ScopeInfo.TASK_SUBSYSTEM)
for member_type in cls._member_types():
for scope in member_type.known_scope_infos():
yield scope
@classmethod
def register_options_on_scope(cls, options):
for member_type in cls._member_types():
member_type.register_options_on_scope(options)
@classmethod
def _alternate_target_roots(cls, options, address_mapper, build_graph):
# We don't support groups proposing alternate roots.
# There is currently just the jvm compile group which does not need this and GroupTask
# will be removed as part of parallelizing the RoundEngine.
return None
@classmethod
def _prepare(cls, options, round_manager):
for member_type in cls._member_types():
member_type._prepare(options, round_manager)
@property
def group_name(self):
return name
group_task = SingletonGroupTask
cls._GROUPS[name] = group_task
if group_task.product_types() != product_type:
raise ValueError('The group {!r} was already registered with product type: {!r} - refusing to '
'overwrite with new product type: {!r}'.format(name, group_task.product_types(),
product_type))
return group_task
@classmethod
def _member_types(cls):
member_types = getattr(cls, '_MEMBER_TYPES')
if member_types is None:
raise TypeError('New GroupTask types must be created via GroupTask.named.')
return member_types
@classmethod
def add_member(cls, group_member):
"""Enlists a member in this group.
A group task delegates all its work to group members who act cooperatively on targets they
claim. The order members are added affects the target claim process by setting the order the
group members are asked to claim targets in on a first-come, first-served basis.
"""
if not issubclass(group_member, GroupMember):
raise ValueError('Only GroupMember subclasses can join a GroupTask, '
'given {} of type {}'.format(group_member, type(group_member)))
group_member.options_scope = Goal.scope(cls.options_scope, group_member.name())
cls._member_types().append(group_member)
def __init__(self, *args, **kwargs):
super(GroupTask, self).__init__(*args, **kwargs)
self._group_members = [member_type(self.context, os.path.join(self.workdir, member_type.name()))
for member_type in self._member_types()]
@abstractmethod
def product_types(self):
"""GroupTask must be sub-classed to provide a product type."""
@abstractproperty
def group_name(self):
"""GroupTask must be sub-classed to provide a group name."""
def execute(self):
with self.context.new_workunit(name=self.group_name, labels=[WorkUnit.GROUP]):
for group_member in self._group_members:
group_member.pre_execute()
# TODO(John Sirois): implement group-level invalidation? This might be able to be done in
# prepare_execute though by members.
# Chunk targets from the context by group. At the end, we'll have a list of chunks to be
# built.
ordered_chunks = []
chunks_by_member = defaultdict(list)
for group_member, chunk in GroupIterator(self.context.targets(), self._group_members):
ordered_chunks.append((group_member, chunk))
chunks_by_member[group_member].append(chunk)
self.context.log.debug('::: created chunks({})'.format(len(ordered_chunks)))
for i, (group_member, goal_chunk) in enumerate(ordered_chunks):
self.context.log.debug(' chunk({}) [flavor={}]:\n\t{}'.format(
i, group_member.name(), '\n\t'.join(sorted(map(str, goal_chunk)))))
# prep
for group_member, chunks in chunks_by_member.items():
group_member.prepare_execute(chunks)
# chunk zig zag
for group_member, chunk in ordered_chunks:
group_member.execute_chunk(chunk)
# finalize
for group_member in self._group_members:
group_member.post_execute()
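# --- Illustrative sketch (not part of the original module) ---
# A standalone toy version of the ordering computed by
# GroupIterator.coalesce_targets above: BFS over a dependency graph with one
# queue per "type", draining one queue at a time so that same-typed nodes
# cluster into chunks. The graph and type assignments below are made up purely
# for illustration.
if __name__ == '__main__':
  deps = {'a': [], 'b': ['a'], 'c': ['a'], 'd': ['b', 'c']}  # node -> dependencies
  node_type = {'a': 'java', 'b': 'java', 'c': 'scala', 'd': 'java'}

  dependees = defaultdict(list)
  for node, node_deps in deps.items():
    for dep in node_deps:
      dependees[dep].append(node)

  queues = defaultdict(deque)
  for node, node_deps in deps.items():
    if not node_deps:  # roots have no dependencies to wait for
      queues[node_type[node]].append(node)

  ordered = []
  satisfied = defaultdict(int)
  while len(ordered) < len(deps):
    current = next(t for t, q in queues.items() if q)
    while queues[current]:
      node = queues[current].popleft()
      ordered.append(node)
      for dependee in dependees[node]:
        satisfied[dependee] += 1
        if satisfied[dependee] == len(deps[dependee]):
          queues[node_type[dependee]].append(dependee)

  print(ordered)  # e.g. ['a', 'b', 'c', 'd'], with 'a'/'b' forming one java chunk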
|
|
# -*- coding: utf-8 -*-
"""
The heart of the ecstasy package, containing the main *Parser* class.
"""
import re
import warnings
import collections
import ecstasy.flags as flags
import ecstasy.errors as errors
def beautify(string, *args, **kwargs):
"""
Convenient interface to the ecstasy package.
Arguments:
string (str): The string to beautify with ecstasy.
args (list): The positional arguments.
kwargs (dict): The keyword ('always') arguments.
"""
parser = Parser(args, kwargs)
return parser.beautify(string)
class Phrase(object):
"""
Class describing a single parsed phrase.
    When a string is parsed in ecstasy, specially-marked regions of
    text are taken note of and converted into Phrase objects,
    which are then used to replace the parsed strings (including any
tags or arguments) with the string itself as well as the formatting
codes specified by the arguments passed to Parser.beautify(), which
are then interpreted by the command line.
Attributes:
string (str): The text of the phrase (between opening and closing tags).
opening (int): The index of the opening tag.
closing (int): The index of the closing tag.
style (int): The formatting/style flag-combination of the phrase.
nested (list): A list of nested Phrase objects (children).
override (bool): The phrase's override specification.
"""
def __init__(self,
opening=None,
closing=None,
string="",
style=0,
args=None,
nested=None,
override=False,
increment=False):
self.string = string
self.opening = opening
self.closing = closing
self.style = style
self.arguments = args if args else []
self.nested = nested if nested else []
self.override = override
self.increment = increment
def __str__(self):
return self.string
def __eq__(self, other):
return (self.string == other.string and
self.opening == other.opening and
self.closing == other.closing and
self.style == other.style and
self.arguments == other.arguments and
self.nested == other.nested and
self.override == other.override and
self.increment == other.increment)
class Parser(object):
"""
Handles parsing and beautification of a string.
    This is the main class of the entire ecstasy package. It is
initialized with a set of positional and keyword arguments that
determine which styles (flag-combinations) are used for which
phrases (tag-marked regions of text) found during parsing. Its
beautify() method is then used to beautify a string according
to the arguments passed to the constructor.
Note:
From the outside, the package-level beautify() method should
handle the construction and beautify()-call process all-in-one
(for convenience).
Attributes:
always: The list of 'always' (keyword) arguments.
positional: The list of positional arguments.
        meta: A compiled regex matching opening or closing tags (and parentheses).
        arguments: A compiled regex matching well-formed phrase arguments.
counter: A counter for positional arguments.
"""
def __init__(self, args, kwargs):
"""
Initializes a Parser instance.
Arguments:
args (list): The positional arguments.
kwargs (dict): The 'always' (keyword) arguments.
"""
self.always = kwargs
self.positional = self.get_flags(args) if args else []
self.meta = re.compile(r"[()<>]")
self.arguments = re.compile(r"^(-?\d,?)+!?$|"
r"^!?(-?\d,?)+$|"
r"^(!\+?|\+!?)$")
# Used in self.stringify to auto-increment
# positional argument positions
self.counter = 0
def get_flags(self, args):
"""
Checks and retrieves positional and 'always' (keyword) flags
from the many ways in which they may be passed to the
constructor (or the beautify() method on package-level).
Positional arguments can be passed either:
* Individually, where each flag-combination is one positional argument.
* Packaged inside a list, which is then expanded. There can be
multiple of such lists passed as arguments because it facilitates
          interaction with the ecstasy module (one may want to organize
and update styles in certain ways depending on one's program), but
each list will be expanded and all flag-combinations found inside
each list will be interpreted as a single style argument, as if it
          had been passed in the way described above (individually).
'Always' arguments can be passed either:
* Individually, with keyword-argument syntax, i.e. <word>=<style>
* In a dictionary, which is expanded exactly like positional
arguments passed in lists (i.e. each key/value pair in the
dictionary is interpreted as if it had been passed individually,
as key=value to the constructor/the external beautify() method).
Note:
self.always is set equal to the keyword arguments passed to the
constructor and then modified directly (when 'always'-arguments
are found), while the positional arguments are put into a list
here and returned (i.e. no interaction with self.positional).
Arguments:
args (list): The positional arguments passed to the constructor.
Returns:
The positional arguments.
Raises:
errors.FlagError: If an invalid (out-of-range)
flag combination was passed.
errors.EcstasyError: If one of the arguments is of invalid type.
"""
positional = []
for argument in args:
# A flag is an instance of a subclass of
# flags.Flags if it was passed alone
if isinstance(argument, flags.Flags):
positional.append(argument)
# or is an integer if it was (bitwise) OR'd
# with another flag (a "flag combination")
elif isinstance(argument, int):
if argument < 0 or argument >= flags.LIMIT:
raise errors.FlagError("Flag value '{0}' is out of range "
"!".format(argument))
positional.append(argument)
# Dictionaries store 'always'-arguments
elif isinstance(argument, dict):
for key, value in argument.items():
# Simple 'always'-argument where one string
# is mapped to one formatting flag-combination
if isinstance(key, str):
self.always[key] = value
# Complex 'always'-argument with a
# tuple containing strings, each with the same
# flag-combination (same value)
elif isinstance(key, tuple):
for i in key:
self.always[i] = value
else:
raise errors.EcstasyError("Key '{0}' in dictionary "
"argument passed is neither "
"a string nor a tuple "
"of strings!".format(key))
elif isinstance(argument, collections.Iterable):
positional += self.get_flags(argument)
else:
raise errors.EcstasyError("Argument '{0}' is neither a flag, a "
"(bitwise) OR'd flag-combination, a "
"dictionary nor an iterable of "
"positional arguments "
"!".format(argument))
return positional
def beautify(self, string):
"""
Wraps together all actions needed to beautify a string, i.e.
parse the string and then stringify the phrases (replace tags
with formatting codes).
Arguments:
string (str): The string to beautify/parse.
Returns:
The parsed, stringified and ultimately beautified string.
Raises:
errors.ArgumentError if phrases were found, but not a single style
(flag combination) was supplied.
"""
if not string:
return string
# string may differ because of escaped characters
string, phrases = self.parse(string)
if not phrases:
return string
if not self.positional and not self.always:
raise errors.ArgumentError("Found phrases, but no styles "
"were supplied!")
return self.stringify(string, phrases)
def parse(self, string, root=None):
"""
Parses a string to handle escaped tags and retrieve phrases.
This method works recursively to parse nested tags. When escaped
tags are found, those are removed from the string. Also argument
sequences are removed from the string. The string returned can
thus be quite different from the string passed.
Arguments:
string (str): The string to parse.
root (Phrase): If in a recursive call, the root/parent phrase.
Returns:
For one, the escaped string (without escape characters and
phrase arguments). For the other, it depends on the stack-depth.
If this is the lowest recursion depth/level (i.e. the stack
call resulting from the first function call in self.beautify()),
it will return a list of phrases. For higher stack levels (
            i.e. resulting from recursive function calls from within
self.parse(), for nested phrases), it returns exactly one
Phrase instance.
Raises:
errors.ParseError: If no closing tag could be
found for an opening tag.
"""
phrases = []
meta = self.meta.search(string)
while meta:
# Save some function calls
pos = meta.start()
if meta.group() == "<":
string, child, meta = self.open_phrase(string, pos)
if child and root:
root.nested.append(child)
elif child:
phrases.append(child)
# else it was escaped (+ new meta)
continue
elif root:
if meta.group() == "(":
meta = self.meta.search(string, pos + 1)
if meta.group() == ")":
string, root, meta = self.handle_arguments(string,
root,
pos,
meta.start())
continue
elif meta.group() == ">":
string, phrase, meta = self.close_phrase(string,
root,
pos)
if phrase:
return string, phrase
# else was escaped (+ new meta)
continue
string, meta = self.escape_meta(string, pos)
if not root:
return string, phrases
# If this is not the first stack-depth the function should
# have returned upon finding a closing tag,
# i.e. we should never have gotten here.
word = re.search(r"([\w\s]+)(?![\d]*>[\w\s]+>)", string)
what = "No closing tag found for opening tag"
if word:
what += " after expression '{0}'".format(word.group())
raise errors.ParseError(what + "!")
def escape_meta(self, string, pos):
"""
Checks if a meta character is escaped or else warns about it.
If the meta character has an escape character ('\') preceding it,
the meta character is escaped. If it does not, a warning is emitted
that the user should escape it.
Arguments:
string (str): The relevant string in which the character was found.
pos (int): The index of the meta character within the string.
Returns:
The possibly escaped string and the next meta match.
"""
# Replace escape character
if pos > 0 and string[pos - 1] == "\\":
string = string[:pos - 1] + string[pos:]
else:
warnings.warn("Un-escaped meta-character: '{0}' (Escape"
" it with a '\\')".format(string[pos]),
Warning)
pos += 1
meta = self.meta.search(string, pos)
return string, meta
def open_phrase(self, string, pos):
"""
Helper function of self.parse() handling opening tags.
Arguments:
string (str): The string being parsed.
pos (int): The index/position of the opening tag in the string.
Returns:
The (possibly) escaped string, a child phrase if the opening tag
was not escaped and otherwise None, and a new tag match, either
            starting at one index past the escaped tag or one index past
the closing tag of the child.
"""
# Check for escaping
        if pos > 0 and string[pos - 1] == "\\":
# Remove the escape character
string = string[:pos - 1] + string[pos:]
# When removing the escape character, the
# pos tag index is pushed one back
pos -= 1
# If the escape character was not itself (double)
# escaped we can look for the next tag
if pos == 0 or string[pos - 1] != "\\":
tag = self.meta.search(string, pos + 1)
return string, None, tag
child = Phrase(pos)
escaped, child = self.parse(string[pos + 1:], child)
string = string[:pos + 1] + escaped
tag = self.meta.search(string, child.closing + 1)
return string, child, tag
def close_phrase(self, string, root, pos):
"""
Helper function of self.parse() handling closing tags.
Arguments:
string (str): The string being parsed.
root (Phrase): The current root phrase.
pos (int): The index/position of the closing tag in the string.
Returns:
Always the (possibly) escaped string, then either the fully
formed phrase if the closing tag was not escaped (with its
'closing' and 'string' attributes set) and otherwise None,
and lastly the next tag if the closing tag was indeed escaped
and otherwise None -- i.e. either the tuple
(string, phrase, None) or (string, None, tag).
"""
# Whatever is between the opening tag and this closing tag
substring = string[:pos]
# Escape-character to escape the closing tag (/>)
if substring.endswith("\\"):
# Get rid of the escape character either way
string = string[:pos - 1] + string[pos:]
# Check if not double-escaped
if not substring[:-1].endswith("\\"):
                # pos is now one index past the closing tag
tag = self.meta.search(string, pos)
return string, None, tag
# Double-escape means this is really supposed to be a
# closing tag and thus we can return the phrase.
else:
# The closing position should be in the same scope
# as the scope of the opening position (scope in
# the sense of to which phrase the positions are
# relative to). -1 due to the escaped character but
# + 1 because index 0 is phrase.opening + 1
root.closing = root.opening + pos
root.string = string[:pos - 1]
else:
root.closing = root.opening + 1 + pos
root.string = string[:pos]
return string, root, None
def handle_arguments(self, string, root, opening, closing):
"""
Handles phrase-arguments.
Sets the override and increment flags if found. Also makes
sure that the argument sequence is at the start of the phrase
and else warns about the unescaped meta characters. If the
arguments are indeed at the start but do not match the arguments
regular expression, an error is raised.
Arguments:
string (str): The string being parsed.
            root (Phrase): The current root phrase.
            opening (int): The index of the opening parenthesis.
            closing (int): The index of the closing parenthesis.
Returns:
The (possibly escaped) string, the root phrase (if no escaping,
then with arguments and flags) and the next meta match.
Raises:
errors.ParseError: If the arguments are invalid.
"""
# The actual argument string (ignore whitespace)
args = string[opening + 1 : closing].replace(" ", "")
# The argument sequence must be at the start of the phrase
# and must match the allowed argument regular expression
if opening > 0 or not self.arguments.match(args):
if opening == 0:
raise errors.ParseError("Invalid argument sequence!")
# If escape_meta does indeed escape a character and removes
# a backward slash, the positions 'opening' and 'closing' are no
# longer valid. escape_meta does a search for the next meta
        # character though, which is then the closing parenthesis,
# so we can use its index value (in the now escaped string)
string, meta = self.escape_meta(string, opening)
string, meta = self.escape_meta(string, meta.start())
return string, root, meta
if "!" in args:
root.override = True
args = args.replace("!", "")
if "+" in args:
root.increment = True
args = args.replace("+", "")
root.arguments = [int(i) for i in args.split(",") if i]
        # Remove the argument string including parentheses
string = string[closing + 1:]
meta = self.meta.search(string)
return string, root, meta
def stringify(self, string, phrases, parent=None):
"""
Stringifies phrases.
After parsing of the string via self.parse(), this method takes the
escaped string and the list of phrases returned by self.parse() and
replaces the original phrases (with tags) with the Phrase-objects in
the list and adds the appropriate flag-combinations as determined by
the string or the position of the phrase (the string if it's in
self.always, i.e. an 'always' argument). This method also works
recursively to handle nested phrases (and resetting of parent-phrase
styles).
Arguments:
string (str): The escaped string returned by self.parse().
phrases (list): The list of Phrase-objects returned by self.parse().
parent (Phrase): For recursive calls, the current parent Phrase.
Returns:
The finished, beautifully beautified string.
Raises:
errors.ArgumentError: If more positional arguments are requested
than were supplied.
"""
last_tag = 0
beauty = ""
for phrase in phrases:
beauty += string[last_tag : phrase.opening]
if phrase.string in self.always and not phrase.override:
phrase.style = self.always[phrase.string]
if phrase.arguments:
combination = 0
for i in phrase.arguments:
try:
combination |= self.positional[i]
except IndexError:
raise errors.ArgumentError("Positional argument '{0}' "
"is out of range"
"!".format(i))
phrase.style |= combination
elif (phrase.string not in self.always or
phrase.increment or phrase.override):
try:
combination = self.positional[self.counter]
if phrase.increment or not phrase.override:
self.counter += 1
except IndexError:
self.raise_not_enough_arguments(phrase.string)
phrase.style |= combination
phrase.style = flags.codify(phrase.style)
if phrase.nested:
phrase.string = self.stringify(phrase.string,
phrase.nested,
phrase)
# After a nested phrase is over, we reset the style to the
# parent style, this gives the notion of nested styles.
reset = parent.style if parent else ""
# \033[ signifies the start of a command-line escape-sequence
beauty += "\033[{0}m{1}\033[0;{2}m".format(phrase.style,
phrase,
reset)
last_tag = phrase.closing + 1
beauty += string[last_tag:]
return beauty
def raise_not_enough_arguments(self, string):
"""
Raises an errors.ArgumentError if not enough arguments were supplied.
Takes care of formatting for detailed error messages.
Arguments:
string (str): The string of the phrase for which there weren't enough
arguments.
Raises:
errors.ArgumentError with a detailed error message.
"""
requested = errors.number(self.counter + 1)
number = len(self.positional)
verb = "was" if number == 1 else "were"
what = "Requested {} formatting argument for "\
"'{}' but only {} {} supplied!"
what = what.format(requested, string, number, verb)
raise errors.ArgumentError(what)
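# --- Illustrative usage (not part of the original module) ---
# A minimal, hedged example of the package-level beautify() entry point
# defined above. The concrete style flag (flags.Color.Red) is assumed here
# purely for illustration; any flags.Flags member or valid flag-combination
# integer would work the same way.
if __name__ == "__main__":
    # "<...>" marks a phrase; the first positional argument styles it.
    print(beautify("<Hello>, World!", flags.Color.Red))
    # 'Always' (keyword) arguments style a phrase by its text wherever it occurs.
    print(beautify("<Error>: something went wrong!", Error=flags.Color.Red))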
|
|
# Copyright 2020 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Expansion of realms_config.Realm into a flat form."""
import collections
from components.auth.proto import realms_pb2
from components.config import validation as cfg_validation
from proto import realms_config_pb2
from realms import common
from realms import permissions
from realms import validation
def expand_realms(db, project_id, realms_cfg):
"""Expands realms_config_pb2.RealmsCfg into a flat realms_pb2.Realms.
The returned realms_pb2.Realms contains realms and permissions of a single
project only. Permissions not mentioned in the project's realms are omitted.
All realms_pb2.Permission messages have names only (no metadata). api_version
field is omitted.
All such realms_pb2.Realms messages across all projects (plus a list of all
defined permissions with all their metadata) are later merged together into
a final universal realms_pb2.Realms by realms.merge(...) in
components/auth/replication.py.
Args:
db: a permissions.DB instance with current permissions and roles.
project_id: ID of a LUCI project to use as a prefix in realm names.
realms_cfg: an instance of realms_config_pb2.RealmsCfg to expand.
Returns:
realms_pb2.Realms with expanded realms (with caveats mentioned above).
Raises:
ValueError if the validation fails.
"""
# `internal` is True when expanding internal realms (defined in a service
# config file). Such realms can use internal roles and permissions and they
# do not have implicit root bindings (since they are not associated with
# any "project:<X>" identity used in implicit root bindings).
internal = project_id == common.INTERNAL_PROJECT
# The server code could have changed since the config passed the validation
# and realms_cfg may not be valid anymore. Verify it still is. The code below
# depends crucially on the validity of realms_cfg.
validation.Validator(
cfg_validation.Context.raise_on_error(), db, internal,
).validate(realms_cfg)
  # Make sure the @root realm exists and append implicit bindings to it. We need to
# do it before enumerating conditions below to actually instantiate all
# Condition objects that we'll need to visit (some of them may come from
# implicit bindings). Pre-instantiating them is important because we rely
  # on their unique and stable id(...) for faster hash map lookups.
realms_map = to_realms_map(
realms_cfg,
db.implicit_root_bindings(project_id) if not internal else [])
# We'll need to visit realms in sorted order twice. Sort once and remember.
realms_list = sorted(realms_map.items())
# Prepopulate `conds_set` with all conditions mentioned in all bindings to
# normalize, dedup and map them to integers. Integers are faster to work
# with and we'll need them for the final proto message.
conds_set = ConditionsSet()
for _, realm in realms_list:
for binding in realm.bindings:
for cond in binding.conditions:
conds_set.add_condition(cond)
all_conditions = conds_set.finalize()
# A lazily populated {role -> tuple of permissions} mapping.
roles_expander = RolesExpander(db.roles, realms_cfg.custom_roles)
# A helper to traverse the realms graph.
realms_expander = RealmsExpander(roles_expander, conds_set, realms_map)
# Visit all realms and build preliminary bindings as pairs of
# (a tuple with permission indexes, a list of principals who have them). The
# bindings are preliminary since we don't know final permission indexes yet
# and instead use some internal indexes as generated by RolesExpander. We
# need to finish this first pass to gather the list of ALL used permissions,
# so we can calculate final indexes. This is done inside of `roles_expander`.
realms = [] # [(name, {(permissions tuple, conditions tuple) => [principal]}]
for name, _ in realms_list:
# Build a mapping from a principal+conditions to the permissions set.
#
# Each map entry `(principal, tuple(conds)) => set(perms)` means `principal`
# is granted the given set of permissions if all given conditions allow it.
#
# This step essentially deduplicates permission bindings that result from
# expanding realms and roles inheritance chains.
principal_to_perms = collections.defaultdict(set)
for principal, perms, conds in realms_expander.per_principal_bindings(name):
principal_to_perms[(principal, conds)].update(perms)
# Combine entries with the same set of permissions+conditions into one.
#
# Each map entry `(tuple(perms), tuple(conds)) => list(principal)` means
# all `principals` are granted all given permissions if all given conditions
# allow it.
#
# This step merges principal sets of identical bindings to have a more
# compact final representation.
perms_to_principals = collections.defaultdict(list)
for (principal, conds), perms in principal_to_perms.items():
perms_norm = tuple(sorted(perms))
perms_to_principals[(perms_norm, conds)].append(principal)
# perms_to_principals is essentially a set of all binding in a realm.
realms.append((name, perms_to_principals))
# We now know all permissions ever used by all realms. Convert them into the
# form suitable for realm_pb2 by sorting alphabetically. Keep the mapping
# between old and new indexes, to be able to change indexes in permission
# tuples we stored in `realms`.
perms, index_map = roles_expander.sorted_permissions()
# Build the final sorted form of all realms by relabeling permissions
# according to the index_map and by sorting stuff.
return realms_pb2.Realms(
permissions=[realms_pb2.Permission(name=p) for p in perms],
conditions=all_conditions,
realms=[
realms_pb2.Realm(
name='%s:%s' % (project_id, name),
bindings=to_normalized_bindings(perms_to_principals, index_map),
data=realms_expander.realm_data(name),
)
for name, perms_to_principals in realms
])
class RolesExpander(object):
"""Keeps track of permissions and `role => [permission]` expansions.
Permissions are represented internally as integers to speed up set operations.
The mapping from a permission to a corresponding integer is lazily built and
should be considered arbitrary (it depends on the order of method calls). But
it doesn't matter since in the end we relabel all permissions according to
their indexes in the final sorted list of permissions.
Should be used only with validated realms_config_pb2.RealmsCfg, may cause
stack overflow or raise random exceptions otherwise.
"""
def __init__(self, builtin_roles, custom_roles):
self._builtin_roles = builtin_roles
self._custom_roles = {r.name: r for r in custom_roles}
self._permissions = {} # permission name => its index
self._roles = {} # role name => set indexes of permissions
def _perm_index(self, name):
"""Returns an internal index that represents the given permission string."""
idx = self._permissions.get(name)
if idx is None:
idx = len(self._permissions)
self._permissions[name] = idx
return idx
def _perm_indexes(self, iterable):
"""Yields indexes of given permission strings."""
return (self._perm_index(p) for p in iterable)
def role(self, role):
"""Returns an unsorted tuple of indexes of permissions of the role."""
perms = self._roles.get(role)
if perms is not None:
return perms
if role.startswith(permissions.BUILTIN_ROLE_PREFIX):
perms = self._perm_indexes(self._builtin_roles[role].permissions)
elif role.startswith(permissions.CUSTOM_ROLE_PREFIX):
custom_role = self._custom_roles[role]
perms = set(self._perm_indexes(custom_role.permissions))
for parent in custom_role.extends:
perms.update(self.role(parent))
else:
raise AssertionError('Impossible role %s' % (role,))
perms = tuple(perms)
self._roles[role] = perms
return perms
def sorted_permissions(self):
"""Returns a sorted list of permission and a old->new index mapping list.
See to_normalized_bindings below for how it is used.
"""
perms = sorted(self._permissions)
mapping = [None]*len(perms)
for new_idx, p in enumerate(perms):
old_idx = self._permissions[p]
mapping[old_idx] = new_idx
assert all(v is not None for v in mapping), mapping
return perms, mapping
class ConditionsSet(object):
"""Normalizes and dedups conditions, maps them to integers.
Assumes all incoming realms_config_pb2.Condition are immutable and dedups
them by *identity* (using id(...) function), as well as by normalized values.
Also assumes the set of all possible *objects* ever passed to indexes(...) was
also passed to add_condition(...) first (so it could build id => index map).
  This makes the hot indexes(...) function fast by allowing it to look up ids instead
of (potentially huge) protobuf message values.
"""
def __init__(self):
# A mapping from a serialized normalized realms_pb2.Condition to a pair
# (normalized realms_pb2.Condition, its unique index).
self._normalized = {}
# A mapping from id(realms_config_pb2.Condition) to its matching index.
self._mapping = {}
# A list of all different objects ever passed to add_condition, to retain
# pointers to them to make sure their id(...)s are not reallocated by Python
# to point to other objects.
self._retain = []
# True if finalize() was already called.
self._finalized = False
def add_condition(self, cond):
"""Adds realms_config_pb2.Condition to the set if not already there."""
assert not self._finalized
assert isinstance(cond, realms_config_pb2.Condition), cond
# Check if we already processed this exact object before.
if id(cond) in self._mapping:
return
# Normalize realms_config_pb2.Condition into a realms_pb2.Condition.
norm = realms_pb2.Condition()
if cond.HasField('restrict'):
norm.restrict.attribute = cond.restrict.attribute
norm.restrict.values.extend(sorted(set(cond.restrict.values)))
else:
# Note: this should not be happening, we validated all inputs already.
raise ValueError('Invalid empty condition %r' % cond)
# Get a key for the dictionary, since `norm` itself is unhashable and can't
# be used as a key.
key = norm.SerializeToString()
# Append it to the set of unique conditions if not already there.
idx = self._normalized.setdefault(key, (norm, len(self._normalized)))[1]
# Remember that we mapped this particular `cond` *object* to this index.
self._mapping[id(cond)] = idx
self._retain.append(cond)
def finalize(self):
"""Finalizes the set preventing any future add_condition(...) calls.
Sorts the list of stored conditions according to some stable order and
returns the final sorted list of realms_pb2.Condition. Indexes returned by
indexes(...) will refer to indexes in this list.
"""
assert not self._finalized
self._finalized = True
# Sort according to their binary representations. The order doesn't matter
# as long as it is reproducible.
conds = [
val for _, val in
        sorted(self._normalized.items(), key=lambda kv: kv[0])
]
self._normalized = None # won't need it anymore
# Here `conds` is a list of pairs (cond, its old index). We'll need
# to change self._mapping to use new indexes (matching the new order in
# `conds`). Build the remapping dict {old index => new index}.
old_to_new = {old: new for new, (_, old) in enumerate(conds)}
assert len(old_to_new) == len(conds)
# Change indexes in _mapping to use the new order.
for key, old in self._mapping.items():
self._mapping[key] = old_to_new[old]
# Return the final list of conditions in the new order.
return [cond for cond, _ in conds]
def indexes(self, conds):
"""Given a list of realms_config_pb2.Condition returns a sorted index tuple.
Can be called only after finalize(). All given conditions must have
    previously been put into the set via add_condition(...). The returned tuple
can have fewer elements if some conditions in `conds` are equivalent.
The returned tuple is essentially a compact encoding of the overall AND
condition expression in a binding.
"""
assert self._finalized
# Skip function calls for two most common cases.
if not conds:
return ()
if len(conds) == 1:
return (self._mapping[id(conds[0])],)
return tuple(sorted(set(self._mapping[id(cond)] for cond in conds)))
class RealmsExpander(object):
"""Helper to traverse the realm inheritance graph."""
def __init__(self, roles, conds_set, realms_map):
self._roles = roles
self._conds_set = conds_set
self._realms = realms_map # name -> realms_config_pb2.Realm
self._data = {} # name -> realms_pb2.RealmData, memoized
@staticmethod
def _parents(realm):
"""Given a realms_config_pb2.Realm yields names of immediate parents."""
if realm.name == common.ROOT_REALM:
return
yield common.ROOT_REALM
for name in realm.extends:
if name != common.ROOT_REALM:
yield name
def per_principal_bindings(self, realm):
"""Yields tuples (a single principal, permissions tuple, conditions tuple).
Visits all bindings in the realm and its parent realms. Returns a lot of
duplicates. It's the caller's job to skip them.
"""
r = self._realms[realm]
assert r.name == realm
for b in r.bindings:
perms = self._roles.role(b.role) # the tuple of permissions of the role
conds = self._conds_set.indexes(b.conditions) # the tuple with conditions
for principal in b.principals:
yield principal, perms, conds
for parent in self._parents(r):
for principal, perms, conds in self.per_principal_bindings(parent):
yield principal, perms, conds
def realm_data(self, name):
"""Returns calculated realms_pb2.RealmData for a realm."""
if name not in self._data:
realm = self._realms[name]
extends = [self.realm_data(p) for p in self._parents(realm)]
self._data[name] = derive_realm_data(realm, [x for x in extends if x])
return self._data[name]
def to_realms_map(realms_cfg, implicit_root_bindings):
"""Returns a map {realm name => realms_config_pb2.Realm}.
Makes sure the @root realm is defined there, adding it if necessary.
Appends the given list of bindings to the root realm.
Args:
realms_cfg: the original realms_config_pb2.Realms message.
implicit_root_bindings: a list of realms_config_pb2.Binding to add to @root.
"""
realms = {r.name: r for r in realms_cfg.realms}
root = realms_config_pb2.Realm(name=common.ROOT_REALM)
if common.ROOT_REALM in realms:
root.CopyFrom(realms[common.ROOT_REALM])
root.bindings.extend(implicit_root_bindings)
realms[common.ROOT_REALM] = root
return realms
def to_normalized_bindings(perms_to_principals, index_map):
"""Produces a sorted list of realms_pb2.Binding.
Bindings are given as a map from (permission tuple, conditions tuple) to
a list of principals that should have all given permission if all given
conditions allow.
Permissions are specified through their internal indexes as produced by
RolesExpander. We convert them into "public" ones (the ones that correspond
to the sorted permissions list in the realms_pb2.Realms proto). The mapping
from an old to a new index is given by `new = index_map[old]`.
Conditions are specified as indexes in ConditionsSet, we use them as they are,
since by construction of ConditionsSet, all conditions are in use and we don't
need any extra filtering (and consequently index remapping to skip gaps) as we
do for permissions.
Args:
perms_to_principals: {(permissions tuple, conditions tuple) => [principal]}.
index_map: defines how to remap permission indexes (old -> new).
Returns:
A sorted list of realm_pb2.Binding.
"""
normalized = (
(sorted(index_map[idx] for idx in perms), conds, sorted(principals))
for (perms, conds), principals in perms_to_principals.items()
)
return [
realms_pb2.Binding(
permissions=perms,
principals=principals,
conditions=conds)
for perms, conds, principals in sorted(normalized)
]
def derive_realm_data(realm, extends):
"""Calculates realms_pb2.RealmData from the realm config and parent data.
Args:
realm: realms_config_pb2.Realm to calculate the data for.
extends: a list of realms_pb2.RealmData it extends from.
Returns:
realms_pb2.RealmData or None if empty.
"""
enforce_in_service = set(realm.enforce_in_service)
for d in extends:
enforce_in_service.update(d.enforce_in_service)
if not enforce_in_service:
return None
return realms_pb2.RealmData(enforce_in_service=sorted(enforce_in_service))
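# --- Illustrative sketch (not part of the original module) ---
# A standalone illustration of the two per-realm dedup steps performed in
# expand_realms() above, using plain ints as permission indexes and an empty
# tuple as the (absent) condition set. The bindings are made up for
# illustration only.
if __name__ == '__main__':
  flat_bindings = [
      # (principal, permissions tuple, conditions tuple), as yielded by
      # RealmsExpander.per_principal_bindings(...).
      ('group:eng', (0, 1), ()),
      ('group:eng', (1, 2), ()),
      ('user:a@example.com', (0, 1, 2), ()),
  ]

  # Step 1: principal+conditions => union of granted permissions.
  principal_to_perms = collections.defaultdict(set)
  for principal, perms, conds in flat_bindings:
    principal_to_perms[(principal, conds)].update(perms)

  # Step 2: permissions+conditions => principals sharing that exact binding.
  perms_to_principals = collections.defaultdict(list)
  for (principal, conds), perms in principal_to_perms.items():
    perms_to_principals[(tuple(sorted(perms)), conds)].append(principal)

  # Both principals collapse into a single binding with permissions (0, 1, 2).
  print(dict(perms_to_principals))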
|
|
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import argument
from chainer.utils import type_check
def _sigmoid_grad(x, y, gy):
return chainer.functions.activation.sigmoid.SigmoidGrad((x,)).apply(
(y, gy))[0]
class NegativeSamplingFunction(function_node.FunctionNode):
ignore_label = -1
samples = None
def __init__(self, sampler, sample_size, reduce='sum'):
if reduce not in ('sum', 'no'):
raise ValueError(
"only 'sum' and 'no' are valid for 'reduce', but '%s' is "
'given' % reduce)
self.sampler = sampler
self.sample_size = sample_size
self.reduce = reduce
self.wx = None
def _make_samples(self, t):
size = int(t.shape[0])
# first one is the positive, and others are sampled negatives
samples = self.sampler((size, self.sample_size + 1))
samples[:, 0] = t
return samples
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 't', 'W'))
x_type, t_type, w_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 2,
t_type.dtype == numpy.int32,
t_type.ndim == 1,
x_type.shape[0] == t_type.shape[0],
w_type.dtype == x_type.dtype,
w_type.ndim == 2,
)
def forward_cpu(self, inputs):
self.retain_inputs((0, 1, 2))
x, t, W = inputs
self.ignore_mask = (t != self.ignore_label)
samples = self._make_samples(t)
w = W[samples]
wx = numpy.einsum(
'ij,ikj->ik', x[self.ignore_mask], w[self.ignore_mask])
wx[:, 0] *= -1
loss = numpy.zeros(len(x), x.dtype)
loss[self.ignore_mask] = numpy.sum(numpy.logaddexp(wx, 0), axis=1)
if self.reduce == 'sum':
loss = numpy.array(loss.sum(), x.dtype)
self.samples = samples
return loss,
def forward_gpu(self, inputs):
self.retain_inputs((0, 1, 2))
x, t, W = inputs
self.ignore_mask = (t != self.ignore_label)
samples = self._make_samples(t)
n_in = x.shape[1]
self.wx = cuda.elementwise(
'raw T W, raw T x, bool mask, S k, int32 c, int32 m', 'T wx',
'''
T f = 0;
if (mask == 1) {
for (int j = 0; j < c; ++j) {
int x_ind[] = {(i / m), j};
int w_ind[] = {k, j};
f += x[x_ind] * W[w_ind];
}
}
wx = f;
''',
'negative_sampling_wx'
)(W, x, self.ignore_mask[:, None], samples, n_in,
self.sample_size + 1)
loss = cuda.elementwise(
'T wx, int32 c, int32 m, bool mask', 'T y',
'''
if (mask) {
T f = wx;
if (i % m == 0) {
f = -f;
}
if (f < 0) {
y = __logf(1 + __expf(f));
} else {
y = f + __logf(1 + __expf(-f));
}
} else {
y = 0;
}
''',
'negative_sampling_forward'
)(self.wx, n_in, self.sample_size + 1, self.ignore_mask[:, None])
if self.reduce == 'sum':
loss = loss.sum()
else: # 'no':
loss = loss.sum(axis=1)
self.samples = samples
return loss,
def backward(self, indexes, grad_outputs):
x, t, W = self.get_retained_inputs()
gy, = grad_outputs
return NegativeSamplingFunctionGrad(
self.reduce, self.ignore_mask, self.sample_size, self.samples,
self.wx).apply((x, W, gy))
class NegativeSamplingFunctionGrad(function_node.FunctionNode):
def __init__(self, reduce, ignore_mask, sample_size, samples, wx):
self.reduce = reduce
self.ignore_mask = ignore_mask
self.sample_size = sample_size
self.samples = samples
self.wx = wx
def forward_cpu(self, inputs):
self.retain_inputs((0, 1, 2))
x, W, gloss = inputs
gx = numpy.zeros_like(x)
gW = numpy.zeros_like(W)
for i in numpy.arange(len(self.ignore_mask))[self.ignore_mask]:
ix = x[i]
k = self.samples[i]
if self.reduce == 'sum':
igy = gloss
else:
igy = gloss[i]
w = W[k]
f = w.dot(ix)
# g == -y * gloss / (1 + exp(yf))
f[0] *= -1
g = igy / (1 + numpy.exp(-f))
g[0] *= -1
gx[i] = g.dot(w)
for ik, ig in six.moves.zip(k, g):
gW[ik] += ig * ix
return gx, None, gW
def forward_gpu(self, inputs):
self.retain_inputs((0, 1, 2))
x, W, gy = inputs
if self.reduce == 'no':
gy = gy[:, None]
wx = self.wx.astype(x.dtype, copy=False)
g = cuda.elementwise(
'T wx, T gy, int32 m', 'T g',
'''
T y;
if (i % m == 0) {
y = 1;
} else {
y = -1;
}
g = -y * gy / (1.0f + __expf(wx * y));
''',
'negative_sampling_calculate_g'
)(wx, gy, self.sample_size + 1)
cupy = cuda.cupy
gx = cupy.zeros_like(x)
n_in = x.shape[1]
cuda.elementwise(
'raw T g, raw T W, bool mask, raw S k, int32 c, int32 m', 'T gx',
'''
int d = i / c;
T w = 0;
if (mask == 1){
for (int j = 0; j < m; ++j) {
w += g[d * m + j] * W[k[d * m + j] * c + i % c];
}
}
gx = w;
''',
'negative_sampling_calculate_gx'
)(g, W, self.ignore_mask[:, None], self.samples, n_in,
self.sample_size + 1, gx)
gW = cupy.zeros_like(W)
cuda.elementwise(
'T g, raw T x, S k, bool mask, int32 c, int32 m',
'raw T gW',
'''
T gi = g;
if (mask == 1) {
for (int j = 0; j < c; ++j) {
atomicAdd(&gW[k * c + j], gi * x[(i / m) * c + j]);
}
}
''',
'negative_sampling_calculate_gw'
)(g, x, self.samples, self.ignore_mask[:, None], n_in,
self.sample_size + 1, gW)
return gx, None, gW
def backward(self, indexes, grad_outputs):
x, W, gy = self.get_retained_inputs()
xp = backend.get_array_module(x.data)
if 0 in indexes:
gx = chainer.Variable(xp.zeros_like(x.data))
if 1 in indexes:
gW = chainer.Variable(xp.zeros_like(W.data))
if 2 in indexes:
ggy = chainer.Variable(xp.zeros_like(gy.data))
ggx, _, ggW = grad_outputs
pos_neg_mask = xp.ones(self.sample_size + 1)
pos_neg_mask[0] *= -1
for i in xp.arange(len(self.ignore_mask))[self.ignore_mask]:
# Partial forward pass to obtain intermediate `Variable`s
ix = x[i]
k = self.samples[i]
if self.reduce == 'sum':
igy = gy
else:
igy = gy[i]
w = W[k]
f = chainer.functions.flatten(
chainer.functions.matmul(w, ix[:, None])) * pos_neg_mask
sigf = chainer.functions.sigmoid(f)
g = chainer.functions.broadcast_to(igy, f.shape) * sigf \
* pos_neg_mask
dgW_dg = chainer.functions.flatten(
chainer.functions.matmul(ggW[k], ix[:, None])) * pos_neg_mask
dgW_df = chainer.functions.broadcast_to(igy, f.shape) \
* _sigmoid_grad(f, sigf, dgW_dg) * pos_neg_mask
dgx_dg = chainer.functions.flatten(
chainer.functions.matmul(ggx[i][None, :], w, transb=True))
dgx_df = chainer.functions.broadcast_to(igy, f.shape) \
* _sigmoid_grad(f, sigf, dgx_dg)
if 0 in indexes:
                # derivative of gx
dgx = chainer.functions.matmul(w, dgx_df[:, None], transa=True)
# derivative of gW
dgx += chainer.functions.matmul(g[None, :], ggW[k]).T
dgx += chainer.functions.matmul(
w, dgW_df[:, None], transa=True)
gx = chainer.functions.scatter_add(
gx, i, chainer.functions.flatten(dgx))
if 1 in indexes:
                # derivative of gx
shape = ggx[i].shape
for ik, ig, idgx_df in six.moves.zip(k, g, dgx_df):
ig = chainer.functions.broadcast_to(ig, shape)
idgx_df = chainer.functions.broadcast_to(idgx_df, shape)
gW = chainer.functions.scatter_add(
gW, ik, ig * ggx[i] + idgx_df * ix)
# derivative of gW
gW = chainer.functions.scatter_add(
gW, k,
chainer.functions.matmul(dgW_df[:, None], ix[None, :]))
if 2 in indexes:
dgx_dg *= pos_neg_mask
dggy = chainer.functions.sum((dgx_dg + dgW_dg) * sigf)
if self.reduce == 'sum':
ggy += dggy
else:
ggy = chainer.functions.scatter_add(ggy, i, dggy)
ret = []
if 0 in indexes:
ret.append(gx)
if 1 in indexes:
ret.append(gW)
if 2 in indexes:
ret.append(ggy)
return ret
def negative_sampling(x, t, W, sampler, sample_size, reduce='sum', **kwargs):
"""negative_sampling(x, t, W, sampler, sample_size, reduce='sum', *, return_samples=False)
Negative sampling loss function.
In natural language processing, especially language modeling, the number of
words in a vocabulary can be very large.
Therefore, you need to spend a lot of time calculating the gradient of the
embedding matrix.
By using the negative sampling trick you only need to calculate the
gradient for a few sampled negative examples.
The loss is defined as follows.
.. math::
f(x, p) = - \\log \\sigma(x^\\top w_p) - \\
k E_{i \\sim P(i)}[\\log \\sigma(- x^\\top w_i)]
where :math:`\\sigma(\\cdot)` is a sigmoid function, :math:`w_i` is the
weight vector for the word :math:`i`, and :math:`p` is a positive example.
It is approximated with :math:`k` examples :math:`N` sampled from
probability :math:`P(i)`.
.. math::
f(x, p) \\approx - \\log \\sigma(x^\\top w_p) - \\
\\sum_{n \\in N} \\log \\sigma(-x^\\top w_n)
Each sample of :math:`N` is drawn from the word distribution
:math:`P(w) = \\frac{1}{Z} c(w)^\\alpha`, where :math:`c(w)` is the
unigram count of the word :math:`w`, :math:`\\alpha` is a hyper-parameter,
and :math:`Z` is the normalization constant.
Args:
x (~chainer.Variable): Batch of input vectors.
t (~chainer.Variable): Vector of ground truth labels.
W (~chainer.Variable): Weight matrix.
sampler (~types.FunctionType): Sampling function. It takes a shape and
returns an integer array of the shape. Each element of this array
is a sample from the word distribution.
A :class:`~chainer.utils.WalkerAlias` object built with the power
distribution of word frequency is recommended.
sample_size (int): Number of samples.
reduce (str): Reduction option. Its value must be either
``'sum'`` or ``'no'``. Otherwise, :class:`ValueError` is raised.
return_samples (bool):
If ``True``, the sample array is also returned.
The sample array is a
:math:`(\\text{batch_size}, \\text{sample_size} + 1)`-array of
integers whose first column is fixed to the ground truth labels
and the other columns are drawn from the ``sampler``.
Returns:
~chainer.Variable or tuple:
If ``return_samples`` is ``False`` (default), the output
variable holding the loss value(s) calculated by the
above equation is returned. Otherwise, a tuple of the output
variable and the sample array is returned.
            If ``reduce`` is ``'no'``, the output variable holds an array
            whose shape is the same as that of the input variables.
If it is ``'sum'``, the output variable holds a scalar value.
See: `Distributed Representations of Words and Phrases and their\
Compositionality <https://arxiv.org/abs/1310.4546>`_
.. seealso:: :class:`~chainer.links.NegativeSampling`.
""" # NOQA
return_samples = False
if kwargs:
return_samples, = argument.parse_kwargs(
kwargs, ('return_samples', return_samples))
func = NegativeSamplingFunction(sampler, sample_size, reduce)
out = func.apply((x, t, W))[0]
if return_samples:
return out, func.samples
return out
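# Minimal usage sketch (illustrative only; all shapes and values below are hypothetical).
# A chainer.utils.WalkerAlias built from powered unigram counts can serve as the sampler,
# as suggested in the docstring above.
if __name__ == '__main__':  # pragma: no cover
    counts = numpy.array([5., 4., 3., 2., 1.])
    sampler = chainer.utils.WalkerAlias(counts ** 0.75).sample
    x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)  # batch of input vectors
    t = numpy.array([0, 2], dtype=numpy.int32)                     # ground-truth word ids
    W = numpy.random.uniform(-1, 1, (5, 3)).astype(numpy.float32)  # embedding matrix
    loss = negative_sampling(x, t, W, sampler, sample_size=2)      # scalar Variable for reduce='sum'
    print(loss.array)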
|
|
#!/usr/bin/env python
"""
VMat Class
@author: Alicia Schep, Greenleaf Lab, Stanford University
"""
#Import necessary python modules
from scipy import signal, ndimage
import numpy as np
from copy import copy
import matplotlib.pyplot as plt
class VMat_Error(Exception):
"""Class for errors in VMat function"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class VMat:
"""Class for storing and processing V-plot matrix"""
def __init__(self, mat, lower, upper):
"""
Assumes Vplot is centered!
Inputs:
mat = matrix (as numpy array)
lower = lower bound of insert sizes represented by mat
upper = upper bound of insert sizes represented by mat
"""
if mat.shape[0]!=upper-lower:
raise VMat_Error("mat shape is not consistent with insert limits")
self.mat = mat
self.upper = upper
self.lower = lower
        self.w = mat.shape[1] // 2
def trim(self,lower,upper,w):
"""reduce the size of the vplot
lower is new lower bound
upper is new upper bound
w is new flanking region around center
"""
up = upper-self.lower
dn = lower-self.lower
left = self.w - w
right = self.w + w + 1
if up > self.mat.shape[0] or dn < 0 or left < 0 or right > self.mat.shape[1]:
raise VMat_Error("Mat is smaller than desired trim")
self.mat = self.mat[dn:up,left:right]
self.lower = lower
self.upper = upper
self.w = w
def symmetrize(self):
"""Force the V-plot to be symmetric"""
for j in range(self.lower,self.upper):
i=j-self.lower
if j%2==1:
lefthalf = (self.mat[i,:(self.w+1)]+self.mat[i,self.w:][::-1])*0.5
self.mat[i,:] = np.hstack((lefthalf,lefthalf[:-1][::-1]))
else:
righthalf = (self.mat[i,(self.w):-1]+self.mat[i,:self.w][::-1])*0.5
self.mat[i,:] = np.hstack((righthalf[::-1],righthalf,self.mat[i,-1]))
def flip(self, mode = 'same'):
"""Flip V-plot"""
if mode == 'same':
new = np.zeros(self.mat.shape)
for j in range(self.lower,self.upper):
i = j - self.lower
if j%2==1:
new[i,:] = self.mat[i,][::-1]
else:
new[i,:-1] = self.mat[i,:-1][::-1]
                    # for the -1 position we don't actually have values
new[i,-1] = np.mean([self.mat[i,-1],self.mat[i,1]])
self.mat = new
elif mode == 'valid':
new = np.zeros((self.mat.shape[0],self.mat.shape[1]-2))
for j in range(self.lower,self.upper):
i = j - self.lower
if j%2==1:
new[i,:] = self.mat[i,1:-1][::-1]
else:
new[i,:] = self.mat[i,:-1][::-1][1:]
self.mat = new
self.w += -1
else:
raise Exception("Mode must be one of 'same' or 'valid'")
def smooth(self, sd = 1):
"""smooth v-plot using gaussian kernel"""
self.mat = ndimage.filters.gaussian_filter(self.mat,sd,
mode='constant')
def smooth1d(self, sd = 1, axis = 1):
"""smooth v-plot along one axis only"""
self.mat = ndimage.filters.gaussian_filter1d(self.mat,sd,axis,
mode='nearest')
def norm(self):
"""normalize v matrix so that signal minus even background will be 1 divided by base pairs in window"""
tmp1 = self.mat / np.sum(self.mat)
tmp2 = np.ones(self.mat.shape) * (1.0 / self.mat.size)
self.mat = self.mat / (np.sum(self.mat * tmp1)- np.sum(self.mat * tmp2))
self.mat = (self.mat / self.mat.shape[1]) * 10.0
def norm_y(self,dist):
"""normalize vplot so insertsize matches supplied distribution"""
for i in range(self.mat.shape[0]):
self.mat[i] = self.mat[i] * (dist.get(size = i + self.lower)/ np.sum(self.mat[i]))
def converto1d(self):
"""convert the 2d matrix to a 1d representation of insertions"""
self.one_d = np.zeros(self.upper + self.upper%2 +2*self.w+1)
        center = self.upper // 2 + self.w
for j in range(self.mat.shape[0]):
for i in range(self.mat.shape[1]):
ilen=j+self.lower
val = copy(self.mat[j,i])
if ilen%2==0:
                    self.one_d[center-(self.w-i)-(ilen//2)] += val
                    self.one_d[center-(self.w-i)+(ilen//2)] += val
                else:
                    self.one_d[center-(self.w-i)-(ilen//2)] += val * 0.5
                    self.one_d[center-(self.w-i)+(ilen//2)] += val * 0.5
                    self.one_d[center-(self.w-i)-(ilen//2+1)] += val * 0.5
                    self.one_d[center-(self.w-i)+(ilen//2+1)] += val * 0.5
self.one_d = self.one_d / sum(self.one_d)
def plot(self, mat=None, title=None, filename=None):
"""Plot current main matrix or specified matrix (of same dimensions)"""
if mat is None:
mat=self.mat
elif mat.shape!=(self.upper-self.lower,self.w*2+1):
raise VMat_Error("dimensions of input mat should match \
dim of vmat")
fig = plt.figure()
plt.imshow(mat,origin="lower",interpolation='nearest',
extent=[-self.w,self.w,self.lower,self.upper-1])
plt.xlabel("Position relative to dyad")
plt.ylabel("Insert size")
if title:
plt.title(title)
plt.colorbar(shrink=0.8)
if filename:
fig.savefig(filename)
plt.close(fig)
else:
fig.show()
def plot_1d(self,filename=None):
"""plot the 1d insertion representation of the matrix"""
fig = plt.figure()
        xlim = len(self.one_d) // 2
plt.plot(range(-xlim,xlim+1),self.one_d)
plt.vlines(-73,0,max(self.one_d)*1.1,linestyles='dashed')
plt.vlines(73,0,max(self.one_d)*1.1,linestyles='dashed')
plt.xlabel("Position relative to dyad")
plt.ylabel("Insertion Frequency")
if filename:
fig.savefig(filename)
plt.close(fig)
#Also save text output!
filename2 = ".".join(filename.split(".")[:-1]+['txt'])
np.savetxt(filename2,self.one_d,delimiter="\t")
else:
fig.show()
def plot_insertsize(self,filename=None):
"""plot the insert size disribution in the main matrix"""
fig = plt.figure()
ins = np.sum(self.mat,axis=1)
ins = ins/sum(ins)
plt.plot(range(self.lower,self.upper),ins)
plt.xlabel("Insert Size")
plt.ylabel("Frequency")
if filename:
fig.savefig(filename)
plt.close(fig)
#Also save text output!
filename2 = ".".join(filename.split(".")[:-1]+['txt'])
np.savetxt(filename2,ins,delimiter="\t")
else:
fig.show()
def save(self,filename):
"""write text output description of VMat object attributes"""
out=open(filename,'w')
out.write('#VMat Descriptor File\n')
out.write('#Contains VMat and pertinent information\n')
out.write('#lower\n')
out.write(str(self.lower)+'\n')
out.write('#upper\n')
out.write(str(self.upper)+'\n')
out.write('#mat\n')
for row in self.mat:
out.write("\t".join(map(str,row))+'\n')
out.close()
@staticmethod
def open(filename):
"""Create VMat object from text descriptor file"""
infile = open(filename,'r')
state = ''
mat = []
for line in infile:
if '#lower' in line:
state = 'lower'
elif '#upper' in line:
state = 'upper'
elif '#mat' in line:
state = 'mat'
elif '#' in line:
state = 'other'
elif state == 'lower':
lower = int(line.strip('\n'))
elif state == 'upper':
upper = int(line.strip('\n'))
elif state == 'mat':
                mat.append(list(map(float, line.strip('\n').split('\t'))))
try:
new = VMat(np.array(mat), lower, upper)
except NameError:
raise VMat_Error("VMat decriptor file appeas to be missing some\
needed components")
infile.close()
return new
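# Illustrative usage sketch (synthetic data only; not part of the class).
# Build a V-plot for insert sizes 100-249 with a +/-50 bp window around the dyad,
# then normalize it and collapse it into the 1d insertion representation.
if __name__ == "__main__":
    demo_mat = np.random.rand(150, 101)          # (upper - lower) rows x (2*w + 1) columns
    demo = VMat(demo_mat, lower=100, upper=250)
    demo.symmetrize()                            # force left/right symmetry about the dyad
    demo.norm()                                  # normalize against a uniform background
    demo.converto1d()                            # 1d insertion representation
    print(demo.one_d.shape)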
|
|
# -*- coding: utf-8 -*-
import os
import sys
import heapq
from random import random, randint
import tcod
from .keys import wait_for_user_input, handle_game_user_input
from ..logger import get_logger
from .windows import create_windows
from .rendering import render_all
from ..algorithms.pathing import create_dijkstra_map, get_neighbors
logger = get_logger(__name__)
def inventory_menu(game_state):
# Setup Menu
while not tcod.console_is_window_closed():
# show the background image, at twice the regular console resolution
# tcod.image_blit_2x(img, 0, 0, 0)
header = 'Inventory'
player_inventory_list = game_state.get('player-inventory', [])
player_inventory = []
for item in player_inventory_list:
item_text = item['display']['text']
player_inventory.append(item_text)
options = {
chr(index + ord('a')): item
for index, item in enumerate(player_inventory)
}
choice, inventory_window = menu(header, options, 35, game_state, length=25)
item = None
if choice:
item = options.get(choice, None)
if item:
pass
if choice in ['escape']:
break
return choice, inventory_window
def main_menu(game_state):
game_state = create_windows(game_state)
# Get game state constants
screen_width = game_state['screen-width']
screen_height = game_state['screen-height']
half_width = screen_width // 2
half_height = screen_height // 2
package_path = game_state['package-path']
root_window = game_state['windows']['root']
menu_title = 'LOSE: Land of Software Engineering'
sub_title = 'You can be a LOSEr too!'
footer = 'by Bix'
alpha = tcod.BKGND_SCREEN
justification = tcod.CENTER
# Setup Font
font_filename = 'game-font.png'
font_path = os.path.join(package_path, 'data', 'assets', font_filename)
font_flags = tcod.FONT_LAYOUT_ASCII_INROW
tcod.console_set_custom_font(font_path, flags=font_flags, nb_char_horiz=0, nb_char_vertic=0)
# Setup Menu
img_filename = 'menu-background.png'
img_filepath = os.path.join(package_path, 'data', 'assets', img_filename)
img = tcod.image_load(img_filepath)
while not tcod.console_is_window_closed():
# show the background image, at twice the regular console resolution
tcod.image_blit_2x(img, 0, 0, 0)
# show the game's title, and some credits!
tcod.console_set_default_foreground(root_window, tcod.light_yellow)
tcod.console_set_default_background(root_window, tcod.black)
tcod.console_print_ex(root_window, half_width, half_height - 19, alpha, justification, menu_title)
tcod.console_print_ex(root_window, half_width, half_height - 17, alpha, justification, sub_title)
tcod.console_print_ex(root_window, half_width, screen_height - 2, alpha, justification, footer)
# show options and wait for the player's choice
options = {
'p': 'Play a new game',
'o': 'Options',
'x': 'Exit',
}
choice, main_menu_window = menu('Main Menu\n', options, 24, game_state)
if choice == 'p': # new game
play_game(game_state)
continue
elif choice == 'o': # options
game_options = {
'K': 'Keyboard Bindings',
'F': 'Font',
'x': 'Exit',
}
while True:
game_option_choice, menu_window = menu('Game Options', game_options, 24, game_state, window=main_menu_window)
if game_option_choice in ['x', None]: # quit
break
continue
elif choice in ['x', 'escape', None]: # quit
break
def menu(header, options, width, game_state, footer=None, length=None, window=None):
if len(options) > 26:
raise ValueError('Cannot have a menu with more than 26 options.')
empty = (length - len(options)) if length else 0
if empty < 0:
empty = 0
con = game_state['windows']['console']
screen_height = game_state['screen-height']
screen_width = game_state['screen-width']
# calculate total height for the header (after auto-wrap) and one line per option
header_height = tcod.console_get_height_rect(con, 0, 0, width, screen_height, header)
if header == '':
header_height = 0
height = len(options) + header_height + (empty if length else 0)
# create an off-screen console that represents the menu's window
window = window or tcod.console_new(width, height)
# print the header, with auto-wrap
tcod.console_set_default_foreground(window, tcod.green)
tcod.console_print_rect_ex(window, 0, 0, width, height, tcod.BKGND_NONE, tcod.LEFT, header)
# print all the options
y = header_height
opt_index = 0
if length:
for each_line in range(empty):
tcod.console_print_ex(window, 0, y + opt_index + each_line + 1, tcod.BKGND_NONE, tcod.LEFT, '')
for opt_index, opt in enumerate(options.items()):
option_key, option_text = opt
text = f'{option_key}: {option_text}'
tcod.console_print_ex(window, 0, y + opt_index, tcod.BKGND_NONE, tcod.LEFT, text)
# blit the contents of "window" to the root console
x = int(screen_width / 2 - width / 2)
y = int(screen_height / 2 - height / 2)
tcod.console_blit(window, 0, 0, width, height, 0, x, y, 1.0, 0.7)
# present the root console to the player and wait for a key-press
tcod.console_flush()
key = wait_for_user_input()
wait_for_user_input() # for some reason there's a double input
if key:
logger.trace(key)
return key, window
def msg_box(text, game_state, width=50):
    menu(header=text, options={}, width=width, game_state=game_state)  # use menu() as a sort of "message box"
def play_game(game_state):
while not tcod.console_is_window_closed():
# render the screen
setup_round(game_state)
render_all(game_state)
tcod.console_flush()
user_key = handle_game_user_input(game_state)
update_mob_positions(game_state)
mob_combat(game_state)
update_dijkstra_maps(game_state)
# handle inventory
if user_key in ['i', 'I']:
item_selected, inventory_window = inventory_menu(game_state)
tcod.console_set_default_foreground(0, tcod.white)
tcod.console_set_default_background(0, tcod.black)
# handle debug maps
elif game_state['debug'] and user_key in ['shift+meta+d', 'shift+meta+D']:
pass
# render_debug_window(game_state)
# # erase all objects at their old locations, before they move
# for object in objects:
# object.clear()
# # handle keys and exit game if needed
# player_action = handle_keys()
# if player_action == 'exit':
# save_game()
# break
# # let monsters take their turn
# if game_state == 'playing' and player_action != 'didnt-take-turn':
# for object in objects:
# if object.ai:
# object.ai.take_turn()
def update_dijkstra_maps(game_state, force=False):
character_position = game_state.get('character-position')
level_map = game_state.get('current-level')
updates = game_state['round-updates']
movement = updates.get('character-movement')
if movement or force:
new_map = create_dijkstra_map(level_map, character_position)
game_state.setdefault('dijkstra-maps', {})['character'] = new_map
def setup_round(game_state):
action = game_state.get('character-action') or game_state.get('round-updates', {}).get('character-movement')
game_state.pop('character-action', None)
game_state.pop('character-movement', None)
game_state['round-updates'] = {}
if game_state['current-round'] == 0:
update_dijkstra_maps(game_state, force=True)
if action:
game_state['current-round'] += 1
if game_state['current-round'] % 16 == 0:
player_health = game_state.get('player-health') or 10
if player_health < 10:
player_health += 1
            logger.trace(f'Player health is: {player_health}')
game_state['player-health'] = player_health
def update_mob_positions(game_state, cost_threshold=30):
updates = game_state['round-updates']
action = game_state.get('character-action') or updates.get('character-movement')
character_position = game_state.get('character-position')
if not action:
return
character_map = game_state['dijkstra-maps']['character']
level_map = game_state['current-level']
tiles = game_state['tiles']
cost_data = []
heapq.heapify(cost_data)
for position, cost in character_map.items():
data = cost, position
heapq.heappush(cost_data, (data))
cost, position = heapq.heappop(cost_data)
while cost <= cost_threshold:
tile = level_map[position]
mobs = tile.get('mobs', [])
for mob in mobs:
neighbor_costs = []
for neighbor in get_neighbors(position, include_diagonals=True):
neighbor_tile = level_map[neighbor]
neighbor_cost = character_map[neighbor]
base_tile = tiles[neighbor_tile['name']]
blocks_movement = False
has_entity = False
if neighbor == character_position:
has_entity = True
elif neighbor_tile.get('mobs'):
has_entity = True
if neighbor_tile.get('blocking', {}).get('movement', {}).get('rate') == 100:
blocks_movement = True
elif base_tile.get('blocking', {}).get('movement', {}).get('rate') == 100:
blocks_movement = True
if not blocks_movement and not has_entity:
neighbor_costs.append((neighbor_cost, neighbor))
neighbor_cost, updated_position = min(neighbor_costs)
mob_index = tile['mobs'].index(mob)
tile['mobs'].pop(mob_index)
if not tile['mobs']:
tile.pop('mobs')
level_map[updated_position].setdefault('mobs', []).append(mob)
cost, position = heapq.heappop(cost_data)
def mob_combat(game_state):
updates = game_state['round-updates']
action = game_state.get('character-action') or updates.get('character-movement')
character_position = game_state.get('character-position')
player_health = game_state.get('player-health') or 10
if not action:
return
level_map = game_state['current-level']
for neighbor in get_neighbors(character_position, include_diagonals=True):
neighbor_tile = level_map[neighbor]
mobs = neighbor_tile.get('mobs', [])
for mob in mobs:
mob_name = mob['display']['text']
mob_hit_chance = mob.get('hit-chance')
mob_attack = mob.get('attack')
hit = (random() >= (1 - mob_hit_chance / 100))
if hit:
damage = randint(1, mob_attack)
player_health = player_health - damage
logger.trace(f'{mob_name} hit player for {damage}. Player has {player_health}.')
if player_health <= 0:
logger.trace('Player has died.')
sys.exit()
else:
logger.trace(f'{mob_name} missed player. Player has {player_health}.')
game_state['player-health'] = player_health
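# Illustrative sketch (partial and hypothetical values; for orientation only): the
# game_state keys read and written by the round/combat helpers above.
#
#   game_state = {
#       'screen-width': 80, 'screen-height': 50,        # used by the menu/rendering code
#       'current-round': 0,                             # incremented by setup_round()
#       'round-updates': {},                            # per-round flags, e.g. 'character-movement'
#       'character-position': (10, 12),                 # read by update_mob_positions()/mob_combat()
#       'player-health': 10,                            # decremented by mob_combat()
#       'current-level': {...},                         # position -> tile dict (tiles may hold 'mobs')
#       'tiles': {...},                                 # base tile definitions keyed by tile name
#       'dijkstra-maps': {'character': {...}},          # rebuilt by update_dijkstra_maps()
#   }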
|
|
# coding: utf-8
import re
import os
import shutil
import subprocess
from string import Template
from monty.io import zopen
from monty.json import MSONable
from pymatgen.core.structure import Molecule
"""
This module implements input and output processing for Fiesta (http://perso.neel.cnrs.fr/xavier.blase/fiesta/index.html).
and
-Nwchem2Fiesta class: to create the input files needed for a Fiesta run
-Fiesta_run: run gw_fiesta and bse_fiesta
-Localised Basis set reader
"""
__author__ = 'ndardenne'
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__email__ = "[email protected]"
__date__ = "24/5/15"
class Nwchem2Fiesta(MSONable):
"""
To run NWCHEM2FIESTA inside python:
    If nwchem.nw is the input, nwchem.nwout the output, and structure.movecs the
    "movecs" file, the syntax to run NWCHEM2FIESTA is:
    NWCHEM2FIESTA nwchem.nw nwchem.nwout structure.movecs > log_n2f
"""
def __init__(self, folder, filename="nwchem", log_file="log_n2f"):
"""
        folder: folder where the nwchem files are stored
        filename: name of nwchem files read by NWCHEM2FIESTA (filename.nw, filename.nwout and filename.movecs)
        log_file: logfile of NWCHEM2FIESTA
        The run method launches NWCHEM2FIESTA.
"""
self.folder = folder
self.filename = filename
self.log_file = log_file
self._NWCHEM2FIESTA_cmd = "NWCHEM2FIESTA"
self._nwcheminput_fn = filename + ".nw"
self._nwchemoutput_fn = filename + ".nwout"
self._nwchemmovecs_fn = filename + ".movecs"
def run(self):
"""
Performs actual NWCHEM2FIESTA run
"""
init_folder = os.getcwd()
os.chdir(self.folder)
with zopen(self.log_file, 'w') as fout:
subprocess.call([self._NWCHEM2FIESTA_cmd, self._nwcheminput_fn,
self._nwchemoutput_fn, self._nwchemmovecs_fn],
stdout=fout)
os.chdir(init_folder)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"filename": self.filename,
"folder": self.folder}
@classmethod
def from_dict(cls, d):
return Nwchem2Fiesta(folder=d["folder"], filename=d["filename"])
class Fiesta_run(MSONable):
"""
To run FIESTA inside python:
    if grid is [x, x, y], fiesta (gw) runs;
    if grid is [x, x], bse runs;
    otherwise a ValueError is raised.
"""
def __init__(self, folder=os.getcwd(), grid=[2, 2, 2], log_file="log"):
"""
        folder: folder in which Fiesta is run
        log_file: logfile of Fiesta
"""
self.folder = folder
self.log_file = log_file
self.grid = grid
def run(self):
if len(self.grid) == 3:
self.mpi_procs = self.grid[0] * self.grid[1] * self.grid[2]
self.gw_run()
elif len(self.grid) == 2:
self.mpi_procs = self.grid[0] * self.grid[1]
self.bse_run()
else:
raise ValueError(
"Wrong grid size: must be [nrow, ncolumn, nslice] for gw of [nrow, nslice] for bse")
def gw_run(self):
"""
Performs FIESTA (gw) run
"""
if self.folder != os.getcwd():
init_folder = os.getcwd()
os.chdir(self.folder)
with zopen(self.log_file, 'w') as fout:
subprocess.call(["mpirun", "-n", str(self.mpi_procs), "fiesta",
str(self.grid[0]), str(self.grid[1]),
str(self.grid[2])], stdout=fout)
if self.folder != os.getcwd():
os.chdir(init_folder)
def bse_run(self):
"""
Performs BSE run
"""
if self.folder != os.getcwd():
init_folder = os.getcwd()
os.chdir(self.folder)
with zopen(self.log_file, 'w') as fout:
subprocess.call(
["mpirun", "-n", str(self.mpi_procs), "bse", str(self.grid[0]),
str(self.grid[1])], stdout=fout)
if self.folder != os.getcwd():
os.chdir(init_folder)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"log_file": self.log_file,
"grid": self.grid,
"folder": self.folder}
@classmethod
def from_dict(cls, d):
return Fiesta_run(folder=d["folder"], grid=d["grid"],
log_file=d['log_file'])
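# Illustrative usage sketch (hypothetical folder; not part of this module):
#
#   Fiesta_run(folder="/path/to/run", grid=[2, 2, 2]).run()   # gw run on 2*2*2 = 8 MPI processes
#   Fiesta_run(folder="/path/to/run", grid=[2, 2]).run()      # bse run on 2*2 = 4 MPI processes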
class Basis_set_reader:
"""
A basis set reader.
Args:
filename: Filename to read.
    Basis sets are stored in data as a dict:
    :key l_zeta_ng: for each nl orbital, a list of (alpha, coef) tuples, one for each of the ng gaussians in the l_zeta orbital
"""
def __init__(self, filename):
self.filename = filename
with zopen(filename) as f:
basis_set = f.read()
self.data = self._parse_file(basis_set)
# compute the number of nlm orbitals per atom
self.data.update(n_nlmo=self.set_n_nlmo())
def _parse_file(self, input):
lmax_nnlo_patt = re.compile(r"\s* (\d+) \s+ (\d+) \s+ \# .* ",
re.VERBOSE)
nl_orbital_patt = re.compile(r"\s* (\d+) \s+ (\d+) \s+ (\d+) \s+ \# .* ",
re.VERBOSE)
coef_alpha_patt = re.compile(r"\s* ([-\d.\D]+) \s+ ([-\d.\D]+) \s* ",
re.VERBOSE)
preamble = []
basis_set = {}
parse_preamble = False
parse_lmax_nnlo = False
parse_nl_orbital = False
for line in input.split("\n"):
if parse_nl_orbital:
m = nl_orbital_patt.search(line)
n = coef_alpha_patt.search(line)
if m:
l = m.group(1)
zeta = m.group(2)
ng = m.group(3)
basis_set[l + "_" + zeta + "_" + ng] = []
elif n:
alpha = n.group(1)
coef = n.group(2)
basis_set[l + "_" + zeta + "_" + ng].append((alpha, coef))
elif parse_lmax_nnlo:
m = lmax_nnlo_patt.search(line)
if m:
lmax = m.group(1)
nnlo = m.group(2)
parse_lmax_nnlo = False
parse_nl_orbital = True
elif parse_preamble:
preamble.append(line.strip())
if line.find("</preamble>") != -1:
parse_preamble = False
parse_lmax_nnlo = True
elif line.find("<preamble>") != -1:
parse_preamble = True
basis_set.update(lmax=lmax, n_nlo=nnlo, preamble=preamble)
return basis_set
def set_n_nlmo(self):
"""
:return: the number of nlm orbitals for the basis set
"""
nnlmo = 0
data_tmp = self.data
data_tmp.pop('lmax')
data_tmp.pop('n_nlo')
data_tmp.pop('preamble')
for l_zeta_ng in data_tmp:
l = l_zeta_ng.split("_")[0]
nnlmo = nnlmo + (2 * int(l) + 1)
return str(nnlmo)
def infos_on_basis_set(self):
"""
infos on the basis set as in Fiesta log
"""
o = []
o.append("=========================================")
o.append("Reading basis set:")
o.append("")
o.append(" Basis set for {} atom ".format(str(self.filename)))
o.append(" Maximum angular momentum = {}".format(self.data['lmax']))
o.append(" Number of atomics orbitals = {}".format(self.data['n_nlo']))
o.append(" Number of nlm orbitals = {}".format(self.data['n_nlmo']))
o.append("=========================================")
        return "\n".join(o)
class FiestaInput(MSONable):
"""
Input File for Fiesta called "cell.in" by default (mandatory in Fiesta for now)
"""
def __init__(self, mol,
correlation_grid={'dE_grid': u'0.500', 'n_grid': u'14'},
Exc_DFT_option={'rdVxcpsi': u'1'},
COHSEX_options={'eigMethod': u'C', 'mix_cohsex': u'0.500',
'nc_cohsex': u'0', 'nit_cohsex': u'0',
'nv_cohsex': u'0', 'resMethod': u'V',
'scf_cohsex_wf': u'0'},
GW_options={'nc_corr': u'10', 'nit_gw': u'3',
'nv_corr': u'10'},
BSE_TDDFT_options={'do_bse': u'1', 'do_tddft': u'0',
'nc_bse': u'382', 'nit_bse': u'50',
'npsi_bse': u'1', 'nv_bse': u'21'}):
"""
:param mol: pymatgen mol
:param correlation_grid: dict
:param Exc_DFT_option: dict
:param COHSEX_options: dict
:param GW_options: dict
:param BSE_TDDFT_options: dict
"""
self._mol = mol
self.correlation_grid = correlation_grid
self.Exc_DFT_option = Exc_DFT_option
self.COHSEX_options = COHSEX_options
self.GW_options = GW_options
self.BSE_TDDFT_options = BSE_TDDFT_options
def set_auxiliary_basis_set(self, folder, auxiliary_folder,
auxiliary_basis_set_type="aug_cc_pvtz"):
"""
        Copy the needed auxiliary basis sets "X2.ion" (where X is a species) into the desired folder.
        :param folder: folder into which the auxiliary basis sets are copied
        :param auxiliary_folder: folder where the auxiliary basis sets are stored
        :param auxiliary_basis_set_type: type of basis set (string to be found in the extension of the file name; must be in lower case)
            e.g. C2.ion_aug_cc_pvtz_RI_Weigend is found with "aug_cc_pvtz"
"""
list_files = os.listdir(auxiliary_folder)
for specie in self._mol.symbol_set:
for file in list_files:
if file.upper().find(
specie.upper() + "2") != -1 and file.lower().find(
auxiliary_basis_set_type) != -1:
shutil.copyfile(auxiliary_folder + "/" + file,
folder + "/" + specie + "2.ion")
def set_GW_options(self, nv_band=10, nc_band=10, n_iteration=5, n_grid=6,
dE_grid=0.5):
"""
Set parameters in cell.in for a GW computation
        :param nv_band: number of valence bands to correct with GW
        :param nc_band: number of conduction bands to correct with GW
        :param n_iteration: number of GW iterations
        :param n_grid: number of points for the correlation grid
        :param dE_grid: spacing in eV for the correlation grid
"""
self.GW_options.update(nv_corr=nv_band, nc_corr=nc_band,
nit_gw=n_iteration)
self.correlation_grid.update(dE_grid=dE_grid, n_grid=n_grid)
def make_FULL_BSE_Densities_folder(self, folder):
"""
mkdir "FULL_BSE_Densities" folder (needed for bse run) in the desired folder
"""
if os.path.exists(folder + "/FULL_BSE_Densities"):
return "FULL_BSE_Densities folder already exists"
else:
os.makedirs(folder + "/FULL_BSE_Densities")
return "makedirs FULL_BSE_Densities folder"
def set_BSE_options(self, n_excitations=10, nit_bse=200):
"""
Set parameters in cell.in for a BSE computation
:param n_excitations: number of excitations
:param nit_bse: number of iterations
"""
self.BSE_TDDFT_options.update(npsi_bse=n_excitations, nit_bse=nit_bse)
def dump_BSE_data_in_GW_run(self, BSE_dump=True):
"""
        :param BSE_dump: boolean; if True, set the "do_bse" variable to 1 in cell.in, otherwise 0
"""
if BSE_dump:
self.BSE_TDDFT_options.update(do_bse=1, do_tddft=0)
else:
self.BSE_TDDFT_options.update(do_bse=0, do_tddft=0)
def dump_TDDFT_data_in_GW_run(self, TDDFT_dump=True):
"""
        :param TDDFT_dump: boolean; if True, set the "do_tddft" variable to 1 in cell.in, otherwise 0
"""
        if TDDFT_dump:
self.BSE_TDDFT_options.update(do_bse=0, do_tddft=1)
else:
self.BSE_TDDFT_options.update(do_bse=0, do_tddft=0)
@property
def infos_on_system(self):
"""
Returns infos on initial parameters as in the log file of Fiesta
"""
o = []
o.append("=========================================")
o.append("Reading infos on system:")
o.append("")
o.append(" Number of atoms = {} ; number of species = {}".format(
int(self._mol.composition.num_atoms), len(self._mol.symbol_set)))
o.append(" Number of valence bands = {}".format(
int(self._mol.nelectrons / 2)))
o.append(" Sigma grid specs: n_grid = {} ; dE_grid = {} (eV)".format(
self.correlation_grid['n_grid'], self.correlation_grid['dE_grid']))
if int(self.Exc_DFT_option['rdVxcpsi']) == 1:
o.append(" Exchange and correlation energy read from Vxcpsi.mat")
elif int(self.Exc_DFT_option['rdVxcpsi']) == 0:
o.append(" Exchange and correlation energy re-computed")
if self.COHSEX_options['eigMethod'] == "C":
o.append(
" Correcting {} valence bands and {} conduction bands at COHSEX level".format(
self.COHSEX_options['nv_cohsex'],
self.COHSEX_options['nc_cohsex']))
o.append(" Performing {} diagonal COHSEX iterations".format(
self.COHSEX_options['nit_cohsex']))
elif self.COHSEX_options['eigMethod'] == "HF":
o.append(
" Correcting {} valence bands and {} conduction bands at HF level".format(
self.COHSEX_options['nv_cohsex'],
self.COHSEX_options['nc_cohsex']))
o.append(" Performing {} diagonal HF iterations".format(
self.COHSEX_options['nit_cohsex']))
o.append(" Using resolution of identity : {}".format(
self.COHSEX_options['resMethod']))
o.append(
" Correcting {} valence bands and {} conduction bands at GW level".format(
self.GW_options['nv_corr'], self.GW_options['nc_corr']))
o.append(
" Performing {} GW iterations".format(self.GW_options['nit_gw']))
if int(self.BSE_TDDFT_options['do_bse']) == 1:
o.append(" Dumping data for BSE treatment")
if int(self.BSE_TDDFT_options['do_tddft']) == 1:
o.append(" Dumping data for TD-DFT treatment")
o.append("")
o.append(" Atoms in cell cartesian A:")
symbols = []
for syb in self._mol.symbol_set:
symbols.append(syb)
for site in self._mol:
o.append(" {} {} {} {}".format(site.x, site.y,
site.z, int(
symbols.index(site.specie.symbol)) + 1))
o.append("=========================================")
return str(o)
@property
def molecule(self):
"""
Returns molecule associated with this FiestaInput.
"""
return self._mol
def __str__(self):
symbols = []
for syb in self._mol.symbol_set:
symbols.append(syb)
geometry = []
for site in self._mol:
geometry.append(" {} {} {} {}".format(site.x, site.y,
site.z, int(
symbols.index(site.specie.symbol)) + 1))
t = Template("""# number of atoms and species
$nat $nsp
# number of valence bands
$nvbands
# number of points and spacing in eV for correlation grid
$n_grid $dE_grid
# relire=1 ou recalculer=0 Exc DFT
$rdVxcpsi
# number of COHSEX corrected occp and unoccp bands: C=COHSEX H=HF
$nv_cohsex $nc_cohsex $eigMethod
# number of COHSEX iter, scf on wfns, mixing coeff; V=RI-V I=RI-D
$nit_cohsex $resMethod $scf_cohsex_wf $mix_cohsex
# number of GW corrected occp and unoccp bands
$nv_corr $nc_corr
# number of GW iterations
$nit_gw
# dumping for BSE and TDDFT
$do_bse $do_tddft
# number of occp. and virtual bands fo BSE: nocore and up to 40 eVs
$nv_bse $nc_bse
# number of excitations needed and number of iterations
$npsi_bse $nit_bse
# list of symbols in order
$symbols
# scaling factor
1.000
# atoms x,y,z cartesian .. will be multiplied by scale
$geometry
""")
return t.substitute(nat=int(self._mol.composition.num_atoms),
nsp=len(self._mol.symbol_set),
nvbands=int(self._mol.nelectrons / 2),
n_grid=self.correlation_grid['n_grid'],
dE_grid=self.correlation_grid['dE_grid'],
rdVxcpsi=self.Exc_DFT_option['rdVxcpsi'],
nv_cohsex=self.COHSEX_options['nv_cohsex'],
nc_cohsex=self.COHSEX_options['nc_cohsex'],
eigMethod=self.COHSEX_options['eigMethod'],
nit_cohsex=self.COHSEX_options['nit_cohsex'],
resMethod=self.COHSEX_options['resMethod'],
scf_cohsex_wf=self.COHSEX_options['scf_cohsex_wf'],
mix_cohsex=self.COHSEX_options['mix_cohsex'],
nv_corr=self.GW_options['nv_corr'],
nc_corr=self.GW_options['nc_corr'],
nit_gw=self.GW_options['nit_gw'],
do_bse=self.BSE_TDDFT_options['do_bse'],
do_tddft=self.BSE_TDDFT_options['do_tddft'],
nv_bse=self.BSE_TDDFT_options['nv_bse'],
nc_bse=self.BSE_TDDFT_options['nc_bse'],
npsi_bse=self.BSE_TDDFT_options['npsi_bse'],
nit_bse=self.BSE_TDDFT_options['nit_bse'],
symbols="\n".join(symbols),
geometry="\n".join(geometry))
def write_file(self, filename):
with zopen(filename, "w") as f:
f.write(self.__str__())
def as_dict(self):
return {
"mol": self._mol.as_dict(),
"correlation_grid": self.correlation_grid,
"Exc_DFT_option": self.Exc_DFT_option,
"COHSEX_options": self.COHSEX_options,
"GW_options": self.GW_options,
"BSE_TDDFT_options": self.BSE_TDDFT_options
}
@classmethod
def from_dict(cls, d):
return FiestaInput(Molecule.from_dict(d["mol"]),
correlation_grid=d["correlation_grid"],
Exc_DFT_option=d["Exc_DFT_option"],
                           COHSEX_options=d["COHSEX_options"],
                           GW_options=d["GW_options"],
                           BSE_TDDFT_options=d["BSE_TDDFT_options"])
@classmethod
def from_string(cls, string_input):
"""
        Read a FiestaInput from a string. Currently tested to work with
files generated from this class itself.
Args:
string_input: string_input to parse.
Returns:
FiestaInput object
"""
correlation_grid = {}
Exc_DFT_option = {}
COHSEX_options = {}
GW_options = {}
BSE_TDDFT_options = {}
lines = string_input.strip().split("\n")
# number of atoms and species
lines.pop(0)
l = lines.pop(0).strip()
toks = l.split()
nat = toks[0]
nsp = toks[1]
# number of valence bands
lines.pop(0)
l = lines.pop(0).strip()
toks = l.split()
nvbands = toks[0]
# correlation_grid
# number of points and spacing in eV for correlation grid
lines.pop(0)
l = lines.pop(0).strip()
toks = l.split()
correlation_grid['n_grid'] = toks[0]
correlation_grid['dE_grid'] = toks[1]
# Exc DFT
# relire=1 ou recalculer=0 Exc DFT
lines.pop(0)
l = lines.pop(0).strip()
toks = l.split()
Exc_DFT_option['rdVxcpsi'] = toks[0]
# COHSEX
# number of COHSEX corrected occp and unoccp bands: C=COHSEX H=HF
lines.pop(0)
l = lines.pop(0).strip()
toks = l.split()
COHSEX_options['nv_cohsex'] = toks[0]
COHSEX_options['nc_cohsex'] = toks[1]
COHSEX_options['eigMethod'] = toks[2]
# number of COHSEX iter, scf on wfns, mixing coeff; V=RI-V I=RI-D
lines.pop(0)
l = lines.pop(0).strip()
toks = l.split()
COHSEX_options['nit_cohsex'] = toks[0]
COHSEX_options['resMethod'] = toks[1]
COHSEX_options['scf_cohsex_wf'] = toks[2]
COHSEX_options['mix_cohsex'] = toks[3]
# GW
# number of GW corrected occp and unoccp bands
lines.pop(0)
l = lines.pop(0).strip()
toks = l.split()
GW_options['nv_corr'] = toks[0]
GW_options['nc_corr'] = toks[1]
# number of GW iterations
lines.pop(0)
l = lines.pop(0).strip()
toks = l.split()
GW_options['nit_gw'] = toks[0]
# BSE
# dumping for BSE and TDDFT
lines.pop(0)
l = lines.pop(0).strip()
toks = l.split()
BSE_TDDFT_options['do_bse'] = toks[0]
BSE_TDDFT_options['do_tddft'] = toks[1]
        # number of occp. and virtual bands for BSE: nocore and up to 40 eVs
lines.pop(0)
l = lines.pop(0).strip()
toks = l.split()
BSE_TDDFT_options['nv_bse'] = toks[0]
BSE_TDDFT_options['nc_bse'] = toks[1]
# number of excitations needed and number of iterations
lines.pop(0)
l = lines.pop(0).strip()
toks = l.split()
BSE_TDDFT_options['npsi_bse'] = toks[0]
BSE_TDDFT_options['nit_bse'] = toks[1]
# Molecule
# list of symbols in order
lines.pop(0)
atname = []
i = int(nsp)
while i != 0:
l = lines.pop(0).strip()
toks = l.split()
atname.append(toks[0])
i -= 1
# scaling factor
lines.pop(0)
l = lines.pop(0).strip()
toks = l.split()
scale = toks[0]
# atoms x,y,z cartesian .. will be multiplied by scale
lines.pop(0)
# Parse geometry
species = []
coords = []
i = int(nat)
while i != 0:
l = lines.pop(0).strip()
toks = l.split()
coords.append([float(j) for j in toks[0:3]])
species.append(atname[int(toks[3]) - 1])
i -= 1
mol = Molecule(species, coords)
return FiestaInput(mol=mol, correlation_grid=correlation_grid,
Exc_DFT_option=Exc_DFT_option,
COHSEX_options=COHSEX_options,
GW_options=GW_options,
BSE_TDDFT_options=BSE_TDDFT_options)
@classmethod
def from_file(cls, filename):
"""
        Read a Fiesta input from a file. Currently tested to work with
files generated from this class itself.
Args:
filename: Filename to parse.
Returns:
FiestaInput object
"""
with zopen(filename) as f:
return cls.from_string(f.read())
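# Illustrative usage sketch (hypothetical CO molecule; assumes pymatgen's Molecule as imported above):
#
#   mol = Molecule(["C", "O"], [[0.0, 0.0, 0.0], [0.0, 0.0, 1.128]])
#   cell = FiestaInput(mol)
#   cell.set_GW_options(nv_band=5, nc_band=10, n_iteration=3)
#   cell.dump_BSE_data_in_GW_run(True)
#   cell.write_file("cell.in")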
class FiestaOutput:
"""
A Fiesta output file parser.
All energies are in eV.
Args:
filename: Filename to read.
"""
def __init__(self, filename):
self.filename = filename
with zopen(filename) as f:
data = f.read()
chunks = re.split(r"GW Driver iteration", data)
# preamble: everything before the first GW Driver iteration
preamble = chunks.pop(0)
# self.job_info = self._parse_preamble(preamble)
self.data = [self._parse_job(c) for c in chunks]
def _parse_job(self, output):
GW_BANDS_results_patt = re.compile(
r"^<it.* \| \s+ (\D+\d*) \s+ \| \s+ ([-\d.]+) \s+ ([-\d.]+) \s+ ([-\d.]+) \s+ \| "
r" \s+ ([-\d.]+) \s+ ([-\d.]+) \s+ ([-\d.]+) \s+ \|"
r" \s+ ([-\d.]+) \s+ ([-\d.]+) \s+ ", re.VERBOSE)
GW_GAPS_results_patt = re.compile(
r"^<it.* \| \s+ Egap_KS \s+ = \s+ ([-\d.]+) \s+ \| \s+ Egap_QP \s+ = \s+ ([-\d.]+) \s+ \| "
r" \s+ Egap_QP \s+ = \s+ ([-\d.]+) \s+ \|", re.VERBOSE)
end_patt = re.compile(r"\s*program returned normally\s*")
total_time_patt = re.compile(r"\s*total \s+ time: \s+ ([\d.]+) .*",
re.VERBOSE)
error_defs = {
"calculations not reaching convergence": "Bad convergence"}
GW_results = {}
parse_gw_results = False
parse_total_time = False
for l in output.split("\n"):
if parse_total_time:
m = end_patt.search(l)
if m:
GW_results.update(end_normally=True)
m = total_time_patt.search(l)
if m:
GW_results.update(total_time=m.group(1))
if parse_gw_results:
if l.find("Dumping eigen energies") != -1:
parse_total_time = True
parse_gw_results = False
continue
else:
m = GW_BANDS_results_patt.search(l)
if m:
d = {}
d.update(band=m.group(1).strip(), eKS=m.group(2),
eXX=m.group(3), eQP_old=m.group(4),
z=m.group(5), sigma_c_Linear=m.group(6),
eQP_Linear=m.group(7),
sigma_c_SCF=m.group(8), eQP_SCF=m.group(9))
GW_results[m.group(1).strip()] = d
n = GW_GAPS_results_patt.search(l)
if n:
d = {}
d.update(Egap_KS=n.group(1), Egap_QP_Linear=n.group(2),
Egap_QP_SCF=n.group(3))
GW_results["Gaps"] = d
if l.find("GW Results") != -1:
parse_gw_results = True
return GW_results
class BSEOutput:
"""
    A BSE output file parser.
All energies are in eV.
Args:
filename: Filename to read.
"""
def __init__(self, filename):
self.filename = filename
with zopen(filename) as f:
log_bse = f.read()
# self.job_info = self._parse_preamble(preamble)
self.exiton = self._parse_job(log_bse)
def _parse_job(self, output):
BSE_exitons_patt = re.compile(
r"^exiton \s+ (\d+) : \s+ ([\d.]+) \( \s+ ([-\d.]+) \) \s+ \| .* ",
re.VERBOSE)
end_patt = re.compile(r"\s*program returned normally\s*")
total_time_patt = re.compile(r"\s*total \s+ time: \s+ ([\d.]+) .*",
re.VERBOSE)
error_defs = {
"calculations not reaching convergence": "Bad convergence"}
BSE_results = {}
parse_BSE_results = False
parse_total_time = False
for l in output.split("\n"):
if parse_total_time:
m = end_patt.search(l)
if m:
BSE_results.update(end_normally=True)
m = total_time_patt.search(l)
if m:
BSE_results.update(total_time=m.group(1))
if parse_BSE_results:
if l.find(
"FULL BSE main valence -> conduction transitions weight:") != -1:
parse_total_time = True
parse_BSE_results = False
continue
else:
m = BSE_exitons_patt.search(l)
if m:
d = {}
d.update(bse_eig=m.group(2), osc_strength=m.group(3))
BSE_results[str(m.group(1).strip())] = d
if l.find("FULL BSE eig.(eV), osc. strength and dipoles:") != -1:
parse_BSE_results = True
return BSE_results
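# Illustrative usage sketch (hypothetical log file names; not part of this module):
#
#   gw = FiestaOutput("/path/to/run/log")    # one dict of GW results per "GW Driver iteration"
#   print(gw.data[-1]["Gaps"])               # {'Egap_KS': ..., 'Egap_QP_Linear': ..., 'Egap_QP_SCF': ...}
#   bse = BSEOutput("/path/to/run/log_bse")
#   print(bse.exiton["1"])                   # {'bse_eig': ..., 'osc_strength': ...}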
|
|
# xpyBuild - eXtensible Python-based Build System
#
# Copyright (c) 2013 - 2019 Software AG, Darmstadt, Germany and/or its licensors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Id: custom.py 301527 2017-02-06 15:31:43Z matj $
#
"""
Contains `xpybuild.targets.custom.CustomCommand` (and similar classes) for executing an arbitrary command line to
produce a file or directory of output.
"""
import os, os.path, subprocess
import shlex
from xpybuild.buildcommon import *
from xpybuild.basetarget import BaseTarget, targetNameToUniqueId
from xpybuild.utils.fileutils import mkdir, deleteDir, deleteFile, normLongPath, normPath
from xpybuild.utils.process import _wait_with_timeout
from xpybuild.pathsets import PathSet, BasePathSet
from xpybuild.utils.buildexceptions import BuildException
from xpybuild.targets.copy import Copy
from xpybuild.propertysupport import defineOption
class Custom(BaseTarget): # deprecated because error handling/logging is poor and it promotes bad practices like not using options (e.g process timeout)
"""
@deprecated: Use `CustomCommand` instead, or a dedicated `BaseTarget` subclass.
A custom target that builds a single file or directory of content by executing
an arbitrary python functor.
Functor must take:
(target path, [dependency paths], context)
Tip: don't forget to ensure the target path's parent dir exists
using fileutils.mkdir.
"""
fn = None
cleanfn = None
def __init__(self, target, deps, fn, cleanfn=None):
"""
@param target: The target file/directory that will be built
@param deps: The list of dependencies of this target (paths, pathsets or lists)
@param fn: The functor used to build this target
@param cleanfn: The functor used to clean this target (optional, defaults to removing
the target file/dir)
"""
BaseTarget.__init__(self, target, deps)
self.fn = fn
self.cleanfn = cleanfn
self.deps = PathSet(deps)
def run(self, context):
self.fn(self.path, self.deps.resolve(context), context)
def clean(self, context):
if self.cleanfn: self.cleanfn(self.path, context)
BaseTarget.clean(self, context)
class ResolvePath(object):
""" @deprecated: Use a `PathSet` (possibly with the `joinPaths` property functor)
instead of this class.
A wrapper around a string in a command line that indicates it is
	a path and should be resolved (expanded, normalized, possibly made relative
to the target dir) when the command is executed.
If the specified path resolves to more than one item then an exception is
thrown unless the pathsep argument is specified.
"""
def __init__(self, path):
"""
@param path: The path to expand, which is a string.
"""
self.path = path
def __repr__(self):
""" Returns a string including this class name and the path """
return 'ResolvePath<%s>'%self.path
def resolve(self, context, baseDir):
""" Resolves the path using the specified context and baseDir """
return context.getFullPath(self.path, defaultDir=baseDir)
defineOption('CustomCommand.outputHandlerFactory', None)
class CustomCommand(BaseTarget):
"""
A custom target that builds a single file or directory of content by running one or more
command line processes.
The command line *must* not reference any generated paths unless they are
explicitly listed in deps.
Supported target options include:
- ``.option("process.timeout")`` to control the maximum number of seconds the command can
run before being cancelled.
- ``.option("common.processOutputEncodingDecider")`` to determine the encoding
used for reading stdout/err (see `xpybuild.utils.process.defaultProcessOutputEncodingDecider`).
- ``.option("CustomCommand.outputHandlerFactory")`` to replace the default behaviour
for detecting errors (which is just based on zero/non-zero exit code) and logging stdout/err with
a custom `xpybuild.utils.outputhandler.ProcessOutputHandler`. The additional
options described on `ProcessOutputHandler` can also be used with this target.
@param target: the file or directory to be built. Will be cleaned, and its parent dir created,
before target runs.
@param dependencies: an optional list of dependencies; it is essential that ALL dependencies required by
	this command and generated by the build process are explicitly listed here, in addition to any
files/directories used by this command that might change between builds.
@param list[obj]|Callable->list command: A list of command line arguments to execute one process.
(see also the ``commands`` parameter which can be used to execute multiple processes).
Alternatively, the list can be constructed dynamically by passing a function
with signature ``(resolvedTargetDirPath: str, resolvedDepsList: list, context: xpybuild.buildcontext.BuildContext) -> list``
(where ``resolvedDepsList`` is an ordered, flattened list of resolved paths from ``deps``).
Each argument in the list of arguments may be:
- a string (which will be run through expandPropertyValues prior to execution);
must not be used for representing arguments that are paths
- a `PathSet` (which must resolve to exactly one path - see `joinPaths`
property functor if multiple paths are required). Any PathSets used in
the arguments should usually be explicitly listed in dependencies too,
especially if they are generated by another part of this build.
- a property functor such as joinPaths (useful for constructing
Java classpaths), basename, etc
- an arbitrary function taking a single context argument
- `CustomCommand.TARGET` - a special value that is resolved to the
output path of this target
- `CustomCommand.DEPENDENCIES` - a special value that is resolved to
a list of this target's dependencies
- [deprecated] a ResolvePath(path) object, indicating a path that should be
	resolved at execution time (this is equivalent
to using a PathSet, which is probably a better approach).
Command lines MUST NOT depend
in any way on the current source or output directory, always use
a PathSet wrapper around such paths.
@param list[list[obj]] commands: A list of commands to run to generate this target, each of which is itself
represented as a list of command line arguments (as described above under ``command``).
Note that you must specify either ``command=`` or ``commands=`` but not both.
Commands listed here are executed in sequence. Unless you have multiple commands that need to write to the same
large output directory it is usually better to use separate `CustomCommand` or `CustomCommandWithCopy` instances
so they can execute in parallel for a faster build.
This parameter was added in version 4.0.
@param cwd: the working directory to run it from (almost always this should be
left blank, meaning use output dir)
@param env: a dictionary of environment overrides, or a function that
returns one given a context. Values in the dictionary will
be expanded using the same rules as for the command (see above).
Consider using `xpybuild.propertysupport.joinPaths` for environment variables
containing a list of paths.
@param redirectStdOutToTarget: usually, any stdout is treated as logging
and the command is assumed to create the target file itself, but
set this to True for commands where the target file contents are
generated by the stdout of the command being executed.
@param stdout: usually a unique name is auto-generated for this target and suffixed with ``.out``, but
set this parameter if you need to send output to a specific location. Ignored if the ``CustomCommand.outputHandlerFactory``
option is set.
@param stderr: usually a unique name is auto-generated this target and suffixed with ``.err``, but
set this parameter if you need to send output to a specific location. Ignored if the ``CustomCommand.outputHandlerFactory``
option is set.
"""
class __CustomCommandSentinel(object):
def __init__(self, name): self.name = name
		def __repr__(self): return 'CustomCommand.'+self.name
TARGET = __CustomCommandSentinel('TARGET')
"""
A special value that can be used in the ``command`` argument and is resolved to the output path of this target.
"""
DEPENDENCIES = __CustomCommandSentinel('DEPENDENCIES')
"""
A special value that can be used in the ``command`` argument and is resolved to a list of this target's dependencies.
"""
def __init__(self, target, command=None, dependencies=[], cwd=None, redirectStdOutToTarget=False, env=None, stdout=None, stderr=None,
commands=None):
BaseTarget.__init__(self, target, dependencies)
assert not (command and commands), 'Cannot specify both command= and commands='
self.command = command
self.commands = commands
self.cwd = cwd
self.deps = PathSet(dependencies)
self.redirectStdOutToTarget = redirectStdOutToTarget
if redirectStdOutToTarget and isDirPath(target): raise BuildException('Cannot set redirectStdOutToTarget and specify a directory for the target name - please specify a file instead: %s'%target)
self.env = env
self.stdout, self.stderr = stdout, stderr
if stdout and redirectStdOutToTarget:
raise BuildException('Cannot set both redirectStdOutToTarget and stdout')
def _resolveItem(self, x, context):
if x == self.DEPENDENCIES: return self.deps.resolve(context)
if x == self.TARGET: x = self.path
if isinstance(x, str): return context.expandPropertyValues(x)
if hasattr(x, 'resolveToString'): return x.resolveToString(context) # supports Composables too
if isinstance(x, BasePathSet):
result = x.resolve(context)
if len(result) != 1:
raise BuildException('PathSet for custom command must resolve to exactly one path not %d (or use joinPaths): %s'%(len(result), x))
return result[0]
if isinstance(x, ResolvePath): return x.resolve(context, self.baseDir)
if callable(x): return x(context)
raise Exception('Unknown custom command input type %s: %s'%(x.__class__.__name__, x))
def _resolveCommands(self, context):
# if we wanted we could allow commands= to itself be a list, but not gonna bother for now
resolved = []
for c in self.commands or [self.command]:
if callable(c):
c = c(self.path, self.deps.resolve(context), context)
assert not isinstance(c, str) # must be a list of strings, not a string
c = flatten([self._resolveItem(x, context) for x in c])
c[0] = normPath(os.path.abspath(c[0]))
resolved.append(c)
return resolved
def getHashableImplicitInputs(self, context):
return super(CustomCommand, self).getHashableImplicitInputs(context) + flatten(self._resolveCommands(context))
def run(self, context):
if self.cwd: self.cwd = context.getFullPath(self.cwd, self.baseDir)
if isDirPath(self.path):
mkdir(self.path)
cwd = self.cwd or self.path
else:
mkdir(os.path.dirname(self.path))
cwd = self.cwd or self.workDir
mkdir(self.workDir)
commands = self._resolveCommands(context)
assert len(commands) > 0, 'No commands were specified to run in this target!'
if len(commands)>1: assert not (self.redirectStdOutToTarget or self.stdout or self.stderr), 'Invalid argument was specified for multiple commands mode'
cmdindex = 0
for cmd in commands:
cmdindex += 1
# this location is a lot easier to find than the target's workdir
logbasename = os.path.normpath(context.getPropertyValue('BUILD_WORK_DIR')+'/CustomCommandOutput/'+os.path.basename(cmd[0])+"."+targetNameToUniqueId(self.name))
if cmdindex > 1: logbasename= logbasename+".%d"%cmdindex # make this unique
cmdDisplaySuffix = ' #%d'%(cmdindex) if len(commands)>1 else ''
stdoutPath = context.getFullPath(self.path if self.redirectStdOutToTarget else (self.stdout or logbasename+'.out'), defaultDir='${BUILD_WORK_DIR}/CustomCommandOutput/')
stderrPath = context.getFullPath(self.stderr or logbasename+'.err', defaultDir='${BUILD_WORK_DIR}/CustomCommandOutput/')
self.log.info('Building %s by executing command%s: %s', self.name, cmdDisplaySuffix, ''.join(['\n\t"%s"'%x for x in cmd]))
if self.cwd and cmdindex==1: self.log.info(' building %s from working directory: %s', self.name, self.cwd) # only print if overridden
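# Merge the configured environment overrides with the parent process environment below;
# an override whose value is None removes that variable from the child process environment.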
env = self.env or {}
if env:
if callable(env):
env = env(context)
else:
env = {k: None if None == env[k] else self._resolveItem(env[k], context) for k in env}
if cmdindex==1: self.log.info(' environment overrides for %s are: %s', self.name, ''.join(['\n\t"%s=%s"'%(k, env[k]) for k in env]))
for k in os.environ:
if k not in env: env[k] = os.getenv(k)
for k in list(env.keys()):
if None == env[k]:
del env[k]
self.log.info(' output from %s will be written to "%s" and "%s"', self.name+cmdDisplaySuffix,
stdoutPath,
stderrPath)
if not os.path.exists(cmd[0]) and not (IS_WINDOWS and os.path.exists(cmd[0]+'.exe')):
raise BuildException('Cannot run command because the executable does not exist: "%s"'%(cmd[0]), location=self.location)
encoding = self.options['common.processOutputEncodingDecider'](context, cmd[0])
handler = self.options['CustomCommand.outputHandlerFactory']
if handler: # create a new handler for each command
handler = handler(str(self), options=self.options)
success=False
rc = None
try:
# maybe send output to a file instead
mkdir(os.path.dirname(logbasename))
with open(stderrPath, 'wb') as fe: # can't use openForWrite with subprocess
with open(stdoutPath, 'wb') as fo:
process = subprocess.Popen(cmd,
stderr=fe,
stdout=fo,
cwd=cwd,
env=env)
rc = _wait_with_timeout(process, '%s(%s)'%(self.name, os.path.basename(cmd[0])), self.options['process.timeout'], False)
success = rc == 0
finally:
try:
if os.path.getsize(stderrPath) == 0 and not self.stderr: deleteFile(stderrPath, allowRetry=True)
if not self.redirectStdOutToTarget and os.path.getsize(stdoutPath) == 0 and not self.stdout: deleteFile(stdoutPath, allowRetry=True)
except Exception as e:
# on Windows, deleting these files can occasionally fail for no obvious reason; not critical
self.log.info('Failed to delete empty .out/.err files (ignoring error as it is not critical): %s', e)
#if not os.listdir(self.workDir): deleteDir(self.workDir) # don't leave empty work dirs around
mainlog = '<command did not write any stdout/stderr>'
logMethod = self.log.info if success else self.log.error
if (handler or not self.redirectStdOutToTarget) and os.path.isfile(stdoutPath) and os.path.getsize(stdoutPath) > 0:
if handler:
with open(stdoutPath, 'r', encoding=encoding, errors='replace') as f:
for l in f: handler.handleLine(l, isstderr=False)
elif os.path.getsize(stdoutPath) < 15*1024:
logMethod(' stdout from %s is: \n%s', self.name+cmdDisplaySuffix, open(stdoutPath, 'r', encoding=encoding, errors='replace').read().replace('\n', '\n\t'))
mainlog = stdoutPath
if not success: context.publishArtifact('%s%s stdout'%(self, cmdDisplaySuffix), stdoutPath)
if os.path.isfile(stderrPath) and os.path.getsize(stderrPath) > 0:
if handler:
with open(stderrPath, 'r', encoding=encoding, errors='replace') as f:
for l in f: handler.handleLine(l, isstderr=True)
elif os.path.getsize(stderrPath) < 15*1024:
logMethod(' stderr from %s is: \n%s', self.name+cmdDisplaySuffix, open(stderrPath, 'r', encoding=encoding, errors='replace').read().replace('\n', '\n\t'))
mainlog = stderrPath # take precedence over stdout
if not success: context.publishArtifact('%s%s stderr'%(self, cmdDisplaySuffix), stderrPath)
if handler:
handler.handleEnd(returnCode=rc)
elif rc != None and rc != 0 and not handler:
if IS_WINDOWS:
quotearg = lambda c: '"%s"'%c if ' ' in c else c
else:
quotearg = shlex.quote
# having it in this format makes it easier for people to re-run the command manually
self.log.info(' full command line is: %s', ' '.join(quotearg(c) for c in cmd))
raise BuildException('%s command%s failed with error code %s; see output at "%s" or look under %s'%(os.path.basename(cmd[0]), cmdDisplaySuffix, rc, mainlog, cwd), location=self.location)
# final sanity check
if not os.path.exists(self.path):
raise BuildException('%s returned no error code but did not create the output file/dir; see output at "%s" or look under %s'%(self, mainlog, cwd), location=self.location)
if (not isDirPath(self.path)) and (not os.path.isfile(self.path)):
raise BuildException('%s did not create a file as expected (please check that trailing "/" is used if and only if a directory output is intended)'%self, location=self.location)
if isDirPath(self.path) and not os.listdir(self.path):
raise BuildException('%s created an empty directory'%self, location=self.location)
class CustomCommandWithCopy(CustomCommand, Copy):
"""
A custom target that builds a directory of content by running a
specified command line process, but unlike the normal CustomCommand
also copies one or more files into the output directory before running the
specified command.
For advanced cases only - usually it's best to find a way to explicitly
separate the target input and output and use a normal CustomCommand - but
this target exists for badly written tools that are only able to do
in-place modifications on a directory.
:param target: the target that will be generated by copying and then running the specified commands.
:param command: see `CustomCommand` for details
:param commands: see `CustomCommand` for details
:param dependencies: an explicit list of any dependencies (other than ``copySrc``)
that are required by the command, including static resources and
other targets generated by the build. Specifying this accurately is ESSENTIAL for
reliable building.
"""
def __init__(self, target, command=None, dependencies=[], copySrc=None, cwd=None, redirectStdOutToTarget=False, env=None, commands=None, **kwargs):
assert isDirPath(target), 'This target can only be used for directories (ending in /)'
copySrc = PathSet(copySrc)
CustomCommand.__init__(self, target, command=command, commands=commands, dependencies=[dependencies, copySrc], cwd=cwd, redirectStdOutToTarget=redirectStdOutToTarget, env=env, **kwargs)
# can't call Copy.__init__ without introducing a duplicate target
# but use the same name used by Copy so we can call run successfully later
self.src = copySrc
self.mode = None
def run(self, context):
# first do what Copy would have done
Copy.run(self, context)
# then custom command
CustomCommand.run(self, context)
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Tests for the update command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os.path
import shutil
import subprocess
import sys
import tarfile
import boto
import gslib
from gslib.metrics import _UUID_FILE_PATH
import gslib.tests.testcase as testcase
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import unittest
from gslib.utils import system_util
from gslib.utils.boto_util import CERTIFICATE_VALIDATION_ENABLED
from gslib.utils.constants import UTF8
from gslib.utils.update_util import DisallowUpdateIfDataInGsutilDir
TESTS_DIR = os.path.abspath(os.path.dirname(__file__))
GSUTIL_DIR = os.path.join(TESTS_DIR, '..', '..')
class UpdateTest(testcase.GsUtilIntegrationTestCase):
"""Update command test suite."""
@unittest.skipUnless(CERTIFICATE_VALIDATION_ENABLED,
'Test requires https certificate validation enabled.')
def test_update(self):
"""Tests that the update command works or raises proper exceptions."""
if system_util.InvokedViaCloudSdk():
stderr = self.RunGsUtil(['update'],
stdin='n',
return_stderr=True,
expected_status=1)
self.assertIn('update command is disabled for Cloud SDK', stderr)
return
if gslib.IS_PACKAGE_INSTALL:
# The update command is not present when installed via package manager.
stderr = self.RunGsUtil(['update'], return_stderr=True, expected_status=1)
self.assertIn('Invalid command', stderr)
return
# Create two temp directories, one of which we will run 'gsutil update' in
# to pull the changes from the other.
tmpdir_src = self.CreateTempDir()
tmpdir_dst = self.CreateTempDir()
# Copy gsutil to both source and destination directories.
gsutil_src = os.path.join(tmpdir_src, 'gsutil')
gsutil_dst = os.path.join(tmpdir_dst, 'gsutil')
# Path when executing from tmpdir (Windows doesn't support in-place rename)
gsutil_relative_dst = os.path.join('gsutil', 'gsutil')
ignore_callable = shutil.ignore_patterns(
'.git*',
'*.pyc',
'*.pyo',
'__pycache__',
)
shutil.copytree(GSUTIL_DIR, gsutil_src, ignore=ignore_callable)
# Copy specific files rather than all of GSUTIL_DIR so we don't pick up temp
# working files left in top-level directory by gsutil developers (like tags,
# .git*, .pyc files, etc.)
os.makedirs(gsutil_dst)
for comp in os.listdir(GSUTIL_DIR):
if ('.git' not in comp and
'__pycache__' not in comp and
not comp.endswith('.pyc') and
not comp.endswith('.pyo')): # yapf: disable
cp_src_path = os.path.join(GSUTIL_DIR, comp)
cp_dst_path = os.path.join(gsutil_dst, comp)
if os.path.isdir(cp_src_path):
shutil.copytree(cp_src_path, cp_dst_path, ignore=ignore_callable)
else:
shutil.copyfile(cp_src_path, cp_dst_path)
# Create a fake version number in the source so we can verify it in the
# destination.
expected_version = '17.25'
src_version_file = os.path.join(gsutil_src, 'VERSION')
self.assertTrue(os.path.exists(src_version_file))
with open(src_version_file, 'w') as f:
f.write(expected_version)
# Create a tarball out of the source directory and copy it to a bucket.
src_tarball = os.path.join(tmpdir_src, 'gsutil.test.tar.gz')
normpath = os.path.normpath
try:
# We monkey patch os.path.normpath here because the tarfile module
# normalizes the ./gsutil path, but the update command expects the tar
# file entries to be prefixed with '.'. This preserves the ./gsutil path.
os.path.normpath = lambda fname: fname
tar = tarfile.open(src_tarball, 'w:gz')
tar.add(gsutil_src, arcname='./gsutil')
tar.close()
finally:
os.path.normpath = normpath
prefix = [sys.executable] if sys.executable else []
# Run with an invalid gs:// URI.
p = subprocess.Popen(prefix + ['gsutil', 'update', 'gs://pub'],
cwd=gsutil_dst,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(_, stderr) = p.communicate()
p.stdout.close()
p.stderr.close()
self.assertEqual(p.returncode, 1)
self.assertIn(b'update command only works with tar.gz', stderr)
# Run with non-existent gs:// URI.
p = subprocess.Popen(prefix +
['gsutil', 'update', 'gs://pub/Jdjh38)(;.tar.gz'],
cwd=gsutil_dst,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(_, stderr) = p.communicate()
p.stdout.close()
p.stderr.close()
self.assertEqual(p.returncode, 1)
self.assertIn(b'NotFoundException', stderr)
# Run with file:// URI without -f option.
p = subprocess.Popen(
prefix + ['gsutil', 'update', suri(src_tarball)],
cwd=gsutil_dst,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(_, stderr) = p.communicate()
p.stdout.close()
p.stderr.close()
self.assertEqual(p.returncode, 1)
self.assertIn(b'command does not support', stderr)
# Run with a file present that was not distributed with gsutil.
with open(os.path.join(gsutil_dst, 'userdata.txt'), 'w') as fp:
fp.write('important data\n')
p = subprocess.Popen(
prefix +
['gsutil', 'update', '-f', suri(src_tarball)],
cwd=gsutil_dst,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
(_, stderr) = p.communicate()
p.stdout.close()
p.stderr.close()
# Clean up before next test, and before assertions so failure doesn't leave
# this file around.
os.unlink(os.path.join(gsutil_dst, 'userdata.txt'))
self.assertEqual(p.returncode, 1)
# Additional check for Windows since it has \r\n and string may have just \n
os_ls = os.linesep.encode(UTF8)
if os_ls in stderr:
stderr = stderr.replace(os_ls, b' ')
elif b'\n' in stderr:
stderr = stderr.replace(b'\n', b' ')
self.assertIn(
b'The update command cannot run with user data in the gsutil directory',
stderr)
# Determine whether we'll need to decline the analytics prompt.
analytics_prompt = not (os.path.exists(_UUID_FILE_PATH) or
boto.config.get_value('GSUtil',
'disable_analytics_prompt'))
update_input = b'n\r\ny\r\n' if analytics_prompt else b'y\r\n'
# Now do the real update, which should succeed.
p = subprocess.Popen(
prefix + [gsutil_relative_dst, 'update', '-f',
suri(src_tarball)],
cwd=tmpdir_dst,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
(_, stderr) = p.communicate(input=update_input)
p.stdout.close()
p.stderr.close()
self.assertEqual(
p.returncode,
0,
msg=('Non-zero return code (%d) from gsutil update. stderr = \n%s' %
(p.returncode, stderr.decode(UTF8))))
# Verify that version file was updated.
dst_version_file = os.path.join(tmpdir_dst, 'gsutil', 'VERSION')
with open(dst_version_file, 'r') as f:
self.assertEqual(f.read(), expected_version)
# If the analytics prompt was given, that means we disabled analytics. We
# should reset to the default by deleting the UUID file.
if analytics_prompt:
os.unlink(_UUID_FILE_PATH)
class UpdateUnitTest(testcase.GsUtilUnitTestCase):
"""Tests the functionality of commands/update.py."""
@unittest.skipUnless(
not gslib.IS_PACKAGE_INSTALL,
'Test is runnable only if gsutil dir is accessible, and update '
'command is not valid for package installs.')
def test_repo_matches_manifest(self):
"""Ensure that all files/folders match the manifest."""
# Create a temp directory and copy specific files to it.
tmpdir_src = self.CreateTempDir()
gsutil_src = os.path.join(tmpdir_src, 'gsutil')
os.makedirs(gsutil_src)
copy_files = []
for filename in os.listdir(GSUTIL_DIR):
if (filename.endswith('.pyc') or filename.startswith('.git') or
filename == '__pycache__' or filename == '.settings' or
filename == '.project' or filename == '.pydevproject' or
filename == '.style.yapf' or filename == '.yapfignore'):
# Need to ignore any compiled code or Eclipse project folders.
continue
copy_files.append(filename)
for comp in copy_files:
if os.path.isdir(os.path.join(GSUTIL_DIR, comp)):
func = shutil.copytree
else:
func = shutil.copyfile
func(os.path.join(GSUTIL_DIR, comp), os.path.join(gsutil_src, comp))
DisallowUpdateIfDataInGsutilDir(directory=gsutil_src)
|
|
from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from DistributedMinigame import *
from direct.interval.IntervalGlobal import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.safezone import Walk
from toontown.toonbase import ToontownTimer
from direct.gui import OnscreenText
import MinigameAvatarScorePanel
from direct.distributed import DistributedSmoothNode
import random
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPGlobals
import TagGameGlobals
import Trajectory
class DistributedTagGame(DistributedMinigame):
DURATION = TagGameGlobals.DURATION
IT_SPEED_INCREASE = 1.3
IT_ROT_INCREASE = 1.3
def __init__(self, cr):
DistributedMinigame.__init__(self, cr)
self.gameFSM = ClassicFSM.ClassicFSM('DistributedTagGame', [State.State('off', self.enterOff, self.exitOff, ['play']), State.State('play', self.enterPlay, self.exitPlay, ['cleanup']), State.State('cleanup', self.enterCleanup, self.exitCleanup, ['off'])], 'off', 'off')
self.addChildGameFSM(self.gameFSM)
self.walkStateData = Walk.Walk('walkDone')
self.scorePanels = []
self.initialPositions = ((0, 10, 0, 180, 0, 0),
(10, 0, 0, 90, 0, 0),
(0, -10, 0, 0, 0, 0),
(-10, 0, 0, -90, 0, 0))
base.localAvatar.isIt = 0
self.modelCount = 4
def getTitle(self):
return TTLocalizer.TagGameTitle
def getInstructions(self):
return TTLocalizer.TagGameInstructions
def getMaxDuration(self):
return self.DURATION
def load(self):
self.notify.debug('load')
DistributedMinigame.load(self)
self.itText = OnscreenText.OnscreenText('itText', fg=(0.95, 0.95, 0.65, 1), scale=0.14, font=ToontownGlobals.getSignFont(), pos=(0.0, -0.8), wordwrap=15, mayChange=1)
self.itText.hide()
self.sky = loader.loadModel('phase_3.5/models/props/TT_sky')
self.ground = loader.loadModel('phase_4/models/minigames/tag_arena')
self.music = base.loadMusic('phase_4/audio/bgm/MG_toontag.ogg')
self.tagSfx = base.loadSfx('phase_4/audio/sfx/MG_Tag_C.ogg')
self.itPointer = loader.loadModel('phase_4/models/minigames/bboard-pointer')
self.tracks = []
self.IT = None
return
def unload(self):
self.notify.debug('unload')
DistributedMinigame.unload(self)
self.ignoreAll()
del self.tracks
del self.IT
self.sky.removeNode()
del self.sky
self.itPointer.removeNode()
del self.itPointer
self.ground.removeNode()
del self.ground
del self.music
del self.tagSfx
self.itText.cleanup()
del self.itText
self.removeChildGameFSM(self.gameFSM)
del self.gameFSM
def onstage(self):
self.notify.debug('onstage')
DistributedMinigame.onstage(self)
self.ground.reparentTo(render)
self.sky.reparentTo(render)
myPos = self.avIdList.index(self.localAvId)
base.localAvatar.setPosHpr(*self.initialPositions[myPos])
base.localAvatar.reparentTo(render)
base.localAvatar.loop('neutral')
camera.reparentTo(render)
camera.setPosHpr(0, -24, 16, 0, -30, 0)
base.camLens.setFar(450.0)
base.transitions.irisIn(0.4)
NametagGlobals.setMasterArrowsOn(1)
DistributedSmoothNode.activateSmoothing(1, 1)
self.IT = None
return
def offstage(self):
self.notify.debug('offstage')
DistributedSmoothNode.activateSmoothing(1, 0)
NametagGlobals.setMasterArrowsOn(0)
DistributedMinigame.offstage(self)
self.sky.reparentTo(hidden)
self.ground.reparentTo(hidden)
base.camLens.setFar(ToontownGlobals.DefaultCameraFar)
self.itText.hide()
def setGameReady(self):
if not self.hasLocalToon:
return
self.notify.debug('setGameReady')
if DistributedMinigame.setGameReady(self):
return
for avId in self.avIdList:
self.acceptTagEvent(avId)
myPos = self.avIdList.index(self.localAvId)
for i in xrange(self.numPlayers):
avId = self.avIdList[i]
avatar = self.getAvatar(avId)
if avatar:
avatar.startSmooth()
base.localAvatar.setPosHpr(*self.initialPositions[myPos])
base.localAvatar.d_clearSmoothing()
base.localAvatar.sendCurrentPosition()
base.localAvatar.b_setAnimState('neutral', 1)
base.localAvatar.b_setParent(ToontownGlobals.SPRender)
def setGameStart(self, timestamp):
if not self.hasLocalToon:
return
self.notify.debug('setGameStart')
DistributedMinigame.setGameStart(self, timestamp)
self.gameFSM.request('play')
def enterOff(self):
self.notify.debug('enterOff')
def exitOff(self):
pass
def enterPlay(self):
self.notify.debug('enterPlay')
for i in xrange(self.numPlayers):
avId = self.avIdList[i]
avName = self.getAvatarName(avId)
scorePanel = MinigameAvatarScorePanel.MinigameAvatarScorePanel(avId, avName)
scorePanel.setPos(-0.213, 0.0, 0.28 * i + 0.66)
scorePanel.reparentTo(base.a2dBottomRight)
self.scorePanels.append(scorePanel)
base.setCellsAvailable(base.rightCells, 0)
self.walkStateData.enter()
self.walkStateData.fsm.request('walking')
if base.localAvatar.isIt:
base.mouseInterfaceNode.setForwardSpeed(ToontownGlobals.ToonForwardSpeed * self.IT_SPEED_INCREASE)
base.mouseInterfaceNode.setRotateSpeed(ToontownGlobals.ToonRotateSpeed * self.IT_ROT_INCREASE)
self.timer = ToontownTimer.ToontownTimer()
self.timer.posInTopRightCorner()
self.timer.setTime(self.DURATION)
self.timer.countdown(self.DURATION, self.timerExpired)
base.playMusic(self.music, looping=1, volume=0.9)
base.localAvatar.setIdealCameraPos(Point3(0, -24, 8))
def exitPlay(self):
for task in self.tracks:
task.finish()
self.tracks = []
for avId in self.avIdList:
toon = self.getAvatar(avId)
if toon:
toon.getGeomNode().clearMat()
toon.scale = 1.0
toon.rescaleToon()
self.walkStateData.exit()
self.music.stop()
self.timer.destroy()
del self.timer
for panel in self.scorePanels:
panel.cleanup()
self.scorePanels = []
base.setCellsAvailable(base.rightCells, 1)
base.mouseInterfaceNode.setForwardSpeed(ToontownGlobals.ToonForwardSpeed)
base.mouseInterfaceNode.setRotateSpeed(ToontownGlobals.ToonRotateSpeed)
self.itPointer.reparentTo(hidden)
base.localAvatar.cameraIndex = 0
base.localAvatar.setCameraPositionByIndex(0)
def timerExpired(self):
self.notify.debug('local timer expired')
self.gameOver()
def enterCleanup(self):
self.notify.debug('enterCleanup')
self.gameFSM.request('off')
def exitCleanup(self):
pass
def setIt(self, avId):
if not self.hasLocalToon:
return
if self.gameFSM.getCurrentState().getName() != 'play':
self.notify.debug('Ignoring setIt after done playing')
return
self.itText.show()
self.notify.debug(str(avId) + ' is now it')
if avId == self.localAvId:
self.itText.setText(TTLocalizer.TagGameYouAreIt)
base.localAvatar.isIt = 1
base.localAvatar.controlManager.setSpeeds(OTPGlobals.ToonForwardSpeed * self.IT_SPEED_INCREASE, OTPGlobals.ToonJumpForce, OTPGlobals.ToonReverseSpeed * self.IT_SPEED_INCREASE, OTPGlobals.ToonRotateSpeed * self.IT_ROT_INCREASE)
else:
self.itText.setText(TTLocalizer.TagGameSomeoneElseIsIt % self.getAvatarName(avId))
base.localAvatar.isIt = 0
base.localAvatar.setWalkSpeedNormal()
avatar = self.getAvatar(avId)
if avatar:
self.itPointer.reparentTo(avatar)
self.itPointer.setZ(avatar.getHeight())
base.playSfx(self.tagSfx)
toon = self.getAvatar(avId)
duration = 0.6
if not toon:
return
spinTrack = LerpHprInterval(toon.getGeomNode(), duration, Point3(0, 0, 0), startHpr=Point3(-5.0 * 360.0, 0, 0), blendType='easeOut')
growTrack = Parallel()
gs = 2.5
for hi in xrange(toon.headParts.getNumPaths()):
head = toon.headParts[hi]
growTrack.append(LerpScaleInterval(head, duration, Point3(gs, gs, gs)))
def bounceFunc(t, trajectory, node = toon.getGeomNode()):
node.setZ(trajectory.calcZ(t))
def bounceCleanupFunc(node = toon.getGeomNode(), z = toon.getGeomNode().getZ()):
node.setZ(z)
bounceTrack = Sequence()
startZ = toon.getGeomNode().getZ()
tLen = 0
zVel = 30
decay = 0.6
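# Build the bounce as a chain of ballistic arcs: each arc launches from startZ with vertical
# velocity zVel, runs until the trajectory returns to the startZ plane, and zVel is damped by
# 'decay' after every arc, so the bounces shrink until 'duration' worth of arcs has been queued.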
while tLen < duration:
trajectory = Trajectory.Trajectory(0, Point3(0, 0, startZ), Point3(0, 0, zVel), gravMult=5.0)
dur = trajectory.calcTimeOfImpactOnPlane(startZ)
if dur <= 0:
break
bounceTrack.append(LerpFunctionInterval(bounceFunc, fromData=0.0, toData=dur, duration=dur, extraArgs=[trajectory]))
tLen += dur
zVel *= decay
bounceTrack.append(Func(bounceCleanupFunc))
tagTrack = Sequence(Func(toon.animFSM.request, 'off'), Parallel(spinTrack, growTrack, bounceTrack), Func(toon.animFSM.request, 'Happy'))
self.tracks.append(tagTrack)
tagTrack.start()
if self.IT:
it = self.getAvatar(self.IT)
shrinkTrack = Parallel()
for hi in xrange(it.headParts.getNumPaths()):
head = it.headParts[hi]
scale = ToontownGlobals.toonHeadScales[it.style.getAnimal()]
shrinkTrack.append(LerpScaleInterval(head, duration, scale))
self.tracks.append(shrinkTrack)
shrinkTrack.start()
self.IT = avId
def acceptTagEvent(self, avId):
self.accept('enterdistAvatarCollNode-' + str(avId), self.sendTagIfIt, [avId])
def sendTagIfIt(self, avId, collisionEntry):
if base.localAvatar.isIt:
self.notify.debug('Tagging ' + str(avId))
self.sendUpdate('tag', [avId])
else:
self.notify.debug('Bumped ' + str(avId))
def setTreasureScore(self, scores):
if not self.hasLocalToon:
return
self.notify.debug('setTreasureScore: %s' % scores)
for i in xrange(len(self.scorePanels)):
self.scorePanels[i].setScore(scores[i])
|
|
# -*- coding: utf-8 -*-
__doc__ = """
The manager module provides a selection of classes to
handle websockets' execution.
Initially the rationale was to:
- Externalize the way the CherryPy server had been setup
as its websocket management was too tightly coupled with
the plugin implementation.
- Offer a manager that could be used by other
server or client implementations.
- Move away from the threaded model to the event-based
model by relying on `select` or `epoll` (when available).
A simple usage for handling websocket clients:
.. code-block:: python
from ws4py.client import WebSocketBaseClient
from ws4py.manager import WebSocketManager
m = WebSocketManager()
class EchoClient(WebSocketBaseClient):
def handshake_ok(self):
m.add(self) # register the client once the handshake is done
def received_message(self, msg):
print str(msg)
m.start()
client = EchoClient('ws://localhost:9000/ws')
client.connect()
m.join() # blocks forever
Managers are not compulsory but hopefully will help your
workflow. For clients, you can still rely on threaded, gevent or
tornado based implementations of course.
"""
import logging
import select
import threading
import time
from tangelo.ws4py import format_addresses
from tangelo.ws4py.compat import py3k
logger = logging.getLogger('ws4py')
class SelectPoller(object):
def __init__(self, timeout=0.1):
"""
A socket poller that uses the `select`
implementation to determine which
file descriptors have data available to read.
It is available on all platforms.
"""
self._fds = []
self.timeout = timeout
def release(self):
"""
Cleanup resources.
"""
self._fds = []
def register(self, fd):
"""
Register a new file descriptor to be
part of the select polling next time around.
"""
if fd not in self._fds:
self._fds.append(fd)
def unregister(self, fd):
"""
Unregister the given file descriptor.
"""
if fd in self._fds:
self._fds.remove(fd)
def poll(self):
"""
Polls once and returns a list of
ready-to-be-read file descriptors.
"""
if not self._fds:
time.sleep(self.timeout)
return []
r, w, x = select.select(self._fds, [], [], self.timeout)
return r
class EPollPoller(object):
def __init__(self, timeout=0.1):
"""
An epoll poller that uses the ``epoll``
implementation to determine which
file descriptors have data available to read.
Available on Unix flavors mostly.
"""
self.poller = select.epoll()
self.timeout = timeout
def release(self):
"""
Cleanup resources.
"""
self.poller.close()
def register(self, fd):
"""
Register a new file descriptor to be
part of the select polling next time around.
"""
try:
self.poller.register(fd, select.EPOLLIN | select.EPOLLPRI)
except IOError:
pass
def unregister(self, fd):
"""
Unregister the given file descriptor.
"""
self.poller.unregister(fd)
def poll(self):
"""
Polls once and yields each ready-to-be-read
file-descriptor
"""
events = self.poller.poll(timeout=self.timeout)
for fd, event in events:
if event & (select.EPOLLIN | select.EPOLLPRI):
yield fd
class WebSocketManager(threading.Thread):
def __init__(self, poller=None):
"""
An event-based websocket manager. By event-based, we mean
that the websockets will be called when their
sockets have data ready to be read.
The manager itself runs in its own thread so as not to
be the blocking mainloop of your application.
The poller's implementation is automatically chosen
with ``epoll`` if available else ``select`` unless you
provide your own ``poller``.
"""
threading.Thread.__init__(self)
self.lock = threading.Lock()
self.websockets = {}
if poller:
self.poller = poller
else:
if hasattr(select, "epoll"):
self.poller = EPollPoller()
logger.info("Using epoll")
else:
self.poller = SelectPoller()
logger.info("Using select as epoll is not available")
def __len__(self):
return len(self.websockets)
def __iter__(self):
if py3k:
return iter(self.websockets.values())
else:
return self.websockets.itervalues()
def __contains__(self, ws):
fd = ws.sock.fileno()
# just in case the file descriptor was reused
# we actually check the instance (well, this might
# also have been reused...)
return self.websockets.get(fd) is ws
def add(self, websocket):
"""
Manage a new websocket.
First calls its :meth:`opened() <ws4py.websocket.WebSocket.opened>`
method and register its socket against the poller
for reading events.
"""
logger.info("Managing websocket %s" % format_addresses(websocket))
websocket.opened()
with self.lock:
fd = websocket.sock.fileno()
self.websockets[fd] = websocket
self.poller.register(fd)
def remove(self, websocket):
"""
Remove the given ``websocket`` from the manager.
This does not call its :meth:`closed() <ws4py.websocket.WebSocket.closed>`
method, as that is expected to happen out-of-band, either by your application
or from within the manager's run loop.
"""
logger.info("Removing websocket %s" % format_addresses(websocket))
with self.lock:
fd = websocket.sock.fileno()
self.websockets.pop(fd, None)
self.poller.unregister(fd)
def stop(self):
"""
Mark the manager as terminated and
releases its resources.
"""
self.running = False
with self.lock:
self.websockets.clear()
self.poller.release()
def run(self):
"""
Manager's mainloop executed from within a thread.
Constantly poll for read events and, when available,
call related websockets' `once` method to
read and process the incoming data.
If the :meth:`once() <ws4py.websocket.WebSocket.once>`
method returns a `False` value, its :meth:`terminate() <ws4py.websocket.WebSocket.terminate>`
method is also applied to properly close
the websocket and its socket is unregistered from the poller.
Note that websockets shouldn't take long to process
their data or they will block the remaining
websockets with data to be handled. As for what long means,
it's up to your requirements.
"""
self.running = True
while self.running:
with self.lock:
polled = self.poller.poll()
if not self.running:
break
for fd in polled:
if not self.running:
break
ws = self.websockets.get(fd)
if ws and not ws.terminated:
if not ws.once():
with self.lock:
fd = ws.sock.fileno()
self.websockets.pop(fd, None)
self.poller.unregister(fd)
if not ws.terminated:
logger.info("Terminating websocket %s" % format_addresses(ws))
ws.terminate()
def close_all(self, code=1001, message='Server is shutting down'):
"""
Execute the :meth:`close() <ws4py.websocket.WebSocket.close>`
method of each registered websocket to initiate the closing handshake.
It doesn't wait for the handshake to complete properly.
"""
with self.lock:
logger.info("Closing all websockets with [%d] '%s'" % (code, message))
for ws in iter(self):
ws.close(code=code, reason=message)
def broadcast(self, message, binary=False):
"""
Broadcasts the given message to all registered
websockets, at the time of the call.
Broadcast may fail on a given registered peer
but this is silent as it's not the method's
purpose to handle websocket failures.
"""
with self.lock:
websockets = self.websockets.copy()
if py3k:
ws_iter = iter(websockets.values())
else:
ws_iter = websockets.itervalues()
for ws in ws_iter:
if not ws.terminated:
try:
ws.send(message, binary)
except:
pass
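# A minimal server-side sketch (illustrative only; the surrounding server integration is
# omitted). The manager may also be given an explicit poller instead of the automatic
# epoll/select choice:
#
#   from tangelo.ws4py.manager import WebSocketManager, SelectPoller
#   m = WebSocketManager(poller=SelectPoller(timeout=0.5))
#   m.start()
#   ...          # call m.add(ws) as each websocket completes its handshake
#   m.close_all()
#   m.stop()
#   m.join()
#
# See the module docstring above for a client-oriented example.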
|
|
import numpy
import chainer
from chainer import configuration
from chainer import cuda
from chainer import function
from chainer.utils import argument
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cudnn.cudnn
def _as4darray(arr):
if arr.ndim == 0:
return arr.reshape(1, 1, 1, 1)
elif arr.ndim == 4:
return arr
else:
return arr.reshape(arr.shape[0], -1, 1, 1)
def _xhat(x, mean, std, expander):
x_mu = x - mean[expander]
x_mu /= std[expander]
return x_mu
class BatchNormalizationFunction(function.Function):
def __init__(self, eps=2e-5, mean=None, var=None, decay=0.9):
self.running_mean = mean
self.running_var = var
# Note: cuDNN v5 requires that eps be greater than 1e-5. Otherwise, an
# error will occur.
# See CUDNN_BN_MIN_EPSILON value in cudnn.h to verify minimum allowable
# value.
self.eps = eps
if chainer.should_use_cudnn('>=auto'):
if eps < 1e-5:
msg = 'cuDNN does not allow an eps value less than 1e-5.'
raise RuntimeError(msg)
self.mean_cache = None
self.decay = decay
def check_type_forward(self, in_types):
n_in = type_check.eval(in_types.size())
if n_in != 3 and n_in != 5:
raise type_check.InvalidType(
'%s or %s' % (in_types.size() == 3, in_types.size() == 5),
'%s == %s' % (in_types.size(), n_in))
x_type, gamma_type, beta_type = in_types[:3]
M = type_check.eval(gamma_type.ndim)
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim >= gamma_type.ndim + 1,
x_type.shape[1:1 + M] == gamma_type.shape,
# TODO(beam2d): Check shape
gamma_type.dtype == x_type.dtype,
beta_type.dtype == x_type.dtype,
gamma_type.shape == beta_type.shape,
)
if len(in_types) == 5:
mean_type, var_type = in_types[3:]
type_check.expect(
mean_type.dtype == x_type.dtype,
mean_type.shape == gamma_type.shape,
var_type.dtype == x_type.dtype,
var_type.shape == gamma_type.shape,
)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
x, gamma, beta = inputs[:3]
if configuration.config.train:
if self.running_mean is None:
self.running_mean = xp.zeros_like(gamma)
self.running_var = xp.zeros_like(gamma)
else:
self.running_mean = xp.array(self.running_mean)
self.running_var = xp.array(self.running_var)
elif len(inputs) == 5:
self.fixed_mean = inputs[3]
self.fixed_var = inputs[4]
head_ndim = gamma.ndim + 1
expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)
gamma = gamma[expander]
beta = beta[expander]
# cuDNN only supports these tensor dimensions because they are
# the most commonly used. If there is a need to support other
# dimensions with cuDNN, we could consider reshaping the input
# into a 2-dim array with channels as second dim and m=<product
# of all dimensions except the 2nd dimension> as the first
# dimension.
cudnn_dim_ok = x.ndim == 2 or (x.ndim == 4 and head_ndim == 2)
# TODO(bkvogel): Check for float16 support again in next cuDNN version.
# cuDNN v5 batch normalization does not seem to support float16.
self._can_use_cudnn = cudnn_dim_ok and x[0].dtype != numpy.float16
cudnn_updated_running_stats = False
if (xp is not numpy and chainer.should_use_cudnn('>=auto', 5000) and
self._can_use_cudnn):
x = cuda.cupy.ascontiguousarray(x)
if x.ndim == 4 and head_ndim == 2:
# for convolutional layer
self.mode = libcudnn.CUDNN_BATCHNORM_SPATIAL
else:
# for linear layer
self.mode = libcudnn.CUDNN_BATCHNORM_PER_ACTIVATION
gamma = cuda.cupy.ascontiguousarray(gamma)
beta = cuda.cupy.ascontiguousarray(beta)
dtype = x.dtype
handle = cudnn.get_handle()
x_desc = cudnn.create_tensor_descriptor(_as4darray(x))
derivedBnDesc = cudnn.create_uninitialized_tensor_descriptor()
libcudnn.deriveBNTensorDescriptor(derivedBnDesc.value,
x_desc.value, self.mode)
one = numpy.array(1, dtype=dtype).ctypes
zero = numpy.array(0, dtype=dtype).ctypes
y = cuda.cupy.empty_like(x)
# Factor used in the moving average
factor = 1 - self.decay
if configuration.config.train:
if self.mean_cache is None:
# Output cache to speed up backward pass.
self.mean_cache = xp.empty_like(gamma)
# Output cache to speed up backward pass.
self.var_cache = xp.empty_like(gamma)
# Note: cuDNN computes the mini-batch mean and variance
# internally. We can simply (optionally) pass
# it the running-average mean and variance arrays.
libcudnn.batchNormalizationForwardTraining(
handle, self.mode, one.data, zero.data,
x_desc.value, x.data.ptr, x_desc.value,
y.data.ptr, derivedBnDesc.value, gamma.data.ptr,
beta.data.ptr, factor, self.running_mean.data.ptr,
self.running_var.data.ptr, self.eps,
self.mean_cache.data.ptr, self.var_cache.data.ptr)
cudnn_updated_running_stats = True
else:
libcudnn.batchNormalizationForwardInference(
handle, self.mode, one.data, zero.data,
x_desc.value, x.data.ptr, x_desc.value, y.data.ptr,
derivedBnDesc.value, gamma.data.ptr, beta.data.ptr,
self.fixed_mean.data.ptr, self.fixed_var.data.ptr,
self.eps)
else:
if configuration.config.train:
axis = (0,) + tuple(range(head_ndim, x.ndim))
mean = x.mean(axis=axis)
var = x.var(axis=axis)
var += self.eps
else:
mean = self.fixed_mean
var = self.fixed_var + self.eps
self.std = xp.sqrt(var, dtype=var.dtype)
if xp is numpy:
self.x_hat = _xhat(x, mean, self.std, expander)
y = gamma * self.x_hat
y += beta
else:
self.x_hat, y = cuda.elementwise(
'T x, T mean, T std, T gamma, T beta', 'T x_hat, T y',
'''
x_hat = (x - mean) / std;
y = gamma * x_hat + beta;
''',
'bn_fwd')(x, mean[expander], self.std[expander], gamma,
beta)
if configuration.config.train and (not cudnn_updated_running_stats):
# Note: If in training mode, the cuDNN forward training function
# will do this for us, so
# only run following code if cuDNN was not used.
# Update running statistics:
m = x.size // gamma.size
adjust = m / max(m - 1., 1.) # unbiased estimation
self.running_mean *= self.decay
temp_ar = xp.array(mean)
temp_ar *= (1 - self.decay)
self.running_mean += temp_ar
del temp_ar
self.running_var *= self.decay
temp_ar = xp.array(var)
temp_ar *= (1 - self.decay) * adjust
self.running_var += temp_ar
del temp_ar
return y,
def backward(self, inputs, grad_outputs):
x, gamma = inputs[:2]
gy = grad_outputs[0]
head_ndim = gamma.ndim + 1
expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)
m = gamma.dtype.type(x.size // gamma.size)
axis = (0,) + tuple(range(head_ndim, x.ndim))
xp = cuda.get_array_module(x)
if len(inputs) == 5:
# This case is unlikely to be used in practice and so does not
# need to be optimized for performance.
mean = inputs[3]
var = inputs[4]
std = xp.sqrt(var, dtype=var.dtype)
gs = gamma / std
gbeta = gy.sum(axis=axis)
x_hat = _xhat(x, mean, std, expander)
ggamma = (gy * x_hat).sum(axis=axis)
gmean = -gs * gbeta
gvar = -0.5 * gamma / var * ggamma
gx = gs[expander] * gy
return gx, ggamma, gbeta, gmean, gvar
# Note: If length of inputs is not 5, we must be in train mode.
assert configuration.config.train
if (xp is not numpy and chainer.should_use_cudnn('>=auto', 5000) and
self._can_use_cudnn):
# Note: cuDNN batch normalization backward only works in
# "training mode." That is, it does not support
# computing gradients in fixed-mean-variance mode, because there
# is normally no reason to call backward()
# while in test/evaluation mode.
x = cuda.cupy.ascontiguousarray(x)
gamma = cuda.cupy.ascontiguousarray(gamma)
gy = cuda.cupy.ascontiguousarray(gy)
dtype = x.dtype
handle = cudnn.get_handle()
x_desc = cudnn.create_tensor_descriptor(_as4darray(x))
derivedBnDesc = cudnn.create_uninitialized_tensor_descriptor()
libcudnn.deriveBNTensorDescriptor(derivedBnDesc.value,
x_desc.value, self.mode)
one = numpy.array(1, dtype=dtype).ctypes
zero = numpy.array(0, dtype=dtype).ctypes
gx = cuda.cupy.empty_like(x)
ggamma = cuda.cupy.empty_like(gamma)
gbeta = cuda.cupy.empty_like(gamma)
libcudnn.batchNormalizationBackward(
handle, self.mode, one.data, zero.data,
one.data, zero.data, x_desc.value, x.data.ptr,
x_desc.value, gy.data.ptr, x_desc.value, gx.data.ptr,
derivedBnDesc.value, gamma.data.ptr,
ggamma.data.ptr, gbeta.data.ptr,
self.eps, self.mean_cache.data.ptr, self.var_cache.data.ptr)
else:
gbeta = gy.sum(axis=axis)
ggamma = (gy * self.x_hat).sum(axis=axis)
if xp is numpy:
gx = (gamma / self.std)[expander] * (
gy - (self.x_hat * ggamma[expander] + gbeta[expander]) / m)
else:
inv_m = numpy.float32(1) / m
gx = cuda.elementwise(
'T gy, T x_hat, T gamma, T std, T ggamma, T gbeta, \
T inv_m',
'T gx',
'gx = (gamma / std) * (gy - (x_hat * ggamma + gbeta) * \
inv_m)',
'bn_bwd')(gy, self.x_hat, gamma[expander],
self.std[expander], ggamma[expander],
gbeta[expander], inv_m)
return gx, ggamma, gbeta
def batch_normalization(x, gamma, beta, **kwargs):
"""batch_normalization(x, gamma, beta, eps=2e-5, running_mean=None, running_var=None, decay=0.9)
Batch normalization function.
It takes the input variable ``x`` and two parameter variables ``gamma`` and
``beta``. The parameter variables must both have the same dimensionality,
which is referred to as the channel shape. This channel shape corresponds
to the dimensions in the input which are not averaged over. Since the
first dimension of the input corresponds to the batch size, the second
dimension of `x` will correspond to the first dimension of the channel
shape, the third dimension of `x` will correspond to the second channel
dimension (if it exists) and so on. Therefore, the dimensionality of the
input must be at least one plus the number of channel dimensions. The
total effective "batch size" will then be considered to be the product of
all dimensions in `x` except for the channel dimensions.
As an example, if the input is four dimensional and the parameter
variables are one dimensional, then it is assumed that the first
dimension of the input is the batch size, the second dimension is the
channel size, and the remaining two dimensions are considered
to be spatial dimensions that will be averaged over along with the
batch size in the batch normalization computations. That is,
the total batch size will be considered to be the product of all
input dimensions except the second dimension.
Note: If this function is called, it will not be possible to access the
updated running mean and variance statistics, because they are members
of the function object, which cannot be accessed by the caller.
If it is desired to access the updated running statistics, it is necessary
to get a new instance of the function object, call the object, and then
access the running_mean and/or running_var attributes. See the
corresponding Link class for an example of how to do this.
.. warning::
``train`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('train', train)``.
See :func:`chainer.using_config`.
Args:
x (Variable): Input variable.
gamma (Variable): Scaling parameter of normalized data.
beta (Variable): Shifting parameter of scaled normalized data.
eps (float): Epsilon value for numerical stability.
running_mean (array): Running average of the mean. This is a
running average of the mean over several mini-batches using
the decay parameter. If ``None``, the running average is not
computed. If this is ``None``, then ``running_var`` must also
be ``None``.
running_var (array): Running average of the variance. This is a
running average of the variance over several mini-batches using
the decay parameter. If ``None``, the running average is not
computed. If this is ``None``, then ``running_mean`` must also
be ``None``.
decay (float): Decay rate of moving average. It is used during
training.
See: `Batch Normalization: Accelerating Deep Network Training by Reducing\
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_
.. seealso:: :class:`links.BatchNormalization`
""" # NOQA
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config')
eps, running_mean, running_var, decay = argument.parse_kwargs(
kwargs, ('eps', 2e-5), ('running_mean', None),
('running_var', None), ('decay', 0.9))
return BatchNormalizationFunction(eps, running_mean, running_var,
decay)(x, gamma, beta)
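# Shape sketch (illustrative only): for a 4-d input of shape (N, C, H, W) and 1-d gamma/beta of
# shape (C,), the statistics are computed over the N, H and W axes, e.g.:
#
#   x = numpy.random.randn(8, 3, 32, 32).astype(numpy.float32)   # (batch, channel, H, W)
#   gamma = numpy.ones(3, dtype=numpy.float32)
#   beta = numpy.zeros(3, dtype=numpy.float32)
#   y = batch_normalization(chainer.Variable(x), gamma, beta)    # y.shape == (8, 3, 32, 32)
#
# so the effective per-channel "batch size" is 8 * 32 * 32, matching the description above.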
def fixed_batch_normalization(x, gamma, beta, mean, var, eps=2e-5):
"""Batch normalization function with fixed statistics.
This is a variant of batch normalization, where the mean and variance
statistics are given by the caller as fixed variables. This is
used on testing mode of the batch normalization layer, where batch
statistics cannot be used for prediction consistency.
Args:
x (Variable): Input variable.
gamma (Variable): Scaling parameter of normalized data.
beta (Variable): Shifting parameter of scaled normalized data.
mean (Variable): Shifting parameter of input.
var (Variable): Square of scaling parameter of input.
eps (float): Epsilon value for numerical stability.
.. seealso::
:func:`functions.batch_normalization`,
:class:`links.BatchNormalization`
"""
with configuration.using_config('train', False):
return BatchNormalizationFunction(eps, None, None, 0.0)(
x, gamma, beta, mean, var)
|
|
#!/usr/bin/env python
#
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is [Open Source Virtual Machine.].
#
# The Initial Developer of the Original Code is
# Adobe System Incorporated.
# Portions created by the Initial Developer are Copyright (C) 2005-2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# [email protected]
# Adobe AS3 Team
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK ***** */
#
#
# simple script that executes tamarin certification tests. you have to build the
# stand-alone, avmplus executable.
# see http://developer.mozilla.org/en/docs/Tamarin_Build_Documentation
#
# this test looks for an executable avmplus shell in
# %MOZ_SRC/mozilla/js/tamarin/platform/,
#
import os, sys, getopt, datetime, pipes, glob, itertools, tempfile, string, re, platform
import subprocess
from os.path import *
from os import getcwd,environ
from datetime import datetime
from glob import glob
from sys import argv, exit
from getopt import getopt
from itertools import count
from killableprocess import Popen
from time import time
import threadpool
import subProcess
# runtestUtils must be imported after "from os.path import *" as walk is overridden
from runtestUtils import *
try:
import pexpect
except ImportError:
pexpect = False
class RuntestBase:
sourceExt = '.as'
abcasmExt = '.abs'
abcasmRunner = 'bash ../../utils/abcasm/abcasm.sh'
abcasmShell = 'abcasm/abs_helper'
testconfig = 'testconfig.txt'
logFileType = 'html'
avm = ''
asc = ''
builtinabc = ''
shellabc = ''
exclude = []
config = ''
ascargs = ''
vmargs = ''
escbin = ''
rebuildtests = False
includes = None
settings = None
ashErrors = []
options = ''
longOptions = []
osName = ''
vmtype = ''
js_output = ''
js_output_f = None
args = []
tests = []
start_time = None
verbose = False
quiet = False
htmlOutput = True
timestamps = True
forcerebuild = False
eval = False # Run the source file (.as, .js) but do not magically prepend included files
runSource = False # Run the source file (.as, .js) instead of .abc, magically prepend included files
testTimeOut = -1 #by default tests will NOT timeout
debug = False
threads = 1
def __init__(self):
# Result Vars
self.allpasses=0
self.allfails=0
self.allunpass=0
self.allexpfails=0
self.allexceptions=0
self.allskips=0
self.alltimeouts=0
self.allasserts=0
self.failmsgs=[]
self.expfailmsgs=[]
self.unpassmsgs=[]
self.timeoutmsgs=[]
self.assertmsgs=[]
self.altsearchpath=None
self.run()
def run(self):
self.setEnvironVars()
self.loadPropertiesFile()
self.setOptions()
self.parseOptions()
if self.htmlOutput and not self.rebuildtests:
self.createOutputFile()
self.setTimestamp()
if not self.config:
self.determineConfig()
self.tests = self.getTestsList(self.args)
# Load the root testconfig file
self.settings, self.includes = self.parseTestConfig('.')
self.preProcessTests()
if self.rebuildtests:
self.rebuildTests()
else:
self.runTests(self.tests)
self.cleanup()
def setEnvironVars(self):
if 'AVM' in environ:
self.avm = environ['AVM'].strip()
if 'ASC' in environ:
self.asc = environ['ASC'].strip()
if 'GLOBALABC' in environ:
self.builtinabc = environ['GLOBALABC'].strip()
if 'BUILTINABC' in environ:
self.builtinabc = environ['BUILTINABC'].strip()
if 'SHELLABC' in environ:
self.shellabc = environ['SHELLABC'].strip()
if 'CVS' in environ:
self.exclude = ['CVS']
if 'CONFIG' in environ:
self.config = environ['CONFIG'].strip()
if 'ASCARGS' in environ:
self.ascargs = environ['ASCARGS'].strip()
if 'VMARGS' in environ:
self.vmargs = environ['VMARGS'].strip()
def loadPropertiesFile(self):
# yet another way to specify asc,avm,builtinabc ...from a file
pf = 'runtests.properties'
if exists(pf):
self.verbose_print( 'reading properties from %s' % (pf) )
fd = open(pf,'r')
for l in fd:
setting = l.strip().split('=')
if l.startswith('#') or len(setting) < 2 or len(setting[1]) <= 0:
continue
val = setting[1].strip()
option = setting[0].split('.') # see if we have x.y = z
nm = option[0].strip()
# check if nm is valid
if nm in self.__class__.__dict__ and not callable(self.__class__.__dict__[nm]):
if len(option) > 1:
val = self.__class__.__dict__[nm] + ' ' + val # concat
self.__class__.__dict__[nm] = val
fd.close()
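# Illustrative runtests.properties sketch (format inferred from the parser above; keys must
# match attribute names on this class):
#
#   avm=/path/to/avmshell
#   ascargs=-AS3
#   ascargs.extra=-strict        # a dotted key appends to the value already set
#
# Comment lines and lines without a value are ignored.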
def usage(self, c):
print 'usage: %s [options] [tests]' % basename(argv[0])
print ' -v --verbose enable additional output'
print ' -E --avm avmplus command to use'
print ' -a --asc compiler to use'
print ' -g --globalabc DEPRECATED but still works - use builtin.abc (used to be location of global.abc)'
print ' -b --builtinabc location of builtin.abc'
print ' -s --shellabc location of shell_toplevel.abc'
print ' -x --exclude comma separated list of directories to skip'
print ' -e --eval use run-time compiler'
print ' -h --help display help and exit'
print ' -t --notime do not generate timestamps (cleaner diffs)'
print ' -f --forcerebuild force rebuild all test files'
print ' -c --config sets the config string [default OS-tvm]'
print ' -q --quiet display minimum output during testrun'
print ' --rebuildtests rebuild the tests only - do not run against VM'
print ' --ascargs args to pass to asc on rebuild of test files'
print ' --vmargs args to pass to vm'
print ' --timeout max time to let a test run, in sec (default -1 = never timeout)'
print ' --nohtml do not create a html output file'
def setOptions(self):
'''set the valid command line options.
When subclassing, call this method first, then append options to each list'''
self.options = 'vE:a:g:b:s:x:htfc:dqe'
self.longOptions = ['verbose','avm=','asc=','globalabc=','builtinabc=','shellabc=',
'exclude=','help','notime','forcerebuild','config=','ascargs=','vmargs=',
'timeout=', 'rebuildtests','quiet','nohtml','eval']
def parseOptions(self):
try:
opts, self.args = getopt(argv[1:], self.options, self.longOptions )
except:
self.usage(2)
if not self.args:
self.args = ['.']
for o, v in opts:
if o in ('-v', '--verbose'):
self.verbose = True
elif o in ('-h', '--help'):
self.usage(0)
elif o in ('-E', '--avm'):
self.avm = v
elif o in ('-a', '--asc'):
self.asc = v
elif o in ('-g', '--globalabc'):
self.builtinabc = v
elif o in ('-b', '--builtinabc'):
self.builtinabc = v
elif o in ('-s', '--shellabc'):
self.shellabc = v
elif o in ('-x', '--exclude'):
self.exclude += v.split(',')
elif o in ('-t', '--notime'):
self.timestamps = False
elif o in ('-f', '--forcerebuild'):
self.forcerebuild = True
elif o in ('-c', '--config'):
self.config = v
elif o in ('-e', '--eval'):
self.eval = True
elif o in ('--ascargs',):
self.ascargs = v
elif o in ('--vmargs',):
self.vmargs = v
elif o in ('--ext',):
self.sourceExt = v
elif o in ('--timeout',):
self.testTimeOut = int(v)
elif o in ('-d',):
self.debug = True
elif o in ('--rebuildtests',):
self.rebuildtests = True
elif o in ('-q', '--quiet'):
self.quiet = True
elif o in ('--nohtml',):
self.htmlOutput = False
return opts
def setTimestamp(self):
if self.timestamps:
# get the start time
self.start_time = datetime.today()
self.js_print('Tamarin tests started: %s' % self.start_time, overrideQuiet=True)
def determineOS(self):
_os = platform.system()
ostype = ''
if re.search('(CYGWIN_NT|Windows)', _os):
ostype='win'
if re.search('(Darwin)', _os):
ostype='mac'
if re.search('(Linux)', _os):
ostype='lnx'
if re.search('(SunOS)', _os):
ostype='sol'
if ostype == '':
print("ERROR: os %s is unknown, expected values are (win,mac,lnx,sol), use runtests.py --config x86-win-tvm-release to manually set the configuration" % (platform.system()))
exit(1)
self.osName = ostype
def determineConfig(self):
if not self.runSource:
self.vmtype = 'release'
(f,err,exitcode) = self.run_pipe('%s' % self.avm)
try:
for line in f:
if line.find('[-d]') != -1:
self.vmtype = 'releasedebugger'
break
except:
nop = True
# ================================================
# Determine the configuration if it has not been
# passed into the script:
# {CPU_ARCH}-{OS}-{VM}-{VERSION}-{VMSWITCH}
# ================================================
self.determineOS()
try:
# Try and determine CPU architecture of the AVM, if it fails drop back to platform.machine()
cputype = ''
(f,err,exitcode) = self.run_pipe('file %s' % (self.avm))
if re.search('(32-bit|80386|i386)', f[0]):
cputype='x86'
if re.search('(64-bit|x86-64|x86_64|Mono/\.Net)', f[0]):
cputype='x64'
if re.search('(ppc)', f[0]):
cputype='ppc'
if re.search('(ppc64)', f[0]):
cputype='ppc64'
if cputype == '':
raise Exception()
except:
try:
cputype={'x86':'x86','i386':'x86','i686':'x86','x86_64':'x64','i86pc':'x86','Power Macintosh':'ppc','sun4u':'x86','':'x86'}[platform.machine()]
except:
print("ERROR: cpu_arch '%s' is unknown, expected values are (x86,ppc), use runtests.py --config x86-win-tvm-release to manually set the configuration" % (platform.machine()))
exit(1)
self.config = cputype+'-'+self.osName+'-tvm-'+self.vmtype+self.vmargs
### File and Directory functions ###
def istest(self,f):
return f.endswith((self.sourceExt,self.abcasmExt)) and basename(f) != ('shell'+self.sourceExt) \
and not f.endswith('Util'+self.sourceExt)
def getTestsList(self, startDir):
if self.altsearchpath!=None:
newstartDir=[]
for a in startDir:
newstartDir.append(self.altsearchpath+a)
startDir=startDir+newstartDir
for i in range(len(startDir)):
if startDir[i][-1] == '/':
startDir[i] = startDir[i][:-1]
tests = [a for a in startDir if isfile(a) and self.istest(a)]
for a in [d for d in startDir if isdir(d) and not (basename(d) in self.exclude)]:
for d, dirs, files in walk(a, followlinks=True):
tests += [(d+'/'+f) for f in files if self.istest(f)]
utils = [d for d in dirs if d+self.sourceExt in files]
for x in [x for x in self.exclude+utils if x in dirs]:
dirs.remove(x)
return tests
# set the output file name. let's base its name on the date and platform,
# and give it a sequence number.
def createOutputFile(self):
now = datetime.today()
for i in count(1):
self.js_output = '%d-%s-%s.%d.%s' % (now.year, str(now.month).zfill(2), str(now.day).zfill(2), i, self.logFileType)
if not isfile(self.js_output):
break
print 'Writing results to %s' % self.js_output
self.js_output_f = open(self.js_output, 'w')
def js_print(self, m, start_tag='<p><tt>', end_tag='</tt></p>', overrideQuiet=False):
if self.quiet and not overrideQuiet:
sys.stdout.write('.')
else:
print m
sys.stdout.flush()
if self.js_output:
if self.logFileType == 'html':
self.js_output_f.write('%s %s %s\n' % (start_tag, m, end_tag))
else:
self.js_output_f.write('%s\n' % m)
self.js_output_f.flush()
def verbose_print(self, m, start='', end=''):
if self.verbose:
self.js_print(m, start, end)
def err_print(self, m):
self.js_print(m, '<font color=#990000>', '</font><br/>')
def quiet_print(self, m, start=None, end=None):
if self.quiet:
sys.stdout.write('.')
else:
if not start and not end:
self.js_print(m)
else:
self.js_print(m, start, end)
def fail(self, abc, msg, failmsgs):
msg = msg.strip()
self.err_print(' %s' % msg)
failmsgs += ['%s : %s' % (abc, msg)]
def parents(self, d):
while d != abspath(self.args[0]) and d != '':
yield d
d = dirname(d)
yield d
# run a command and return its output
def run_pipe(self, cmd):
if self.debug:
print('cmd: %s' % cmd)
try:
p = Popen((cmd), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = p.stdout.readlines()
err = p.stderr.readlines()
starttime=time()
            exitCode = p.wait(self.testTimeOut) #abort if it takes longer than self.testTimeOut seconds (assumes a Popen.wait variant that accepts a timeout)
            if exitCode < 0 and self.testTimeOut>-1 and time()-starttime>self.testTimeOut:    # process timed out
                return ('timedOut', err, exitCode)  # keep the tuple shape so callers can still unpack
return (output,err,exitCode)
except KeyboardInterrupt:
print '\n\nKeyboardInterrupt detected ... killing process'
p.kill()
self.killmyself()
def killmyself(self):
# destroy this python process and children
if self.osName == 'win':
import ctypes
ctypes.windll.kernel32.TerminateProcess(
ctypes.windll.kernel32.OpenProcess(1, False, os.getpid()),
-1)
else:
os.killpg(os.getpgrp(),9)
def parseArgStringToList(self, argStr):
args = argStr.strip().split(' ')
# recombine any args that have spaces in them
argList = []
for a in args:
if a == '':
pass
elif a[0] == '-':
argList.append(a)
else: # append the space and text to the last arg
argList[len(argList)-1] += ' ' + a
return argList
def parseAscArgs(self, ascArgFile, currentdir):
# reads an .asc_args file and returns a tuple of the arg mode (override or merge) and a list of args
f = open(ascArgFile,'r')
while True: # skip comment lines
ascargs = f.readline()
if (ascargs[0] != '#'):
break
ascargs = ascargs.split('|')
ascargs[0] = ascargs[0].strip()
if (len(ascargs) == 1): #treat no keyword as a merge
ascargs.insert(0,'merge')
        elif (ascargs[0] != 'override') and (ascargs[0] != 'merge'): # default to merge if mode not recognized
ascargs[0] = 'merge'
# replace the $DIR keyword with actual directory
ascargs[1] = string.replace(ascargs[1], '$DIR', currentdir)
if ascargs[1].find('$SHELLABC') != -1:
if not isfile(self.shellabc): # TODO: not the best place to check for this
exit('ERROR: shell.abc %s does not exist, SHELLABC environment variable or --shellabc must be set to shell_toplevel.abc' % self.shellabc)
ascargs[1] = string.replace(ascargs[1], '$SHELLABC', self.shellabc)
ascargs[1] = self.parseArgStringToList(ascargs[1])
removeArgList = []
argList = []
for a in ascargs[1]:
if a[0:3] == '-no':
removeArgList.append(a[3:])
else:
argList.append(a)
return ascargs[0], argList, removeArgList
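    # Illustrative sketch (not part of the original harness) of the .asc_args format that
    # parseAscArgs expects: the first non-comment line looks like
    #     merge|-import $SHELLABC -someflag $DIR/helper.as
    # or
    #     override|-someflag
    # A missing or unrecognized mode keyword falls back to 'merge'; $DIR and $SHELLABC are
    # substituted, and any '-no<arg>' entry asks loadAscArgs to strip '<arg>' from the final
    # argument list. '-someflag' and 'helper.as' above are hypothetical placeholders.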
def loadAscArgs(self, arglist,dir,file):
# It is possible that file is actually a partial path rooted to acceptance,
# so make sure that we are only dealing with the actual filename
file = split(file)[1]
mode = ''
newArgList = []
removeArgList = []
# Loads an asc_args file and modifies arglist accordingly
if isfile('./dir.asc_args'):
mode = ''
mode, newList, removeList = self.parseAscArgs('./dir.asc_args', './')
newArgList.extend(newList)
removeArgList.extend(removeList)
if mode == 'merge':
arglist.extend(newArgList)
elif mode == 'override':
arglist = newArgList
# remove any duplicate args
arglist = list(set(arglist))
if removeArgList:
for removeArg in removeArgList:
try:
arglist.remove(removeArg)
except:
pass
if isfile(dir+'/dir.asc_args'):
mode = ''
mode, newList, removeList = self.parseAscArgs(dir+'/dir.asc_args', dir)
newArgList.extend(newList)
removeArgList.extend(removeList)
if mode == 'merge':
arglist.extend(newArgList)
elif mode == 'override':
arglist = newArgList
# remove any duplicate args
arglist = list(set(arglist))
if removeArgList:
for removeArg in removeArgList:
try:
arglist.remove(removeArg)
except:
pass
        if file and isfile('%s/%s.asc_args' % (dir, file)): #file takes precedence over directory
mode = ''
mode, newList, removeList = self.parseAscArgs('%s/%s.asc_args' % (dir, file), dir)
newArgList.extend(newList)
removeArgList.extend(removeList)
if mode == 'merge':
arglist.extend(newArgList)
elif mode == 'override':
arglist = newArgList
# remove any duplicate args
arglist = list(set(arglist))
if removeArgList:
for removeArg in removeArgList:
try:
arglist.remove(removeArg)
except:
pass
return arglist
def compile_test(self, as_file, extraArgs=[]):
asc, builtinabc, shellabc, ascargs = self.asc, self.builtinabc, self.shellabc, self.ascargs
# if there is a .build file available (which is an executable script) run that file instead
# of compiling with asc
as_base = as_file[0:as_file.rfind('.')]
if isfile(as_base+'.build'):
(dir,file)=split(as_base)
self.verbose_print(' compiling %s running %s%s' % (file, as_base, '.build'))
(f,err,exitcode) = self.run_pipe('%s%s' % (as_base, '.build'))
for line in f:
self.verbose_print(line.strip())
for line in err:
self.verbose_print(line.strip())
return
if not isfile(asc):
exit('ERROR: cannot build %s, ASC environment variable or --asc must be set to asc.jar' % as_file)
(dir, file) = split(as_file)
self.verbose_print(' compiling %s' % file)
# additional .as file compiler args
if as_file.endswith(self.sourceExt):
if not isfile(builtinabc):
exit('ERROR: builtin.abc (formerly global.abc) %s does not exist, BUILTINABC environment variable or --builtinabc must be set to builtin.abc' % builtinabc)
if asc.endswith('.jar'):
cmd = 'java -jar ' + asc
else:
cmd = asc
arglist = self.parseArgStringToList(ascargs)
# look for .asc_args files to specify dir / file level compile args, arglist is passed by ref
arglist = self.loadAscArgs(arglist, dir, as_file)
for arg in extraArgs:
cmd += ' %s' % arg
cmd += ' -import %s' % builtinabc
for arg in arglist:
cmd += ' %s' % arg
for p in self.parents(dir):
if p=='':
p='.'
shell = join(p,'shell'+self.sourceExt)
if isfile(shell):
cmd += ' -in ' + shell
break
(testdir, ext) = splitext(as_file)
deps = glob(join(testdir,'*'+self.sourceExt))
deps.sort()
for util in deps + glob(join(dir,'*Util'+self.sourceExt)):
cmd += ' -in %s' % string.replace(util, '$', '\$')
elif as_file.endswith(self.abcasmExt):
cmd = self.abcasmRunner
try:
self.verbose_print('%s %s' % (cmd,as_file))
(f,err,exitcode) = self.run_pipe('%s %s' % (cmd,as_file))
for line in f:
self.verbose_print(line.strip())
for line in err:
self.verbose_print(line.strip())
return f+err
except:
raise
def rebuildTests(self):
if self.threads == 1 or platform.system()[:6].upper() == 'CYGWIN':
self.compileWithAsh(self.tests)
else: # run using threads
# split tests into number of threads
testGroups = splitList(self.tests, self.threads)
# generate threadpool
requests = threadpool.makeRequests(self.compileWithAsh, testGroups, self.printOutput)
main = threadpool.ThreadPool(self.threads)
# que requests
[main.putRequest(req) for req in requests]
# ...and wait for the results to arrive in the result queue
# wait() will return when results for all work requests have arrived
try:
main.wait()
            except (KeyboardInterrupt, SystemExit):
print '\n\nKeyboardInterrupt detected ... killing worker threads'
main.dismissWorkers(self.threads)
self.killmyself()
except Exception, e:
main.dismissWorkers(self.threads)
print 'EXCEPTION: %s' % e
self.killmyself()
def compileWithAsh(self, tests):
start_time = datetime.today()
#print("starting compile of %d tests at %s" % (len(tests),start_time))
total=len(tests)
if not pexpect:
for test in tests:
self.js_print('%d\tcompiling %s' % (total,test))
self.compile_test(test)
(testdir, ext) = splitext(test)
if exists(testdir+".abc")==False:
print("ERROR abc files %s.abc not created" % (testdir))
self.ashErrors.append("abc files %s.abc not created" % (testdir))
total -= 1;
else: #pexpect available
child = pexpect.spawn("java -classpath %s macromedia.asc.embedding.Shell" % self.asc)
child.logfile = None
child.expect("\(ash\)")
child.expect("\(ash\)")
for test in tests:
if self.debug:
                    print "Compiling ", test  # cmd is not built yet at this point
else:
print "Compiling ", test
if test.endswith(self.abcasmExt):
self.compile_test(test)
else:
arglist = self.parseArgStringToList(self.ascargs)
(dir, file) = split(test)
# look for .asc_args files to specify dir / file level compile args
arglist = self.loadAscArgs(arglist, dir, test)
cmd = "asc -import %s " % (self.builtinabc)
for arg in arglist:
cmd += ' %s' % arg
for p in self.parents(dir):
shell = join(p,"shell.as")
if isfile(shell):
cmd += " -in " + shell
break
(testdir, ext) = splitext(test)
deps = glob(join(testdir,"*.as"))
deps.sort()
for util in deps + glob(join(dir,"*Util.as")):
cmd += " -in %s" % util #no need to prepend \ to $ when using ash
cmd += " %s" % test
if exists(testdir+".abc"):
os.unlink(testdir+".abc")
child.sendline(cmd)
child.expect("\(ash\)")
if not exists(testdir+".abc"):
print("ERROR: abc file %s.abc not created, cmd used to compile: %s" % (testdir,cmd))
self.ashErrors.append("abc file %s.abc not created, cmd used to compile: %s" % (testdir,cmd))
total -= 1;
#print("%d remaining, %s" % (total,cmd))
end_time = datetime.today()
#print("finished compile of %d tests at %s elapsed time is %s" % (len(tests),start_time,end_time-start_time))
def build_incfiles(self, as_file):
files=[]
(dir, file) = split(as_file)
for p in self.parents(dir):
shell = join(p,'shell'+self.sourceExt)
if isfile(shell):
files.append(shell)
(testdir, ext) = splitext(as_file)
if not self.eval:
for util in glob(join(testdir,'*'+self.sourceExt)) + glob(join(dir,'*Util'+self.sourceExt)):
files.append(string.replace(util, "$", "\$"))
return files
# TODO: Rename/move to better place
def preProcessTests(self):
if (not self.rebuildtests) and (not self.avm): #don't need AVM if rebuilding tests
exit('ERROR: cannot run %s, AVM environment variable or --avm must be set to avmplus' % self.avm)
self.js_print('current configuration: %s' % self.config, overrideQuiet=True)
self.js_print('Executing %d tests against vm: %s' % (len(self.tests), self.avm), overrideQuiet=True);
# Are we running esc - depends on a valid avm
if self.runESC:
            self.runSource = True
# generate the executable cmd for esc
#escAbcs = [f for f in os.listdir(self.escbin) if f.endswith('.abc')] #not all abcs are used for esc
escAbcs = ['debug','util','bytes-tamarin','util-tamarin','lex-char','lex-token',
'lex-scan','ast','ast-decode','parse','asm','abc','emit','cogen',
'cogen-stmt','cogen-expr','esc-core','eval-support','esc-env','main']
if not self.escbin.endswith('/'):
self.escbin += '/'
for f in escAbcs:
self.avm += ' %s%s.es.abc' % (self.escbin, f)
self.avm += ' -- '
self.avm += ' %s../test/spidermonkey-prefix.es' % self.escbin #needed to run shell harness
def printOutput(self,request, outputCalls=None):
#execute the outputCalls
if outputCalls:
for call in outputCalls:
apply(call[0],call[1])
def runTests(self, testList):
testnum = len(testList)
# threads on cygwin randomly lock up
if self.threads == 1 or platform.system()[:6].upper() == 'CYGWIN':
for t in testList:
testnum -= 1
o = self.runTest((t, testnum))
self.printOutput(None, o)
else: # run using threads
#Assign test numbers
testsTuples = []
testsLen = len(testList)
for i,t in enumerate(testList):
testsTuples.append([t,testsLen-i])
# generate threadpool
requests = threadpool.makeRequests(self.runTest, testsTuples, self.printOutput)
main = threadpool.ThreadPool(self.threads)
# que requests
[main.putRequest(req) for req in requests]
# ...and wait for the results to arrive in the result queue
# wait() will return when results for all work requests have arrived
try:
main.wait()
            except (KeyboardInterrupt, SystemExit):
print '\n\nKeyboardInterrupt detected ... killing worker threads'
main.dismissWorkers(self.threads)
self.killmyself()
except Exception, e:
main.dismissWorkers(self.threads)
print 'EXCEPTION: %s' % e
self.killmyself()
def parseTestConfig(self, dir):
settings={}
includes=[]
names=None
lines=[]
if isfile(join(dir,self.testconfig)):
if join(dir, '') == './':
for line in open(join(dir,self.testconfig)).read().splitlines():
lines.append(line)
else:
# if this is not the root testconfig, append the path before the testname
for line in open(join(dir,self.testconfig)).read().splitlines():
lines.append('%s/%s' %(dir,line))
for line in lines:
if line.startswith('#') or len(line)==0:
continue
fields = line.split(',')
for f in range(len(fields)):
fields[f]=fields[f].strip()
while len(fields)<4:
fields.append('');
names=fields[0].split(':')
if len(names)==1:
names.append('.*')
# remove any trailing extension if specified
# TODO: add abs to here
if names[0][-3:] == self.sourceExt:
names[0]=names[0][:-3]
rs='^%s$' % names[0]
# only add settings for current config
if re.search('^%s$' % fields[1],self.config):
if re.search(fields[1],self.config) and fields[2]=='include':
includes.append(fields[0])
if not settings.has_key(names[0]):
settings[names[0]] = {}
if not settings[names[0]].has_key(names[1]):
settings[names[0]][names[1]] = {}
settings[names[0]][names[1]][fields[2]]=fields[3]
return settings, includes
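    # Illustrative sketch (assumed format, not taken from a real testconfig file): each line
    # has up to four comma-separated fields, e.g.
    #     sometest:.*, x86-win-tvm-release.*, expectedfail, tracked by a hypothetical bug id
    # field 0 is test[:testcase] ('.*' is assumed when the testcase part is omitted), field 1
    # is a regex matched against the current config, field 2 is the directive (include, skip,
    # expectedfail, ...), and field 3 is the reason recorded for that directive.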
def compareAbcAsmOutput(self, file, output):
# return diff
try:
f = open(file[:-4]+'.out', 'r')
if self.config.find('debugger') != -1:
if isfile(file[:-4]+'.out.debug'):
f.close()
f = open(file[:-4]+'.out.debug', 'r')
if self.config.find('interp') != -1:
if isfile(file[:-4]+'.out.interp'):
f.close()
f = open(file[:-4]+'.out.interp', 'r')
flines = []
for line in f.readlines():
line = ''.join(line.split('\r'))
if line != '\n':
flines.append(line)
f.close()
except IOError:
flines = ['IOError Opening .out file']
if len(output) != len(flines):
return flines
# compare lines
for i in range(0,len(output)):
if output[i].strip() != flines[i].strip():
return flines
return
def runTest(self, testAndNum):
ast = testAndNum[0]
testnum = testAndNum[1]
outputCalls = [] #queue all output calls so that output is written in a block
lpass = 0
lfail = 0
lexpfail = 0
lunpass = 0
ltimeout = 0
lassert = 0
if ast.startswith('./'):
ast=ast[2:]
dir = ast[0:ast.rfind('/')]
root,ext = splitext(ast)
if self.runSource or self.eval:
testName = ast
else:
testName = root + '.abc'
includes = self.includes #list
settings = {}
# get settings for this test
for k in self.settings.keys():
if re.search('^'+k+'$', root):
for k2 in self.settings[k].keys():
if k2 in settings:
settings[k2].update(self.settings[k][k2])
else:
settings[k2] = self.settings[k][k2].copy()
if isfile(join(dir,self.testconfig)):
            localSettings, localIncludes = self.parseTestConfig(dir)
# have a local testconfig, so we create a copy of the global settings to not overwrite
includes = list(self.includes) #copy list - don't use reference
includes.extend(localIncludes)
if localSettings.has_key(root):
settings.update(localSettings[root])
#TODO: possibly handle includes by building test list? This works for now...
if includes and not list_match(includes,root):
return
outputCalls.append((self.js_print,('%d running %s' % (testnum, ast), '<b>', '</b><br/>')));
# skip entire test if specified
# TODO: add skip reason to output
if settings.has_key('.*') and settings['.*'].has_key('skip'):
outputCalls.append((self.js_print,(' skipping',)))
self.allskips += 1
return outputCalls
# delete abc if forcerebuild
if self.forcerebuild and isfile(testName):
os.unlink(testName)
if isfile(testName) and getmtime(ast)>getmtime(testName):
self.verbose_print("%s has been modified, recompiling" % ast)
os.unlink(testName)
if not isfile(testName):
compileOutput = self.compile_test(ast)
if not isfile(testName):
if ast.endswith(self.abcasmExt):
# file didn't compile, compare compile output
flines = self.compareAbcAsmOutput(ast, compileOutput)
if flines:
lfail += 1
outputCalls.append((self.fail,(testName, 'FAILED! :\nExpected:\n'+''.join(flines)+'\nGOT:\n'+''.join(compileOutput), self.failmsgs)))
outputCalls.append((self.js_print, (' FAILED passes:%d fails:%d unexpected passes: %d expected failures: %d' % (lpass,lfail,lunpass,lexpfail), '', '<br/>')))
else:
lpass += 1
outputCalls.append((self.verbose_print, (' PASSED passes:%d fails:%d unexpected passes: %d expected failures: %d' % (lpass,lfail,lunpass,lexpfail), '', '<br/>')))
self.allfails += lfail
self.allpasses += lpass
return outputCalls
else:
lfail += 1
outputCalls.append((self.fail,(testName, 'FAILED! file not found ' + testName, self.failmsgs)))
if self.runSource or self.eval:
incfiles=self.build_incfiles(testName)
incfiles.append("shell" + self.sourceExt)
for incfile in incfiles:
testName=incfile+" "+testName
if isfile("%s.avm_args" % ast):
testName = " %s %s" % (string.replace(open("%s.avm_args" % ast).readline(), "$DIR", dir), testName)
if ast.endswith(self.abcasmExt):
# make sure util file has been compiled
if not exists(self.abcasmShell+'.abc'): # compile abcasmShell with no additional args
self.run_pipe('java -jar %s %s' % (self.asc, self.abcasmShell+'.as'))
(f,err,exitcode) = self.run_pipe('%s %s %s %s' % (self.avm, self.vmargs, self.abcasmShell+'.abc', testName))
else:
(f,err,exitcode) = self.run_pipe('%s %s %s' % (self.avm, self.vmargs, testName))
if f == "timedOut":
            outputCalls.append((self.fail, (testName, 'FAILED! Test Timed Out! Time out is set to %s s' % self.testTimeOut, self.timeoutmsgs)))
ltimeout += 1
else:
try:
outputLines = []
for line in f:
outputLines.append(line)
outputCalls.append((self.verbose_print,(line.strip(),)))
if 'Assertion failed:' in line:
lassert += 1
outputCalls.append((self.fail,(testName, line, self.assertmsgs)))
testcase=''
if len(line)>9:
testcase=line.strip()
if dict_match(settings,testcase,'skip'):
outputCalls.append((self.js_print,(' skipping %s' % line.strip(),)))
self.allskips+=1
continue
if 'PASSED!' in line:
res=dict_match(settings,testcase,'expectedfail')
if res:
outputCalls.append((self.fail,(testName, 'unexpected pass: ' + line.strip() + ' reason: '+res, self.unpassmsgs)))
lunpass += 1
else:
lpass += 1
if 'FAILED!' in line:
res=dict_match(settings,testcase,'expectedfail')
if res:
outputCalls.append((self.fail,(testName, 'expected failure: ' + line.strip() + ' reason: '+res, self.expfailmsgs)))
lexpfail += 1
else:
lfail += 1
outputCalls.append((self.fail,(testName, line, self.failmsgs)))
except:
print 'exception running avm'
exit(1)
exitcodeExp=0
if isfile(root+".exitcode"):
try:
exitcodeExp=int(open(root+".exitcode").read())
except:
print("ERROR: reading exit code file '%s' should contain an integer")
res=dict_match(settings,'exitcode','expectedfail')
if exitcode!=exitcodeExp:
res2=dict_match(settings,'exitcode','skip')
if res2==None and res:
                outputCalls.append((self.fail,(testName, 'expected failure: exitcode reason: %s'%res,self.expfailmsgs)))
lexpfail += 1
elif res2==None:
outputCalls.append((self.fail,(testName, 'unexpected exit code expected:%d actual:%d FAILED!' % (exitcodeExp,exitcode), self.failmsgs)))
outputCalls.append((self.fail,(testName, 'captured output: %s' % string.join([l.strip() for l in outputLines], ' | '), self.failmsgs)))
lfail+= 1
elif err!=[]:
outputCalls.append((self.fail,(testName, "unexpected stderr expected:'%s' actual:'%s'" % ('',err), self.failmsgs)))
elif lpass == 0 and lfail == 0 and lunpass==0 and lexpfail==0:
res=dict_match(settings,'*','expectedfail')
if res:
outputCalls.append((self.fail,(testName, 'expected failure: FAILED contained no testcase messages reason: %s' % res,self.expfailmsgs)))
lexpfail += 1
else:
lfail = 1
outputCalls.append((self.fail,(testName, ' FAILED contained no testcase messages - reason: %s' % string.join([l.strip() for l in outputLines], ' | '), self.failmsgs)))
self.allfails += lfail
self.allpasses += lpass
self.allexpfails += lexpfail
self.allunpass += lunpass
self.alltimeouts += ltimeout
self.allasserts += lassert
if lfail or lunpass:
outputCalls.append((self.js_print, (' FAILED passes:%d fails:%d unexpected passes: %d expected failures: %d' % (lpass,lfail,lunpass,lexpfail), '', '<br/>')))
else:
outputCalls.append((self.verbose_print, (' PASSED passes:%d fails:%d unexpected passes: %d expected failures: %d' % (lpass,lfail,lunpass,lexpfail), '', '<br/>')))
return outputCalls
#
# cleanup
#
def cleanup(self):
# Turn off quiet to display summary
if self.quiet:
self.quiet = False
if self.timeoutmsgs:
self.js_print('\nTIMEOUTS:', '', '<br/>')
for m in self.timeoutmsgs:
self.js_print(' %s' % m, '', '<br/>')
if self.failmsgs:
self.js_print('\nFAILURES:', '', '<br/>')
for m in self.failmsgs:
self.js_print(' %s' % m, '', '<br/>')
if self.expfailmsgs:
self.js_print('\nEXPECTED FAILURES:', '', '<br/>')
for m in self.expfailmsgs:
self.js_print(' %s' % m, '', '<br/>')
if self.unpassmsgs:
self.js_print('\nUNEXPECTED PASSES:', '', '<br/>')
for m in self.unpassmsgs:
self.js_print(' %s' % m, '', '<br/>')
if self.assertmsgs:
self.js_print('\nASSERTIONS:', '', '<br/>')
for m in self.assertmsgs:
self.js_print(' %s' % m, '', '<br/>')
if self.rebuildtests:
if self.ashErrors:
self.js_print('\ntest run FAILED!')
self.js_print('')
self.js_print('Compile Errors:')
for msg in self.ashErrors:
self.js_print('\t'+msg)
self.js_print('')
else:
self.js_print('\ntest run PASSED!')
else:
if not self.allfails and not self.allunpass:
self.js_print('\ntest run PASSED!')
else:
self.js_print('\ntest run FAILED!')
if self.timestamps:
end_time = datetime.today()
self.js_print('Tests complete at %s' % end_time, '<hr><tt>', '</tt>')
self.js_print('Start Date: %s' % self.start_time, '<tt><br>', '')
self.js_print('End Date : %s' % end_time, '<br>', '')
self.js_print('Test Time : %s' % (end_time-self.start_time), '<br>', '')
if not self.rebuildtests:
self.js_print('passes : %d' % self.allpasses, '<br>', '')
self.js_print('failures : %d' % self.allfails, '<br>', '')
if self.allunpass>0:
self.js_print('unexpected passes : %d' % self.allunpass, '<br>', '')
if self.allexpfails>0:
self.js_print('expected failures : %d' % self.allexpfails, '<br>', '')
if self.allskips>0:
self.js_print('tests skipped : %d' % self.allskips, '<br>', '')
if self.allexceptions>0:
self.js_print('test exceptions : %d' % self.allexceptions, '<br>', '')
if self.alltimeouts>0:
self.js_print('test timeouts : %d' % self.alltimeouts, '<br>', '')
if self.allasserts>0:
self.js_print('assertions : %d' % self.allasserts, '<br>', '')
if self.js_output:
print 'Results were written to %s' % self.js_output
if self.ashErrors:
exit(1)
#if __name__ == '__main__':
# test = RuntestBase()
|
|
# -*- coding: UTF-8 -*-
"""Easy to use object-oriented thread pool framework.
A thread pool is an object that maintains a pool of worker threads to perform
time consuming operations in parallel. It assigns jobs to the threads
by putting them in a work request queue, where they are picked up by the
next available thread. This then performs the requested operation in the
background and puts the results in another queue.
The thread pool object can then collect the results from all threads from
this queue as soon as they become available or after all threads have
finished their work. It's also possible to define callbacks to handle
each result as it comes in.
The basic concept and some code was taken from the book "Python in a Nutshell,
2nd edition" by Alex Martelli, O'Reilly 2006, ISBN 0-596-10046-9, from section
14.5 "Threaded Program Architecture". I wrapped the main program logic in the
ThreadPool class, added the WorkRequest class and the callback system and
tweaked the code here and there. Kudos also to Florent Aide for the exception
handling mechanism.
Basic usage::
>>> pool = ThreadPool(poolsize)
>>> requests = makeRequests(some_callable, list_of_args, callback)
>>> [pool.putRequest(req) for req in requests]
>>> pool.wait()
See the end of the module code for a brief, annotated usage example.
Website : http://chrisarndt.de/projects/threadpool/
"""
__docformat__ = "restructuredtext en"
__all__ = [
'makeRequests',
'NoResultsPending',
'NoWorkersAvailable',
'ThreadPool',
'WorkRequest',
'WorkerThread'
]
__author__ = "Christopher Arndt"
__version__ = "1.2.5"
__revision__ = "$Revision: 354 $"
__date__ = "$Date: 2008-11-19 18:34:46 +0100 (Wed, 19 Nov 2008) $"
__license__ = 'MIT license'
# standard library modules
import sys
import threading
import Queue
import traceback
# exceptions
class NoResultsPending(Exception):
"""All work requests have been processed."""
pass
class NoWorkersAvailable(Exception):
"""No worker threads available to process remaining requests."""
pass
# internal module helper functions
def _handle_thread_exception(request, exc_info):
"""Default exception handler callback function.
This just prints the exception info via ``traceback.print_exception``.
"""
traceback.print_exception(*exc_info)
# utility functions
def makeRequests(callable_, args_list, callback=None,
exc_callback=_handle_thread_exception):
"""Create several work requests for same callable with different arguments.
Convenience function for creating several work requests for the same
callable where each invocation of the callable receives different values
for its arguments.
``args_list`` contains the parameters for each invocation of callable.
Each item in ``args_list`` should be either a 2-item tuple of the list of
positional arguments and a dictionary of keyword arguments or a single,
non-tuple argument.
See docstring for ``WorkRequest`` for info on ``callback`` and
``exc_callback``.
"""
requests = []
for item in args_list:
if isinstance(item, tuple):
requests.append(
WorkRequest(callable_, item[0], item[1], callback=callback,
exc_callback=exc_callback)
)
else:
requests.append(
WorkRequest(callable_, [item], None, callback=callback,
exc_callback=exc_callback)
)
return requests
# classes
class WorkerThread(threading.Thread):
"""Background thread connected to the requests/results queues.
A worker thread sits in the background and picks up work requests from
one queue and puts the results in another until it is dismissed.
"""
def __init__(self, requests_queue, results_queue, poll_timeout=5, **kwds):
"""Set up thread in daemonic mode and start it immediatedly.
``requests_queue`` and ``results_queue`` are instances of
``Queue.Queue`` passed by the ``ThreadPool`` class when it creates a new
worker thread.
"""
threading.Thread.__init__(self, **kwds)
self.setDaemon(1)
self._requests_queue = requests_queue
self._results_queue = results_queue
self._poll_timeout = poll_timeout
self._dismissed = threading.Event()
self.start()
def run(self):
"""Repeatedly process the job queue until told to exit."""
while True:
if self._dismissed.isSet():
# we are dismissed, break out of loop
break
# get next work request. If we don't get a new request from the
            # queue after self._poll_timeout seconds, we jump to the start of
# the while loop again, to give the thread a chance to exit.
try:
request = self._requests_queue.get(True, self._poll_timeout)
except Queue.Empty:
continue
else:
if self._dismissed.isSet():
# we are dismissed, put back request in queue and exit loop
self._requests_queue.put(request)
break
try:
result = request.callable(*request.args, **request.kwds)
self._results_queue.put((request, result))
except:
request.exception = True
self._results_queue.put((request, sys.exc_info()))
def dismiss(self):
"""Sets a flag to tell the thread to exit when done with current job."""
self._dismissed.set()
class WorkRequest:
"""A request to execute a callable for putting in the request queue later.
See the module function ``makeRequests`` for the common case
where you want to build several ``WorkRequest`` objects for the same
callable but with different arguments for each call.
"""
def __init__(self, callable_, args=None, kwds=None, requestID=None,
callback=None, exc_callback=_handle_thread_exception):
"""Create a work request for a callable and attach callbacks.
        A work request consists of a callable to be executed by a
        worker thread, a list of positional arguments, and a dictionary
        of keyword arguments.
A ``callback`` function can be specified, that is called when the
results of the request are picked up from the result queue. It must
accept two anonymous arguments, the ``WorkRequest`` object and the
results of the callable, in that order. If you want to pass additional
information to the callback, just stick it on the request object.
You can also give custom callback for when an exception occurs with
the ``exc_callback`` keyword parameter. It should also accept two
anonymous arguments, the ``WorkRequest`` and a tuple with the exception
details as returned by ``sys.exc_info()``. The default implementation
of this callback just prints the exception info via
``traceback.print_exception``. If you want no exception handler
callback, just pass in ``None``.
``requestID``, if given, must be hashable since it is used by
``ThreadPool`` object to store the results of that work request in a
dictionary. It defaults to the return value of ``id(self)``.
"""
if requestID is None:
self.requestID = id(self)
else:
try:
self.requestID = hash(requestID)
except TypeError:
raise TypeError("requestID must be hashable.")
self.exception = False
self.callback = callback
self.exc_callback = exc_callback
self.callable = callable_
self.args = args or []
self.kwds = kwds or {}
def __str__(self):
return "<WorkRequest id=%s args=%r kwargs=%r exception=%s>" % \
(self.requestID, self.args, self.kwds, self.exception)
class ThreadPool:
"""A thread pool, distributing work requests and collecting results.
See the module docstring for more information.
"""
def __init__(self, num_workers, q_size=0, resq_size=0, poll_timeout=5):
"""Set up the thread pool and start num_workers worker threads.
``num_workers`` is the number of worker threads to start initially.
If ``q_size > 0`` the size of the work *request queue* is limited and
the thread pool blocks when the queue is full and it tries to put
more work requests in it (see ``putRequest`` method), unless you also
use a positive ``timeout`` value for ``putRequest``.
If ``resq_size > 0`` the size of the *results queue* is limited and the
worker threads will block when the queue is full and they try to put
new results in it.
        .. warning::
If you set both ``q_size`` and ``resq_size`` to ``!= 0`` there is
            the possibility of a deadlock, when the results queue is not pulled
regularly and too many jobs are put in the work requests queue.
To prevent this, always set ``timeout > 0`` when calling
``ThreadPool.putRequest()`` and catch ``Queue.Full`` exceptions.
"""
self._requests_queue = Queue.Queue(q_size)
self._results_queue = Queue.Queue(resq_size)
self.workers = []
self.dismissedWorkers = []
self.workRequests = {}
self.createWorkers(num_workers, poll_timeout)
def createWorkers(self, num_workers, poll_timeout=5):
"""Add num_workers worker threads to the pool.
        ``poll_timeout`` sets the interval in seconds (int or float) for how
        often threads should check whether they are dismissed, while waiting for
requests.
"""
for i in range(num_workers):
self.workers.append(WorkerThread(self._requests_queue,
self._results_queue, poll_timeout=poll_timeout))
def dismissWorkers(self, num_workers, do_join=False):
"""Tell num_workers worker threads to quit after their current task."""
dismiss_list = []
for i in range(min(num_workers, len(self.workers))):
worker = self.workers.pop()
worker.dismiss()
dismiss_list.append(worker)
if do_join:
for worker in dismiss_list:
worker.join()
else:
self.dismissedWorkers.extend(dismiss_list)
def joinAllDismissedWorkers(self):
"""Perform Thread.join() on all worker threads that have been dismissed.
"""
for worker in self.dismissedWorkers:
worker.join()
self.dismissedWorkers = []
def putRequest(self, request, block=True, timeout=0):
"""Put work request into work queue and save its id for later."""
assert isinstance(request, WorkRequest)
# don't reuse old work requests
assert not getattr(request, 'exception', None)
self._requests_queue.put(request, block, timeout)
self.workRequests[request.requestID] = request
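    # A minimal usage sketch for bounded queues, following the warning in __init__ (the names
    # below are hypothetical, not part of this module's API):
    #
    #     pool = ThreadPool(4, q_size=32, resq_size=32)
    #     for req in requests:
    #         while True:
    #             try:
    #                 pool.putRequest(req, timeout=1)
    #                 break
    #             except Queue.Full:
    #                 pool.poll()  # drain some results so workers can keep making progress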
def poll(self, block=False):
"""Process any new results in the queue."""
while True:
# still results pending?
if not self.workRequests:
raise NoResultsPending
# are there still workers to process remaining requests?
elif block and not self.workers:
raise NoWorkersAvailable
try:
# get back next results
request, result = self._results_queue.get(block=block)
                # has an exception occurred?
if request.exception and request.exc_callback:
request.exc_callback(request, result)
# hand results to callback, if any
if request.callback and not \
(request.exception and request.exc_callback):
request.callback(request, result)
del self.workRequests[request.requestID]
except Queue.Empty:
break
def wait(self):
"""Wait for results, blocking until all have arrived."""
while 1:
try:
self.poll(True)
except NoResultsPending:
break
################
# USAGE EXAMPLE
################
if __name__ == '__main__':
import random
import time
# the work the threads will have to do (rather trivial in our example)
def do_something(data):
time.sleep(random.randint(1,5))
result = round(random.random() * data, 5)
# just to show off, we throw an exception once in a while
if result > 5:
raise RuntimeError("Something extraordinary happened!")
return result
# this will be called each time a result is available
def print_result(request, result):
print "**** Result from request #%s: %r" % (request.requestID, result)
# this will be called when an exception occurs within a thread
# this example exception handler does little more than the default handler
def handle_exception(request, exc_info):
if not isinstance(exc_info, tuple):
# Something is seriously wrong...
print request
print exc_info
raise SystemExit
print "**** Exception occured in request #%s: %s" % \
(request.requestID, exc_info)
# assemble the arguments for each job to a list...
data = [random.randint(1,10) for i in range(20)]
# ... and build a WorkRequest object for each item in data
requests = makeRequests(do_something, data, print_result, handle_exception)
# to use the default exception handler, uncomment next line and comment out
# the preceding one.
#requests = makeRequests(do_something, data, print_result)
# or the other form of args_lists accepted by makeRequests: ((,), {})
data = [((random.randint(1,10),), {}) for i in range(20)]
requests.extend(
makeRequests(do_something, data, print_result, handle_exception)
#makeRequests(do_something, data, print_result)
# to use the default exception handler, uncomment next line and comment
# out the preceding one.
)
# we create a pool of 3 worker threads
print "Creating thread pool with 3 worker threads."
main = ThreadPool(3)
# then we put the work requests in the queue...
for req in requests:
main.putRequest(req)
print "Work request #%s added." % req.requestID
# or shorter:
# [main.putRequest(req) for req in requests]
# ...and wait for the results to arrive in the result queue
# by using ThreadPool.wait(). This would block until results for
# all work requests have arrived:
# main.wait()
# instead we can poll for results while doing something else:
i = 0
while True:
try:
time.sleep(0.5)
main.poll()
print "Main thread working...",
print "(active worker threads: %i)" % (threading.activeCount()-1, )
if i == 10:
print "**** Adding 3 more worker threads..."
main.createWorkers(3)
if i == 20:
print "**** Dismissing 2 worker threads..."
main.dismissWorkers(2)
i += 1
except KeyboardInterrupt:
print "**** Interrupted!"
break
except NoResultsPending:
print "**** No pending results."
break
if main.dismissedWorkers:
print "Joining all dismissed worker threads..."
main.joinAllDismissedWorkers()
|
|
# Copyright (C) 2013 Coders at Work
from base import ComponentBase
from keyvalue import KeyValueStore
from environment import NullEnvironment, SystemEnvironment
from executer import Executer
from metadata import MetaDataFile
import util
from collections import OrderedDict
import datetime
import getpass
import inspect
import os
import pty
import re
import select
import subprocess
import sys
import textwrap
import threading
try:
from Queue import Empty
from Queue import Queue
except ImportError:
# python 3.x
from queue import Empty
from queue import Queue
class Habitat(ComponentBase):
class KeyValueDefault:
user = getpass.getuser()
home = os.path.expanduser("~")
habitat_root = '%(home)s/.habitats/%(habitat_name)s'
metadata_path = '%(habitat_root)s/metadata'
base_port = 8000
host = '127.0.0.1'
timeout = 30
class __ShouldThrow(object):
pass
def __init__(self, should_start=True, *args, **kwargs):
self.habitat_name = self.__class__.__name__
super(Habitat, self).__init__(name='%(habitat_name)s')
self.executer = Executer(self)
self._args = args
self._port_map = {}
# We absolutely need a metadata file.
metadata_path = self['metadata_path']
if not os.path.exists(os.path.dirname(metadata_path)):
os.makedirs(os.path.dirname(metadata_path))
self.metadata = MetaDataFile(metadata_path)
for name, component in self.get_all_components().iteritems():
component.habitat = self
component.name = name
if component.name not in self.metadata:
self.metadata[component.name] = {}
component.metadata = self.metadata[component.name]
# If we should start the Habitat, run the first argument as a command.
if should_start:
if self._args:
command = self._args[0]
else:
command = 'run'
self.command(command, *self._args[1:])
def command(self, name, *args):
if hasattr(self.Commands, name):
getattr(self.Commands, name)(self, *args)
elif ( name in self
and isinstance(self[name], ComponentBase)
and len(args) > 0
and hasattr(self[name].Commands, args[0])):
component = self[name]
name, args = args[0], args[1:]
getattr(component.Commands, name)(component, *args)
else:
self.command('help')
def _start(self):
for component in self.get_all_components().values():
component.start()
self.metadata.storage.save()
def wait_if_needed(self):
# If we are running a component, we wait for a CTRL-C from the user.
should_wait = False
for name, component in self.get_all_components().iteritems():
if component.is_running():
should_wait = True
break
if should_wait:
print 'Waiting for CTRL-C...'
try:
while True:
sys.stdin.readlines()
except KeyboardInterrupt:
pass
def _stop(self):
for name, component in reversed(self.get_all_components().items()):
if component.is_running():
component.stop()
self.metadata.storage.save()
def get_all_components(self):
return {
name: getattr(self, name)
for name in dir(self)
if (not name.startswith('_')
and hasattr(self, name)
and isinstance(getattr(self, name), ComponentBase))
}
def get_component(self, name):
if isinstance(name, basestring):
return self[name]
if isinstance(name, ComponentBase):
return name
raise Exception('Invalid component: %s' % name)
def get_component_from_stack(self):
i = 2
# Get the first stack level out of Habitat
stack = inspect.stack()[i]
while 'self' in stack[0].f_locals and isinstance(stack[0].f_locals["self"], Habitat):
i += 1
stack = inspect.stack()[i]
if 'self' in stack[0].f_locals and isinstance(stack[0].f_locals["self"], ComponentBase):
return stack[0].f_locals["self"]
return None
def execute(self, **kwargs):
"""Run a command line tool using an environment and redirecting the
STDOUT/STDERR to the local logs. Throw an exception if the command
failed.
"""
return self.executer.execute(**kwargs)
def execute_or_die(self, **kwargs):
"""Run a command line tool using an environment and redirecting the
STDOUT/STDERR to the local logs. Throw an exception if the command
failed.
"""
return self.executer.execute_or_die(**kwargs)
def execute_in_thread(self, **kwargs):
"""Run a command line tool using an environment and redirecting the
        STDOUT/STDERR to the local logs. The tool is run in a separate
thread.
"""
return self.executer.execute_in_thread(**kwargs)
def execute_interactive(self, **kwargs):
"""Run a command line tool using an environment and redirecting the
        STDOUT/STDERR to the local logs. The tool is run interactively.
"""
return self.executer.execute_interactive(**kwargs)
class Commands:
@staticmethod
def run(habitat, *args):
"""Run a list of components by their names."""
try:
habitat.start()
habitat.wait_if_needed()
except Exception, ex:
print ex
finally:
habitat.stop(force=True)
@staticmethod
def depgraph(habitat, *args):
"""Show the list of dependencies from the habitat."""
def print_curr_node(tree, depname, level, shown=[]):
deps = tree[depname]
print '%s%s' % (' ' * level, depname)
for dep in reversed(deps):
print_curr_node(tree, dep, level + 1)
tree[dep] = []
all_components = habitat.get_all_components()
component_tree = {
name: [dep.name for dep in component.deps]
for name, component in all_components.iteritems()
}
all_deps = util.order_dependencies(component_tree)
for name in reversed(all_deps):
if component_tree[name]:
print_curr_node(component_tree, name, 0)
@staticmethod
def show(habitat, *args):
"""Output all variables as they are evaluated in each component
or the habitat itself.
Usage: show <variable> <variable...>
"""
if not args:
habitat.command('help', 'show')
return
for key in args:
if key in habitat:
print '%25s["%s"] == "%s"' % (
habitat.habitat_name, key, habitat[key])
for name, component in habitat.get_all_components().iteritems():
if key in component:
print '%25s["%s"] == "%s"' % (name, key, component[key])
@staticmethod
def help(habitat, *args):
"""Output all accepted commands and their docstring if available.
"""
all_commands = [
method
for method in dir(habitat.Commands)
if not method.startswith('_') and not method in args
]
length = sorted([len(cmd) for cmd in all_commands])[-1] + 4
format_str = ' %%%ss %%s' % str(length)
print format_str % ('Command', 'Description')
print '-' * (length + 70)
for method in dir(habitat.Commands):
if method.startswith('_'):
continue
if args and not method in args:
continue
doc = getattr(habitat.Commands, method).__doc__ or ''
doc = textwrap.fill(doc, 60, subsequent_indent=' ' * (length + 4))
print format_str % (method, doc)
print '-' * (length + 70)
print
print ' Components with commands:'
components = [
'%-25s' % (k,)
for k, v in habitat.get_all_components().iteritems()
if ( hasattr(v, 'Commands')
and len([c for c in dir(v.Commands) if not c.startswith('_')]))
]
print ' %s' % (textwrap.fill(' '.join(components), 80, subsequent_indent=' '),)
|
|
import redis as rdb
import traceback
import datetime
import platform
import aiohttp
import asyncio
import json
#############Color for Terminal###############################
#Whole line, and easier to debug
def prRed(prt): print("\033[91m{}\033[00m".format(prt))
def prGreen(prt): print("\033[92m{}\033[00m".format(prt))
def prYellow(prt): print("\033[93m{}\033[00m".format(prt))
def prLightPurple(prt): print("\033[94m{}\033[00m".format(prt))
def prPurple(prt): print("\033[95m{}\033[00m".format(prt))
def prCyan(prt): print("\033[96m{}\033[00m".format(prt))
def prLightGray(prt): print("\033[97m{}\033[00m".format(prt))
def prBlack(prt): print("\033[98m{}\033[00m".format(prt))
###############################################################
#read files and save it to secret
with open ("../secret.json","r",encoding = "utf8") as f:
secret = json.load(f)
###########Connection Line####################
redis = rdb.Redis(host=secret["Redis"], decode_responses=True)
def is_owner(ctx): #Checking if you are owner of bot
return ctx.message.author.id == 105853969175212032
############Checking if cogs for that guild are enabled or disabled##########
def is_enable(ctx,cog):
try:
return redis.hget("{}:Config:Cogs".format(ctx.message.guild.id),cog) == "on"
except:
return False
######################Checking if Role is able######################################
def check_roles(ctx,cog,get_role): #Server ID then which plugin, and Roles with set
try:
db_role= redis.smembers("{}:{}:{}".format(ctx.message.guild.id,cog,get_role))
print("Roles: ",db_role)
author_roles= [role.id for role in ctx.message.author.roles]
print(author_roles)
for role in db_role:
print(role)
if int(role) in author_roles:
return True
return False
except Exception as e:
prRed("ERROR\n{}".format(e))
return False
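# A sketch of the Redis layout these helpers assume (key names follow the format strings
# above; the concrete IDs are made up for illustration):
#   "<guild_id>:Config:Cogs"      -> hash mapping cog name to "on"/"off"   (used by is_enable)
#   "<guild_id>:<cog>:<get_role>" -> set of role IDs that are allowed      (used by check_roles)
# e.g. redis.sadd("1234:Music:DJ", 555666777) would let members holding role 555666777 pass
# check_roles(ctx, "Music", "DJ").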
async def send_hastebin(info):
async with aiohttp.ClientSession() as session:
async with session.post("https://hastebin.com/documents",data = str(info)) as resp:
if resp.status == 200:
return "https://hastebin.com/{}.py".format((await resp.json())["key"])
async def input(bot,ctx,msg,check):
"""
    Args:
        bot: bot instance used to wait for the reply
        ctx: discord Context
        msg: message to send to the user
        check: condition the user's reply must satisfy to be accepted
    Returns: the message object from the user, or None on timeout
"""
asking = await ctx.send(msg) #sending message to ask user something
try:
answer = await bot.wait_for("message", timeout=15, check=check)
await answer.delete()
except asyncio.TimeoutError: # timeout error
await asking.delete()
await ctx.send(content="You took too long, please try again!",delete_after = 15)
return None
except:
pass
await asking.delete() # bot delete it own msg.
return answer
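# A minimal usage sketch for input() inside a command (the names below are hypothetical):
#
#   def same_author(m):
#       return m.author == ctx.author and m.channel == ctx.channel
#
#   answer = await input(bot, ctx, "What name should I use?", same_author)
#   if answer is not None:
#       await ctx.send("Got it: {}".format(answer.content))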
class Background:
"""
    Background: runs an automatic background task on a timer without extra boilerplate.
"""
current = datetime.datetime.utcnow() #datetime obj
def __init__(self,name,max_time,sleep_time,function,log):
self.name = name
self.max_time = max_time
self.sleep_time = sleep_time
self.function = function #function to call
self.log = log
self.current = datetime.datetime.utcnow()
def start(self): #to start function run
loop = asyncio.get_event_loop()
self.loop_timer = loop.create_task(self.timer())
prLightPurple("Starting {} loop".format(self.name))
def stop(self):
self.loop_timer.cancel()
prLightPurple("Stopping {} loop".format(self.name))
async def timer(self):
try:
while True:
self.current = datetime.datetime.utcnow()
self.log.debug(self.current)
self.log.debug("Calling event")
await self.function()
self.log.debug("Enter sleep mode")
await asyncio.sleep(self.sleep_time)
except asyncio.CancelledError:
return prRed("Asyncio Cancelled Error")
except Exception as e:
print(e)
prRed(traceback.format_exc())
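# A minimal usage sketch for Background (the coroutine and logger names are hypothetical):
#
#   async def refresh_cache():
#       ...  # periodic work the bot needs to do
#
#   task = Background("cache", max_time=60, sleep_time=300, function=refresh_cache, log=logger)
#   task.start()   # schedules timer() on the running event loop
#   ...
#   task.stop()    # cancels the loop task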
class Embed_page:
"""
    Embed page, with reactions to go to the next or previous page (or other reactions for specific features)
"""
def __init__(self,bot,embed_list,**kwargs):
self.bot = bot
self.embed_page = embed_list #expecting list of embed
self.max_page = kwargs.get("max_page",len(embed_list))
self.page = kwargs.get("page",0) #current page, default is 0
        self.alt_edit = kwargs.get("alt_edit") #set when the message should be edited AFTER an external handler function returns
self.original_msg = kwargs.get("original_msg") #^
        #this is an array of [emoji, function] pairs, so each reaction maps to its own handler; an array (not a dict) is used to preserve order
self.reaction = kwargs.get("reaction", [[u"\u2B05",self.back_page],[u"\u27A1",self.continue_page]])
def back_page(self,*args):
self.page -= 1
        if self.page < 0:
            self.page = 0 #don't go below the first page
return self.page
def continue_page(self,*args):
self.page += 1
if self.page > self.max_page - 1:
self.page = self.max_page - 1
return self.page
def get_page(self):
return self.embed_page[self.page]
async def wait_for_react(self,check,timeout):
try:
reaction, user = await self.bot.wait_for("reaction_add", timeout = timeout, check = check)
except asyncio.TimeoutError:
return None,None
else:
return reaction,user
async def start(self,channel,check,timeout = 60,is_async = False,extra = []):
"""
Args:
            channel: discord channel. The destination to send the message to.
            check: check function used to validate each reaction.
            timeout: how long to wait for a reaction before clearing them. Default 60 seconds.
        It will send a new message, or edit original_msg if one exists (pass original_msg, and alt_edit if needed, when constructing this class).
        It then iterates over self.reaction to add each reaction to the message,
        and finally loops, waiting for reactions until the timeout is reached.
"""
if self.original_msg:
await self.original_msg.edit(embed = self.get_page())
self.message = self.original_msg
else:
self.message = await channel.send(embed = self.get_page())
for rect in self.reaction:
await self.message.add_reaction(rect[0])
while True:
react,user = await self.wait_for_react(check,timeout)
            #If react is None, it means the timeout was reached and the user didn't react.
if react is None:
return await self.message.clear_reactions()
            #remove the user's reaction
try:
await self.message.remove_reaction(react.emoji,user)
except: #if bot does not have permission for it. Oh well. Hard time for user.
pass
            #now find which reaction was used and run its handler function.
            #once it has been handled, stop searching.
for item in self.reaction:
if item[0] == react.emoji: #if it equal then we can call function
if is_async:
if self.alt_edit:
await item[1](react,user,self.message,*extra)
else:
await item[1](react,user,*extra)
else:
if self.alt_edit:
item[1](react,user,self.message,*extra)
else:
item[1](react,user,*extra)
break
#now we will update message again
await self.message.edit(embed = self.get_page())
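# A minimal usage sketch for Embed_page (the embeds and check below are hypothetical):
#
#   pages = [discord.Embed(title="Page 1"), discord.Embed(title="Page 2")]
#   pager = Embed_page(bot, pages)
#   def check(reaction, user):
#       return user == ctx.author and reaction.message.id == pager.message.id
#   await pager.start(ctx.channel, check)   # adds the arrow reactions and pages on react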
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Imports unittest as a replacement for testing.pybase.googletest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import itertools
import os
import sys
import tempfile
# pylint: disable=wildcard-import
from unittest import *
# pylint: enable=wildcard-import
unittest_main = main
# pylint: disable=invalid-name
# pylint: disable=undefined-variable
def main(*args, **kwargs):
"""Delegate to unittest.main after redefining testLoader."""
if 'TEST_SHARD_STATUS_FILE' in os.environ:
try:
f = None
try:
f = open(os.environ['TEST_SHARD_STATUS_FILE'], 'w')
f.write('')
except IOError:
sys.stderr.write('Error opening TEST_SHARD_STATUS_FILE (%s). Exiting.'
% os.environ['TEST_SHARD_STATUS_FILE'])
sys.exit(1)
finally:
if f is not None: f.close()
if ('TEST_TOTAL_SHARDS' not in os.environ or
'TEST_SHARD_INDEX' not in os.environ):
return unittest_main(*args, **kwargs)
total_shards = int(os.environ['TEST_TOTAL_SHARDS'])
shard_index = int(os.environ['TEST_SHARD_INDEX'])
base_loader = TestLoader()
delegate_get_names = base_loader.getTestCaseNames
bucket_iterator = itertools.cycle(range(total_shards))
def getShardedTestCaseNames(testCaseClass):
filtered_names = []
for testcase in sorted(delegate_get_names(testCaseClass)):
bucket = next(bucket_iterator)
if bucket == shard_index:
filtered_names.append(testcase)
return filtered_names
# Override getTestCaseNames
base_loader.getTestCaseNames = getShardedTestCaseNames
kwargs['testLoader'] = base_loader
unittest_main(*args, **kwargs)
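# A sketch of how the sharded loader above partitions tests (values are illustrative): with
# TEST_TOTAL_SHARDS=3 and sorted test names [t0, t1, t2, t3, t4], the round-robin
# bucket_iterator assigns t0 and t3 to shard 0, t1 and t4 to shard 1, and t2 to shard 2, so
# each invocation only runs the names whose bucket matches TEST_SHARD_INDEX.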
def GetTempDir():
first_frame = inspect.stack()[-1][0]
temp_dir = os.path.join(
tempfile.gettempdir(), os.path.basename(inspect.getfile(first_frame)))
  # Strip a trailing '.py'; rstrip('.py') would strip any run of '.', 'p', 'y' characters.
  if temp_dir.endswith('.py'):
    temp_dir = temp_dir[:-len('.py')]
if not os.path.isdir(temp_dir):
os.mkdir(temp_dir, 0o755)
return temp_dir
def StatefulSessionAvailable():
return False
class StubOutForTesting(object):
"""Support class for stubbing methods out for unit testing.
Sample Usage:
You want os.path.exists() to always return true during testing.
stubs = StubOutForTesting()
stubs.Set(os.path, 'exists', lambda x: 1)
...
stubs.CleanUp()
The above changes os.path.exists into a lambda that returns 1. Once
the ... part of the code finishes, the CleanUp() looks up the old
value of os.path.exists and restores it.
"""
def __init__(self):
self.cache = []
self.stubs = []
def __del__(self):
"""Do not rely on the destructor to undo your stubs.
You cannot guarantee exactly when the destructor will get called without
relying on implementation details of a Python VM that may change.
"""
self.CleanUp()
# __enter__ and __exit__ allow use as a context manager.
def __enter__(self):
return self
def __exit__(self, unused_exc_type, unused_exc_value, unused_tb):
self.CleanUp()
def CleanUp(self):
"""Undoes all SmartSet() & Set() calls, restoring original definitions."""
self.SmartUnsetAll()
self.UnsetAll()
def SmartSet(self, obj, attr_name, new_attr):
"""Replace obj.attr_name with new_attr.
This method is smart and works at the module, class, and instance level
while preserving proper inheritance. It will not stub out C types however
unless that has been explicitly allowed by the type.
This method supports the case where attr_name is a staticmethod or a
classmethod of obj.
Notes:
- If obj is an instance, then it is its class that will actually be
stubbed. Note that the method Set() does not do that: if obj is
an instance, it (and not its class) will be stubbed.
- The stubbing is using the builtin getattr and setattr. So, the __get__
and __set__ will be called when stubbing (TODO: A better idea would
probably be to manipulate obj.__dict__ instead of getattr() and
setattr()).
Args:
obj: The object whose attributes we want to modify.
attr_name: The name of the attribute to modify.
new_attr: The new value for the attribute.
Raises:
AttributeError: If the attribute cannot be found.
"""
if (inspect.ismodule(obj) or
(not inspect.isclass(obj) and attr_name in obj.__dict__)):
orig_obj = obj
orig_attr = getattr(obj, attr_name)
else:
if not inspect.isclass(obj):
mro = list(inspect.getmro(obj.__class__))
else:
mro = list(inspect.getmro(obj))
mro.reverse()
orig_attr = None
found_attr = False
for cls in mro:
try:
orig_obj = cls
orig_attr = getattr(obj, attr_name)
found_attr = True
except AttributeError:
continue
if not found_attr:
raise AttributeError('Attribute not found.')
# Calling getattr() on a staticmethod transforms it to a 'normal' function.
# We need to ensure that we put it back as a staticmethod.
old_attribute = obj.__dict__.get(attr_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
orig_attr = staticmethod(orig_attr)
self.stubs.append((orig_obj, attr_name, orig_attr))
setattr(orig_obj, attr_name, new_attr)
def SmartUnsetAll(self):
"""Reverses SmartSet() calls, restoring things to original definitions.
This method is automatically called when the StubOutForTesting()
object is deleted; there is no need to call it explicitly.
It is okay to call SmartUnsetAll() repeatedly, as later calls have
no effect if no SmartSet() calls have been made.
"""
for args in reversed(self.stubs):
setattr(*args)
self.stubs = []
def Set(self, parent, child_name, new_child):
"""In parent, replace child_name's old definition with new_child.
The parent could be a module when the child is a function at
module scope. Or the parent could be a class when a class' method
is being replaced. The named child is set to new_child, while the
prior definition is saved away for later, when UnsetAll() is
called.
This method supports the case where child_name is a staticmethod or a
classmethod of parent.
Args:
parent: The context in which the attribute child_name is to be changed.
child_name: The name of the attribute to change.
new_child: The new value of the attribute.
"""
old_child = getattr(parent, child_name)
old_attribute = parent.__dict__.get(child_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
old_child = staticmethod(old_child)
self.cache.append((parent, old_child, child_name))
setattr(parent, child_name, new_child)
def UnsetAll(self):
"""Reverses Set() calls, restoring things to their original definitions.
This method is automatically called when the StubOutForTesting()
object is deleted; there is no need to call it explicitly.
It is okay to call UnsetAll() repeatedly, as later calls have no
effect if no Set() calls have been made.
"""
# Undo calls to Set() in reverse order, in case Set() was called on the
# same arguments repeatedly (want the original call to be last one undone)
for (parent, old_child, child_name) in reversed(self.cache):
setattr(parent, child_name, old_child)
self.cache = []
|
|
"""
This file contains some utility functions to calculate hessian matrix and its inverse.
Author: Chen Shangyu ([email protected])
"""
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from datetime import datetime
import tensorflow as tf
import os
import numpy as np
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# Construct hessian computing graph for res layer (conv layer without bias)
def create_res_hessian_computing_tf_graph(input_shape, layer_kernel, layer_stride):
"""
    This function creates the TensorFlow graph for computing the hessian matrix for a res layer.
    Step 1: It first extracts image patches using tf.extract_image_patches.
    Step 2: Then it calculates the hessian matrix by outer product.
Args:
input_shape: the dimension of input
layer_kernel: kernel size of the layer
layer_stride: stride of the layer
Output:
input_holder: TensorFlow placeholder for layer input
get_hessian_op: A TensorFlow operator to calculate hessian matrix
"""
input_holder = tf.placeholder(dtype=tf.float32, shape=input_shape)
patches = tf.extract_image_patches(images = input_holder,
ksizes = [1,layer_kernel, layer_kernel,1],
strides = [1, layer_stride, layer_stride, 1],
rates = [1, 1, 1, 1],
padding = 'SAME')
print 'Patches shape: %s' %patches.get_shape()
a = tf.expand_dims(patches, axis=-1)
b = tf.expand_dims(patches, axis=3)
outprod = tf.multiply(a, b)
# print 'outprod shape: %s' %outprod.get_shape()
get_hessian_op = tf.reduce_mean(outprod, axis=[0, 1, 2])
print 'Hessian shape: %s' % get_hessian_op.get_shape()
return input_holder, get_hessian_op
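# A minimal NumPy sketch (added illustration, not part of the original code)
# of what the graph above computes: for every extracted patch vector x it
# forms the outer product x x^T and averages over the batch and all spatial
# positions. `patches` is assumed to already be a NumPy array in the
# [N, H, W, D] layout produced by tf.extract_image_patches.
def hessian_from_patches_reference(patches):
    flat = patches.reshape(-1, patches.shape[-1])    # one patch per row
    return flat.T.dot(flat) / float(flat.shape[0])   # mean of outer products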
# Construct hessian computing graph for fc layer
def create_fc_hessian_computing_tf_graph(input_shape):
"""
This function creates the TensorFlow graph for computing the hessian matrix of a fully-connected layer.
Compared with create_res_hessian_computing_tf_graph, it does not need to extract patches.
"""
input_holder = tf.placeholder(dtype=tf.float32, shape=input_shape)
a = tf.expand_dims(input_holder, axis=-1)
# Append an extra one to each input vector for the bias term
vect_w_b = tf.concat([a, tf.ones([tf.shape(a)[0], 1, 1])], axis=1)
outprod = tf.matmul(vect_w_b, vect_w_b, transpose_b=True)
# print 'outprod shape: %s' %outprod.get_shape()
get_hessian_op = tf.reduce_mean(outprod, axis=0)
print 'Hessian shape: %s' % get_hessian_op.get_shape()
return input_holder, get_hessian_op
# Construct hessian computing graph for conv layer (with bias)
def create_conv_hessian_computing_tf_graph(input_shape, layer_kernel, layer_stride):
"""
This function creates the TensorFlow graph for computing the hessian matrix of a convolution layer (with bias).
Compared with create_res_hessian_computing_tf_graph, it appends an extra one to each patch for the bias term.
"""
input_holder = tf.placeholder(dtype=tf.float32, shape=input_shape)
patches = tf.extract_image_patches(images = input_holder,
ksizes = [1,layer_kernel, layer_kernel,1],
strides = [1, layer_stride, layer_stride, 1],
rates = [1, 1, 1, 1],
padding = 'SAME')
print 'Patches shape: %s' %patches.get_shape()
vect_w_b = tf.concat([patches, tf.ones([tf.shape(patches)[0], \
tf.shape(patches)[1], tf.shape(patches)[2], 1])], axis=3)
a = tf.expand_dims(vect_w_b, axis=-1)
b = tf.expand_dims(vect_w_b, axis=3)
outprod = tf.multiply(a, b)
# print 'outprod shape: %s' %outprod.get_shape()
get_hessian_op = tf.reduce_mean(outprod, axis=[0, 1, 2])
print 'Hessian shape: %s' % get_hessian_op.get_shape()
return input_holder, get_hessian_op
# Construct hessian inverse computing graph for Woodbury
def create_Woodbury_hessian_inv_graph(input_shape, dataset_size):
"""
This function creates the hessian inverse calculation graph using the Woodbury method.
"""
hessian_inv_holder = tf.placeholder(dtype=tf.float32, shape=[input_shape, input_shape])
input_holder = tf.placeholder(dtype=tf.float32, shape=[1, input_shape])
# [1, 4097] [4097, 4097] [4097, 1]
denominator = dataset_size + \
tf.matmul(a = tf.matmul(a = input_holder, b = hessian_inv_holder), b = input_holder, transpose_b=True)
# ([4097, 4097] [4097, 1]) ([1, 4097] [4097, 4097])
numerator = tf.matmul(a = tf.matmul(a = hessian_inv_holder, b = input_holder, transpose_b=True), \
b = tf.matmul(a = input_holder, b = hessian_inv_holder))
hessian_inv_op = hessian_inv_holder - numerator * (1.00 / denominator)
return hessian_inv_holder, input_holder, hessian_inv_op
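# A minimal NumPy sketch (added illustration, not part of the original code)
# of the rank-one Woodbury / Sherman-Morrison update the graph above encodes:
#   H_inv <- H_inv - (H_inv x^T)(x H_inv) / (N + x H_inv x^T)
# where x is a single input row vector and N is the dataset size. It mirrors
# the pure-NumPy branch used later in generate_hessian_inv_Woodbury.
def woodbury_update_reference(hessian_inv, x, dataset_size):
    x = x.reshape(1, -1)                                        # [1, D]
    denominator = dataset_size + np.dot(np.dot(x, hessian_inv), x.T)
    numerator = np.dot(np.dot(hessian_inv, x.T), np.dot(x, hessian_inv))
    return hessian_inv - numerator * (1.0 / denominator)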
def generate_hessian(net, trainloader, layer_name, layer_type, \
n_batch_used = 100, batch_size = 2, stride_factor = 3 ,use_cuda = True):
"""
This function generates the hessian matrix for a given layer. Basically, what it does is:
Step 1: Extract the layer input using the PyTorch interface
Step 2: For convolution and res layers, extract patches using a TensorFlow op
Step 3: Calculate the hessian
Args:
net: PyTorch model
trainloader: PyTorch dataloader
layer_name:
layer_type: 'C' for Convolution (with bias), 'R' for res layer (without bias),
'F' for Fully-Connected (with bias). The bias term matters because layers with
bias get an extra constant input appended when the hessian is built.
n_batch_used: number of batches used to generate the hessian.
batch_size: Batch size. Because the hessian calculation graph is quite large, a small
batch size (such as 2) is recommended here.
stride_factor: For the same memory reason, a bigger stride results in fewer extracted
image patches (think about how convolution works). stride_factor is multiplied by
the actual stride before use, so stride_factor == 1 extracts patches in the
original way but may cause GPU/CPU memory trouble. If that happens, increase
the stride factor here.
use_cuda: whether CUDA is available.
Output:
Hessian matrix
"""
freq_moniter = (n_batch_used * batch_size) / 50 # print progress information about 50 times in total
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
net.eval()
for batch_idx, (inputs, _) in enumerate(trainloader):
if use_cuda:
inputs = inputs.cuda()
net(Variable(inputs, volatile=True))
layer_input = net.module.layer_input[layer_name]
# At the beginning (first batch), construct the hessian graph
if batch_idx == 0:
print '[%s] Now construct generate hessian op for layer %s' %(datetime.now(), layer_name)
# res layer
if layer_type == 'R':
# Because PyTorch's data format (N,C,H,W) differs from TensorFlow's (N,H,W,C),
# the layer input should be permuted to fit TensorFlow
layer_input_np = layer_input.permute(0, 2, 3, 1).cpu().numpy()
layer_input_holder, generate_hessian_op = \
create_res_hessian_computing_tf_graph(layer_input_np.shape,
net.module.layer_kernel[layer_name],
net.module.layer_stride[layer_name] * stride_factor)
# check whether dimension is right
hessian_shape = int(generate_hessian_op.get_shape()[0])
print 'Hessian shape: %d' %hessian_shape
weight_shape = net.state_dict()['module.%s.weight' %layer_name].size()
# print ('Kernel shape: %s' %weight_shape)
# print weight_shape
kernel_unfold_shape = int(weight_shape[1]) * int(weight_shape[2]) * int(weight_shape[3])
print 'Kernel unfold shape: %d' %kernel_unfold_shape
assert(hessian_shape == kernel_unfold_shape)
# linear layer
elif layer_type == 'F':
layer_input_np = layer_input.cpu().numpy()
layer_input_holder, generate_hessian_op = \
create_fc_hessian_computing_tf_graph(layer_input_np.shape)
# check whether dimension is right
hessian_shape = int(generate_hessian_op.get_shape()[0])
print 'Hessian shape: %d' % hessian_shape
weight_shape = net.state_dict()['module.%s.weight' % layer_name].size()
print 'Weights shape: %d' % weight_shape[1]
assert(hessian_shape == weight_shape[1] + 1) # +1 because of bias
elif layer_type == 'C':
layer_input_np = layer_input.permute(0, 2, 3, 1).cpu().numpy()
layer_input_holder, generate_hessian_op = \
create_conv_hessian_computing_tf_graph(layer_input_np.shape,
net.module.layer_kernel[layer_name],
net.module.layer_stride[layer_name] * stride_factor)
# check whether dimension is right
hessian_shape = int(generate_hessian_op.get_shape()[0])
print 'Hessian shape: %d' %hessian_shape
weight_shape = net.state_dict()['module.%s.weight' %layer_name].size()
# print ('Kernel shape: %s' %weight_shape)
# print weight_shape
kernel_unfold_shape = int(weight_shape[1]) * int(weight_shape[2]) * int(weight_shape[3])
print 'Kernel unfold shape: %d' %kernel_unfold_shape
assert(hessian_shape == kernel_unfold_shape + 1)
print '[%s] %s Graph build complete.' % (datetime.now(), layer_name)
# Initialization finish, begin to calculate
if layer_type == 'C' or layer_type == 'R':
this_layer_input = layer_input.permute(0, 2, 3, 1).cpu().numpy()
elif layer_type == 'F':
this_layer_input = layer_input.cpu().numpy()
this_hessian = sess.run(generate_hessian_op,
feed_dict={layer_input_holder: this_layer_input})
if batch_idx == 0:
layer_hessian = this_hessian
else:
layer_hessian += this_hessian
if batch_idx % freq_moniter == 0:
print '[%s] Now finish image No. %d / %d' \
%(datetime.now(), batch_idx * batch_size, n_batch_used * batch_size)
if batch_idx == n_batch_used:
break
# net.train()
return (1.0 / n_batch_used) * layer_hessian
def generate_hessian_inv_Woodbury(net, trainloader, layer_name, layer_type, \
n_batch_used = 100, batch_size = 2, stride_factor = 3 , use_tf_backend = True, use_cuda = True):
"""
This function calculates the Hessian inverse matrix via the Woodbury matrix identity.
Args:
The shared parameters are explained in generate_hessian above.
use_tf_backend: if True, a TensorFlow graph is used to accelerate the Woodbury update.
"""
hessian_inverse = None
dataset_size = 0
freq_moniter = (n_batch_used * batch_size) / 50 # print progress information about 50 times in total
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
net.eval()
for batch_idx, (inputs, _) in enumerate(trainloader):
if use_cuda:
inputs = inputs.cuda()
net(Variable(inputs, volatile=True))
layer_input = net.module.layer_input[layer_name]
# Construct tf op for convolution and res layer
if batch_idx == 0:
if layer_type == 'C' or layer_type == 'R':
print '[%s] Now construct patches extraction op for layer %s' %(datetime.now(), layer_name)
layer_input_np = layer_input.permute(0, 2, 3, 1).cpu().numpy()
layer_kernel = net.module.layer_kernel[layer_name]
layer_stride = net.module.layer_stride[layer_name] * stride_factor
layer_input_holder = tf.placeholder(dtype=tf.float32, shape=layer_input_np.shape)
get_patches_op = \
tf.extract_image_patches(images = layer_input_holder,
ksizes = [1, layer_kernel, layer_kernel,1],
strides = [1, layer_stride, layer_stride, 1],
rates = [1, 1, 1, 1],
padding = 'SAME')
# For a convolution input, extracted patches would be: [1, 9, 9, 2304]
dataset_size = n_batch_used * int(get_patches_op.get_shape()[0]) * \
int(get_patches_op.get_shape()[1]) * int(get_patches_op.get_shape()[2])
input_dimension = int(get_patches_op.get_shape()[3])
if layer_type == 'C':
# For a convolution layer, add one to the input dimension for the bias term
hessian_inverse = 1000000 * np.eye(input_dimension + 1)
if use_tf_backend:
print ('Using the tf backend to calculate Woodbury; constructing the graph.')
hessian_inv_holder, input_holder, Woodbury_hessian_inv_op = \
create_Woodbury_hessian_inv_graph(input_dimension + 1, dataset_size)
else:
hessian_inverse = 1000000 * np.eye(input_dimension)
if use_tf_backend:
print ('Using the tf backend to calculate Woodbury; constructing the graph.')
hessian_inv_holder, input_holder, Woodbury_hessian_inv_op = \
create_Woodbury_hessian_inv_graph(input_dimension, dataset_size)
else:
layer_input_np = layer_input.cpu().numpy()
input_dimension = layer_input_np.shape[1]
dataset_size = n_batch_used * batch_size
hessian_inverse = 1000000 * np.eye(input_dimension + 1)
if use_tf_backend:
print ('Using the tf backend to calculate Woodbury; constructing the graph.')
hessian_inv_holder, input_holder, Woodbury_hessian_inv_op = \
create_Woodbury_hessian_inv_graph(input_dimension + 1, dataset_size)
print '[%s] dataset: %d, input dimension: %d' %(datetime.now(), dataset_size, input_dimension)
# Begin process
if layer_type == 'F':
this_layer_input = layer_input.cpu().numpy() # [2, 4096]
for i in range(this_layer_input.shape[0]):
this_input = this_layer_input[i]
# print this_input.shape
# print np.array([1.0]).shape
wb = np.concatenate([this_input.reshape(1,-1), np.array([1.0]).reshape(1,-1)], axis = 1) # [1, 4097]
if use_tf_backend:
hessian_inverse = sess.run(Woodbury_hessian_inv_op, feed_dict={
hessian_inv_holder: hessian_inverse,
input_holder: wb
})
else:
# [1, 4097] [4097, 4097] [4097, 1]
denominator = dataset_size + np.dot(np.dot(wb,hessian_inverse), wb.T)
# [4097, 4097] [4097, 1] [1, 4097] [4097, 4097]
numerator = np.dot(np.dot(hessian_inverse, wb.T), np.dot(wb,hessian_inverse))
hessian_inverse = hessian_inverse - numerator * (1.0 / denominator)
elif layer_type == 'C' or layer_type == 'R':
this_layer_input = layer_input.permute(0, 2, 3, 1).cpu().numpy()
this_patch = sess.run(get_patches_op, feed_dict={layer_input_holder: this_layer_input})
for i in range(this_patch.shape[0]):
for j in range(this_patch.shape[1]):
for m in range(this_patch.shape[2]):
this_input = this_patch[i][j][m]
if layer_type == 'C':
wb = np.concatenate([this_input.reshape(1,-1), np.array([1.0]).reshape(1,-1)], axis = 1) # [1, 2305]
else:
wb = this_input.reshape(1, -1) # [1, 2304]
if use_tf_backend:
hessian_inverse = sess.run(Woodbury_hessian_inv_op, feed_dict={
hessian_inv_holder: hessian_inverse,
input_holder: wb
})
else:
denominator = dataset_size + np.dot(np.dot(wb,hessian_inverse), wb.T)
numerator = np.dot(np.dot(hessian_inverse, wb.T), np.dot(wb,hessian_inverse))
hessian_inverse = hessian_inverse - numerator * (1.0 / denominator)
if batch_idx % freq_moniter == 0:
print '[%s] Now finish image No. %d / %d' \
%(datetime.now(), batch_idx * batch_size, n_batch_used * batch_size)
if batch_idx == n_batch_used:
sess.close()
break
return hessian_inverse
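# --- Usage sketch (hypothetical, added for illustration) ---
# How the two entry points above are meant to be called. `net` must expose
# module.layer_input / layer_kernel / layer_stride as the functions expect;
# `trainloader` and the layer name 'fc6' with layer type 'F' are placeholders.
#
#   hessian = generate_hessian(net, trainloader, 'fc6', layer_type='F',
#                              n_batch_used=100, batch_size=2)
#   hessian_inv = generate_hessian_inv_Woodbury(net, trainloader, 'fc6', 'F',
#                                               n_batch_used=100, batch_size=2)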
|
|
# -*- coding: UTF-8 -*-
__author__ = 'mcxiaoke'
import sqlite3
from datetime import datetime
from contextlib import closing
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash
import config
import utils
# create app
app = Flask(__name__)
app.config.from_object(config)
def connect_db():
return sqlite3.connect(app.config['DATABASE'])
def init_db():
with closing(connect_db()) as db:
with app.open_resource(app.config['SCHEMA_FILE']) as f:
db.cursor().executescript(f.read())
db.commit()
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
g.db.close()
@app.route('/')
def show_entries():
cur = g.db.execute('SELECT id,title,abstract,text,created_at,user_id FROM entries ORDER BY id DESC')
entries = [dict(id=row[0], title=row[1], abstract=row[2], text=row[3], created_at=row[4], user_id=row[5]) for row in
cur.fetchall()]
return render_template('show_entries.html', entries=entries)
@app.route('/p/<int:id>')
def show_entry(id):
cur = g.db.execute("SELECT id,title,abstract,text,created_at,user_id FROM entries WHERE id=?", (id,))
row = cur.fetchone()
entry = None
if row:
entry = dict(id=row[0], title=row[1], abstract=row[2], text=row[3], created_at=row[4], user_id=row[5])
print "show_entry:", entry
return render_template('show_entry.html', entry=entry)
@app.route('/add', methods=['GET', 'POST'])
def add_entry():
logged_in = session.get('logged_in')
logged_user = session.get('logged_user')
print "add_entry, logged_in:", logged_in
print "add_entry, logged_user:", logged_user
if request.method == 'POST':
if not logged_in or not logged_user:
abort(401)
user_id = session.get('logged_user')
title = request.form['title']
abstract = title
text = request.form['text']
created_at = datetime.strftime(datetime.now(), app.config['DATE_FORMAT'])
g.db.execute("INSERT INTO entries (title,abstract,text,created_at,user_id) "
"VALUES (?,?,?,?,?)",
(title, abstract, text, created_at, user_id,))
g.db.commit()
flash('New entry was successfully posted')
return redirect(url_for('show_entries'))
if not logged_in or not logged_user:
return redirect(url_for('login'))
return render_template('add_entry.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
if not username:
error = 'empty username'
elif not password:
error = 'empty password'
else:
cur = g.db.execute("SELECT username,password FROM accounts WHERE username=?", (username,))
row = cur.fetchone()
if not row:
error = 'user not found'
else:
p_hash = row[1]
print 'login,username=', username, 'password=', password, 'hash=', p_hash
if utils.check_hash(username, app.config['SECRET_KEY'], password, p_hash):
session['logged_in'] = True
session['logged_user'] = request.form['username']
flash('You were logged in')
return redirect(url_for('show_entries'))
else:
error = 'username and password do not match'
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
session.pop('logged_user', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
@app.route('/register', methods=['GET', 'POST'])
def register():
error = None
username = ""
password1 = ""
password2 = ""
nickname = ""
gender = ""
if request.method == 'POST':
username = request.form['username']
password1 = request.form['password1']
password2 = request.form['password2']
nickname = request.form['nickname']
gender = request.form['gender']
if not username:
error = 'username is required'
elif not nickname:
error = 'nickname is required'
elif not gender:
error = 'gender is required'
elif not password1 or not password2:
error = 'password is required'
elif password1 != password2:
error = 'passwords do not match'
else:
cur = g.db.execute("SELECT username FROM accounts WHERE username=?", (username,))
row = cur.fetchone()
print row
if row:
error = 'username already exists'
else:
created_at = datetime.strftime(datetime.now(), app.config['DATE_FORMAT'])
p_hash = utils.make_hash(username, app.config['SECRET_KEY'], password2)
print 'register,username=', username, 'password=', password2, 'hash=', p_hash
g.db.execute(
"INSERT INTO accounts (username,password,nickname,gender,created_at,created_ip) "
"VALUES (?,?,?,?,?,?)",
(username, p_hash, nickname, gender, created_at, request.remote_addr,))
g.db.commit()
result = g.db.execute('SELECT id,username,nickname FROM accounts WHERE username=?',
(username,)).fetchone()
print 'register,result=', result
if result:
session['logged_in'] = True
session['logged_user'] = request.form['username']
flash('You were logged in')
return redirect(url_for('show_entries'))
else:
error = 'register failed'
return render_template('register.html', error=error, username=username, nickname=nickname, gender=gender)
if __name__ == '__main__':
app.run()
|
|
import ConfigParser
import os
import itertools
import logging
import logging.config
import logging.handlers
import platform
import string
import subprocess
import sys
import glob
import inspect
import traceback
import re
import imp
import socket
from socket import gaierror
from optparse import OptionParser, Values
from cStringIO import StringIO
from urlparse import urlparse
# project
from util import get_os, Platform, yLoader
from migration import migrate_old_style_configuration
# 3rd party
import yaml
# CONSTANTS
AGENT_VERSION = "5.3.0"
DATADOG_CONF = "datadog.conf"
DEFAULT_CHECK_FREQUENCY = 15 # seconds
LOGGING_MAX_BYTES = 5 * 1024 * 1024
log = logging.getLogger(__name__)
OLD_STYLE_PARAMETERS = [
('apache_status_url', "apache"),
('cacti_mysql_server' , "cacti"),
('couchdb_server', "couchdb"),
('elasticsearch', "elasticsearch"),
('haproxy_url', "haproxy"),
('hudson_home', "Jenkins"),
('memcache_', "memcached"),
('mongodb_server', "mongodb"),
('mysql_server', "mysql"),
('nginx_status_url', "nginx"),
('postgresql_server', "postgres"),
('redis_urls', "redis"),
('varnishstat', "varnish"),
('WMI', "WMI"),
]
NAGIOS_OLD_CONF_KEYS = [
'nagios_log',
'nagios_perf_cfg'
]
DEFAULT_CHECKS = ("network", "ntp")
LEGACY_DATADOG_URLS = [
"app.datadoghq.com",
"app.datad0g.com",
]
class PathNotFound(Exception):
pass
def get_parsed_args():
parser = OptionParser()
parser.add_option('-A', '--autorestart', action='store_true', default=False,
dest='autorestart')
parser.add_option('-d', '--dd_url', action='store', default=None,
dest='dd_url')
parser.add_option('-c', '--clean', action='store_true', default=False,
dest='clean')
parser.add_option('-u', '--use-local-forwarder', action='store_true',
default=False, dest='use_forwarder')
parser.add_option('-n', '--disable-dd', action='store_true', default=False,
dest="disable_dd")
parser.add_option('-v', '--verbose', action='store_true', default=False,
dest='verbose',
help='Print out stacktraces for errors in checks')
try:
options, args = parser.parse_args()
except SystemExit:
# Ignore parse errors
options, args = Values({'autorestart': False,
'dd_url': None,
'clean': False,
'disable_dd':False,
'use_forwarder': False}), []
return options, args
def get_version():
return AGENT_VERSION
# Return url endpoint, here because needs access to version number
def get_url_endpoint(default_url, endpoint_type='app'):
parsed_url = urlparse(default_url)
if parsed_url.netloc not in LEGACY_DATADOG_URLS:
return default_url
subdomain = parsed_url.netloc.split(".")[0]
# Replace https://app.datadoghq.com in https://5-2-0-app.agent.datadoghq.com
return default_url.replace(subdomain,
"{0}-{1}.agent".format(
get_version().replace(".", "-"),
endpoint_type))
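# Hedged illustration (added, not part of the original file) of the rewrite
# performed by get_url_endpoint() above, assuming AGENT_VERSION "5.3.0".
def _example_url_endpoint():
    assert get_url_endpoint("https://app.datadoghq.com") == \
        "https://5-3-0-app.agent.datadoghq.com"
    # URLs that are not in LEGACY_DATADOG_URLS are returned unchanged
    assert get_url_endpoint("https://example.com") == "https://example.com"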
def skip_leading_wsp(f):
"Works on a file, returns a file-like object"
return StringIO("\n".join(map(string.strip, f.readlines())))
def _windows_commondata_path():
"""Return the common appdata path, using ctypes
From http://stackoverflow.com/questions/626796/\
how-do-i-find-the-windows-common-application-data-folder-using-python
"""
import ctypes
from ctypes import wintypes, windll
CSIDL_COMMON_APPDATA = 35
_SHGetFolderPath = windll.shell32.SHGetFolderPathW
_SHGetFolderPath.argtypes = [wintypes.HWND,
ctypes.c_int,
wintypes.HANDLE,
wintypes.DWORD, wintypes.LPCWSTR]
path_buf = wintypes.create_unicode_buffer(wintypes.MAX_PATH)
result = _SHGetFolderPath(0, CSIDL_COMMON_APPDATA, 0, 0, path_buf)
return path_buf.value
def _windows_config_path():
common_data = _windows_commondata_path()
path = os.path.join(common_data, 'Datadog', DATADOG_CONF)
if os.path.exists(path):
return path
raise PathNotFound(path)
def _windows_confd_path():
common_data = _windows_commondata_path()
path = os.path.join(common_data, 'Datadog', 'conf.d')
if os.path.exists(path):
return path
raise PathNotFound(path)
def _windows_checksd_path():
if hasattr(sys, 'frozen'):
# we're frozen - from py2exe
prog_path = os.path.dirname(sys.executable)
checksd_path = os.path.join(prog_path, '..', 'checks.d')
else:
cur_path = os.path.dirname(__file__)
checksd_path = os.path.join(cur_path, 'checks.d')
if os.path.exists(checksd_path):
return checksd_path
raise PathNotFound(checksd_path)
def _unix_config_path():
path = os.path.join('/etc/dd-agent', DATADOG_CONF)
if os.path.exists(path):
return path
raise PathNotFound(path)
def _unix_confd_path():
path = os.path.join('/etc/dd-agent', 'conf.d')
if os.path.exists(path):
return path
raise PathNotFound(path)
def _unix_checksd_path():
# On Unix, only look up relative to the current directory,
# because checks.d sits alongside the other python modules
cur_path = os.path.dirname(os.path.realpath(__file__))
checksd_path = os.path.join(cur_path, 'checks.d')
if os.path.exists(checksd_path):
return checksd_path
raise PathNotFound(checksd_path)
def _is_affirmative(s):
# int or real bool
if isinstance(s, int):
return bool(s)
# try string cast
return s.lower() in ('yes', 'true', '1')
def get_config_path(cfg_path=None, os_name=None):
# Check if there's an override and if it exists
if cfg_path is not None and os.path.exists(cfg_path):
return cfg_path
if os_name is None:
os_name = get_os()
# Check for an OS-specific path, continue on not-found exceptions
bad_path = ''
if os_name == 'windows':
try:
return _windows_config_path()
except PathNotFound, e:
if len(e.args) > 0:
bad_path = e.args[0]
else:
try:
return _unix_config_path()
except PathNotFound, e:
if len(e.args) > 0:
bad_path = e.args[0]
# Check if there's a config stored in the current agent directory
path = os.path.realpath(__file__)
path = os.path.dirname(path)
if os.path.exists(os.path.join(path, DATADOG_CONF)):
return os.path.join(path, DATADOG_CONF)
# If all searches fail, exit the agent with an error
sys.stderr.write("Please supply a configuration file at %s or in the directory where the Agent is currently deployed.\n" % bad_path)
sys.exit(3)
def get_default_bind_host():
try:
socket.gethostbyname('localhost')
except gaierror:
log.warning("localhost seems undefined in your hosts file, using 127.0.0.1 instead")
return '127.0.0.1'
return 'localhost'
def get_histogram_aggregates(configstr=None):
if configstr is None:
return None
try:
vals = configstr.split(',')
valid_values = ['min', 'max', 'median', 'avg', 'count']
result = []
for val in vals:
val = val.strip()
if val not in valid_values:
log.warning("Ignored histogram aggregate {0}, invalid".format(val))
continue
else:
result.append(val)
except Exception:
log.exception("Error when parsing histogram aggregates, skipping")
return None
return result
def get_histogram_percentiles(configstr=None):
if configstr is None:
return None
result = []
try:
vals = configstr.split(',')
for val in vals:
try:
val = val.strip()
floatval = float(val)
if floatval <= 0 or floatval >= 1:
raise ValueError
if len(val) > 4:
log.warning("Histogram percentiles are rounded to 2 digits: {0} rounded"\
.format(floatval))
result.append(float(val[0:4]))
except ValueError:
log.warning("Bad histogram percentile value {0}, must be float in ]0;1[, skipping"\
.format(val))
except Exception:
log.exception("Error when parsing histogram percentiles, skipping")
return None
return result
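# Hedged illustration (added, not part of the original file) of the two
# parsers above: invalid entries are dropped with a warning, and percentile
# values are truncated to two decimal places.
def _example_histogram_parsing():
    assert get_histogram_aggregates("max, median, bogus") == ['max', 'median']
    assert get_histogram_percentiles("0.95, 0.999, 2") == [0.95, 0.99]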
def get_config(parse_args=True, cfg_path=None, options=None):
if parse_args:
options, _ = get_parsed_args()
# General config
agentConfig = {
'check_freq': DEFAULT_CHECK_FREQUENCY,
'dogstatsd_port': 8125,
'dogstatsd_target': 'http://localhost:17123',
'graphite_listen_port': None,
'hostname': None,
'listen_port': None,
'tags': None,
'use_ec2_instance_id': False, # DEPRECATED
'version': get_version(),
'watchdog': True,
'additional_checksd': '/etc/dd-agent/checks.d/',
'bind_host': get_default_bind_host(),
'statsd_metric_namespace': None,
'utf8_decoding': False
}
# Config handling
try:
# Find the right config file
path = os.path.realpath(__file__)
path = os.path.dirname(path)
config_path = get_config_path(cfg_path, os_name=get_os())
config = ConfigParser.ConfigParser()
config.readfp(skip_leading_wsp(open(config_path)))
# bulk import
for option in config.options('Main'):
agentConfig[option] = config.get('Main', option)
#
# Core config
#
# FIXME unnecessarily complex
if config.has_option('Main', 'use_dd'):
agentConfig['use_dd'] = config.get('Main', 'use_dd').lower() in ("yes", "true")
else:
agentConfig['use_dd'] = True
agentConfig['use_forwarder'] = False
if options is not None and options.use_forwarder:
listen_port = 17123
if config.has_option('Main', 'listen_port'):
listen_port = int(config.get('Main', 'listen_port'))
agentConfig['dd_url'] = "http://" + agentConfig['bind_host'] + ":" + str(listen_port)
agentConfig['use_forwarder'] = True
elif options is not None and not options.disable_dd and options.dd_url:
agentConfig['dd_url'] = options.dd_url
else:
agentConfig['dd_url'] = config.get('Main', 'dd_url')
if agentConfig['dd_url'].endswith('/'):
agentConfig['dd_url'] = agentConfig['dd_url'][:-1]
# Extra checks.d path
# the linux directory is set by default
if config.has_option('Main', 'additional_checksd'):
agentConfig['additional_checksd'] = config.get('Main', 'additional_checksd')
elif get_os() == 'windows':
# default windows location
common_path = _windows_commondata_path()
agentConfig['additional_checksd'] = os.path.join(common_path, 'Datadog', 'checks.d')
if config.has_option('Main', 'use_dogstatsd'):
agentConfig['use_dogstatsd'] = config.get('Main', 'use_dogstatsd').lower() in ("yes", "true")
else:
agentConfig['use_dogstatsd'] = True
# Concerns only Windows
if config.has_option('Main', 'use_web_info_page'):
agentConfig['use_web_info_page'] = config.get('Main', 'use_web_info_page').lower() in ("yes", "true")
else:
agentConfig['use_web_info_page'] = True
if not agentConfig['use_dd']:
sys.stderr.write("Please specify at least one endpoint to send metrics to. This can be done in datadog.conf.")
exit(2)
# Which API key to use
agentConfig['api_key'] = config.get('Main', 'api_key')
# local traffic only? Default to no
agentConfig['non_local_traffic'] = False
if config.has_option('Main', 'non_local_traffic'):
agentConfig['non_local_traffic'] = config.get('Main', 'non_local_traffic').lower() in ("yes", "true")
# DEPRECATED
if config.has_option('Main', 'use_ec2_instance_id'):
use_ec2_instance_id = config.get('Main', 'use_ec2_instance_id')
# translate yes into True, the rest into False
agentConfig['use_ec2_instance_id'] = (use_ec2_instance_id.lower() == 'yes')
if config.has_option('Main', 'check_freq'):
try:
agentConfig['check_freq'] = int(config.get('Main', 'check_freq'))
except Exception:
pass
# Custom histogram aggregate/percentile metrics
if config.has_option('Main', 'histogram_aggregates'):
agentConfig['histogram_aggregates'] = get_histogram_aggregates(config.get('Main', 'histogram_aggregates'))
if config.has_option('Main', 'histogram_percentiles'):
agentConfig['histogram_percentiles'] = get_histogram_percentiles(config.get('Main', 'histogram_percentiles'))
# Disable Watchdog (optionally)
if config.has_option('Main', 'watchdog'):
if config.get('Main', 'watchdog').lower() in ('no', 'false'):
agentConfig['watchdog'] = False
# Optional graphite listener
if config.has_option('Main', 'graphite_listen_port'):
agentConfig['graphite_listen_port'] = \
int(config.get('Main', 'graphite_listen_port'))
else:
agentConfig['graphite_listen_port'] = None
# Dogstatsd config
dogstatsd_defaults = {
'dogstatsd_port': 8125,
'dogstatsd_target': 'http://' + agentConfig['bind_host'] + ':17123',
}
for key, value in dogstatsd_defaults.iteritems():
if config.has_option('Main', key):
agentConfig[key] = config.get('Main', key)
else:
agentConfig[key] = value
#Forwarding to external statsd server
if config.has_option('Main', 'statsd_forward_host'):
agentConfig['statsd_forward_host'] = config.get('Main', 'statsd_forward_host')
if config.has_option('Main', 'statsd_forward_port'):
agentConfig['statsd_forward_port'] = int(config.get('Main', 'statsd_forward_port'))
# optionally send dogstatsd data directly to the agent.
if config.has_option('Main', 'dogstatsd_use_ddurl'):
if _is_affirmative(config.get('Main', 'dogstatsd_use_ddurl')):
agentConfig['dogstatsd_target'] = agentConfig['dd_url']
# Optional config
# FIXME not the prettiest code ever...
if config.has_option('Main', 'use_mount'):
agentConfig['use_mount'] = _is_affirmative(config.get('Main', 'use_mount'))
if options is not None and options.autorestart:
agentConfig['autorestart'] = True
elif config.has_option('Main', 'autorestart'):
agentConfig['autorestart'] = _is_affirmative(config.get('Main', 'autorestart'))
if config.has_option('Main', 'check_timings'):
agentConfig['check_timings'] = _is_affirmative(config.get('Main', 'check_timings'))
if config.has_option('Main', 'exclude_process_args'):
agentConfig['exclude_process_args'] = _is_affirmative(config.get('Main', 'exclude_process_args'))
try:
filter_device_re = config.get('Main', 'device_blacklist_re')
agentConfig['device_blacklist_re'] = re.compile(filter_device_re)
except ConfigParser.NoOptionError:
pass
if config.has_option('datadog', 'ddforwarder_log'):
agentConfig['has_datadog'] = True
# Dogstream config
if config.has_option("Main", "dogstream_log"):
# Older version, single log support
log_path = config.get("Main", "dogstream_log")
if config.has_option("Main", "dogstream_line_parser"):
agentConfig["dogstreams"] = ':'.join([log_path, config.get("Main", "dogstream_line_parser")])
else:
agentConfig["dogstreams"] = log_path
elif config.has_option("Main", "dogstreams"):
agentConfig["dogstreams"] = config.get("Main", "dogstreams")
if config.has_option("Main", "nagios_perf_cfg"):
agentConfig["nagios_perf_cfg"] = config.get("Main", "nagios_perf_cfg")
if config.has_option("Main", "use_curl_http_client"):
agentConfig["use_curl_http_client"] = _is_affirmative(config.get("Main", "use_curl_http_client"))
else:
# Default to False as there are some issues with the curl client and ELB
agentConfig["use_curl_http_client"] = False
if config.has_section('WMI'):
agentConfig['WMI'] = {}
for key, value in config.items('WMI'):
agentConfig['WMI'][key] = value
if config.has_option("Main", "limit_memory_consumption") and \
config.get("Main", "limit_memory_consumption") is not None:
agentConfig["limit_memory_consumption"] = int(config.get("Main", "limit_memory_consumption"))
else:
agentConfig["limit_memory_consumption"] = None
if config.has_option("Main", "skip_ssl_validation"):
agentConfig["skip_ssl_validation"] = _is_affirmative(config.get("Main", "skip_ssl_validation"))
agentConfig["collect_instance_metadata"] = True
if config.has_option("Main", "collect_instance_metadata"):
agentConfig["collect_instance_metadata"] = _is_affirmative(config.get("Main", "collect_instance_metadata"))
agentConfig["proxy_forbid_method_switch"] = False
if config.has_option("Main", "proxy_forbid_method_switch"):
agentConfig["proxy_forbid_method_switch"] = _is_affirmative(config.get("Main", "proxy_forbid_method_switch"))
agentConfig["collect_ec2_tags"] = False
if config.has_option("Main", "collect_ec2_tags"):
agentConfig["collect_ec2_tags"] = _is_affirmative(config.get("Main", "collect_ec2_tags"))
agentConfig["utf8_decoding"] = False
if config.has_option("Main", "utf8_decoding"):
agentConfig["utf8_decoding"] = _is_affirmative(config.get("Main", "utf8_decoding"))
except ConfigParser.NoSectionError, e:
sys.stderr.write('Config file not found or incorrectly formatted.\n')
sys.exit(2)
except ConfigParser.ParsingError, e:
sys.stderr.write('Config file not found or incorrectly formatted.\n')
sys.exit(2)
except ConfigParser.NoOptionError, e:
sys.stderr.write('There are some items missing from your config file, but nothing fatal [%s]' % e)
# Storing proxy settings in the agentConfig
agentConfig['proxy_settings'] = get_proxy(agentConfig)
if agentConfig.get('ca_certs', None) is None:
agentConfig['ssl_certificate'] = get_ssl_certificate(get_os(), 'datadog-cert.pem')
else:
agentConfig['ssl_certificate'] = agentConfig['ca_certs']
return agentConfig
def get_system_stats():
systemStats = {
'machine': platform.machine(),
'platform': sys.platform,
'processor': platform.processor(),
'pythonV': platform.python_version(),
}
platf = sys.platform
if Platform.is_linux(platf):
grep = subprocess.Popen(['grep', 'model name', '/proc/cpuinfo'], stdout=subprocess.PIPE, close_fds=True)
wc = subprocess.Popen(['wc', '-l'], stdin=grep.stdout, stdout=subprocess.PIPE, close_fds=True)
systemStats['cpuCores'] = int(wc.communicate()[0])
if Platform.is_darwin(platf):
systemStats['cpuCores'] = int(subprocess.Popen(['sysctl', 'hw.ncpu'], stdout=subprocess.PIPE, close_fds=True).communicate()[0].split(': ')[1])
if Platform.is_freebsd(platf):
systemStats['cpuCores'] = int(subprocess.Popen(['sysctl', 'hw.ncpu'], stdout=subprocess.PIPE, close_fds=True).communicate()[0].split(': ')[1])
if Platform.is_linux(platf):
systemStats['nixV'] = platform.dist()
elif Platform.is_darwin(platf):
systemStats['macV'] = platform.mac_ver()
elif Platform.is_freebsd(platf):
version = platform.uname()[2]
systemStats['fbsdV'] = ('freebsd', version, '') # no codename for FreeBSD
elif Platform.is_win32(platf):
systemStats['winV'] = platform.win32_ver()
return systemStats
def set_win32_cert_path():
"""In order to use tornado.httpclient with the packaged .exe on Windows we
need to override the default certificate location which is based on the path
to tornado and will give something like "C:\path\to\program.exe\tornado/cert-file".
If pull request #379 is accepted (https://github.com/facebook/tornado/pull/379) we
will be able to override this in a clean way. For now, we have to monkey patch
tornado.httpclient._DEFAULT_CA_CERTS
"""
if hasattr(sys, 'frozen'):
# we're frozen - from py2exe
prog_path = os.path.dirname(sys.executable)
crt_path = os.path.join(prog_path, 'ca-certificates.crt')
else:
cur_path = os.path.dirname(__file__)
crt_path = os.path.join(cur_path, 'packaging', 'datadog-agent', 'win32',
'install_files', 'ca-certificates.crt')
import tornado.simple_httpclient
log.info("Windows certificate path: %s" % crt_path)
tornado.simple_httpclient._DEFAULT_CA_CERTS = crt_path
def get_proxy(agentConfig, use_system_settings=False):
proxy_settings = {}
# First we read the proxy configuration from datadog.conf
proxy_host = agentConfig.get('proxy_host', None)
if proxy_host is not None and not use_system_settings:
proxy_settings['host'] = proxy_host
try:
proxy_settings['port'] = int(agentConfig.get('proxy_port', 3128))
except ValueError:
log.error('Proxy port must be an Integer. Defaulting it to 3128')
proxy_settings['port'] = 3128
proxy_settings['user'] = agentConfig.get('proxy_user', None)
proxy_settings['password'] = agentConfig.get('proxy_password', None)
proxy_settings['system_settings'] = False
log.debug("Proxy Settings: %s:%s@%s:%s" % (proxy_settings['user'], "*****", proxy_settings['host'], proxy_settings['port']))
return proxy_settings
# If no proxy configuration was specified in datadog.conf
# We try to read it from the system settings
try:
import urllib
proxies = urllib.getproxies()
proxy = proxies.get('https', None)
if proxy is not None:
try:
proxy = proxy.split('://')[1]
except Exception:
pass
px = proxy.split(':')
proxy_settings['host'] = px[0]
proxy_settings['port'] = int(px[1])
proxy_settings['user'] = None
proxy_settings['password'] = None
proxy_settings['system_settings'] = True
if '@' in proxy_settings['host']:
creds = proxy_settings['host'].split('@')[0].split(':')
proxy_settings['user'] = creds[0]
if len(creds) == 2:
proxy_settings['password'] = creds[1]
log.debug("Proxy Settings: %s:%s@%s:%s" % (proxy_settings['user'], "*****", proxy_settings['host'], proxy_settings['port']))
return proxy_settings
except Exception, e:
log.debug("Error while trying to fetch proxy settings using urllib %s. Proxy is probably not set" % str(e))
log.debug("No proxy configured")
return None
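# Hedged illustration (added, not part of the original file) of the
# datadog.conf branch of get_proxy(): host, port and credentials are read
# straight from the agent configuration dictionary. The values below are
# placeholders.
def _example_proxy_settings():
    settings = get_proxy({'proxy_host': 'proxy.example.com',
                          'proxy_port': '3129',
                          'proxy_user': 'dd',
                          'proxy_password': 'secret'})
    assert settings['host'] == 'proxy.example.com'
    assert settings['port'] == 3129
    assert settings['system_settings'] is False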
def get_confd_path(osname=None):
if not osname:
osname = get_os()
bad_path = ''
if osname == 'windows':
try:
return _windows_confd_path()
except PathNotFound, e:
if len(e.args) > 0:
bad_path = e.args[0]
else:
try:
return _unix_confd_path()
except PathNotFound, e:
if len(e.args) > 0:
bad_path = e.args[0]
cur_path = os.path.dirname(os.path.realpath(__file__))
cur_path = os.path.join(cur_path, 'conf.d')
if os.path.exists(cur_path):
return cur_path
raise PathNotFound(bad_path)
def get_checksd_path(osname=None):
if not osname:
osname = get_os()
if osname == 'windows':
return _windows_checksd_path()
else:
return _unix_checksd_path()
def get_win32service_file(osname, filename):
# This file is needed to log in the event viewer for windows
if osname == 'windows':
if hasattr(sys, 'frozen'):
# we're frozen - from py2exe
prog_path = os.path.dirname(sys.executable)
path = os.path.join(prog_path, filename)
else:
cur_path = os.path.dirname(__file__)
path = os.path.join(cur_path, filename)
if os.path.exists(path):
log.debug("Certificate file found at %s" % str(path))
return path
else:
cur_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(cur_path, filename)
if os.path.exists(path):
return path
return None
def get_ssl_certificate(osname, filename):
# The SSL certificate is needed by tornado in case of connection through a proxy
if osname == 'windows':
if hasattr(sys, 'frozen'):
# we're frozen - from py2exe
prog_path = os.path.dirname(sys.executable)
path = os.path.join(prog_path, filename)
else:
cur_path = os.path.dirname(__file__)
path = os.path.join(cur_path, filename)
if os.path.exists(path):
log.debug("Certificate file found at %s" % str(path))
return path
else:
cur_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(cur_path, filename)
if os.path.exists(path):
return path
log.info("Certificate file NOT found at %s" % str(path))
return None
def check_yaml(conf_path):
f = open(conf_path)
check_name = os.path.basename(conf_path).split('.')[0]
try:
check_config = yaml.load(f.read(), Loader=yLoader)
assert 'init_config' in check_config, "No 'init_config' section found"
assert 'instances' in check_config, "No 'instances' section found"
valid_instances = True
if check_config['instances'] is None or not isinstance(check_config['instances'], list):
valid_instances = False
else:
for i in check_config['instances']:
if not isinstance(i, dict):
valid_instances = False
break
if not valid_instances:
raise Exception('You need to have at least one instance defined in the YAML file for this check')
else:
return check_config
finally:
f.close()
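# A minimal conf.d YAML layout accepted by check_yaml() above (hypothetical
# check options, shown only for illustration): both top-level sections are
# required and 'instances' must be a list of mappings.
#
#   init_config:
#       min_collection_interval: 30
#   instances:
#       - host: localhost
#         port: 8080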
def load_check_directory(agentConfig, hostname):
''' Return the initialized checks from checks.d, and a mapping of checks that failed to
initialize. Only checks that have a configuration
file in conf.d will be returned. '''
from checks import AgentCheck
initialized_checks = {}
init_failed_checks = {}
deprecated_checks = {}
agentConfig['checksd_hostname'] = hostname
deprecated_configs_enabled = [v for k,v in OLD_STYLE_PARAMETERS if len([l for l in agentConfig if l.startswith(k)]) > 0]
for deprecated_config in deprecated_configs_enabled:
msg = "Configuring %s in datadog.conf is not supported anymore. Please use conf.d" % deprecated_config
deprecated_checks[deprecated_config] = {'error': msg, 'traceback': None}
log.error(msg)
osname = get_os()
checks_paths = [glob.glob(os.path.join(agentConfig['additional_checksd'], '*.py'))]
try:
checksd_path = get_checksd_path(osname)
checks_paths.append(glob.glob(os.path.join(checksd_path, '*.py')))
except PathNotFound, e:
log.error(e.args[0])
sys.exit(3)
try:
confd_path = get_confd_path(osname)
except PathNotFound, e:
log.error("No conf.d folder found at '%s' or in the directory where the Agent is currently deployed.\n" % e.args[0])
sys.exit(3)
# Migrate datadog.conf integration configurations that are not supported anymore
migrate_old_style_configuration(agentConfig, confd_path, get_config_path(None, os_name=get_os()))
# We don't support old style configs anymore
# So we iterate over the files in the checks.d directory
# If there is a matching configuration file in the conf.d directory
# then we import the check
for check in itertools.chain(*checks_paths):
check_name = os.path.basename(check).split('.')[0]
check_config = None
if check_name in initialized_checks or check_name in init_failed_checks:
log.debug('Skipping check %s because it has already been loaded from another location', check)
continue
# Let's see if there is a conf.d for this check
conf_path = os.path.join(confd_path, '%s.yaml' % check_name)
# Default checks are checks that are enabled by default
# They read their config from the "[CHECKNAME].yaml.default" file
if check_name in DEFAULT_CHECKS:
default_conf_path = os.path.join(confd_path, '%s.yaml.default' % check_name)
else:
default_conf_path = None
conf_exists = False
if os.path.exists(conf_path):
conf_exists = True
elif not conf_exists and default_conf_path is not None:
if not os.path.exists(default_conf_path):
log.error("Default configuration file {0} is missing".format(default_conf_path))
continue
conf_path = default_conf_path
conf_exists = True
if conf_exists:
f = open(conf_path)
try:
check_config = check_yaml(conf_path)
except Exception, e:
log.exception("Unable to parse yaml config in %s" % conf_path)
traceback_message = traceback.format_exc()
init_failed_checks[check_name] = {'error':str(e), 'traceback':traceback_message}
continue
else:
# Compatibility code for the Nagios checks if it's still configured
# in datadog.conf
# FIXME: should be removed in a later major version
if check_name == 'nagios':
if any([nagios_key in agentConfig for nagios_key in NAGIOS_OLD_CONF_KEYS]):
log.warning("Configuring Nagios in datadog.conf is deprecated "
"and will be removed in a future version. "
"Please use conf.d")
check_config = {'instances':[dict((key, agentConfig[key]) for key in agentConfig if key in NAGIOS_OLD_CONF_KEYS)]}
else:
continue
else:
log.debug("No configuration file for %s" % check_name)
continue
# If we are here, there is a valid matching configuration file.
# Let's try to import the check
try:
check_module = imp.load_source('checksd_%s' % check_name, check)
except Exception, e:
traceback_message = traceback.format_exc()
# There is a configuration file for that check but the module can't be imported
init_failed_checks[check_name] = {'error':e, 'traceback':traceback_message}
log.exception('Unable to import check module %s.py from checks.d' % check_name)
continue
# We make sure that there is an AgentCheck class defined
check_class = None
classes = inspect.getmembers(check_module, inspect.isclass)
for _, clsmember in classes:
if clsmember == AgentCheck:
continue
if issubclass(clsmember, AgentCheck):
check_class = clsmember
if AgentCheck in clsmember.__bases__:
continue
else:
break
if not check_class:
log.error('No check class (inheriting from AgentCheck) found in %s.py' % check_name)
continue
# Look for the per-check config, which *must* exist
if not check_config.get('instances'):
log.error("Config %s is missing 'instances'" % conf_path)
continue
# Init all of the check's classes with the init_config section
init_config = check_config.get('init_config', {})
# An empty 'init_config:' entry in the configuration causes init_config
# to be None.
if init_config is None:
init_config = {}
instances = check_config['instances']
try:
try:
c = check_class(check_name, init_config=init_config,
agentConfig=agentConfig, instances=instances)
except TypeError, e:
# Backwards compatibility for checks which don't support the
# instances argument in the constructor.
c = check_class(check_name, init_config=init_config,
agentConfig=agentConfig)
c.instances = instances
except Exception, e:
log.exception('Unable to initialize check %s' % check_name)
traceback_message = traceback.format_exc()
init_failed_checks[check_name] = {'error':e, 'traceback':traceback_message}
else:
initialized_checks[check_name] = c
# Add custom pythonpath(s) if available
if 'pythonpath' in check_config:
pythonpath = check_config['pythonpath']
if not isinstance(pythonpath, list):
pythonpath = [pythonpath]
sys.path.extend(pythonpath)
log.debug('Loaded check.d/%s.py' % check_name)
init_failed_checks.update(deprecated_checks)
log.info('initialized checks.d checks: %s' % initialized_checks.keys())
log.info('initialization failed checks.d checks: %s' % init_failed_checks.keys())
return {'initialized_checks':initialized_checks.values(),
'init_failed_checks':init_failed_checks,
}
#
# logging
def get_log_date_format():
return "%Y-%m-%d %H:%M:%S %Z"
def get_log_format(logger_name):
if get_os() != 'windows':
return '%%(asctime)s | %%(levelname)s | dd.%s | %%(name)s(%%(filename)s:%%(lineno)s) | %%(message)s' % logger_name
return '%(asctime)s | %(levelname)s | %(name)s(%(filename)s:%(lineno)s) | %(message)s'
def get_syslog_format(logger_name):
return 'dd.%s[%%(process)d]: %%(levelname)s (%%(filename)s:%%(lineno)s): %%(message)s' % logger_name
def get_logging_config(cfg_path=None):
system_os = get_os()
if system_os != 'windows':
logging_config = {
'log_level': None,
'collector_log_file': '/var/log/datadog/collector.log',
'forwarder_log_file': '/var/log/datadog/forwarder.log',
'dogstatsd_log_file': '/var/log/datadog/dogstatsd.log',
'jmxfetch_log_file': '/var/log/datadog/jmxfetch.log',
'log_to_event_viewer': False,
'log_to_syslog': True,
'syslog_host': None,
'syslog_port': None,
}
else:
collector_log_location = os.path.join(_windows_commondata_path(), 'Datadog', 'logs', 'collector.log')
forwarder_log_location = os.path.join(_windows_commondata_path(), 'Datadog', 'logs', 'forwarder.log')
dogstatsd_log_location = os.path.join(_windows_commondata_path(), 'Datadog', 'logs', 'dogstatsd.log')
jmxfetch_log_file = os.path.join(_windows_commondata_path(), 'Datadog', 'logs', 'jmxfetch.log')
logging_config = {
'log_level': None,
'windows_collector_log_file': collector_log_location,
'windows_forwarder_log_file': forwarder_log_location,
'windows_dogstatsd_log_file': dogstatsd_log_location,
'jmxfetch_log_file': jmxfetch_log_file,
'log_to_event_viewer': False,
'log_to_syslog': False,
'syslog_host': None,
'syslog_port': None,
}
config_path = get_config_path(cfg_path, os_name=system_os)
config = ConfigParser.ConfigParser()
config.readfp(skip_leading_wsp(open(config_path)))
if config.has_section('handlers') or config.has_section('loggers') or config.has_section('formatters'):
if system_os == 'windows':
config_example_file = "https://github.com/DataDog/dd-agent/blob/master/packaging/datadog-agent/win32/install_files/datadog_win32.conf"
else:
config_example_file = "https://github.com/DataDog/dd-agent/blob/master/datadog.conf.example"
sys.stderr.write("""Python logging config is no longer supported and will be ignored.
To configure logging, update the logging portion of 'datadog.conf' to match:
'%s'.
""" % config_example_file)
for option in logging_config:
if config.has_option('Main', option):
logging_config[option] = config.get('Main', option)
levels = {
'CRITICAL': logging.CRITICAL,
'DEBUG': logging.DEBUG,
'ERROR': logging.ERROR,
'FATAL': logging.FATAL,
'INFO': logging.INFO,
'WARN': logging.WARN,
'WARNING': logging.WARNING,
}
if config.has_option('Main', 'log_level'):
logging_config['log_level'] = levels.get(config.get('Main', 'log_level'))
if config.has_option('Main', 'log_to_syslog'):
logging_config['log_to_syslog'] = config.get('Main', 'log_to_syslog').strip().lower() in ['yes', 'true', 1]
if config.has_option('Main', 'log_to_event_viewer'):
logging_config['log_to_event_viewer'] = config.get('Main', 'log_to_event_viewer').strip().lower() in ['yes', 'true', 1]
if config.has_option('Main', 'syslog_host'):
host = config.get('Main', 'syslog_host').strip()
if host:
logging_config['syslog_host'] = host
else:
logging_config['syslog_host'] = None
if config.has_option('Main', 'syslog_port'):
port = config.get('Main', 'syslog_port').strip()
try:
logging_config['syslog_port'] = int(port)
except Exception:
logging_config['syslog_port'] = None
if config.has_option('Main', 'disable_file_logging'):
logging_config['disable_file_logging'] = config.get('Main', 'disable_file_logging').strip().lower() in ['yes', 'true', 1]
else:
logging_config['disable_file_logging'] = False
return logging_config
def initialize_logging(logger_name):
try:
logging_config = get_logging_config()
logging.basicConfig(
format=get_log_format(logger_name),
level=logging_config['log_level'] or logging.INFO,
)
log_file = logging_config.get('%s_log_file' % logger_name)
if log_file is not None and not logging_config['disable_file_logging']:
# make sure the log directory is writeable
# NOTE: the entire directory needs to be writable so that rotation works
if os.access(os.path.dirname(log_file), os.R_OK | os.W_OK):
file_handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=LOGGING_MAX_BYTES, backupCount=1)
formatter = logging.Formatter(get_log_format(logger_name), get_log_date_format())
file_handler.setFormatter(formatter)
root_log = logging.getLogger()
root_log.addHandler(file_handler)
else:
sys.stderr.write("Log file is unwritable: '%s'\n" % log_file)
# set up syslog
if logging_config['log_to_syslog']:
try:
from logging.handlers import SysLogHandler
if logging_config['syslog_host'] is not None and logging_config['syslog_port'] is not None:
sys_log_addr = (logging_config['syslog_host'], logging_config['syslog_port'])
else:
sys_log_addr = "/dev/log"
# Special-case macs
if sys.platform == 'darwin':
sys_log_addr = "/var/run/syslog"
handler = SysLogHandler(address=sys_log_addr, facility=SysLogHandler.LOG_DAEMON)
handler.setFormatter(logging.Formatter(get_syslog_format(logger_name), get_log_date_format()))
root_log = logging.getLogger()
root_log.addHandler(handler)
except Exception, e:
sys.stderr.write("Error setting up syslog: '%s'\n" % str(e))
traceback.print_exc()
# Setting up logging in the event viewer for windows
if get_os() == 'windows' and logging_config['log_to_event_viewer']:
try:
from logging.handlers import NTEventLogHandler
nt_event_handler = NTEventLogHandler(logger_name,get_win32service_file('windows', 'win32service.pyd'), 'Application')
nt_event_handler.setFormatter(logging.Formatter(get_syslog_format(logger_name), get_log_date_format()))
nt_event_handler.setLevel(logging.ERROR)
app_log = logging.getLogger(logger_name)
app_log.addHandler(nt_event_handler)
except Exception, e:
sys.stderr.write("Error setting up Event viewer logging: '%s'\n" % str(e))
traceback.print_exc()
except Exception, e:
sys.stderr.write("Couldn't initialize logging: %s\n" % str(e))
traceback.print_exc()
# if config fails entirely, enable basic stdout logging as a fallback
logging.basicConfig(
format=get_log_format(logger_name),
level=logging.INFO,
)
# re-get the log after logging is initialized
global log
log = logging.getLogger(__name__)
|
|
import datetime
from dateutil.parser import parse
from decimal import Decimal
import re
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.utils import datetime_safe, importlib
from tastypie.bundle import Bundle
from tastypie.exceptions import ApiFieldError, NotFound
from tastypie.utils import dict_strip_unicode_keys, make_aware
class NOT_PROVIDED:
def __str__(self):
return 'No default provided.'
DATE_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2}).*?$')
DATETIME_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')
# All the ApiField variants.
class ApiField(object):
"""The base implementation of a field used by the resources."""
dehydrated_type = 'string'
help_text = ''
def __init__(self, attribute=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, unique=False, help_text=None):
"""
Sets up the field. This is generally called when the containing
``Resource`` is initialized.
Optionally accepts an ``attribute``, which should be the string name of
an instance attribute or callable on the object; it is read during
``dehydrate`` and used to push data onto the object during ``hydrate``.
Defaults to ``None``, meaning data will be manually accessed.
Optionally accepts a ``default``, which provides default data when the
object being ``dehydrated``/``hydrated`` has no data on the field.
Defaults to ``NOT_PROVIDED``.
Optionally accepts a ``null``, which indicates whether or not a
``None`` is allowable data on the field. Defaults to ``False``.
Optionally accepts a ``blank``, which indicates whether or not
data may be omitted on the field. Defaults to ``False``.
Optionally accepts a ``readonly``, which indicates whether the field
is used during the ``hydrate`` or not. Defaults to ``False``.
Optionally accepts a ``unique``, which indicates if the field is a
unique identifier for the object.
Optionally accepts ``help_text``, which lets you provide a
human-readable description of the field exposed at the schema level.
Defaults to the per-Field definition.
"""
# Track what the index thinks this field is called.
self.instance_name = None
self._resource = None
self.attribute = attribute
self._default = default
self.null = null
self.blank = blank
self.readonly = readonly
self.value = None
self.unique = unique
if help_text:
self.help_text = help_text
def contribute_to_class(self, cls, name):
# Do the least we can here so that we don't hate ourselves in the
# morning.
self.instance_name = name
self._resource = cls
def has_default(self):
"""Returns a boolean of whether this field has a default value."""
return self._default is not NOT_PROVIDED
@property
def default(self):
"""Returns the default value for the field."""
if callable(self._default):
return self._default()
return self._default
def dehydrate(self, bundle):
"""
Takes data from the provided object and prepares it for the
resource.
"""
if self.attribute is not None:
# Check for `__` in the field for looking through the relation.
attrs = self.attribute.split('__')
current_object = bundle.obj
for attr in attrs:
previous_object = current_object
current_object = getattr(current_object, attr, None)
if current_object is None:
if self.has_default():
current_object = self._default
# Fall out of the loop, given any further attempts at
# accesses will fail miserably.
break
elif self.null:
current_object = None
# Fall out of the loop, given any further attempts at
# accesses will fail miserably.
break
else:
raise ApiFieldError("The object '%r' has an empty attribute '%s' and doesn't allow a default or null value." % (previous_object, attr))
if callable(current_object):
current_object = current_object()
return self.convert(current_object)
if self.has_default():
return self.convert(self.default)
else:
return None
def convert(self, value):
"""
Handles conversion between the data found and the type of the field.
Extending classes should override this method and provide correct
data coercion.
"""
return value
def hydrate(self, bundle):
"""
Takes data stored in the bundle for the field and returns it. Used for
taking simple data and building an instance object.
"""
if self.readonly:
return None
if not bundle.data.has_key(self.instance_name):
if getattr(self, 'is_related', False) and not getattr(self, 'is_m2m', False):
# We've got an FK (or alike field) & a possible parent object.
# Check for it.
if bundle.related_obj and bundle.related_name in (self.attribute, self.instance_name):
return bundle.related_obj
if self.blank:
return None
elif self.attribute and getattr(bundle.obj, self.attribute, None):
return getattr(bundle.obj, self.attribute)
elif self.instance_name and getattr(bundle.obj, self.instance_name, None):
return getattr(bundle.obj, self.instance_name)
elif self.has_default():
if callable(self._default):
return self._default()
return self._default
elif self.null:
return None
else:
raise ApiFieldError("The '%s' field has no data and doesn't allow a default or null value." % self.instance_name)
bundle_val = bundle.data[self.instance_name]
if bundle_val is None and not self.null:
raise ApiFieldError("The '%s' field doesn't allow a null value." % self.instance_name)
else:
return bundle_val
def set_value_on_bundle_obj(self, bundle, value):
"""
Overrideable hook for writing a value into the object on a bundle. Enables the use of
custom setters in your app code if setattr() is too raw for your fancy ORM model.
"""
setattr(bundle.obj, self.attribute, value)
class CharField(ApiField):
"""
A text field of arbitrary length.
Covers both ``models.CharField`` and ``models.TextField``.
"""
dehydrated_type = 'string'
help_text = 'Unicode string data. Ex: "Hello World"'
def convert(self, value):
if value is None:
return None
return unicode(value)
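# Hedged illustration (not part of the original module): ``ApiField.dehydrate`` follows
# Django-style ``__`` lookups through related objects, so ``attribute='author__name'``
# reads ``bundle.obj.author.name``.  The stand-in objects below are invented for this
# sketch, and it assumes (as in upstream tastypie) that ``Bundle`` can be built from
# just ``obj=...``.
_demo_author = type(str('_DemoAuthor'), (object,), {'name': u'Ada'})()
_demo_note = type(str('_DemoNote'), (object,), {'author': _demo_author})()
assert CharField(attribute='author__name').dehydrate(Bundle(obj=_demo_note)) == u'Ada'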
class FileField(ApiField):
"""
A file-related field.
Covers both ``models.FileField`` and ``models.ImageField``.
"""
dehydrated_type = 'string'
help_text = 'A file URL as a string. Ex: "http://media.example.com/media/photos/my_photo.jpg"'
def convert(self, value):
if value is None:
return None
try:
# Try to return the URL if it's a ``File``, falling back to the string
# itself if it's been overridden or is a default.
return getattr(value, 'url', value)
except ValueError:
return None
class IntegerField(ApiField):
"""
An integer field.
Covers ``models.IntegerField``, ``models.PositiveIntegerField``,
``models.PositiveSmallIntegerField`` and ``models.SmallIntegerField``.
"""
dehydrated_type = 'integer'
help_text = 'Integer data. Ex: 2673'
def convert(self, value):
if value is None:
return None
return int(value)
class FloatField(ApiField):
"""
A floating point field.
"""
dehydrated_type = 'float'
help_text = 'Floating point numeric data. Ex: 26.73'
def convert(self, value):
if value is None:
return None
return float(value)
class DecimalField(ApiField):
"""
A decimal field.
"""
dehydrated_type = 'decimal'
help_text = 'Fixed precision numeric data. Ex: 26.73'
def convert(self, value):
if value is None:
return None
return Decimal(value)
class BooleanField(ApiField):
"""
A boolean field.
Covers both ``models.BooleanField`` and ``models.NullBooleanField``.
"""
dehydrated_type = 'boolean'
help_text = 'Boolean data. Ex: True'
def convert(self, value):
if value is None:
return None
return bool(value)
class ListField(ApiField):
"""
A list field.
"""
dehydrated_type = 'list'
help_text = "A list of data. Ex: ['abc', 26.73, 8]"
def convert(self, value):
if value is None:
return None
return list(value)
class DictField(ApiField):
"""
A dictionary field.
"""
dehydrated_type = 'dict'
help_text = "A dictionary of data. Ex: {'price': 26.73, 'name': 'Daniel'}"
def convert(self, value):
if value is None:
return None
return dict(value)
class DateField(ApiField):
"""
A date field.
"""
dehydrated_type = 'date'
help_text = 'A date as a string. Ex: "2010-11-10"'
def convert(self, value):
if value is None:
return None
if isinstance(value, basestring):
match = DATE_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.date(int(data['year']), int(data['month']), int(data['day']))
else:
raise ApiFieldError("Date provided to '%s' field doesn't appear to be a valid date string: '%s'" % (self.instance_name, value))
return value
def hydrate(self, bundle):
value = super(DateField, self).hydrate(bundle)
if value and not hasattr(value, 'year'):
try:
# Try to rip a date/datetime out of it.
value = make_aware(parse(value))
if hasattr(value, 'hour'):
value = value.date()
except ValueError:
pass
return value
class DateTimeField(ApiField):
"""
A datetime field.
"""
dehydrated_type = 'datetime'
help_text = 'A date & time as a string. Ex: "2010-11-10T03:07:43"'
def convert(self, value):
if value is None:
return None
if isinstance(value, basestring):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return make_aware(datetime_safe.datetime(int(data['year']), int(data['month']), int(data['day']), int(data['hour']), int(data['minute']), int(data['second'])))
else:
raise ApiFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
return value
def hydrate(self, bundle):
value = super(DateTimeField, self).hydrate(bundle)
if value and not hasattr(value, 'year'):
try:
# Try to rip a date/datetime out of it.
value = make_aware(parse(value))
except ValueError:
pass
return value
class RelatedField(ApiField):
"""
Provides access to data that is related within the database.
The ``RelatedField`` base class is not intended for direct use but provides
functionality that ``ToOneField`` and ``ToManyField`` build upon.
The contents of this field actually point to another ``Resource``,
rather than the related object. This allows the field to represent its data
in different ways.
The abstractions based around this are "leaky" in that, unlike the other
fields provided by ``tastypie``, these fields don't handle arbitrary objects
very well. The subclasses use Django's ORM layer to make things go, though
there is no ORM-specific code at this level.
"""
dehydrated_type = 'related'
is_related = True
self_referential = False
help_text = 'A related resource. Can be either a URI or set of nested resource data.'
def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, full=False, unique=False, help_text=None):
"""
Builds the field and prepares it to access to related data.
The ``to`` argument should point to a ``Resource`` class, NOT
to a ``Model``. Required.
The ``attribute`` argument should specify what field/callable points to
the related data on the instance object. Required.
Optionally accepts a ``related_name`` argument. Currently unused, as
unlike Django's ORM layer, reverse relations between ``Resource``
classes are not automatically created. Defaults to ``None``.
Optionally accepts a ``null``, which indicates whether or not a
``None`` is allowable data on the field. Defaults to ``False``.
Optionally accepts a ``blank``, which indicates whether or not
data may be omitted on the field. Defaults to ``False``.
Optionally accepts a ``readonly``, which indicates whether the field
is used during the ``hydrate`` or not. Defaults to ``False``.
Optionally accepts a ``full``, which indicates how the related
``Resource`` will appear post-``dehydrate``. If ``False``, the
related ``Resource`` will appear as a URL to the endpoint of that
resource. If ``True``, the result of the sub-resource's
``dehydrate`` will be included in full.
Optionally accepts a ``unique``, which indicates if the field is a
unique identifier for the object.
Optionally accepts ``help_text``, which lets you provide a
human-readable description of the field exposed at the schema level.
Defaults to the per-Field definition.
"""
self.instance_name = None
self._resource = None
self.to = to
self.attribute = attribute
self.related_name = related_name
self._default = default
self.null = null
self.blank = blank
self.readonly = readonly
self.full = full
self.api_name = None
self.resource_name = None
self.unique = unique
self._to_class = None
if self.to == 'self':
self.self_referential = True
self._to_class = self.__class__
if help_text:
self.help_text = help_text
def contribute_to_class(self, cls, name):
super(RelatedField, self).contribute_to_class(cls, name)
# Check if we're self-referential and hook it up.
# We can't do this quite like Django because there's no ``AppCache``
# here (which I think we should avoid as long as possible).
if self.self_referential or self.to == 'self':
self._to_class = cls
def get_related_resource(self):
"""
Instantiates the related resource.
"""
related_resource = self.to_class()
# Fix the ``api_name`` if it's not present.
if related_resource._meta.api_name is None:
if self._resource and self._resource._meta.api_name is not None:
related_resource._meta.api_name = self._resource._meta.api_name
return related_resource
@property
def to_class(self):
# We need to be lazy here, because when the metaclass constructs the
# Resources, other classes may not exist yet.
# That said, memoize this so we never have to relookup/reimport.
if self._to_class:
return self._to_class
if not isinstance(self.to, basestring):
self._to_class = self.to
return self._to_class
# It's a string. Let's figure it out.
if '.' in self.to:
# Try to import.
module_bits = self.to.split('.')
module_path, class_name = '.'.join(module_bits[:-1]), module_bits[-1]
module = importlib.import_module(module_path)
else:
# We've got a bare class name here, which won't work (No AppCache
# to rely on). Try to throw a useful error.
raise ImportError("Tastypie requires a Python-style path (<module.module.Class>) to lazy load related resources. Only given '%s'." % self.to)
self._to_class = getattr(module, class_name, None)
if self._to_class is None:
raise ImportError("Module '%s' does not appear to have a class called '%s'." % (module_path, class_name))
return self._to_class
def dehydrate_related(self, bundle, related_resource, related_instance):
"""
Based on the ``full`` attribute, returns either the endpoint URI or the data
from ``full_dehydrate`` for the related resource.
"""
if not self.full:
# Be a good netizen.
return related_resource.get_resource_uri(bundle)
else:
# ZOMG extra data and big payloads.
bundle = related_resource.build_bundle(obj=related_instance, request=bundle.request)
return related_resource.full_dehydrate(bundle)
def resource_from_uri(self, fk_resource, uri, request=None, related_obj=None, related_name=None):
"""
Given a URI, attempts to load the related resource based on the
identifiers in the URI.
"""
try:
obj = fk_resource.get_via_uri(uri, request=request)
bundle = fk_resource.build_bundle(obj=obj, request=request)
return fk_resource.full_dehydrate(bundle)
except ObjectDoesNotExist:
raise ApiFieldError("Could not find the provided object via resource URI '%s'." % uri)
def resource_from_data(self, fk_resource, data, request=None, related_obj=None, related_name=None):
"""
Given a dictionary-like structure, creates a fresh related
resource using that data.
"""
# Try to hydrate the data provided.
data = dict_strip_unicode_keys(data)
fk_bundle = fk_resource.build_bundle(data=data, request=request)
if related_obj:
fk_bundle.related_obj = related_obj
fk_bundle.related_name = related_name
# We need to check to see if updates are allowed on the FK
# resource. If not, we'll just return a populated bundle instead
# of mistakenly updating something that should be read-only.
if not fk_resource.can_update():
# If the resource already exists and the client specified where to find it, we look it up.
if 'resource_uri' in data:
obj = fk_resource.get_via_uri(data['resource_uri'], request=request)
fk_bundle.install_existing_obj(obj)
return fk_bundle
# If the resource supports creation, then we can full_hydrate() and create a new instance.
elif fk_resource.can_create():
return fk_resource.full_hydrate(fk_bundle)
else:
raise ApiFieldError("Resource %s does not support being created via POST" %
fk_resource._meta.resource_name)
try:
return fk_resource.obj_update(fk_bundle, skip_errors=True, **data)
except NotFound:
try:
# Attempt lookup by primary key
lookup_kwargs = dict((k, v) for k, v in data.iteritems() if getattr(fk_resource, k).unique)
if not lookup_kwargs:
raise NotFound()
return fk_resource.obj_update(fk_bundle, skip_errors=True, **lookup_kwargs)
except NotFound:
fk_bundle = fk_resource.full_hydrate(fk_bundle)
fk_resource.is_valid(fk_bundle, request)
return fk_bundle
except MultipleObjectsReturned:
return fk_resource.full_hydrate(fk_bundle)
def resource_from_pk(self, fk_resource, obj, request=None, related_obj=None, related_name=None):
"""
Given an object with a ``pk`` attribute, attempts to load the
related resource via that primary key.
"""
bundle = fk_resource.build_bundle(obj=obj, request=request)
return fk_resource.full_dehydrate(bundle)
def build_related_resource(self, value, request=None, related_obj=None, related_name=None):
"""
Returns a bundle of data built by the related resource, usually via
``hydrate`` with the data provided.
Accepts either a URI, a data dictionary (or dictionary-like structure)
or an object with a ``pk``.
"""
self.fk_resource = self.to_class()
kwargs = {
'request': request,
'related_obj': related_obj,
'related_name': related_name,
}
if isinstance(value, basestring):
# We got a URI. Load the object and assign it.
return self.resource_from_uri(self.fk_resource, value, **kwargs)
elif isinstance(value, Bundle):
# We got a valid bundle object, the RelatedField had full=True
return value
elif hasattr(value, 'items'):
# We've got a data dictionary.
# Since this leads to creation, this is the only one of these
# methods that might care about "parent" data.
return self.resource_from_data(self.fk_resource, value, **kwargs)
elif hasattr(value, 'pk'):
# We've got an object with a primary key.
return self.resource_from_pk(self.fk_resource, value, **kwargs)
else:
raise ApiFieldError("The '%s' field was given data that was not a URI, not a dictionary-alike and does not have a 'pk' attribute: %s." % (self.instance_name, value))
class ToOneField(RelatedField):
"""
Provides access to related data via foreign key.
This subclass requires Django's ORM layer to work properly.
"""
help_text = 'A single related resource. Can be either a URI or set of nested resource data.'
def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,
null=False, blank=False, readonly=False, full=False,
unique=False, help_text=None):
super(ToOneField, self).__init__(
to, attribute, related_name=related_name, default=default,
null=null, blank=blank, readonly=readonly, full=full,
unique=unique, help_text=help_text
)
self.fk_resource = None
def dehydrate(self, bundle):
attrs = self.attribute.split('__')
foreign_obj = bundle.obj
for attr in attrs:
previous_obj = foreign_obj
try:
foreign_obj = getattr(foreign_obj, attr, None)
except ObjectDoesNotExist:
foreign_obj = None
if not foreign_obj:
if not self.null:
raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr))
return None
self.fk_resource = self.get_related_resource()
fk_bundle = Bundle(obj=foreign_obj, request=bundle.request)
return self.dehydrate_related(fk_bundle, self.fk_resource, foreign_obj)
def hydrate(self, bundle):
value = super(ToOneField, self).hydrate(bundle)
if value is None:
return value
return self.build_related_resource(value, request=bundle.request)
class ForeignKey(ToOneField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
class OneToOneField(ToOneField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
class ToManyField(RelatedField):
"""
Provides access to related data via a join table.
This subclass requires Django's ORM layer to work properly.
Note that the ``hydrate`` portions of this field are quite different than
any other field. ``hydrate_m2m`` actually handles the data and relations.
This is due to the way Django implements M2M relationships.
"""
is_m2m = True
help_text = 'Many related resources. Can be either a list of URIs or list of individually nested resource data.'
def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,
null=False, blank=False, readonly=False, full=False,
unique=False, help_text=None):
super(ToManyField, self).__init__(
to, attribute, related_name=related_name, default=default,
null=null, blank=blank, readonly=readonly, full=full,
unique=unique, help_text=help_text
)
self.m2m_bundles = []
def dehydrate(self, bundle):
if not bundle.obj or bundle.obj_is_new:
if not self.null:
raise ApiFieldError("The model '%r' does not have a primary key and can not be used in a ToMany context." % bundle.obj)
return []
the_m2ms = None
previous_obj = bundle.obj
attr = self.attribute
if isinstance(self.attribute, basestring):
attrs = self.attribute.split('__')
the_m2ms = bundle.obj
for attr in attrs:
previous_obj = the_m2ms
try:
the_m2ms = getattr(the_m2ms, attr, None)
except ObjectDoesNotExist:
the_m2ms = None
if not the_m2ms:
break
elif callable(self.attribute):
the_m2ms = self.attribute(bundle)
if not the_m2ms:
if not self.null:
raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr))
return []
self.m2m_resources = []
m2m_dehydrated = []
# TODO: Also model-specific and leaky. Relies on there being a
# ``Manager`` there.
for m2m in the_m2ms.all():
m2m_resource = self.get_related_resource()
m2m_bundle = Bundle(obj=m2m, request=bundle.request)
self.m2m_resources.append(m2m_resource)
m2m_dehydrated.append(self.dehydrate_related(m2m_bundle, m2m_resource, m2m))
return m2m_dehydrated
def hydrate(self, bundle):
pass
def hydrate_m2m(self, bundle):
if self.readonly:
return None
if bundle.data.get(self.instance_name) is None:
if self.blank:
return []
elif self.null:
return []
else:
raise ApiFieldError("The '%s' field has no data and doesn't allow a null value." % self.instance_name)
m2m_hydrated = []
for value in bundle.data.get(self.instance_name):
if value is None:
continue
kwargs = {
'request': bundle.request,
}
if self.related_name:
kwargs['related_obj'] = bundle.obj
kwargs['related_name'] = self.related_name
m2m_hydrated.append(self.build_related_resource(value, **kwargs))
return m2m_hydrated
class ManyToManyField(ToManyField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
class OneToManyField(ToManyField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
class TimeField(ApiField):
dehydrated_type = 'time'
help_text = 'A time as string. Ex: "20:05:23"'
def dehydrate(self, obj):
return self.convert(super(TimeField, self).dehydrate(obj))
def convert(self, value):
if isinstance(value, basestring):
return self.to_time(value)
return value
def to_time(self, s):
try:
dt = parse(s)
except ValueError, e:
raise ApiFieldError(str(e))
else:
return datetime.time(dt.hour, dt.minute, dt.second)
def hydrate(self, bundle):
value = super(TimeField, self).hydrate(bundle)
if value and not isinstance(value, datetime.time):
value = self.to_time(value)
return value
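# Hedged illustration (not part of the original module): the ``convert`` coercion of a
# few field classes above.  ``attribute=None`` is passed explicitly; the remaining
# constructor defaults are the documented ones (``null=False``, ``blank=False``, ...).
assert CharField(attribute=None).convert(42) == u'42'
assert IntegerField(attribute=None).convert('17') == 17
assert BooleanField(attribute=None).convert(0) is False
assert ListField(attribute=None).convert(('a', 'b')) == ['a', 'b']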
|
|
from __future__ import unicode_literals
from prompt_toolkit.application import get_app
from prompt_toolkit.filters import has_focus, Condition
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.key_binding.bindings.scroll import scroll_page_up, scroll_page_down, scroll_one_line_down, scroll_one_line_up, scroll_half_page_up, scroll_half_page_down
from prompt_toolkit.keys import Keys
from prompt_toolkit.search import stop_search
from prompt_toolkit.utils import suspend_to_background_supported
__all__ = (
'create_key_bindings',
)
def create_key_bindings(pager):
kb = KeyBindings()
handle = kb.add
@Condition
def has_colon():
return pager.in_colon_mode
@Condition
def default_focus():
app = get_app()
return app.layout.current_window == pager.current_source_info.window
@Condition
def displaying_help():
return pager.displaying_help
for c in '0123456789':
@handle(c, filter=default_focus)
def _(event, c=c):
event.append_to_arg_count(c)
@handle('q', filter=default_focus)
@handle('Q', filter=default_focus | has_colon)
@handle('Z', 'Z', filter=default_focus)
def _(event):
" Quit. "
if pager.displaying_help:
pager.quit_help()
else:
event.app.exit()
@handle(' ', filter=default_focus)
@handle('f', filter=default_focus)
@handle(Keys.ControlF, filter=default_focus)
@handle(Keys.ControlV, filter=default_focus)
def _(event):
" Page down."
scroll_page_down(event)
@handle('b', filter=default_focus)
@handle(Keys.ControlB, filter=default_focus)
@handle(Keys.Escape, 'v', filter=default_focus)
def _(event):
" Page up."
scroll_page_up(event)
@handle('d', filter=default_focus)
@handle(Keys.ControlD, filter=default_focus)
def _(event):
" Half page down."
scroll_half_page_down(event)
@handle('u', filter=default_focus)
@handle(Keys.ControlU, filter=default_focus)
def _(event):
" Half page up."
scroll_half_page_up(event)
@handle('e', filter=default_focus)
@handle('j', filter=default_focus)
@handle(Keys.ControlE, filter=default_focus)
@handle(Keys.ControlN, filter=default_focus)
@handle(Keys.ControlJ, filter=default_focus)
@handle(Keys.ControlM, filter=default_focus)
@handle(Keys.Down, filter=default_focus)
def _(event):
" Scoll one line down."
if event.arg > 1:
# When an argument is given, go this amount of lines down.
event.current_buffer.auto_down(count=event.arg)
else:
scroll_one_line_down(event)
@handle('y', filter=default_focus)
@handle('k', filter=default_focus)
@handle(Keys.ControlY, filter=default_focus)
@handle(Keys.ControlK, filter=default_focus)
@handle(Keys.ControlP, filter=default_focus)
@handle(Keys.Up, filter=default_focus)
def _(event):
" Scoll one line up."
if event.arg > 1:
event.current_buffer.auto_up(count=event.arg)
else:
scroll_one_line_up(event)
@handle(Keys.Escape, 'u')
def _(event):
" Toggle search highlighting. "
pager.highlight_search = not pager.highlight_search
@handle('=', filter=default_focus)
@handle(Keys.ControlG, filter=default_focus)
@handle('f', filter=has_colon)
def _(event):
" Print the current file name. "
pager.message = ' {} '.format(pager.current_source.get_name())
@handle('h', filter=default_focus & ~displaying_help)
@handle('H', filter=default_focus & ~displaying_help)
def _(event):
" Display Help. "
pager.display_help()
@handle('g', filter=default_focus)
@handle('<', filter=default_focus)
@handle(Keys.Escape, '<', filter=default_focus)
def _(event):
" Go to the first line of the file. "
event.current_buffer.cursor_position = 0
@handle('G', filter=default_focus)
@handle('>', filter=default_focus)
@handle(Keys.Escape, '>', filter=default_focus)
def _(event):
" Go to the last line of the file. "
b = event.current_buffer
b.cursor_position = len(b.text)
@handle('m', Keys.Any, filter=default_focus)
def _(event):
" Mark current position. "
source_info = pager.current_source_info
source_info.marks[event.data] = (
event.current_buffer.cursor_position,
source_info.window.vertical_scroll)
@handle("'", Keys.Any, filter=default_focus)
def _(event):
" Go to a previously marked position. "
go_to_mark(event, event.data)
@handle(Keys.ControlX, Keys.ControlX, filter=default_focus)
def _(event):
" Same as '. "
go_to_mark(event, '.')
def go_to_mark(event, mark):
b = event.current_buffer
source_info = pager.current_source_info
try:
if mark == '^': # Start of file.
cursor_pos, vertical_scroll = 0, 0
elif mark == '$': # End of file - mark.
cursor_pos, vertical_scroll = len(b.text), 0
else: # Custom mark.
cursor_pos, vertical_scroll = source_info.marks[mark]
except KeyError:
pass # TODO: show warning.
else:
b.cursor_position = cursor_pos
source_info.window.vertical_scroll = vertical_scroll
@handle('F', filter=default_focus)
def _(event):
" Forward forever, like 'tail -f'. "
pager.forward_forever = True
@handle('r', filter=default_focus)
@handle('R', filter=default_focus)
def _(event):
event.app.renderer.clear()
def search_buffer_is_empty():
" Returns True when the search buffer is empty. "
return pager.search_buffer.text == ''
@handle('backspace', filter=has_focus(pager.search_buffer) & Condition(search_buffer_is_empty))
def _(event):
" Cancel search when backspace is pressed. "
stop_search()
@handle(Keys.Left, filter=default_focus)
@handle(Keys.Escape, '(', filter=default_focus)
def _(event):
" Scroll half page to the left. "
w = event.app.layout.current_window
b = event.app.current_buffer
if w and w.render_info:
info = w.render_info
amount = info.window_width // 2
# Move cursor horizontally.
value = b.cursor_position - min(amount, len(b.document.current_line_before_cursor))
b.cursor_position = value
# Scroll.
w.horizontal_scroll = max(0, w.horizontal_scroll - amount)
@handle(Keys.Right, filter=default_focus)
@handle(Keys.Escape, ')', filter=default_focus)
def _(event):
" Scroll half page to the right. "
w = event.app.layout.current_window
b = event.app.current_buffer
if w and w.render_info:
info = w.render_info
amount = info.window_width // 2
# Move the cursor first to a visible line that is long enough to
# have the cursor visible after scrolling. (Otherwise, the Window
# will scroll back.)
xpos = w.horizontal_scroll + amount
for line in info.displayed_lines:
if len(b.document.lines[line]) >= xpos:
b.cursor_position = b.document.translate_row_col_to_index(line, xpos)
break
# Scroll.
w.horizontal_scroll = max(0, w.horizontal_scroll + amount)
@handle(':', filter=default_focus & ~displaying_help)
def _(event):
pager.in_colon_mode = True
@handle('n', filter=has_colon)
def _(event):
" Go to next file. "
pager.focus_next_source()
@handle('p', filter=has_colon)
def _(event):
" Go to previous file. "
pager.focus_previous_source()
@handle('e', filter=has_colon)
@handle(Keys.ControlX, Keys.ControlV, filter=default_focus)
def _(event):
event.app.layout.focus(pager.layout.examine_control)
pager.in_colon_mode = False
@handle('d', filter=has_colon)
def _(event):
pager.remove_current_source()
@handle('backspace', filter=has_colon)
@handle('q', filter=has_colon)
def _(event):
pager.in_colon_mode = False
@handle(Keys.Any, filter=has_colon)
def _(event):
pager.in_colon_mode = False
pager.message = 'No command.'
@handle(Keys.ControlC, filter=has_focus('EXAMINE'))
@handle(Keys.ControlG, filter=has_focus('EXAMINE'))
def _(event):
" Cancel 'Examine' input. "
event.app.layout.focus(pager.current_source_info.window)
@handle(Keys.ControlZ, filter=Condition(lambda: suspend_to_background_supported()))
def _(event):
" Suspend to bakground. "
event.app.suspend_to_background()
return kb
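# Hedged, self-contained sketch (not part of the original module): the KeyBindings +
# Condition pattern used throughout create_key_bindings() above, reduced to a minimal
# example.  The ``_demo_*`` names and key choices are invented for illustration only.
_demo_state = {'colon': False}
_demo_in_colon = Condition(lambda: _demo_state['colon'])
_demo_kb = KeyBindings()
_demo_kb.add(':')(lambda event: _demo_state.update(colon=True))
_demo_kb.add('q', filter=_demo_in_colon)(lambda event: _demo_state.update(colon=False))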
|
|
from evoplotter import utils
from evoplotter import plotter
from evoplotter import printer
from evoplotter import reporting
from evoplotter.dims import *
from gecco19_utils import *
def simplify_benchmark_name(name):
"""Shortens or modifies the path of the benchmark in order to make the table more readable."""
i = name.rfind("/")
name = name if i == -1 else name[i+1:]
name = name[:name.rfind(".")]
return name.replace("_3", "_03").replace("_5", "_05").replace("resistance_par", "res").replace("gravity", "gr")
def benchmark_get_num_tests(name):
i_dot = name.rfind(".")
i_us = name.rfind("_")
return name[i_us+1:i_dot]
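# Hedged sanity checks (the benchmark path below is invented for illustration):
# simplify_benchmark_name() strips the directory and extension and normalises a few
# tokens, while benchmark_get_num_tests() extracts the trailing test count.
assert simplify_benchmark_name("benchmarks/gravity_b_5.sl") == "gr_b_05"
assert benchmark_get_num_tests("benchmarks/gravity_b_5.sl") == "5"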
def benchmark_shorter(name):
name = simplify_benchmark_name(name)
i_us = name.rfind("_")
x = name[:i_us]
return x
def sort_benchmark_dim(d):
assert isinstance(d, Dim)
def s(config1):
name1 = config1[0][0] # name of the first filter
name1 = name1[name1.rfind("/")+1:]
# name1 = name1[:name1.rfind(".")]
bfamily1 = name1[:name1.find("_")]
if name1.count('_') == 2:
b_constr1 = name1[name1.find("_")+1:name1.rfind("_")]
elif name1.count('_') == 1:
b_constr1 = name1[name1.find("_")+1:]
else:
raise Exception("Unexpected number of '_' in benchmark name.")
constr_precedence = ["", "b", "c1", "c2", "m", "s", "c", "bm", "bs", "ms", "ms", "bms", "sc"]
benchValues = {"gravity":100, "gr":100, "res2":200, "res3":300}
value = benchValues.get(bfamily1, 0)
return value + constr_precedence.index(b_constr1)
return Dim(sorted(d.configs, key=s))
def get_benchmarks_from_props(props, simple_names=True, ignoreNumTests=False):
if ignoreNumTests:
dim_benchmarks = Dim.from_dict_postprocess(props, "benchmark", fun=benchmark_shorter)
else:
dim_benchmarks = Dim.from_dict(props, "benchmark")
if simple_names:
configs = [Config(simplify_benchmark_name(c.get_caption()), c.filters[0][1],
benchmark=c.get_caption()) for c in dim_benchmarks.configs]
dim_benchmarks = Dim(configs)
# dim_benchmarks.sort()
return sort_benchmark_dim(dim_benchmarks)
def p_method_for(name):
return lambda p, name=name: p["method"] == name
def p_matches_dict(p, d):
for k, v in d.items():
if p[k] != v:
return False
return True
def p_dict_matcher(d):
assert isinstance(d, dict)
d = d.copy()
return lambda p, d=d: p_matches_dict(p, d)
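# Hedged illustration (the sample props dict is invented): p_dict_matcher() builds a
# predicate that is True only when a props dict contains all of the given key/value pairs.
_demo_props = {"method": "CDGP", "selection": "lexicase"}
assert p_dict_matcher({"method": "CDGP"})(_demo_props)
assert not p_dict_matcher({"method": "GP"})(_demo_props)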
def p_generational(p):
return p["evolutionMode"] == "generational"
def p_steadyState(p):
return p["evolutionMode"] == "steadyState"
def p_sel_lexicase(p):
return p["selection"] == "lexicase"
def p_sel_tourn(p):
return p["selection"] == "tournament"
def p_testsRatio_equalTo(ratio):
return lambda p, ratio=ratio: p["testsRatio"] == ratio
def p_benchmarkNumTests_equalTo(x):
return lambda p, x=x: benchmark_get_num_tests(p["benchmark"]) == x
def p_true(p):
return True
dim_true = Dim(Config("mean", lambda p: True, method=None))
# dim_methodCDGP = Dim([Config("CDGP", p_method_for("CDGP"), method="CDGP")])
# dim_methodGP = Dim([Config("GP", p_method_for("GP"), method="GP")])
dim_methodCDGP = Dim([
Config("CDGP", p_dict_matcher({"method": "CDGP", "partialConstraintsInFitness": "false"}), method="CDGP"),
Config("$CDGP_{props}$", p_dict_matcher({"method": "CDGP", "partialConstraintsInFitness": "true"}), method="CDGPprops"),
])
dim_methodGP = Dim([
Config("$GP$", p_dict_matcher({"method": "GP", "populationSize": "500"}), method="GP500"),
# Config("$GP_{1000}$", p_dict_matcher({"method": "GP", "populationSize": "1000"}), method="GP1000"),
# Config("$GP_{5000}$", p_dict_matcher({"method": "GP", "populationSize": "5000"}), method="GP5000"),
])
dim_method = dim_methodCDGP + dim_methodGP
dim_sel = Dim([#Config("$Tour$", p_sel_tourn, selection="tournament"),
Config("$Lex$", p_sel_lexicase, selection="lexicase")])
# dim_evoMode = Dim([Config("$steadyState$", p_steadyState, evolutionMode="steadyState"),
# Config("$generational$", p_generational, evolutionMode="generational")])
dim_evoMode = Dim([Config("$generational$", p_generational, evolutionMode="generational")])
dim_testsRatio = Dim([
# Config("$0.8$", p_testsRatio_equalTo("0.8"), testsRatio="0.8"),
Config("$1.0$", p_testsRatio_equalTo("1.0"), testsRatio="1.0"),
])
dim_optThreshold = Dim([
Config("$0.01$", p_dict_matcher({"optThresholdC": "0.01"}), optThreshold="0.01"),
Config("$0.1$", p_dict_matcher({"optThresholdC": "0.1"}), optThreshold="0.1"),
])
dim_benchmarkNumTests = Dim([
Config("$3$ tests", p_benchmarkNumTests_equalTo("3"), benchmarkNumTests="3"),
Config("$5$ tests", p_benchmarkNumTests_equalTo("5"), benchmarkNumTests="5"),
Config("$10$ tests", p_benchmarkNumTests_equalTo("10"), benchmarkNumTests="10"),
])
def plot_figures(props, exp_prefix):
# NOTE: no method filtering is applied here; all props are used (shallow copy below).
props = [p for p in props]
if len(props) == 0:
print("No props: plots were not generated.")
return
getter_mse = lambda p: float(p["result.best.mse"])
predicate = lambda v, v_xaxis: v <= v_xaxis
N = 50 # number of points per plot line
r = (0.0, 1e0)
xs = np.linspace(r[0], r[1], N)
xticks = np.arange(r[0], r[1], r[1] / 10)
plotter.plot_ratio_meeting_predicate(props, getter_mse, predicate, xs=xs, xticks=xticks,
show_plot=False,
title="Ratio of solutions with MSE under the certain level",
xlabel="MSE",
series_dim=dim_method,
xlogscale=False,
savepath="results/figures/ratioMSE.pdf".format(exp_prefix))
# print_solved_in_time(props, 12 * 3600 * 1000)
# print_solved_in_time(props, 6 * 3600 * 1000)
# print_solved_in_time(props, 3 * 3600 * 1000)
# print_solved_in_time(props, 1 * 3600 * 1000)
# print_solved_in_time(props, 0.5 * 3600 * 1000)
# print_solved_in_time(props, 0.25 * 3600 * 1000)
# print_solved_in_time(props, 0.125 * 3600 * 1000)
# print_solved_in_time(props, 600 * 1000)
# Plot chart of number of found solutions in time
# success_props = [p for p in props if is_optimal_solution(p)]
# getter = lambda p: float(normalized_total_time(p)) / (60 * 1000) # take minutes as a unit
# predicate = lambda v, v_xaxis: v <= v_xaxis
# xs = np.arange(0.0, 5.0 * 60.5 + 1e-9, 5.0) # a point every 5.0 minutes
# xticks = np.arange(0.0, 5.0 * 60.0 + 1e-9, 15.0) # a tick every 15 minutes
# plotter.plot_ratio_meeting_predicate(success_props, getter, predicate,
# xs=xs, xticks=xticks, show_plot=0,
# series_dim=dim_method, # "series_dim=None" for a single line
# savepath="figures/{0}_ratioTime_correctVsAllCorrect.pdf".format(exp_prefix),
# title="Ratio of found correct solutions out of all correct solutions",
# xlabel="Runtime [minutes]")
# plotter.plot_ratio_meeting_predicate(props, getter, predicate,
# xs=xs, xticks=xticks, show_plot=0,
# series_dim=dim_method,
# savepath="figures/{0}_ratioTime_endedVsAllEnded.pdf".format(exp_prefix),
# title="Ratio of ended runs",
# xlabel="Runtime [minutes]")
# def get_total_evaluated(p):
# if "evolutionMode" not in p:
# return None
# elif p["evolutionMode"] == "steadyState":
# return int(p["result.totalGenerations"])
# else:
# return int(p["result.totalGenerations"]) * int(p["populationSize"])
# xs = np.arange(0.0, 500.0 * 1000.0 + 0.01, 10000)
# xticks = np.arange(0.0, 500.0 *1000.0 + 0.01, 50000)
# plotter.plot_ratio_meeting_predicate(success_props, get_total_evaluated, predicate,
# xs=xs, xticks=xticks, show_plot=0,
# series_dim=dim_method,
# savepath="figures/{0}_ratioEvaluated_correctVsAllCorrect.pdf".format(exp_prefix),
# title="Ratio of found correct solutions out of all found correct solutions in the given config",
# xlabel="Number of evaluated solutions")
# cond = lambda p: p["result.best.isOptimal"] == "true"
# plotter.plot_ratio_meeting_predicate(props, get_total_evaluated, predicate,
# condition=cond,
# xs=xs, xticks=xticks, show_plot=0,
# series_dim=dim_method,
# savepath="figures/{0}_ratioEvaluated_correctVsAllRuns.pdf".format(exp_prefix),
# title="Ratio of runs which ended with correct solution out of all runs",
# xlabel="Number of evaluated solutions")
# plotter.compare_avg_data_series(props, dim_methodCDGP, "CDGPtestsRatio",
# getter_y=get_successRate,
# is_aggr=1,
# savepath="figures/{0}_q_successRates.pdf".format(exp_prefix),
# title="Success rates",
# ylabel="Success rate",
# xlabel="q")
# plotter.compare_avg_data_series_d(success_props, dim_methodCDGP, "CDGPtestsRatio", "result.best.size",
# savepath="figures/{0}_q_sizes.pdf".format(exp_prefix),
# title="Size of found correct solutions",
# ylabel="Size",
# xlabel="q")
#
# plotter.compare_avg_data_series_d(props, dim_methodCDGP, "CDGPtestsRatio", "tests.total",
# savepath="figures/{0}_q_tests.pdf".format(exp_prefix),
# title="Total number of test cases",
# ylabel="Total tests",
# xlabel="q")
def get_content_of_subsections(subsects):
content = []
vspace = reporting.BlockLatex(r"\vspace{0.75cm}"+"\n")
for title, table, cs in subsects:
if isinstance(cs, reporting.ColorScheme3):
cs = cs.toBlockLatex()
sub = reporting.SectionRelative(title, contents=[cs, reporting.BlockLatex(table + "\n"), vspace])
content.append(sub)
return content
def post(s):
s = s.replace("{ccccccccccccc}", "{rrrrrrrrrrrrr}").replace("{rrr", "{lrr")\
.replace(r"\_{lex}", "_{lex}").replace(r"\_{", "_{").replace("resistance_par", "res")\
.replace("gravity", "gr")
return s
def create_section_and_plots(title, desc, props, subsects, figures_list, exp_prefix):
assert isinstance(title, str)
assert isinstance(desc, str)
assert isinstance(props, list)
assert isinstance(figures_list, list)
plot_figures(props, exp_prefix=exp_prefix)
section = reporting.Section(title, [])
section.add(reporting.BlockLatex(desc + "\n"))
for s in subsects:
section.add(s)
# Create figures in the appropriate directory
for f in figures_list:
section.add(reporting.FloatFigure(f))
section.add(reporting.BlockLatex(r"\vspace{1cm}" + "\n"))
return section
def create_single_table_bundle(props, dim_rows, dim_cols, cellLambda, headerRowNames, cv0, cv1, cv2, vb=1,
tableVariants=None, onlyNonemptyRows=True, tablePostprocessor=post,
printTextTable=False):
if tableVariants is None:
tableVariants = [p_true]
assert isinstance(tableVariants, list)
text = ""
for variant in tableVariants: # each variant is some predicate on data
props_variant = [p for p in props if variant(p)]
if onlyNonemptyRows:
dim_rows_variant = Dim([c for c in dim_rows.configs if len(c.filter_props(props_variant)) > 0])
else:
dim_rows_variant = dim_rows
tableText = tablePostprocessor(
printer.latex_table(props_variant, dim_rows_variant, dim_cols, cellLambda, layered_headline=True,
vertical_border=vb, headerRowNames=headerRowNames))
if printTextTable:
print("VARIANT: " + str(variant))
print(printer.text_table(props, dim_rows_variant, dim_cols, cellLambda, d_cols=";"))
text += r"\noindent"
text += printer.table_color_map(tableText, cv0, cv1, cv2, "colorLow", "colorMedium", "colorHigh")
# text += "\n"
return text
def create_subsection_aggregation_tests(props, dim_rows, dim_cols, headerRowNames):
vb = 1 # vertical border
variants = None
dim_rows_v2 = get_benchmarks_from_props(props, simple_names=True, ignoreNumTests=True)
dim_rows_v2 += dim_true
# By default: dim_cols = (dim_methodGP * dim_empty + dim_methodCDGP * dim_testsRatio) * dim_optThreshold
tables = [
# TableGenerator(fun_successRateMseOnly, dim_rows_v2,
# (dim_methodGP + dim_methodCDGP),
# headerRowNames=[""],
# title="Success rates (mse below thresh)",
# color_scheme=reporting.color_scheme_violet,
# default_color_thresholds=(0.0, 0.5, 1.0),
# vertical_border=vb, table_postprocessor=post, table_variants=variants,
# ),
TableGenerator(fun_average_mse_sd, dim_rows_v2,
(dim_methodGP + dim_methodCDGP),
headerRowNames=[""],
title="Average MSE",
color_scheme=reporting.color_scheme_violet,
default_color_thresholds=(0.0, 0.5, 1.0),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
TableGenerator(fun_average_mse, dim_rows_v2,
dim_benchmarkNumTests * (dim_methodGP + dim_methodCDGP),
headerRowNames=["", ""],
title="Average MSE",
color_scheme=reporting.color_scheme_violet,
default_color_thresholds=(0.0, 0.5, 1.0),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
TableGenerator(fun_successRate, dim_rows_v2,
dim_benchmarkNumTests * (dim_methodGP + dim_methodCDGP),
headerRowNames=["", ""],
title="Success rates (mse below thresh + properties met)",
color_scheme=reporting.color_scheme_darkgreen,
default_color_thresholds=(0.0, 0.5, 1.0),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
TableGenerator(fun_successRate, dim_rows_v2,
(dim_methodGP + dim_methodCDGP) * dim_benchmarkNumTests,
headerRowNames=["", ""],
title="Success rates (mse below thresh + properties met)",
color_scheme=reporting.color_scheme_darkgreen,
default_color_thresholds=(0.0, 0.5, 1.0),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
TableGenerator(fun_successRate, dim_rows_v2,
dim_optThreshold * (dim_methodGP + dim_methodCDGP),
headerRowNames=["tolerance", ""],
title="Success rates (mse below thresh + properties met)",
color_scheme=reporting.color_scheme_darkgreen,
default_color_thresholds=(0.0, 0.5, 1.0),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
TableGenerator(fun_successRate, dim_rows_v2,
(dim_methodGP + dim_methodCDGP),
headerRowNames=[""],
title="Success rates (mse below thresh + properties met)",
color_scheme=reporting.color_scheme_darkgreen,
default_color_thresholds=(0.0, 0.5, 1.0),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
TableGenerator(fun_allPropertiesMet, dim_rows_v2,
dim_benchmarkNumTests * (dim_methodGP + dim_methodCDGP),
headerRowNames=["", ""],
title="Success rates (properties met)",
color_scheme=reporting.color_scheme_green,
default_color_thresholds=(0.0, 0.5, 1.0),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
TableGenerator(fun_allPropertiesMet, dim_rows_v2,
(dim_methodGP + dim_methodCDGP) * dim_benchmarkNumTests,
headerRowNames=["", ""],
title="Success rates (properties met)",
color_scheme=reporting.color_scheme_green,
default_color_thresholds=(0.0, 0.5, 1.0),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
# TableGenerator(fun_allPropertiesMet, dim_rows_v2,
# dim_benchmarkNumTests * dim_optThreshold * (dim_methodGP + dim_methodCDGP),
# headerRowNames=["", "tolerance", ""],
# title="Success rates (properties met)",
# color_scheme=reporting.color_scheme_green,
# default_color_thresholds=(0.0, 0.5, 1.0),
# vertical_border=vb, table_postprocessor=post, table_variants=variants,
# ),
TableGenerator(fun_allPropertiesMet, dim_rows_v2,
dim_optThreshold * (dim_methodGP + dim_methodCDGP),
headerRowNames=["tolerance", ""],
title="Success rates (properties met)",
color_scheme=reporting.color_scheme_green,
default_color_thresholds=(0.0, 0.5, 1.0),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
TableGenerator(fun_allPropertiesMet, dim_rows_v2,
(dim_methodGP + dim_methodCDGP),
headerRowNames=[""],
title="Success rates (properties met)",
color_scheme=reporting.color_scheme_green,
default_color_thresholds=(0.0, 0.5, 1.0),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
]
subsects_main = []
for t in tables:
tup = (t.title, t.apply(props), t.color_scheme)
subsects_main.append(tup)
return reporting.Subsection("Tests of different data aggregation in tables", get_content_of_subsections(subsects_main))
def create_subsection_shared_stats(props, dim_rows, dim_cols, numRuns, headerRowNames):
vb = 1 # vertical border
variants = [p_benchmarkNumTests_equalTo("3"), p_benchmarkNumTests_equalTo("5"), p_benchmarkNumTests_equalTo("10")]
dim_rows_v2 = get_benchmarks_from_props(props, simple_names=True, ignoreNumTests=True)
dim_rows_v2 += dim_true
# ----------------------------------------------------
# Cleaning experiment. Here, because dimension can be easily constructed.
# dim_rows_v3 = get_benchmarks_from_props(props, simple_names=True, ignoreNumTests=True)
# utils.reorganizeExperimentFiles(props, dim_rows_v3 * dim_benchmarkNumTests * dim_cols, target_dir="./exp3_final/", maxRuns=numRuns)
# ----------------------------------------------------
tables = [
TableGenerator(get_num_computed, dim_rows, dim_cols, headerRowNames=headerRowNames,
title="Status (correctly finished runs)",
color_scheme=reversed(reporting.color_scheme_red),
default_color_thresholds=(0.0, numRuns/2, numRuns),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
TableGenerator(fun_successRate, dim_rows, dim_cols, headerRowNames=headerRowNames,
title="Success rates (mse below thresh + properties met)",
color_scheme=reporting.color_scheme_darkgreen,
default_color_thresholds=(0.0, 0.5, 1.0),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
TableGenerator(fun_allPropertiesMet, dim_rows, dim_cols, headerRowNames=headerRowNames,
title="Success rates (properties met)",
color_scheme=reporting.color_scheme_green,
default_color_thresholds=(0.0, 0.5, 1.0),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
TableGenerator(get_avg_runtime, dim_rows, dim_cols, headerRowNames=headerRowNames,
title="Average runtime [s]",
color_scheme=reporting.color_scheme_violet,
default_color_thresholds=(0.0, 900.0, 1800.0),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
TableGenerator(get_avg_runtimeOnlySuccessful, dim_rows, dim_cols, headerRowNames=headerRowNames,
title="Average runtime (only successful) [s]",
color_scheme=reporting.color_scheme_violet,
default_color_thresholds=(0.0, 900.0, 1800.0),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
# TableGenerator(get_stats_size, dim_rows, dim_cols, headerRowNames=headerRowNames,
# title="Average sizes of best of runs (number of nodes)",
# color_scheme=reporting.color_scheme_yellow,
# default_color_thresholds=(0.0, 100.0, 200.0),
# vertical_border=vb, table_postprocessor=post, table_variants=variants,
# ),
TableGenerator(get_stats_sizeOnlySuccessful, dim_rows, dim_cols, headerRowNames=headerRowNames,
title="Average sizes of best of runs (number of nodes) (only successful)",
color_scheme=reporting.color_scheme_yellow,
default_color_thresholds=(0.0, 100.0, 200.0),
vertical_border=vb, table_postprocessor=post, table_variants=variants,
),
]
subsects_main = []
for t in tables:
tup = (t.title, t.apply(props), t.color_scheme)
subsects_main.append(tup)
return reporting.Subsection("Shared Statistics", get_content_of_subsections(subsects_main))
def create_subsection_cdgp_specific(props, dim_rows, dim_cols, headerRowNames):
vb = 1 # vertical border
variants = [p_benchmarkNumTests_equalTo("3"), p_benchmarkNumTests_equalTo("5"), p_benchmarkNumTests_equalTo("10")]
print("AVG BEST-OF-RUN FITNESS (MSE)")
latex_avgBestOfRunFitness = create_single_table_bundle(props, dim_rows, dim_cols, get_avg_mse, headerRowNames,
cv0=0.0, cv1=0.5, cv2=1.0, tableVariants=variants)
# text = post(
# printer.latex_table(props, dim_rows, dim_cols, get_avg_mse, layered_headline=True, vertical_border=vb, headerRowNames=headerRowNames))
# latex_avgBestOfRunFitness = printer.table_color_map(text, 0.0, 0.5, 1.0, "colorLow", "colorMedium", "colorHigh")
print("AVG TOTAL TESTS")
latex_avgTotalTests = create_single_table_bundle(props, dim_rows, dim_cols, get_avg_totalTests, headerRowNames,
cv0=0.0, cv1=1000.0, cv2=2000.0, tableVariants=variants)
# text = post(
# printer.latex_table(props, dim_rows, dim_cols, get_avg_totalTests, layered_headline=True, vertical_border=vb, headerRowNames=headerRowNames))
# latex_avgTotalTests = printer.table_color_map(text, 0.0, 1000.0, 2000.0, "colorLow", "colorMedium", "colorHigh")
# print("AVG RUNTIME PER PROGRAM")
# text = post(printer.latex_table(props, dim_rows, dim_cols, get_avg_runtimePerProgram, layered_headline=True,
# vertical_border=vb, headerRowNames=headerRowNames))
# latex_avgRuntimePerProgram = printer.table_color_map(text, 0.01, 1.0, 2.0, "colorLow", "colorMedium", "colorHigh")
print("AVG GENERATION")
latex_avgGeneration = create_single_table_bundle(props, dim_rows, dim_cols, get_avg_generation, headerRowNames,
cv0=0.0, cv1=100.0, cv2=200.0, tableVariants=variants)
# text = post(
# printer.latex_table(props, dim_rows, dim_cols, get_avg_generation, layered_headline=True, vertical_border=vb, headerRowNames=headerRowNames))
# latex_avgGeneration = printer.table_color_map(text, 0.0, 50.0, 100.0, "colorLow", "colorMedium", "colorHigh")
# print("AVG EVALUATED SOLUTIONS")
# text = post(
# printer.latex_table(props, dim_rows, dim_cols, get_avg_evaluated, layered_headline=True, vertical_border=vb, headerRowNames=headerRowNames))
# latex_avgEvaluated = printer.table_color_map(text, 500.0, 25000.0, 100000.0, "colorLow", "colorMedium", "colorHigh")
# print("AVG EVALUATED SOLUTIONS (SUCCESSFUL)")
# text = post(printer.latex_table(props, dim_rows, dim_cols, get_avg_evaluatedSuccessful, layered_headline=True,
# vertical_border=vb, headerRowNames=headerRowNames))
# latex_avgEvaluatedSuccessful = printer.table_color_map(text, 500.0, 25000.0, 100000.0, "colorLow", "colorMedium",
# "colorHigh")
print("MAX SOLVER TIME")
latex_maxSolverTimes = create_single_table_bundle(props, dim_rows, dim_cols, get_stats_maxSolverTime, headerRowNames,
cv0=0.0, cv1=0.5, cv2=1.0, tableVariants=variants)
# text = post(printer.latex_table(props, dim_rows, dim_cols, get_stats_maxSolverTime, layered_headline=True,
# vertical_border=vb, headerRowNames=headerRowNames))
# latex_maxSolverTimes = printer.table_color_map(text, 0.0, 0.5, 1.0, "colorLow", "colorMedium", "colorHigh")
print("AVG SOLVER TIME")
latex_avgSolverTimes = create_single_table_bundle(props, dim_rows, dim_cols, get_stats_avgSolverTime, headerRowNames,
cv0=0.0, cv1=0.015, cv2=0.03, tableVariants=variants)
# text = post(printer.latex_table(props, dim_rows, dim_cols, get_stats_avgSolverTime, layered_headline=True,
# vertical_border=vb, headerRowNames=headerRowNames))
# latex_avgSolverTimes = printer.table_color_map(text, 0.0, 0.015, 0.03, "colorLow", "colorMedium", "colorHigh")
print("AVG NUM SOLVER CALLS")
latex_avgSolverTotalCalls = create_single_table_bundle(props, dim_rows, dim_cols, get_avgSolverTotalCalls, headerRowNames,
cv0=1e1, cv1=1e2, cv2=1e4, tableVariants=variants)
# text = post(printer.latex_table(props, dim_rows, dim_cols, get_avgSolverTotalCalls, layered_headline=True,
# vertical_border=vb, headerRowNames=headerRowNames))
# latex_avgSolverTotalCalls = printer.table_color_map(text, 1e1, 1e2, 1e4, "colorLow", "colorMedium", "colorHigh")
print("NUM SOLVER CALLS > 0.5s")
latex_numSolverCallsOverXs = create_single_table_bundle(props, dim_rows, dim_cols, get_numSolverCallsOverXs, headerRowNames,
cv0=0, cv1=50, cv2=100, tableVariants=variants)
# text = post(printer.latex_table(props, dim_rows, dim_cols, get_numSolverCallsOverXs, layered_headline=True,
# vertical_border=vb, headerRowNames=headerRowNames))
# latex_numSolverCallsOverXs = printer.table_color_map(text, 0, 50, 100, "colorLow", "colorMedium", "colorHigh")
subsects_cdgp = [
("Average best-of-run MSE", latex_avgBestOfRunFitness, reporting.color_scheme_green),
("Average sizes of $T_C$ (total tests in run)", latex_avgTotalTests, reporting.color_scheme_blue),
("Average generation (all)", latex_avgGeneration, reporting.color_scheme_teal),
#("Average generation (only successful)", latex_avgGenerationSuccessful, reporting.color_scheme_teal),
# ("Average evaluated solutions", latex_avgEvaluated, reporting.color_scheme_teal),
# ("Average evaluated solutions (only successful)", latex_avgEvaluatedSuccessful, reporting.color_scheme_teal),
# ("Approximate average runtime per program [s]", latex_avgRuntimePerProgram, reporting.color_scheme_brown),
("Max solver time per query [s]", latex_maxSolverTimes, reporting.color_scheme_violet),
("Avg solver time per query [s]", latex_avgSolverTimes, reporting.color_scheme_brown),
("Avg number of solver calls (in thousands; 1=1000)", latex_avgSolverTotalCalls, reporting.color_scheme_blue),
("Number of solver calls $>$ 0.5s", latex_numSolverCallsOverXs, reporting.color_scheme_blue),
]
return reporting.Subsection("CDGP Statistics", get_content_of_subsections(subsects_cdgp))
_prev_props = None
def prepare_report(sects, fname, exp_prefix, simple_bench_names=True, print_status_matrix=True, reuse_props=False,
paperwidth=75, include_all_row=True, dim_cols_listings=None):
"""Creating nice LaTeX report of the results."""
global _prev_props # used in case reuse_props was set to True
report = reporting.ReportPDF(geometry_params="[paperwidth={0}cm, paperheight=40cm, margin=0.3cm]".format(paperwidth))
latex_sects = []
for title, desc, folders, subs, figures in sects:
print("\nLoading props for: " + title)
print("Scanned folders:")
for f in folders:
print(f)
# Load props
if reuse_props:
props = _prev_props
else:
props = load_correct_props(folders)
_prev_props = props
print("\nFiltered Info:")
for p in props:
if "result.best.verificationDecision" not in p:
print("file: {0}".format(p["thisFileName"]))
# Automatically detect benchmarks used
dim_benchmarks = get_benchmarks_from_props(props, simple_names=simple_bench_names)
if print_status_matrix:
d = dim_benchmarks * (dim_methodGP * dim_sel * dim_evoMode + dim_methodCDGP * dim_sel * dim_evoMode * dim_testsRatio)
matrix = produce_status_matrix(d, props)
print("\n****** Status matrix:")
print(matrix + "\n")
print("Saving status matrix to file: {0}".format(STATUS_FILE_NAME))
utils.save_to_file(STATUS_FILE_NAME, matrix)
dim_rows = dim_benchmarks #.sort()
if include_all_row:
dim_rows += dim_true
if dim_cols_listings is not None:
save_listings(props, dim_rows, dim_cols_listings)
subsects = []
for fun, args in subs:
if args[0] is None: # no dimensions for rows, take benchmarks as the default
args[0] = dim_rows
args2 = [props] + args
subsects.append(fun(*args2))
s = create_section_and_plots(title, desc, props, subsects, figures, exp_prefix)
latex_sects.append(s)
for s in latex_sects:
if s is not None:
report.add(s)
print("\n\nGenerating PDF report ...")
cwd = os.getcwd()
os.chdir("results/")
report.save_and_compile(fname)
os.chdir(cwd)
def prepare_report_for_dims(props, dim_rows, dim_cols, sects, fname, exp_prefix,
print_status_matrix=True, paperwidth=75, include_all_row=True,
dim_cols_listings=None):
"""Creating a LaTeX report of the results, where in each table data are presented along the
sam dimensions."""
report = reporting.ReportPDF(geometry_params="[paperwidth={0}cm, paperheight=40cm, margin=0.3cm]".format(paperwidth))
# dim_rows = dim_rows.sort()
if include_all_row:
dim_rows += dim_true
latex_sects = []
for title, desc, folders, subs, figures in sects:
if print_status_matrix:
d = dim_rows * dim_cols
matrix = produce_status_matrix(d, props)
print("\n****** Status matrix:")
print(matrix + "\n")
print("Saving status matrix to file: {0}".format(STATUS_FILE_NAME))
utils.save_to_file(STATUS_FILE_NAME, matrix)
if dim_cols_listings is not None:
save_listings(props, dim_rows, dim_cols_listings)
subsects = []
for fun, args in subs:
if args[0] is None: # no dimensions for rows, take benchmarks as the default
args[0] = dim_rows
args2 = [props] + args
subsects.append(fun(*args2))
s = create_section_and_plots(title, desc, props, subsects, figures, exp_prefix)
latex_sects.append(s)
for s in latex_sects:
if s is not None:
report.add(s)
print("\n\nGenerating PDF report ...")
cwd = os.getcwd()
os.chdir("results/")
report.save_and_compile(fname)
os.chdir(cwd)
def reports_exp3():
# folders = ["EXP3_aut"]
folders = ["exp3_final"]
title = "Experiments for regression CDGP (stop: 0.5h)"
desc = r""""""
dim_cols = (dim_methodGP * dim_all + dim_methodCDGP * dim_testsRatio) * dim_optThreshold
headerRowNames = ["", r"$\alpha$", "tolerance"]
subs = [
(create_subsection_shared_stats, [None, dim_cols, 25, headerRowNames]),
(create_subsection_cdgp_specific, [None, dim_cols, headerRowNames]),
(create_subsection_aggregation_tests, [None, dim_cols, headerRowNames]),
]
figures = [
"figures/ratioMSE.pdf"
# "figures/e0_ratioEvaluated_correctVsAllRuns.pdf",
# "figures/e0_ratioTime_correctVsAllCorrect.pdf",
# "figures/e0_ratioTime_endedVsAllEnded.pdf"
]
sects = [(title, desc, folders, subs, figures)]
prepare_report(sects, "cdgp_r_exp3.tex", "e3", paperwidth=55, include_all_row=True, dim_cols_listings=dim_cols)
# props = load_correct_props(folders)
# dim_rows = get_benchmarks_from_props(props, simple_names=True) * dim_benchmarkNumTests
# prepare_report_for_dims(props, dim_rows, dim_cols, sects, "cdgp_r_exp3_dims.tex", "e3_dims", paperwidth=40, include_all_row=True, dim_cols_listings=dim_cols)
if __name__ == "__main__":
utils.ensure_clear_dir("results/")
utils.ensure_dir("results/figures/")
utils.ensure_dir("results/listings/")
# utils.ensure_dir("results/tables/")
utils.ensure_dir("results/listings/errors/")
reports_exp3()
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
import abc
import binascii
import logging
logger = logging.getLogger("gdbstub")
class GdbStub(abc.ABC):
def __init__(self, logfile, elffile):
self.logfile = logfile
self.elffile = elffile
self.socket = None
self.gdb_signal = None
mem_regions = list()
for r in logfile.get_memory_regions():
mem_regions.append(r)
for r in elffile.get_memory_regions():
mem_regions.append(r)
self.mem_regions = mem_regions
def get_gdb_packet(self):
socket = self.socket
if socket is None:
return None
data = b''
checksum = 0
# Wait for '$'
while True:
ch = socket.recv(1)
if ch == b'$':
break
# Get a full packet
while True:
ch = socket.recv(1)
if ch == b'#':
# End of packet
break
checksum += ord(ch)
data += ch
# Get checksum (2-bytes)
ch = socket.recv(2)
in_chksum = ord(binascii.unhexlify(ch))
logger.debug(f"Received GDB packet: {data}")
if (checksum % 256) == in_chksum:
# ACK
logger.debug("ACK")
socket.send(b'+')
return data
else:
# NACK
logger.debug(f"NACK (checksum {in_chksum} != {checksum}")
socket.send(b'-')
return None
def put_gdb_packet(self, data):
socket = self.socket
if socket is None:
return
checksum = 0
for d in data:
checksum += d
pkt = b'$' + data + b'#'
checksum = checksum % 256
pkt += format(checksum, "02X").encode()
logger.debug(f"Sending GDB packet: {pkt}")
socket.send(pkt)
def handle_signal_query_packet(self):
# the '?' packet
pkt = b'S'
pkt += format(self.gdb_signal, "02X").encode()
self.put_gdb_packet(pkt)
@abc.abstractmethod
def handle_register_group_read_packet(self):
# the 'g' packet for reading a group of registers
pass
def handle_register_group_write_packet(self):
# the 'G' packet for writing to a group of registers
#
# We don't support writing so return error
self.put_gdb_packet(b"E01")
def handle_register_single_read_packet(self, pkt):
# the 'p' packet for reading a single register
self.put_gdb_packet(b"E01")
def handle_register_single_write_packet(self, pkt):
# the 'P' packet for writing to registers
#
# We don't support writing so return error
self.put_gdb_packet(b"E01")
def handle_memory_read_packet(self, pkt):
# the 'm' packet for reading memory: m<addr>,<len>
def get_mem_region(addr):
for r in self.mem_regions:
if r['start'] <= addr <= r['end']:
return r
return None
# extract address and length from packet
# and convert them into usable integer values
str_addr, str_length = pkt[1:].split(b',')
s_addr = int(b'0x' + str_addr, 16)
length = int(b'0x' + str_length, 16)
# FIXME: Need more efficient way of extracting memory content
remaining = length
addr = s_addr
barray = b''
r = get_mem_region(addr)
while remaining > 0:
if r is None:
barray = None
break
if addr > r['end']:
r = get_mem_region(addr)
continue
offset = addr - r['start']
barray += r['data'][offset:offset+1]
addr += 1
remaining -= 1
if barray is not None:
pkt = binascii.hexlify(barray)
self.put_gdb_packet(pkt)
else:
self.put_gdb_packet(b"E01")
def handle_memory_write_packet(self, pkt):
# the 'M' packet for writing to memory
#
# We don't support writing so return error
self.put_gdb_packet(b"E02")
def handle_general_query_packet(self, pkt):
self.put_gdb_packet(b'')
def run(self, socket):
self.socket = socket
while True:
pkt = self.get_gdb_packet()
if pkt is None:
continue
pkt_type = pkt[0:1]
logger.debug(f"Got packet type: {pkt_type}")
if pkt_type == b'?':
self.handle_signal_query_packet()
elif pkt_type in (b'C', b'S'):
# Continue/stepping execution, which is not supported.
# So signal exception again
self.handle_signal_query_packet()
elif pkt_type == b'g':
self.handle_register_group_read_packet()
elif pkt_type == b'G':
self.handle_register_group_write_packet()
elif pkt_type == b'p':
self.handle_register_single_read_packet(pkt)
elif pkt_type == b'P':
self.handle_register_single_write_packet(pkt)
elif pkt_type == b'm':
self.handle_memory_read_packet(pkt)
elif pkt_type == b'M':
self.handle_memory_write_packet(pkt)
elif pkt_type == b'q':
self.handle_general_query_packet(pkt)
elif pkt_type == b'k':
# GDB quits
break
else:
self.put_gdb_packet(b'')
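# --- Hedged usage sketch (illustrative, not part of the stub above) ---
# A minimal concrete subclass plus a one-shot TCP server loop, showing how the
# abstract 'g' handler might be filled in and how run() is driven. The log/ELF
# objects passed to the constructor and the all-zero register blob are
# hypothetical stand-ins; only the GdbStub API defined above is assumed.
import socket as _socket


class NullGdbStub(GdbStub):
    def handle_register_group_read_packet(self):
        # Reply with an all-zero register file; a real stub would hexlify the
        # register snapshot recovered from the core dump.
        self.put_gdb_packet(b"00" * 32)


def serve_single_connection(stub, port=5678):
    # Accept one GDB client on localhost and serve packets until it sends 'k'.
    with _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM) as srv:
        srv.bind(("127.0.0.1", port))
        srv.listen(1)
        conn, _ = srv.accept()
        with conn:
            stub.run(conn)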
|
|
#!/usr/bin/env python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""Rally testcases implementation."""
from __future__ import division
from __future__ import print_function
import fileinput
import json
import logging
import os
import re
import shutil
import subprocess
import time
import pkg_resources
import prettytable
from ruamel.yaml import YAML
import six
from six.moves import configparser
from xtesting.core import testcase
import yaml
from functest.core import singlevm
from functest.utils import config
from functest.utils import env
from functest.utils import functest_utils
LOGGER = logging.getLogger(__name__)
class RallyBase(singlevm.VmReady2):
"""Base class form Rally testcases implementation."""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
stests = ['authenticate', 'glance', 'cinder', 'gnocchi', 'heat',
'keystone', 'neutron', 'nova', 'quotas', 'swift', 'barbican',
'vm']
rally_conf_path = "/etc/rally/rally.conf"
rally_aar4_patch_path = pkg_resources.resource_filename(
'functest', 'ci/rally_aarch64_patch.conf')
rally_dir = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/rally')
rally_scenario_dir = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/rally/scenario')
template_dir = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/rally/scenario/templates')
support_dir = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/rally/scenario/support')
users_amount = 2
tenants_amount = 3
iterations_amount = 10
concurrency = 4
volume_version = 3
volume_service_type = "volumev3"
blacklist_file = os.path.join(rally_dir, "blacklist.yaml")
task_dir = os.path.join(getattr(config.CONF, 'dir_rally_data'), 'task')
temp_dir = os.path.join(task_dir, 'var')
visibility = 'public'
shared_network = True
task_timeout = 3600
username = 'cirros'
def __init__(self, **kwargs):
"""Initialize RallyBase object."""
super().__init__(**kwargs)
assert self.orig_cloud
assert self.project
if self.orig_cloud.get_role("admin"):
role_name = "admin"
elif self.orig_cloud.get_role("Admin"):
role_name = "Admin"
else:
raise Exception("Cannot detect neither admin nor Admin")
self.orig_cloud.grant_role(
role_name, user=self.project.user.id,
project=self.project.project.id,
domain=self.project.domain.id)
self.results_dir = os.path.join(
getattr(config.CONF, 'dir_results'), self.case_name)
self.task_file = ''
self.creators = []
self.summary = []
self.scenario_dir = ''
self.smoke = None
self.start_time = None
self.result = None
self.compute_cnt = 0
self.flavor_alt = None
self.tests = []
self.run_cmd = ''
self.network_extensions = []
self.services = []
def build_task_args(self, test_name):
"""Build arguments for the Rally task."""
task_args = {'service_list': [test_name]}
task_args['image_name'] = str(self.image.name)
task_args['flavor_name'] = str(self.flavor.name)
task_args['flavor_alt_name'] = str(self.flavor_alt.name)
task_args['glance_image_location'] = str(self.filename)
task_args['glance_image_format'] = str(self.image_format)
task_args['tmpl_dir'] = str(self.template_dir)
task_args['sup_dir'] = str(self.support_dir)
task_args['users_amount'] = self.users_amount
task_args['tenants_amount'] = self.tenants_amount
task_args['use_existing_users'] = False
task_args['iterations'] = self.iterations_amount
task_args['concurrency'] = self.concurrency
task_args['smoke'] = self.smoke
task_args['volume_version'] = self.volume_version
task_args['volume_service_type'] = self.volume_service_type
task_args['block_migration'] = env.get("BLOCK_MIGRATION").lower()
task_args['username'] = self.username
if self.ext_net:
task_args['floating_network'] = str(self.ext_net.name)
else:
task_args['floating_network'] = ''
if self.network:
task_args['netid'] = str(self.network.id)
else:
LOGGER.warning(
'No tenant network created. '
'Trying EXTERNAL_NETWORK as a fallback')
if env.get("EXTERNAL_NETWORK"):
network = self.cloud.get_network(env.get("EXTERNAL_NETWORK"))
task_args['netid'] = str(network.id) if network else ''
else:
task_args['netid'] = ''
return task_args
def _prepare_test_list(self, test_name):
"""Build the list of test cases to be executed."""
test_yaml_file_name = f'opnfv-{test_name}.yaml'
scenario_file_name = os.path.join(self.rally_scenario_dir,
test_yaml_file_name)
if not os.path.exists(scenario_file_name):
scenario_file_name = os.path.join(self.scenario_dir,
test_yaml_file_name)
if not os.path.exists(scenario_file_name):
raise Exception(
f"The scenario '{scenario_file_name}' does not exist.")
LOGGER.debug('Scenario fetched from : %s', scenario_file_name)
test_file_name = os.path.join(self.temp_dir, test_yaml_file_name)
if not os.path.exists(self.temp_dir):
os.makedirs(self.temp_dir)
self.apply_blacklist(scenario_file_name, test_file_name)
return test_file_name
@staticmethod
def get_verifier_deployment_id():
"""
Returns deployment id for active Rally deployment
"""
cmd = ("rally deployment list | awk '/" +
getattr(config.CONF, 'rally_deployment_name') +
"/ {print $2}'")
with subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT) as proc:
deployment_uuid = proc.stdout.readline().rstrip()
return deployment_uuid.decode("utf-8")
@staticmethod
def create_rally_deployment(environ=None):
# pylint: disable=unexpected-keyword-arg
"""Create new rally deployment"""
# set the architecture to default
pod_arch = env.get("POD_ARCH")
arch_filter = ['aarch64']
if pod_arch and pod_arch in arch_filter:
LOGGER.info("Apply aarch64 specific to rally config...")
with open(
RallyBase.rally_aar4_patch_path, "r",
encoding='utf-8') as pfile:
rally_patch_conf = pfile.read()
            # Rewrite rally.conf in place, appending the aarch64 patch right
            # after the image filter line.
            for line in fileinput.input(RallyBase.rally_conf_path, inplace=1):
print(line, end=' ')
if "cirros|testvm" in line:
print(rally_patch_conf)
LOGGER.info("Creating Rally environment...")
try:
cmd = ['rally', 'deployment', 'destroy',
'--deployment',
str(getattr(config.CONF, 'rally_deployment_name'))]
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
except subprocess.CalledProcessError:
pass
cmd = ['rally', 'deployment', 'create', '--fromenv',
'--name', str(getattr(config.CONF, 'rally_deployment_name'))]
output = subprocess.check_output(cmd, env=environ)
LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
cmd = ['rally', 'deployment', 'check']
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
return RallyBase.get_verifier_deployment_id()
@staticmethod
def update_keystone_default_role(rally_conf='/etc/rally/rally.conf'):
"""Set keystone_default_role in rally.conf"""
if env.get("NEW_USER_ROLE").lower() != "member":
rconfig = configparser.RawConfigParser()
rconfig.read(rally_conf)
if not rconfig.has_section('openstack'):
rconfig.add_section('openstack')
rconfig.set(
'openstack', 'keystone_default_role', env.get("NEW_USER_ROLE"))
with open(rally_conf, 'w', encoding='utf-8') as config_file:
rconfig.write(config_file)
@staticmethod
def clean_rally_conf(rally_conf='/etc/rally/rally.conf'):
"""Clean Rally config"""
if env.get("NEW_USER_ROLE").lower() != "member":
rconfig = configparser.RawConfigParser()
rconfig.read(rally_conf)
if rconfig.has_option('openstack', 'keystone_default_role'):
rconfig.remove_option('openstack', 'keystone_default_role')
with open(rally_conf, 'w', encoding='utf-8') as config_file:
rconfig.write(config_file)
@staticmethod
def get_task_id(tag):
"""
        Get the id of the Rally task started with the given tag.
        :param tag: tag passed to 'rally task start --tag'
        :return: task_id as string
"""
cmd = ["rally", "task", "list", "--tag", tag, "--uuids-only"]
output = subprocess.check_output(cmd).decode("utf-8").rstrip()
LOGGER.info("%s: %s", " ".join(cmd), output)
return output
@staticmethod
def task_succeed(json_raw):
"""
        Parse the raw Rally JSON report and check that every task finished
        and passed its SLA.
        :param json_raw: JSON report as produced by 'rally task report --json'
        :return: Bool
"""
rally_report = json.loads(json_raw)
tasks = rally_report.get('tasks')
if tasks:
for task in tasks:
if task.get('status') != 'finished' or \
task.get('pass_sla') is not True:
return False
else:
return False
return True
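    # Hedged sketch of the report shape task_succeed() walks (keys taken from
    # the parsing above, values invented):
    #   {"tasks": [{"status": "finished", "pass_sla": true, ...}, ...]}
    # It returns True only when every task is "finished" with pass_sla True.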
def _migration_supported(self):
"""Determine if migration is supported."""
if self.compute_cnt > 1:
return True
return False
def _network_trunk_supported(self):
"""Determine if network trunk service is available"""
if 'trunk' in self.network_extensions:
return True
return False
@staticmethod
def excl_scenario():
"""Exclude scenario."""
black_tests = []
try:
with open(
RallyBase.blacklist_file, 'r',
encoding='utf-8') as black_list_file:
black_list_yaml = yaml.safe_load(black_list_file)
deploy_scenario = env.get('DEPLOY_SCENARIO')
if (bool(deploy_scenario) and
'scenario' in black_list_yaml.keys()):
for item in black_list_yaml['scenario']:
scenarios = item['scenarios']
in_it = RallyBase.in_iterable_re
if in_it(deploy_scenario, scenarios):
tests = item['tests']
black_tests.extend(tests)
except Exception: # pylint: disable=broad-except
LOGGER.debug("Scenario exclusion not applied.")
return black_tests
@staticmethod
def in_iterable_re(needle, haystack):
"""
Check if given needle is in the iterable haystack, using regex.
:param needle: string to be matched
:param haystack: iterable of strings (optionally regex patterns)
        :return: True if needle is equal to any of the elements in haystack,
or if a nonempty regex pattern in haystack is found in needle.
"""
# match without regex
if needle in haystack:
return True
for pattern in haystack:
# match if regex pattern is set and found in the needle
if pattern and re.search(pattern, needle) is not None:
return True
return False
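    # Hedged illustration of in_iterable_re() (scenario names invented):
    #   in_iterable_re("os-odl-nofeature-ha", ["os-odl-nofeature-ha"])  -> True
    #   in_iterable_re("os-odl-nofeature-ha", [r"^os-odl"])             -> True
    #   in_iterable_re("os-nosdn-nofeature-ha", [r"^os-odl"])           -> False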
def excl_func(self):
"""Exclude functionalities."""
black_tests = []
func_list = []
try:
with open(
RallyBase.blacklist_file, 'r',
encoding='utf-8') as black_list_file:
black_list_yaml = yaml.safe_load(black_list_file)
if env.get('BLOCK_MIGRATION').lower() == 'true':
func_list.append("block_migration")
if not self._migration_supported():
func_list.append("no_migration")
if not self._network_trunk_supported():
func_list.append("no_net_trunk_service")
if not self.ext_net:
func_list.append("no_floating_ip")
if 'functionality' in black_list_yaml.keys():
for item in black_list_yaml['functionality']:
functions = item['functions']
for func in func_list:
if func in functions:
tests = item['tests']
black_tests.extend(tests)
except Exception: # pylint: disable=broad-except
LOGGER.debug("Functionality exclusion not applied.")
return black_tests
def apply_blacklist(self, case_file_name, result_file_name):
"""Apply blacklist."""
LOGGER.debug("Applying blacklist...")
with open(case_file_name, 'r', encoding='utf-8') as cases_file, open(
result_file_name, 'w', encoding='utf-8') as result_file:
black_tests = list(set(self.excl_func() + self.excl_scenario()))
if black_tests:
LOGGER.debug("Blacklisted tests: %s", str(black_tests))
include = True
for cases_line in cases_file:
if include:
for black_tests_line in black_tests:
if re.search(black_tests_line,
cases_line.strip().rstrip(':')):
include = False
break
else:
result_file.write(str(cases_line))
else:
if cases_line.isspace():
include = True
@staticmethod
def file_is_empty(file_name):
"""Determine is a file is empty."""
try:
if os.stat(file_name).st_size > 0:
return False
except Exception: # pylint: disable=broad-except
pass
return True
def _save_results(self, test_name, task_id):
""" Generate and save task execution results"""
# check for result directory and create it otherwise
if not os.path.exists(self.results_dir):
LOGGER.debug('%s does not exist, we create it.',
self.results_dir)
os.makedirs(self.results_dir)
# put detailed result to log
cmd = (["rally", "task", "detailed", "--uuid", task_id])
LOGGER.debug('running command: %s', cmd)
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
# save report as JSON
report_json_name = f'{test_name}.json'
report_json_dir = os.path.join(self.results_dir, report_json_name)
cmd = (["rally", "task", "report", "--json", "--uuid", task_id,
"--out", report_json_dir])
LOGGER.debug('running command: %s', cmd)
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
with open(report_json_dir, encoding='utf-8') as json_file:
json_results = json_file.read()
self._append_summary(json_results, test_name)
# parse JSON operation result
if self.task_succeed(json_results):
LOGGER.info('Test scenario: "%s" OK.', test_name)
else:
LOGGER.info('Test scenario: "%s" Failed.', test_name)
def run_task(self, test_name):
"""Run a task."""
LOGGER.info('Starting test scenario "%s" ...', test_name)
LOGGER.debug('running command: %s', self.run_cmd)
if six.PY3:
subprocess.call(
self.run_cmd, timeout=self.task_timeout,
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
else:
with open(os.devnull, 'wb') as devnull:
subprocess.call(self.run_cmd, stdout=devnull, stderr=devnull)
task_id = self.get_task_id(test_name)
LOGGER.debug('task_id : %s', task_id)
if not task_id:
LOGGER.error("Failed to retrieve task_id")
raise Exception("Failed to retrieve task id")
self._save_results(test_name, task_id)
def _append_summary(self, json_raw, test_name):
# pylint: disable=too-many-locals
"""Update statistics summary info."""
nb_tests = 0
nb_success = 0
overall_duration = 0.0
success = []
failures = []
rally_report = json.loads(json_raw)
for task in rally_report.get('tasks'):
for subtask in task.get('subtasks'):
has_errors = False
for workload in subtask.get('workloads'):
if workload.get('full_duration'):
overall_duration += workload.get('full_duration')
if workload.get('data'):
nb_tests += len(workload.get('data'))
for result in workload.get('data'):
if not result.get('error'):
nb_success += 1
else:
has_errors = True
if has_errors:
failures.append(subtask['title'])
else:
success.append(subtask['title'])
scenario_summary = {'test_name': test_name,
'overall_duration': overall_duration,
'nb_tests': nb_tests,
'nb_success': nb_success,
'success': success,
'failures': failures,
'task_status': self.task_succeed(json_raw)}
self.summary.append(scenario_summary)
def prepare_run(self, **kwargs):
"""Prepare resources needed by test scenarios."""
assert self.cloud
LOGGER.debug('Validating run tests...')
for test in kwargs.get('tests', self.stests):
if test in self.stests:
self.tests.append(test)
else:
raise Exception(f"Test name '{test}' is invalid")
if not os.path.exists(self.task_dir):
os.makedirs(self.task_dir)
task = os.path.join(self.rally_dir, 'task.yaml')
if not os.path.exists(task):
LOGGER.error("Task file '%s' does not exist.", task)
raise Exception(f"Task file '{task}' does not exist.")
self.task_file = os.path.join(self.task_dir, 'task.yaml')
shutil.copyfile(task, self.task_file)
task_macro = os.path.join(self.rally_dir, 'macro')
if not os.path.exists(task_macro):
LOGGER.error("Task macro dir '%s' does not exist.", task_macro)
raise Exception(f"Task macro dir '{task_macro}' does not exist.")
macro_dir = os.path.join(self.task_dir, 'macro')
if os.path.exists(macro_dir):
shutil.rmtree(macro_dir)
shutil.copytree(task_macro, macro_dir)
self.update_keystone_default_role()
self.compute_cnt = self.count_hypervisors()
self.network_extensions = self.cloud.get_network_extensions()
self.flavor_alt = self.create_flavor_alt()
self.services = [service.name for service in
functest_utils.list_services(self.cloud)]
LOGGER.debug("flavor: %s", self.flavor_alt)
def prepare_task(self, test_name):
"""Prepare resources for test run."""
file_name = self._prepare_test_list(test_name)
if self.file_is_empty(file_name):
LOGGER.info('No tests for scenario "%s"', test_name)
return False
self.run_cmd = (["rally", "task", "start", "--tag", test_name,
"--abort-on-sla-failure",
"--task", self.task_file, "--task-args",
str(self.build_task_args(test_name))])
return True
def run_tests(self, **kwargs):
"""Execute tests."""
optional = kwargs.get('optional', [])
for test in self.tests:
if test in self.services or test not in optional:
if self.prepare_task(test):
self.run_task(test)
def _generate_report(self):
"""Generate test execution summary report."""
total_duration = 0.0
total_nb_tests = 0
total_nb_success = 0
nb_modules = 0
payload = []
res_table = prettytable.PrettyTable(
padding_width=2,
field_names=['Module', 'Duration', 'nb. Test Run', 'Success'])
res_table.align['Module'] = "l"
res_table.align['Duration'] = "r"
res_table.align['Success'] = "r"
# for each scenario we draw a row for the table
for item in self.summary:
if item['task_status'] is True:
nb_modules += 1
total_duration += item['overall_duration']
total_nb_tests += item['nb_tests']
total_nb_success += item['nb_success']
try:
success_avg = 100 * item['nb_success'] / item['nb_tests']
except ZeroDivisionError:
success_avg = 0
success_str = f"{success_avg:0.2f}%"
duration_str = time.strftime("%H:%M:%S",
time.gmtime(item['overall_duration']))
res_table.add_row([item['test_name'], duration_str,
item['nb_tests'], success_str])
payload.append({'module': item['test_name'],
'details': {'duration': item['overall_duration'],
'nb tests': item['nb_tests'],
'success rate': success_str,
'success': item['success'],
'failures': item['failures']}})
total_duration_str = time.strftime("%H:%M:%S",
time.gmtime(total_duration))
try:
self.result = 100 * total_nb_success / total_nb_tests
except ZeroDivisionError:
self.result = 100
success_rate = f"{self.result:0.2f}"
success_rate_str = str(success_rate) + '%'
res_table.add_row(["", "", "", ""])
res_table.add_row(["TOTAL:", total_duration_str, total_nb_tests,
success_rate_str])
LOGGER.info("Rally Summary Report:\n\n%s\n", res_table.get_string())
LOGGER.info("Rally '%s' success_rate is %s%% in %s/%s modules",
self.case_name, success_rate, nb_modules,
len(self.summary))
self.details['summary'] = {'duration': total_duration,
'nb tests': total_nb_tests,
'nb success': success_rate}
self.details["modules"] = payload
@staticmethod
def export_task(file_name, export_type="html"):
"""Export all task results (e.g. html or xunit report)
Raises:
subprocess.CalledProcessError: if Rally doesn't return 0
Returns:
None
"""
cmd = ["rally", "task", "export", "--type", export_type,
"--deployment",
str(getattr(config.CONF, 'rally_deployment_name')),
"--to", file_name]
LOGGER.debug('running command: %s', cmd)
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
@staticmethod
def verify_report(file_name, uuid, export_type="html"):
"""Generate the verifier report (e.g. html or xunit report)
Raises:
subprocess.CalledProcessError: if Rally doesn't return 0
Returns:
None
"""
cmd = ["rally", "verify", "report", "--type", export_type,
"--uuid", uuid, "--to", file_name]
LOGGER.debug('running command: %s', cmd)
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
def clean(self):
"""Cleanup of OpenStack resources. Should be called on completion."""
self.clean_rally_conf()
self.clean_rally_logs()
if self.flavor_alt:
self.orig_cloud.delete_flavor(self.flavor_alt.id)
super().clean()
def is_successful(self):
"""The overall result of the test."""
for item in self.summary:
if item['task_status'] is False:
return testcase.TestCase.EX_TESTCASE_FAILED
return super().is_successful()
@staticmethod
def update_rally_logs(res_dir, rally_conf='/etc/rally/rally.conf'):
"""Print rally logs in res dir"""
if not os.path.exists(res_dir):
os.makedirs(res_dir)
rconfig = configparser.RawConfigParser()
rconfig.read(rally_conf)
rconfig.set('DEFAULT', 'debug', True)
rconfig.set('DEFAULT', 'use_stderr', False)
rconfig.set('DEFAULT', 'log-file', 'rally.log')
rconfig.set('DEFAULT', 'log_dir', res_dir)
with open(rally_conf, 'w', encoding='utf-8') as config_file:
rconfig.write(config_file)
@staticmethod
def clean_rally_logs(rally_conf='/etc/rally/rally.conf'):
"""Clean Rally config"""
rconfig = configparser.RawConfigParser()
rconfig.read(rally_conf)
if rconfig.has_option('DEFAULT', 'use_stderr'):
rconfig.remove_option('DEFAULT', 'use_stderr')
if rconfig.has_option('DEFAULT', 'debug'):
rconfig.remove_option('DEFAULT', 'debug')
if rconfig.has_option('DEFAULT', 'log-file'):
rconfig.remove_option('DEFAULT', 'log-file')
if rconfig.has_option('DEFAULT', 'log_dir'):
rconfig.remove_option('DEFAULT', 'log_dir')
with open(rally_conf, 'w', encoding='utf-8') as config_file:
rconfig.write(config_file)
def run(self, **kwargs):
"""Run testcase."""
self.start_time = time.time()
try:
assert super().run(
**kwargs) == testcase.TestCase.EX_OK
self.update_rally_logs(self.res_dir)
self.create_rally_deployment(environ=self.project.get_environ())
self.prepare_run(**kwargs)
self.run_tests(**kwargs)
self._generate_report()
self.export_task(
f"{self.results_dir}/{self.case_name}.html")
self.export_task(
f"{self.results_dir}/{self.case_name}.xml",
export_type="junit-xml")
res = testcase.TestCase.EX_OK
except Exception: # pylint: disable=broad-except
LOGGER.exception('Error with run:')
self.result = 0
res = testcase.TestCase.EX_RUN_ERROR
self.stop_time = time.time()
return res
class RallySanity(RallyBase):
"""Rally sanity testcase implementation."""
def __init__(self, **kwargs):
"""Initialize RallySanity object."""
if "case_name" not in kwargs:
kwargs["case_name"] = "rally_sanity"
super().__init__(**kwargs)
self.smoke = True
self.scenario_dir = os.path.join(self.rally_scenario_dir, 'sanity')
class RallyFull(RallyBase):
"""Rally full testcase implementation."""
task_timeout = 7200
def __init__(self, **kwargs):
"""Initialize RallyFull object."""
if "case_name" not in kwargs:
kwargs["case_name"] = "rally_full"
super().__init__(**kwargs)
self.smoke = False
self.scenario_dir = os.path.join(self.rally_scenario_dir, 'full')
class RallyJobs(RallyBase):
"""Rally OpenStack CI testcase implementation."""
stests = ["neutron"]
task_timeout = 7200
def __init__(self, **kwargs):
"""Initialize RallyJobs object."""
if "case_name" not in kwargs:
kwargs["case_name"] = "rally_jobs"
super().__init__(**kwargs)
self.task_file = os.path.join(self.rally_dir, 'rally_jobs.yaml')
self.task_yaml = None
def prepare_run(self, **kwargs):
"""Create resources needed by test scenarios."""
super().prepare_run(**kwargs)
with open(
os.path.join(self.rally_dir, 'rally_jobs.yaml'),
'r', encoding='utf-8') as task_file:
self.task_yaml = yaml.safe_load(task_file)
for task in self.task_yaml:
if task not in self.tests:
raise Exception(f"Test '{task}' not in '{self.tests}'")
def apply_blacklist(self, case_file_name, result_file_name):
# pylint: disable=too-many-branches
"""Apply blacklist."""
LOGGER.debug("Applying blacklist...")
black_tests = list(set(self.excl_func() +
self.excl_scenario()))
if black_tests:
LOGGER.debug("Blacklisted tests: %s", str(black_tests))
template = YAML(typ='jinja2')
with open(case_file_name, 'r', encoding='utf-8') as fname:
cases = template.load(fname)
if cases.get("version", 1) == 1:
# scenarios in dictionary
for name in cases.keys():
if self.in_iterable_re(name, black_tests):
cases.pop(name)
else:
# workloads in subtasks
for sind, subtask in reversed(list(
enumerate(cases.get('subtasks', [])))):
for wind, workload in reversed(list(
enumerate(subtask.get('workloads', [])))):
scenario = workload.get('scenario', {})
for name in scenario.keys():
if self.in_iterable_re(name, black_tests):
cases['subtasks'][sind]['workloads'].pop(wind)
break
if 'workloads' in cases['subtasks'][sind]:
if not cases['subtasks'][sind]['workloads']:
cases['subtasks'].pop(sind)
# scenarios in subtasks
for sind, subtask in reversed(list(
enumerate(cases.get('subtasks', [])))):
scenario = subtask.get('scenario', {})
for name in scenario.keys():
if self.in_iterable_re(name, black_tests):
cases['subtasks'].pop(sind)
break
with open(result_file_name, 'w', encoding='utf-8') as fname:
template.dump(cases, fname)
def build_task_args(self, test_name):
"""Build arguments for the Rally task."""
task_args = {}
if self.ext_net:
task_args['floating_network'] = str(self.ext_net.name)
else:
task_args['floating_network'] = ''
task_args['image_name'] = str(self.image.name)
task_args['flavor_name'] = str(self.flavor.name)
return task_args
def prepare_task(self, test_name):
"""Prepare resources for test run."""
jobs_dir = os.path.join(
getattr(config.CONF, 'dir_rally_data'), test_name, 'rally-jobs')
task_name = self.task_yaml.get(test_name).get("task")
task = os.path.join(jobs_dir, task_name)
if not os.path.exists(task):
raise Exception(f"The scenario '{task}' does not exist.")
LOGGER.debug('Scenario fetched from : %s', task)
if not os.path.exists(self.temp_dir):
os.makedirs(self.temp_dir)
task_file_name = os.path.join(self.temp_dir, task_name)
self.apply_blacklist(task, task_file_name)
self.run_cmd = (["rally", "task", "start", "--tag", test_name,
"--task", task_file_name,
"--task-args", str(self.build_task_args(test_name))])
return True
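# --- Hedged usage sketch (illustrative only) ---
# How one of the testcases above is typically driven from xtesting/functest.
# The surrounding environment (OpenStack credentials, a working Rally install,
# images and flavors) is assumed and not provided by this module.
def run_rally_sanity_example():
    testcase_obj = RallySanity()
    try:
        if testcase_obj.run(tests=["authenticate"]) == testcase.TestCase.EX_OK:
            LOGGER.info("rally_sanity result: %s", testcase_obj.is_successful())
    finally:
        testcase_obj.clean()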
|
|
"""
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from django.core.exceptions import (
ImproperlyConfigured, ValidationError, NON_FIELD_ERRORS, FieldError)
from django.forms.fields import Field, ChoiceField
from django.forms.forms import DeclarativeFieldsMetaclass, BaseForm
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (SelectMultiple, HiddenInput,
MultipleHiddenInput)
from django.utils import six
from django.utils.encoding import smart_text, force_text
from django.utils.text import get_text_list, capfirst
from django.utils.translation import ugettext_lazy as _, ugettext
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'save_instance', 'ModelChoiceField', 'ModelMultipleChoiceField',
'ALL_FIELDS', 'BaseModelFormSet', 'modelformset_factory',
'BaseInlineFormSet', 'inlineformset_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
"""
Constructs and returns a model instance from the bound ``form``'s
``cleaned_data``, but does not save the returned instance to the
database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
if not f.editable or isinstance(f, models.AutoField) \
or f.name not in cleaned_data:
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
def save_instance(form, instance, fields=None, fail_message='saved',
commit=True, exclude=None, construct=True):
"""
Saves bound Form ``form``'s cleaned_data into model instance ``instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
If construct=False, assume ``instance`` has already been constructed and
just needs to be saved.
"""
if construct:
instance = construct_instance(form, instance, fields, exclude)
opts = instance._meta
if form.errors:
raise ValueError("The %s could not be %s because the data didn't"
" validate." % (opts.object_name, fail_message))
# Wrap up the saving of m2m data as a function.
def save_m2m():
cleaned_data = form.cleaned_data
# Note that for historical reasons we want to include also
# virtual_fields here. (GenericRelation was previously a fake
# m2m field).
for f in opts.many_to_many + opts.virtual_fields:
if not hasattr(f, 'save_form_data'):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(instance, cleaned_data[f.name])
if commit:
# If we are committing, save the instance and the m2m data immediately.
instance.save()
save_m2m()
else:
# We're not committing. Add a method to the form to allow deferred
# saving of m2m data.
form.save_m2m = save_m2m
return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Returns a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned dict.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned dict, even if they are listed in
the ``fields`` argument.
"""
# avoid a circular import
from django.db.models.fields.related import ManyToManyField
opts = instance._meta
data = {}
for f in opts.concrete_fields + opts.virtual_fields + opts.many_to_many:
if not getattr(f, 'editable', False):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if isinstance(f, ManyToManyField):
# If the object doesn't have a primary key yet, just use an empty
# list for its m2m fields. Calling f.value_from_object will raise
# an exception.
if instance.pk is None:
data[f.name] = []
else:
# MultipleChoiceWidget needs a list of pks, not object instances.
qs = f.value_from_object(instance)
if qs._result_cache is not None:
data[f.name] = [item.pk for item in qs]
else:
data[f.name] = list(qs.values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(instance)
return data
def fields_for_model(model, fields=None, exclude=None, widgets=None,
formfield_callback=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
    Returns an ``OrderedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
"""
field_list = []
ignored = []
opts = model._meta
# Avoid circular import
from django.db.models.fields import Field as ModelField
sortable_virtual_fields = [f for f in opts.virtual_fields
if isinstance(f, ModelField)]
for f in sorted(opts.concrete_fields + sortable_virtual_fields + opts.many_to_many):
if not getattr(f, 'editable', False):
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs['widget'] = widgets[f.name]
if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
kwargs['localize'] = True
if labels and f.name in labels:
kwargs['label'] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs['help_text'] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs['error_messages'] = error_messages[f.name]
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a function or callable')
else:
formfield = formfield_callback(f, **kwargs)
if formfield:
field_list.append((f.name, formfield))
else:
ignored.append(f.name)
field_dict = OrderedDict(field_list)
if fields:
field_dict = OrderedDict(
[(f, field_dict.get(f)) for f in fields
if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
)
return field_dict
class ModelFormOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.widgets = getattr(options, 'widgets', None)
self.localized_fields = getattr(options, 'localized_fields', None)
self.labels = getattr(options, 'labels', None)
self.help_texts = getattr(options, 'help_texts', None)
self.error_messages = getattr(options, 'error_messages', None)
class ModelFormMetaclass(DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
formfield_callback = attrs.pop('formfield_callback', None)
new_class = super(ModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs)
if bases == (BaseModelForm,):
return new_class
opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
# We check if a string was passed to `fields` or `exclude`,
# which is likely to be a mistake where the user typed ('foo') instead
# of ('foo',)
for opt in ['fields', 'exclude', 'localized_fields']:
value = getattr(opts, opt)
if isinstance(value, six.string_types) and value != ALL_FIELDS:
msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
"Did you mean to type: ('%(value)s',)?" % {
'model': new_class.__name__,
'opt': opt,
'value': value,
})
raise TypeError(msg)
if opts.model:
# If a model is defined, extract form fields from it.
if opts.fields is None and opts.exclude is None:
raise ImproperlyConfigured(
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form %s "
"needs updating." % name
)
if opts.fields == ALL_FIELDS:
# Sentinel for fields_for_model to indicate "get the list of
# fields from the model"
opts.fields = None
fields = fields_for_model(opts.model, opts.fields, opts.exclude,
opts.widgets, formfield_callback,
opts.localized_fields, opts.labels,
opts.help_texts, opts.error_messages)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = [k for k, v in six.iteritems(fields) if not v]
missing_fields = (set(none_model_fields) -
set(new_class.declared_fields.keys()))
if missing_fields:
message = 'Unknown field(s) (%s) specified for %s'
message = message % (', '.join(missing_fields),
opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(new_class.declared_fields)
else:
fields = new_class.declared_fields
new_class.base_fields = fields
return new_class
class BaseModelForm(BaseForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, instance=None):
opts = self._meta
if opts.model is None:
raise ValueError('ModelForm has no model class specified.')
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super(BaseModelForm, self).__init__(data, files, auto_id, prefix, object_data,
error_class, label_suffix, empty_permitted)
# Apply ``limit_choices_to`` to each field.
for field_name in self.fields:
formfield = self.fields[field_name]
if hasattr(formfield, 'queryset'):
limit_choices_to = formfield.limit_choices_to
if limit_choices_to is not None:
if callable(limit_choices_to):
limit_choices_to = limit_choices_to()
formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, several types of fields need to be
excluded from model validation. See the following tickets for
details: #12507, #12521, #12553
"""
exclude = []
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.append(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.append(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.append(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors.keys():
exclude.append(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
# validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field, None)
if not f.blank and not form_field.required and field_value in form_field.empty_values:
exclude.append(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _update_errors(self, errors):
# Override any validation error messages defined at the model level
# with those defined at the form level.
opts = self._meta
for field, messages in errors.error_dict.items():
if (field == NON_FIELD_ERRORS and opts.error_messages and
NON_FIELD_ERRORS in opts.error_messages):
error_messages = opts.error_messages[NON_FIELD_ERRORS]
elif field in self.fields:
error_messages = self.fields[field].error_messages
else:
continue
for message in messages:
if (isinstance(message, ValidationError) and
message.code in error_messages):
message.message = error_messages[message.code]
self.add_error(None, errors)
def _post_clean(self):
opts = self._meta
# Update the model instance with self.cleaned_data.
self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)
exclude = self._get_validation_exclusions()
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
exclude.append(name)
try:
self.instance.full_clean(exclude=exclude, validate_unique=False)
except ValidationError as e:
self._update_errors(e)
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
Calls the instance's validate_unique() method and updates the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def save(self, commit=True):
"""
Saves this ``form``'s cleaned_data into model instance
``self.instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
"""
if self.instance.pk is None:
fail_message = 'created'
else:
fail_message = 'changed'
return save_instance(self, self.instance, self._meta.fields,
fail_message, commit, self._meta.exclude,
construct=False)
save.alters_data = True
class ModelForm(six.with_metaclass(ModelFormMetaclass, BaseModelForm)):
pass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
formfield_callback=None, widgets=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
Returns a ModelForm containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields. If omitted or '__all__',
all fields will be used.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
"""
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if widgets is not None:
attrs['widgets'] = widgets
if localized_fields is not None:
attrs['localized_fields'] = localized_fields
if labels is not None:
attrs['labels'] = labels
if help_texts is not None:
attrs['help_texts'] = help_texts
if error_messages is not None:
attrs['error_messages'] = error_messages
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
parent = (object,)
if hasattr(form, 'Meta'):
parent = (form.Meta, object)
Meta = type(str('Meta'), parent, attrs)
# Give this new form class a reasonable name.
class_name = model.__name__ + str('Form')
# Class attributes for the new form class.
form_class_attrs = {
'Meta': Meta,
'formfield_callback': formfield_callback
}
if (getattr(Meta, 'fields', None) is None and
getattr(Meta, 'exclude', None) is None):
raise ImproperlyConfigured(
"Calling modelform_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
    # Instantiate type(form) in order to use the same metaclass as form.
return type(form)(class_name, (form,), form_class_attrs)
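# Hedged usage sketch for modelform_factory() (``Author`` is a hypothetical
# model, not defined or imported here):
#
#   AuthorForm = modelform_factory(Author, fields=('name', 'birth_date'))
#   form = AuthorForm(data={'name': 'Ada', 'birth_date': '1815-12-10'})
#   if form.is_valid():
#       author = form.save()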
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
model = None
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
queryset=None, **kwargs):
self.queryset = queryset
self.initial_extra = kwargs.pop('initial', None)
defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
defaults.update(kwargs)
super(BaseModelFormSet, self).__init__(**defaults)
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if not (self.data or self.files):
return len(self.get_queryset())
return super(BaseModelFormSet, self).initial_form_count()
def _existing_object(self, pk):
if not hasattr(self, '_object_dict'):
self._object_dict = dict((o.pk, o) for o in self.get_queryset())
return self._object_dict.get(pk)
def _get_to_python(self, field):
"""
If the field is a related field, fetch the concrete field's (that
is, the ultimate pointed-to field's) get_prep_value.
"""
while field.rel is not None:
field = field.rel.get_related_field()
return field.to_python
def _construct_form(self, i, **kwargs):
if self.is_bound and i < self.initial_form_count():
pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
pk = self.data[pk_key]
pk_field = self.model._meta.pk
to_python = self._get_to_python(pk_field)
pk = to_python(pk)
kwargs['instance'] = self._existing_object(pk)
if i < self.initial_form_count() and 'instance' not in kwargs:
kwargs['instance'] = self.get_queryset()[i]
if i >= self.initial_form_count() and self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
def get_queryset(self):
if not hasattr(self, '_queryset'):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_queryset()
# If the queryset isn't already ordered we need to add an
# artificial ordering here to make sure that all formsets
# constructed from this queryset have the same form order.
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
# Removed queryset limiting here. As per discussion re: #13023
# on django-dev, max_num should not prevent existing
# related objects/inlines from being displayed.
self._queryset = qs
return self._queryset
def save_new(self, form, commit=True):
"""Saves and returns a new model instance for the given form."""
return form.save(commit=commit)
def save_existing(self, form, instance, commit=True):
"""Saves and returns an existing model instance for the given form."""
return form.save(commit=commit)
def save(self, commit=True):
"""Saves model instances for every form, adding and changing instances
as necessary, and returns the list of instances.
"""
if not commit:
self.saved_forms = []
def save_m2m():
for form in self.saved_forms:
form.save_m2m()
self.save_m2m = save_m2m
return self.save_existing_objects(commit) + self.save_new_objects(commit)
save.alters_data = True
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
for form in valid_forms:
exclude = form._get_validation_exclusions()
unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
all_unique_checks = all_unique_checks.union(set(unique_checks))
all_date_checks = all_date_checks.union(set(date_checks))
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# get data for each field of each of unique_check
row_data = (form.cleaned_data[field]
for field in unique_check if field in form.cleaned_data)
# Reduce Model instances to their primary key values
row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d
for d in row_data)
if row_data and None not in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in valid_forms:
# see if we have data for both fields
if (form.cleaned_data and form.cleaned_data[field] is not None
and form.cleaned_data[unique_for] is not None):
# if it's a date lookup we need to get the data for all the fields
if lookup == 'date':
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
# if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(data)
if errors:
raise ValidationError(errors)
def get_unique_error_message(self, unique_check):
if len(unique_check) == 1:
return ugettext("Please correct the duplicate data for %(field)s.") % {
"field": unique_check[0],
}
else:
return ugettext("Please correct the duplicate data for %(field)s, "
"which must be unique.") % {
"field": get_text_list(unique_check, six.text_type(_("and"))),
}
def get_date_error_message(self, date_check):
return ugettext("Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s in %(date_field)s.") % {
'field_name': date_check[2],
'date_field': date_check[3],
'lookup': six.text_type(date_check[1]),
}
def get_form_error(self):
return ugettext("Please correct the duplicate values below.")
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
forms_to_delete = self.deleted_forms
for form in self.initial_forms:
obj = form.instance
if form in forms_to_delete:
# If the pk is None, it means that the object can't be
# deleted again. Possible reason for this is that the
# object was already deleted from the DB. Refs #14877.
if obj.pk is None:
continue
self.deleted_objects.append(obj)
if commit:
obj.delete()
elif form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def save_new_objects(self, commit=True):
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed():
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete and self._should_delete_form(form):
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def add_fields(self, form, index):
"""Add a hidden field for the object's primary key."""
from django.db.models import AutoField, OneToOneField, ForeignKey
self._pk_field = pk = self.model._meta.pk
# If a pk isn't editable, then it won't be on the form, so we need to
# add it here so we can tell which object is which when we get the
# data back. Generally, pk.editable should be false, but for some
# reason, auto_created pk fields and AutoField's editable attribute is
# True, so check for that as well.
def pk_is_not_editable(pk):
return ((not pk.editable) or (pk.auto_created or isinstance(pk, AutoField))
or (pk.rel and pk.rel.parent_link and pk_is_not_editable(pk.rel.to._meta.pk)))
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
pk_value = form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
qs = pk.rel.to._default_manager.get_queryset()
else:
qs = self.model._default_manager.get_queryset()
qs = qs.using(form.instance._state.db)
if form._meta.widgets:
widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
else:
widget = HiddenInput
form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
formset=BaseModelFormSet, extra=1, can_delete=False,
can_order=False, max_num=None, fields=None, exclude=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
Returns a FormSet class for the given Django model class.
"""
meta = getattr(form, 'Meta', None)
if meta is None:
meta = type(str('Meta'), (object,), {})
if (getattr(meta, 'fields', fields) is None and
getattr(meta, 'exclude', exclude) is None):
raise ImproperlyConfigured(
"Calling modelformset_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
form = modelform_factory(model, form=form, fields=fields, exclude=exclude,
formfield_callback=formfield_callback,
widgets=widgets, localized_fields=localized_fields,
labels=labels, help_texts=help_texts, error_messages=error_messages)
FormSet = formset_factory(form, formset, extra=extra, max_num=max_num,
can_order=can_order, can_delete=can_delete,
validate_max=validate_max)
FormSet.model = model
return FormSet
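# Hedged usage sketch for modelformset_factory() (``Author`` is a hypothetical
# model and ``request.POST`` stands in for bound form data):
#
#   AuthorFormSet = modelformset_factory(Author, fields=('name',), extra=1)
#   formset = AuthorFormSet(request.POST, queryset=Author.objects.all())
#   if formset.is_valid():
#       formset.save()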
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
"""A formset for child objects related to a parent."""
def __init__(self, data=None, files=None, instance=None,
save_as_new=False, prefix=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.rel.to()
else:
self.instance = instance
self.save_as_new = save_as_new
if queryset is None:
queryset = self.model._default_manager
if self.instance.pk is not None:
qs = queryset.filter(**{self.fk.name: self.instance})
else:
qs = queryset.none()
super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
queryset=qs, **kwargs)
def initial_form_count(self):
if self.save_as_new:
return 0
return super(BaseInlineFormSet, self).initial_form_count()
def _construct_form(self, i, **kwargs):
form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
if self.save_as_new:
# Remove the primary key from the form's data, we are only
# creating new instances
form.data[form.add_prefix(self._pk_field.name)] = None
# Remove the foreign key from the form's data
form.data[form.add_prefix(self.fk.name)] = None
# Set the fk value here so that the form can do its validation.
setattr(form.instance, self.fk.get_attname(), self.instance.pk)
return form
@classmethod
def get_default_prefix(cls):
from django.db.models.fields.related import RelatedObject
return RelatedObject(cls.fk.rel.to, cls.model, cls.fk).get_accessor_name().replace('+', '')
def save_new(self, form, commit=True):
# Use commit=False so we can assign the parent key afterwards, then
# save the object.
obj = form.save(commit=False)
pk_value = getattr(self.instance, self.fk.rel.field_name)
setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
if commit:
obj.save()
# form.save_m2m() can be called via the formset later on if commit=False
if commit and hasattr(form, 'save_m2m'):
form.save_m2m()
return obj
def add_fields(self, form, index):
super(BaseInlineFormSet, self).add_fields(form, index)
if self._pk_field == self.fk:
name = self._pk_field.name
kwargs = {'pk_field': True}
else:
# The foreign key field might not be on the form, so we poke at the
# Model field to get the label, since we need that for error messages.
name = self.fk.name
kwargs = {
'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
}
if self.fk.rel.field_name != self.fk.rel.to._meta.pk.name:
kwargs['to_field'] = self.fk.rel.field_name
form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if form._meta.fields:
if isinstance(form._meta.fields, tuple):
form._meta.fields = list(form._meta.fields)
form._meta.fields.append(self.fk.name)
def get_unique_error_message(self, unique_check):
unique_check = [field for field in unique_check if field != self.fk.name]
return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
"""
Finds and returns the ForeignKey from model to parent if there is one
(returns None if can_fail is True and no such field exists). If fk_name is
    provided, assume it is the name of the ForeignKey field. Unless can_fail is
True, an exception is raised if there is no ForeignKey from model to
parent_model.
"""
# avoid circular import
from django.db.models import ForeignKey
opts = model._meta
if fk_name:
fks_to_parent = [f for f in opts.fields if f.name == fk_name]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
if not isinstance(fk, ForeignKey) or \
(fk.rel.to != parent_model and
fk.rel.to not in parent_model._meta.get_parent_list()):
raise ValueError(
"fk_name '%s' is not a ForeignKey to '%s.%'."
% (fk_name, parent_model._meta.app_label, parent_model._meta.object_name))
elif len(fks_to_parent) == 0:
raise ValueError(
"'%s.%s' has no field named '%s'."
% (model._meta.app_label, model._meta.object_name, fk_name))
else:
# Try to discover what the ForeignKey from model to parent_model is
fks_to_parent = [
f for f in opts.fields
if isinstance(f, ForeignKey)
and (f.rel.to == parent_model
or f.rel.to in parent_model._meta.get_parent_list())
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
elif len(fks_to_parent) == 0:
if can_fail:
return
raise ValueError(
"'%s.%s' has no ForeignKey to '%s.%s'."
% (model._meta.app_label, model._meta.object_name, parent_model._meta.app_label, parent_model._meta.object_name))
else:
raise ValueError(
"'%s.%s' has more than one ForeignKey to '%s.%s'."
% (model._meta.app_label, model._meta.object_name, parent_model._meta.app_label, parent_model._meta.object_name))
return fk
def inlineformset_factory(parent_model, model, form=ModelForm,
formset=BaseInlineFormSet, fk_name=None,
fields=None, exclude=None, extra=3, can_order=False,
can_delete=True, max_num=None, formfield_callback=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
Returns an ``InlineFormSet`` for the given kwargs.
You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'max_num': max_num,
'widgets': widgets,
'validate_max': validate_max,
'localized_fields': localized_fields,
'labels': labels,
'help_texts': help_texts,
'error_messages': error_messages,
}
FormSet = modelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
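# Illustrative usage sketch (not part of the original source), assuming
# hypothetical ``Author`` and ``Book`` models where ``Book`` has a single
# ForeignKey to ``Author`` (otherwise pass ``fk_name``)::
#
#     BookFormSet = inlineformset_factory(Author, Book, fields=('title',))
#     author = Author.objects.get(pk=1)
#     formset = BookFormSet(request.POST or None, instance=author)
#     if formset.is_valid():
#         formset.save()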
# Fields #####################################################################
class InlineForeignKeyField(Field):
"""
A basic integer field that deals with validating the given value to a
given parent instance in an inline.
"""
widget = HiddenInput
default_error_messages = {
'invalid_choice': _('The inline foreign key did not match the parent instance primary key.'),
}
def __init__(self, parent_instance, *args, **kwargs):
self.parent_instance = parent_instance
self.pk_field = kwargs.pop("pk_field", False)
self.to_field = kwargs.pop("to_field", None)
if self.parent_instance is not None:
if self.to_field:
kwargs["initial"] = getattr(self.parent_instance, self.to_field)
else:
kwargs["initial"] = self.parent_instance.pk
kwargs["required"] = False
super(InlineForeignKeyField, self).__init__(*args, **kwargs)
def clean(self, value):
if value in self.empty_values:
if self.pk_field:
return None
# if there is no value act as we did before.
return self.parent_instance
        # ensure that we compare the values as equal types.
if self.to_field:
orig = getattr(self.parent_instance, self.to_field)
else:
orig = self.parent_instance.pk
if force_text(value) != force_text(orig):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return self.parent_instance
def _has_changed(self, initial, data):
return False
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
self.queryset = field.queryset
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
if self.field.cache_choices:
if self.field.choice_cache is None:
self.field.choice_cache = [
self.choice(obj) for obj in self.queryset.all()
]
for choice in self.field.choice_cache:
yield choice
else:
for obj in self.queryset.all():
yield self.choice(obj)
def __len__(self):
return (len(self.queryset) +
(1 if self.field.empty_label is not None else 0))
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
"""A ChoiceField whose choices are a model QuerySet."""
# This class is a subclass of ChoiceField for purity, but it doesn't
# actually use any of ChoiceField's implementation.
default_error_messages = {
'invalid_choice': _('Select a valid choice. That choice is not one of'
' the available choices.'),
}
def __init__(self, queryset, empty_label="---------", cache_choices=False,
required=True, widget=None, label=None, initial=None,
help_text='', to_field_name=None, limit_choices_to=None,
*args, **kwargs):
if required and (initial is not None):
self.empty_label = None
else:
self.empty_label = empty_label
self.cache_choices = cache_choices
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(self, required, widget, label, initial, help_text,
*args, **kwargs)
self.queryset = queryset
self.limit_choices_to = limit_choices_to # limit the queryset later.
self.choice_cache = None
self.to_field_name = to_field_name
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
result.queryset = result.queryset
return result
def _get_queryset(self):
return self._queryset
def _set_queryset(self, queryset):
self._queryset = queryset
self.widget.choices = self.choices
queryset = property(_get_queryset, _set_queryset)
# this method will be used to create object labels by the QuerySetIterator.
# Override it to customize the label.
def label_from_instance(self, obj):
"""
This method is used to convert objects into strings; it's used to
generate the labels for the choices presented by this object. Subclasses
can override this method to customize the display of the choices.
"""
return smart_text(obj)
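    # Illustrative sketch (not part of the original source): subclasses override
    # label_from_instance() to change how each choice is rendered, e.g. for a
    # hypothetical ``Author`` model::
    #
    #     class AuthorChoiceField(ModelChoiceField):
    #         def label_from_instance(self, obj):
    #             return '%s, %s' % (obj.last_name, obj.first_name)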
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return ModelChoiceIterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def prepare_value(self, value):
if hasattr(value, '_meta'):
if self.to_field_name:
return value.serializable_value(self.to_field_name)
else:
return value.pk
return super(ModelChoiceField, self).prepare_value(value)
def to_python(self, value):
if value in self.empty_values:
return None
try:
key = self.to_field_name or 'pk'
value = self.queryset.get(**{key: value})
except (ValueError, self.queryset.model.DoesNotExist):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return value
def validate(self, value):
return Field.validate(self, value)
def _has_changed(self, initial, data):
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return force_text(self.prepare_value(initial_value)) != force_text(data_value)
class ModelMultipleChoiceField(ModelChoiceField):
"""A MultipleChoiceField whose choices are a model QuerySet."""
widget = SelectMultiple
hidden_widget = MultipleHiddenInput
default_error_messages = {
'list': _('Enter a list of values.'),
'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
' available choices.'),
'invalid_pk_value': _('"%(pk)s" is not a valid value for a primary key.')
}
def __init__(self, queryset, cache_choices=False, required=True,
widget=None, label=None, initial=None,
help_text='', *args, **kwargs):
super(ModelMultipleChoiceField, self).__init__(queryset, None,
cache_choices, required, widget, label, initial, help_text,
*args, **kwargs)
def to_python(self, value):
if not value:
return []
to_py = super(ModelMultipleChoiceField, self).to_python
return [to_py(val) for val in value]
def clean(self, value):
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
elif not self.required and not value:
return self.queryset.none()
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'], code='list')
key = self.to_field_name or 'pk'
for pk in value:
try:
self.queryset.filter(**{key: pk})
except ValueError:
raise ValidationError(
self.error_messages['invalid_pk_value'],
code='invalid_pk_value',
params={'pk': pk},
)
qs = self.queryset.filter(**{'%s__in' % key: value})
pks = set(force_text(getattr(o, key)) for o in qs)
for val in value:
if force_text(val) not in pks:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
# Since this overrides the inherited ModelChoiceField.clean
# we run custom validators here
self.run_validators(value)
return qs
def prepare_value(self, value):
if (hasattr(value, '__iter__') and
not isinstance(value, six.text_type) and
not hasattr(value, '_meta')):
return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
return super(ModelMultipleChoiceField, self).prepare_value(value)
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in self.prepare_value(initial))
data_set = set(force_text(value) for value in data)
return data_set != initial_set
def modelform_defines_fields(form_class):
return (form_class is not None and (
hasattr(form_class, '_meta') and
(form_class._meta.fields is not None or
form_class._meta.exclude is not None)
))
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Implements the distribution corresponding to the loss function.
This library implements the parts of Section 2 of "A General and Adaptive Robust
Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077, that are
required for evaluating the negative log-likelihood (NLL) of the distribution
and for sampling from the distribution.
"""
import numbers
import mpmath
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from robust_loss import cubic_spline
from robust_loss import general
from robust_loss import util
def analytical_base_partition_function(numer, denom):
r"""Accurately approximate the partition function Z(numer / denom).
This uses the analytical formulation of the true partition function Z(alpha),
as described in the paper (the math after Equation 18), where alpha is a
positive rational value numer/denom. This is expensive to compute and not
differentiable, so it's not implemented in TensorFlow and is only used for
unit tests.
Args:
numer: the numerator of alpha, an integer >= 0.
denom: the denominator of alpha, an integer > 0.
Returns:
Z(numer / denom), a double-precision float, accurate to around 9 digits
of precision.
Raises:
ValueError: If `numer` is not a non-negative integer or if `denom` is not
a positive integer.
"""
if not isinstance(numer, numbers.Integral):
raise ValueError('Expected `numer` of type int, but is of type {}'.format(
type(numer)))
if not isinstance(denom, numbers.Integral):
raise ValueError('Expected `denom` of type int, but is of type {}'.format(
type(denom)))
if not numer >= 0:
raise ValueError('Expected `numer` >= 0, but is = {}'.format(numer))
if not denom > 0:
raise ValueError('Expected `denom` > 0, but is = {}'.format(denom))
alpha = numer / denom
# The Meijer-G formulation of the partition function has singularities at
# alpha = 0 and alpha = 2, but at those special cases the partition function
# has simple closed forms which we special-case here.
if alpha == 0:
return np.pi * np.sqrt(2)
if alpha == 2:
return np.sqrt(2 * np.pi)
# Z(n/d) as described in the paper.
a_p = (np.arange(1, numer, dtype=np.float64) / numer).tolist()
b_q = ((np.arange(-0.5, numer - 0.5, dtype=np.float64)) /
numer).tolist() + (np.arange(1, 2 * denom, dtype=np.float64) /
(2 * denom)).tolist()
z = (1. / numer - 1. / (2 * denom))**(2 * denom)
mult = np.exp(np.abs(2 * denom / numer - 1.)) * np.sqrt(
np.abs(2 * denom / numer - 1.)) * (2 * np.pi)**(1 - denom)
return mult * np.float64(mpmath.meijerg([[], a_p], [b_q, []], z))
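# Illustrative check (not part of the original module): the special-cased
# closed forms above give Z(0) = pi * sqrt(2) and Z(2) = sqrt(2 * pi), which
# can be used as a quick sanity check of the Meijer G-function evaluation::
#
#     assert np.isclose(analytical_base_partition_function(0, 1),
#                       np.pi * np.sqrt(2.))
#     assert np.isclose(analytical_base_partition_function(2, 1),
#                       np.sqrt(2. * np.pi))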
def partition_spline_curve(alpha):
"""Applies a curve to alpha >= 0 to compress its range before interpolation.
This is a weird hand-crafted function designed to take in alpha values and
curve them to occupy a short finite range that works well when using spline
interpolation to model the partition function Z(alpha). Because Z(alpha)
is only varied in [0, 4] and is especially interesting around alpha=2, this
curve is roughly linear in [0, 4] with a slope of ~1 at alpha=0 and alpha=4
but a slope of ~10 at alpha=2. When alpha > 4 the curve becomes logarithmic.
Some (input, output) pairs for this function are:
[(0, 0), (1, ~1.2), (2, 4), (3, ~6.8), (4, 8), (8, ~8.8), (400000, ~12)]
This function is continuously differentiable.
Args:
alpha: A numpy array or TF tensor (float32 or float64) with values >= 0.
Returns:
An array/tensor of curved values >= 0 with the same type as `alpha`, to be
used as input x-coordinates for spline interpolation.
"""
c = lambda z: tf.cast(z, alpha.dtype)
assert_ops = [tf.Assert(tf.reduce_all(alpha >= 0.), [alpha])]
with tf.control_dependencies(assert_ops):
x = tf.where(alpha < 4, (c(2.25) * alpha - c(4.5)) /
(tf.abs(alpha - c(2)) + c(0.25)) + alpha + c(2),
c(5) / c(18) * util.log_safe(c(4) * alpha - c(15)) + c(8))
return x
def inv_partition_spline_curve(x):
"""The inverse of partition_spline_curve()."""
c = lambda z: tf.cast(z, x.dtype)
assert_ops = [tf.Assert(tf.reduce_all(x >= 0.), [x])]
with tf.control_dependencies(assert_ops):
alpha = tf.where(
x < 8,
c(0.5) * x + tf.where(
x <= 4,
c(1.25) - tf.sqrt(c(1.5625) - x + c(.25) * tf.square(x)),
c(-1.25) + tf.sqrt(c(9.5625) - c(3) * x + c(.25) * tf.square(x))),
c(3.75) + c(0.25) * util.exp_safe(x * c(3.6) - c(28.8)))
return alpha
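# Illustrative check (not part of the original module): the two curves are
# documented as inverses of each other on alpha >= 0, so a round trip should
# recover the input up to floating-point error::
#
#     alpha = tf.constant([0., 1., 2., 4., 10.], dtype=tf.float64)
#     roundtrip = inv_partition_spline_curve(partition_spline_curve(alpha))
#     # tf.reduce_max(tf.abs(roundtrip - alpha)) should be ~0.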
class Distribution(object):
"""A wrapper class around the distribution."""
def __init__(self):
"""Initialize the distribution.
Load the values, tangents, and x-coordinate scaling of a spline that
approximates the partition function. The spline was produced by running
the script in fit_partition_spline.py.
"""
with util.get_resource_as_file(
'robust_loss/data/partition_spline.npz') as spline_file:
with np.load(spline_file, allow_pickle=False) as f:
self._spline_x_scale = f['x_scale']
self._spline_values = f['values']
self._spline_tangents = f['tangents']
def log_base_partition_function(self, alpha):
r"""Approximate the distribution's log-partition function with a 1D spline.
Because the partition function (Z(\alpha) in the paper) of the distribution
is difficult to model analytically, we approximate it with a (transformed)
cubic hermite spline: Each alpha is pushed through a nonlinearity before
being used to interpolate into a spline, which allows us to use a relatively
small spline to accurately model the log partition function over the range
of all non-negative input values.
Args:
alpha: A tensor or scalar of single or double precision floats containing
the set of alphas for which we would like an approximate log partition
function. Must be non-negative, as the partition function is undefined
when alpha < 0.
Returns:
An approximation of log(Z(alpha)) accurate to within 1e-6
"""
float_dtype = alpha.dtype
# The partition function is undefined when `alpha`< 0.
assert_ops = [tf.Assert(tf.reduce_all(alpha >= 0.), [alpha])]
with tf.control_dependencies(assert_ops):
# Transform `alpha` to the form expected by the spline.
x = partition_spline_curve(alpha)
# Interpolate into the spline.
return cubic_spline.interpolate1d(
x * tf.cast(self._spline_x_scale, float_dtype),
tf.cast(self._spline_values, float_dtype),
tf.cast(self._spline_tangents, float_dtype))
def nllfun(self, x, alpha, scale):
r"""Implements the negative log-likelihood (NLL).
    Specifically, we implement -log(p(x | 0, \alpha, c)) of Equation 16 in the
paper as nllfun(x, alpha, shape).
Args:
x: The residual for which the NLL is being computed. x can have any shape,
and alpha and scale will be broadcasted to match x's shape if necessary.
Must be a tensorflow tensor or numpy array of floats.
      alpha: The shape parameter of the NLL (\alpha in the paper), where lower
        values give heavier tails, so outliers "cost" less and inliers "cost"
        more. Alpha can be any non-negative value, but the gradient of the NLL
with respect to alpha has singularities at 0 and 2 so you may want to
limit usage to (0, 2) during gradient descent. Must be a tensorflow
tensor or numpy array of floats. Varying alpha in that range allows for
smooth interpolation between a Cauchy distribution (alpha = 0) and a
Normal distribution (alpha = 2) similar to a Student's T distribution.
scale: The scale parameter of the loss. When |x| < scale, the NLL is like
that of a (possibly unnormalized) normal distribution, and when |x| >
scale the NLL takes on a different shape according to alpha. Must be a
tensorflow tensor or numpy array of floats.
Returns:
The NLLs for each element of x, in the same shape as x. This is returned
as a TensorFlow graph node of floats with the same precision as x.
"""
# `scale` and `alpha` must have the same type as `x`.
tf.debugging.assert_type(scale, x.dtype)
tf.debugging.assert_type(alpha, x.dtype)
assert_ops = [
# `scale` must be > 0.
tf.Assert(tf.reduce_all(scale > 0.), [scale]),
# `alpha` must be >= 0.
tf.Assert(tf.reduce_all(alpha >= 0.), [alpha]),
]
with tf.control_dependencies(assert_ops):
loss = general.lossfun(x, alpha, scale, approximate=False)
log_partition = (
tf.math.log(scale) + self.log_base_partition_function(alpha))
nll = loss + log_partition
return nll
def draw_samples(self, alpha, scale):
r"""Draw samples from the robust distribution.
    This function implements Algorithm 1 of the paper. This code is written to
allow for sampling from a set of different distributions, each parametrized
by its own alpha and scale values, as opposed to the more standard approach
of drawing N samples from the same distribution. This is done by repeatedly
performing N instances of rejection sampling for each of the N distributions
until at least one proposal for each of the N distributions has been
accepted. All samples assume a zero mean --- to get non-zero mean samples,
just add each mean to each sample.
Args:
alpha: A TF tensor/scalar or numpy array/scalar of floats where each
element is the shape parameter of that element's distribution.
scale: A TF tensor/scalar or numpy array/scalar of floats where each
element is the scale parameter of that element's distribution. Must be
the same shape as `alpha`.
Returns:
A TF tensor with the same shape and precision as `alpha` and `scale` where
each element is a sample drawn from the zero-mean distribution specified
for that element by `alpha` and `scale`.
"""
# `scale` must have the same type as `alpha`.
float_dtype = alpha.dtype
tf.debugging.assert_type(scale, float_dtype)
assert_ops = [
# `scale` must be > 0.
tf.Assert(tf.reduce_all(scale > 0.), [scale]),
# `alpha` must be >= 0.
tf.Assert(tf.reduce_all(alpha >= 0.), [alpha]),
# `alpha` and `scale` must have the same shape.
tf.Assert(
tf.reduce_all(tf.equal(tf.shape(alpha), tf.shape(scale))),
[tf.shape(alpha), tf.shape(scale)]),
]
with tf.control_dependencies(assert_ops):
shape = tf.shape(alpha)
# The distributions we will need for rejection sampling. The sqrt(2)
# scaling of the Cauchy distribution corrects for our differing
# conventions for standardization.
cauchy = tfp.distributions.Cauchy(loc=0., scale=tf.sqrt(2.))
uniform = tfp.distributions.Uniform(low=0., high=1.)
def while_cond(_, accepted):
"""Terminate the loop only when all samples have been accepted."""
return ~tf.reduce_all(accepted)
def while_body(samples, accepted):
"""Generate N proposal samples, and then perform rejection sampling."""
# Draw N samples from a Cauchy, our proposal distribution.
cauchy_sample = tf.cast(cauchy.sample(shape), float_dtype)
# Compute the likelihood of each sample under its target distribution.
nll = self.nllfun(cauchy_sample, alpha, tf.cast(1, float_dtype))
# Bound the NLL. We don't use the approximate loss as it may cause
# unpredictable behavior in the context of sampling.
nll_bound = general.lossfun(
cauchy_sample,
tf.cast(0, float_dtype),
tf.cast(1, float_dtype),
approximate=False) + self.log_base_partition_function(alpha)
# Draw N samples from a uniform distribution, and use each uniform
# sample to decide whether or not to accept each proposal sample.
uniform_sample = tf.cast(uniform.sample(shape), float_dtype)
accept = uniform_sample <= tf.math.exp(nll_bound - nll)
# If a sample is accepted, replace its element in `samples` with the
# proposal sample, and set its bit in `accepted` to True.
samples = tf.where(accept, cauchy_sample, samples)
accepted = accept | accepted
return (samples, accepted)
# Initialize the loop. The first item does not matter as it will get
# overwritten, the second item must be all False.
while_loop_vars = (tf.zeros(shape,
float_dtype), tf.zeros(shape, dtype=bool))
# Perform rejection sampling until all N samples have been accepted.
terminal_state = tf.while_loop(
cond=while_cond, body=while_body, loop_vars=while_loop_vars)
# Because our distribution is a location-scale family, we sample from
# p(x | 0, \alpha, 1) and then scale each sample by `scale`.
samples = tf.multiply(terminal_state[0], scale)
return samples
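# Illustrative usage sketch (not part of the original module), assuming the
# packaged spline data is available: evaluate the NLL of a few residuals and
# draw matching zero-mean samples. Shapes and dtypes must agree as documented
# in nllfun() and draw_samples()::
#
#     dist = Distribution()
#     x = tf.constant([-1., 0., 2.], dtype=tf.float32)
#     alpha = tf.ones_like(x)
#     scale = tf.ones_like(x)
#     nll = dist.nllfun(x, alpha, scale)
#     samples = dist.draw_samples(alpha, scale)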
|
|
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import calendar
import logging
import random
import time
import traceback
from abc import abstractmethod, abstractproperty
from datetime import datetime as dt, datetime
from threading import Thread
from time import sleep
from agora.client.wrapper import Agora
from agora.client.namespaces import AGORA
from concurrent.futures.thread import ThreadPoolExecutor
from rdflib import RDF, RDFS
from redis.lock import Lock
from sdh.curator.daemons.delivery import build_response
from sdh.curator.server import app
from sdh.curator.store import r
from sdh.curator.store.triples import cache, add_stream_triple, load_stream_triples, graph_provider
__author__ = 'Fernando Serena'
log = logging.getLogger('sdh.curator.daemons.fragment')
agora_client = Agora(**app.config['AGORA'])
ON_DEMAND_TH = float(app.config.get('PARAMS', {}).get('on_demand_threshold', 2.0))
MIN_SYNC = int(app.config.get('PARAMS', {}).get('min_sync_time', 10))
N_COLLECTORS = int(app.config.get('PARAMS', {}).get('fragment_collectors', 1))
MAX_CONCURRENT_FRAGMENTS = int(app.config.get('PARAMS', {}).get('max_concurrent_fragments', 8))
COLLECT_THROTTLING = max(1, int(app.config.get('PARAMS', {}).get('collect_throttling', 30)))
log.info("""Fragment daemon setup:
- On-demand threshold: {}
- Minimum sync time: {}
- Maximum concurrent collectors: {}
- Maximum concurrent fragments: {}""".format(ON_DEMAND_TH, MIN_SYNC, N_COLLECTORS,
MAX_CONCURRENT_FRAGMENTS))
thp = ThreadPoolExecutor(max_workers=min(8, MAX_CONCURRENT_FRAGMENTS))
log.info('Cleaning fragment locks...')
fragment_locks = r.keys('*lock*')
for flk in fragment_locks:
r.delete(flk)
log.info('Cleaning fragment pulling flags...')
fragment_pullings = r.keys('fragments:*:pulling')
for fpk in fragment_pullings:
r.delete(fpk)
log.info('Releasing registered fragments...')
fragment_consumers = r.keys('fragments:*:consumers')
for fck in fragment_consumers:
r.delete(fck)
class FragmentPlugin(object):
__plugins = []
@abstractmethod
def consume(self, fid, quad, graph, *args):
pass
@abstractmethod
def complete(self, fid, *args):
pass
@abstractproperty
def sink_class(self):
pass
def sink_aware(self):
return True
@classmethod
def register(cls, p):
if issubclass(p, cls):
cls.__plugins.append(p())
else:
raise ValueError('{} is not a valid fragment plugin'.format(p))
@classmethod
def plugins(cls):
return cls.__plugins[:]
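# Illustrative sketch (not part of the original module): a concrete plugin
# subclasses FragmentPlugin, implements consume()/complete()/sink_class, and
# is then registered so the collector calls it for every collected quad. The
# names below are hypothetical::
#
#     class LoggingPlugin(FragmentPlugin):
#         def consume(self, fid, quad, graph, *args):
#             log.debug('fragment %s got %s' % (fid, str(quad)))
#
#         def complete(self, fid, *args):
#             log.debug('fragment %s completed' % fid)
#
#         @property
#         def sink_class(self):
#             return None
#
#     FragmentPlugin.register(LoggingPlugin)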
def __bind_prefixes(source_graph):
map(lambda (prefix, uri): cache.bind(prefix, uri), source_graph.namespaces())
def map_variables(tp, mapping):
if mapping is None:
return tp
return tuple(map(lambda x: mapping.get(x, x), tp))
def __consume_quad(fid, (c, s, p, o), graph, sinks=None):
def __sink_consume():
for rid in filter(lambda _: isinstance(sinks[_], plugin.sink_class), sinks):
sink = sinks[rid]
try:
plugin.consume(fid, (map_variables(c, sink.mapping), s, p, o), graph, sink)
except Exception as e:
sink.remove()
yield rid
log.warning(e.message)
def __generic_consume():
try:
plugin.consume(fid, (c, s, p, o), graph)
except Exception as e:
log.warning(e.message)
for plugin in FragmentPlugin.plugins():
if plugin.sink_class is not None:
invalid_sinks = list(__sink_consume())
for _ in invalid_sinks:
del sinks[_]
else:
__generic_consume()
def __notify_completion(fid, sinks):
for plugin in FragmentPlugin.plugins():
try:
filtered_sinks = filter(lambda _: isinstance(sinks[_], plugin.sink_class), sinks)
for rid in filtered_sinks:
sink = sinks[rid]
if sink.delivery == 'accepted':
sink.delivery = 'ready'
if plugin.sink_aware:
plugin.complete(fid, sink)
if not plugin.sink_aware:
plugin.complete(fid)
except Exception as e:
log.warning(e.message)
def __triple_pattern(graph, c):
def extract_node_id(node):
nid = node
if (node, RDF.type, AGORA.Variable) in graph:
nid = list(graph.objects(node, RDFS.label)).pop()
elif (node, RDF.type, AGORA.Literal) in graph:
nid = list(graph.objects(node, AGORA.value)).pop()
return nid
predicate = list(graph.objects(c, AGORA.predicate)).pop()
subject_node = list(graph.objects(c, AGORA.subject)).pop()
object_node = list(graph.objects(c, AGORA.object)).pop()
subject = extract_node_id(subject_node)
obj = extract_node_id(object_node)
return str(subject), predicate.n3(graph.namespace_manager), str(obj)
# Cache is used as the triple store for fragments.
# Each fragment is assigned three different main contexts:
# - fid: Where all its triple patterns are persisted
# - /fid: Fragment data
# - (fid, c): Triple pattern based fragment data (1 context per triple pattern, c)
def __replace_fragment(fid):
"""
Recreate fragment <fid> cached data and all its data-contexts from the corresponding stream (Redis)
:param fid:
:return:
"""
tps = cache.get_context(fid).subjects(RDF.type, AGORA.TriplePattern)
cache.remove_context(cache.get_context('/' + fid))
for tp in tps:
cache.remove_context(cache.get_context(str((fid, __triple_pattern(cache, tp)))))
fragment_triples = load_stream_triples(fid, calendar.timegm(dt.now().timetuple()))
for c, s, p, o in fragment_triples:
cache.get_context(str((fid, c))).add((s, p, o))
cache.get_context('/' + fid).add((s, p, o))
with r.pipeline() as pipe:
pipe.delete('fragments:{}:stream'.format(fid))
pipe.execute()
def __cache_plan_context(fid, graph):
"""
Use <graph> to extract the triple patterns of the current fragment <fid> and replace them as the expected context
(triple patterns context) in the cache graph
:param fid:
:param graph:
:return:
"""
try:
fid_context = cache.get_context(fid)
cache.remove_context(fid_context)
tps = graph.subjects(RDF.type, AGORA.TriplePattern)
for tp in tps:
for (s, p, o) in graph.triples((tp, None, None)):
fid_context.add((s, p, o))
for t in graph.triples((o, None, None)):
fid_context.add(t)
    except Exception as e:
log.error(e.message)
def __remove_fragment(fid):
log.debug('Waiting to remove fragment {}...'.format(fid))
lock_key = 'fragments:{}:lock'.format(fid)
lock = r.lock(lock_key, lock_class=Lock)
lock.acquire()
with r.pipeline(transaction=True) as p:
requests, r_sinks = __load_fragment_requests(fid)
__notify_completion(fid, r_sinks)
fragment_keys = r.keys('fragments:{}*'.format(fid))
map(lambda k: p.delete(k), fragment_keys)
p.srem('fragments', fid)
p.execute()
log.info('Fragment {} has been removed'.format(fid))
def __load_fragment_requests(fid):
requests_ = r.smembers('fragments:{}:requests'.format(fid))
sinks_ = {}
for rid in requests_:
try:
sinks_[rid] = build_response(rid).sink
        except Exception as e:
traceback.print_exc()
log.warning(e.message)
with r.pipeline(transaction=True) as p:
p.multi()
p.srem('fragments:{}:requests'.format(fid), rid)
p.execute()
return requests_, sinks_
def __pull_fragment(fid):
tps = r.smembers('fragments:{}:gp'.format(fid))
requests, r_sinks = __load_fragment_requests(fid)
log.info("""Starting collection of fragment {}:
- GP: {}
- Supporting: ({}) {}""".format(fid, list(tps), len(requests), list(requests)))
start_time = datetime.now()
try:
fgm_gen, _, graph = agora_client.get_fragment_generator('{ %s }' % ' . '.join(tps), workers=N_COLLECTORS,
provider=graph_provider, queue_size=N_COLLECTORS)
except Exception:
log.error('Agora is not available')
return
# There is no search plan to execute
if not list(graph.subjects(RDF.type, AGORA.SearchTree)):
log.info('There is no search plan for fragment {}. Removing...'.format(fid))
# TODO: Send additional headers notifying the reason to end
__notify_completion(fid, r_sinks)
__remove_fragment(fid)
return
triple_patterns = {tpn: __triple_pattern(graph, tpn) for tpn in
graph.subjects(RDF.type, AGORA.TriplePattern)}
fragment_contexts = {tpn: (fid, triple_patterns[tpn]) for tpn in triple_patterns}
__bind_prefixes(graph)
lock_key = 'fragments:{}:lock'.format(fid)
lock = r.lock(lock_key, lock_class=Lock)
lock.acquire()
lock_consume_key = 'fragments:{}:lock:consume'.format(fid)
c_lock = r.lock(lock_consume_key, lock_class=Lock)
c_lock.acquire()
# Update fragment contexts
with r.pipeline(transaction=True) as p:
p.multi()
p.set('fragments:{}:pulling'.format(fid), True)
p.delete('fragments:{}:contexts'.format(fid))
for tpn in fragment_contexts.keys():
p.sadd('fragments:{}:contexts'.format(fid), fragment_contexts[tpn])
p.execute()
lock.release()
c_lock.release()
n_triples = 0
fragment_weight = 0
fragment_delta = 0
try:
for (c, s, p, o) in fgm_gen:
pre_ts = datetime.now()
triple_weight = len(u'{}{}{}'.format(s, p, o))
fragment_weight += triple_weight
fragment_delta += triple_weight
lock.acquire()
if add_stream_triple(fid, triple_patterns[c], (s, p, o)):
__consume_quad(fid, (triple_patterns[c], s, p, o), graph, sinks=r_sinks)
lock.release()
if fragment_delta > 1000:
fragment_delta = 0
log.info('Pulling fragment {} [{} kB]'.format(fid, fragment_weight / 1000.0))
if r.scard('fragments:{}:requests'.format(fid)) != len(requests):
requests, r_sinks = __load_fragment_requests(fid)
n_triples += 1
post_ts = datetime.now()
elapsed = (post_ts - pre_ts).total_seconds()
excess = (1.0 / COLLECT_THROTTLING) - elapsed
if excess > 0:
sleep(excess)
    except Exception as e:
traceback.print_exc()
elapsed = (datetime.now() - start_time).total_seconds()
log.info(
'{} triples retrieved for fragment {} in {} s [{} kB]'.format(n_triples, fid, elapsed,
fragment_weight / 1000.0))
lock.acquire()
c_lock.acquire()
__replace_fragment(fid)
log.info('Fragment {} data has been replaced with the recently collected'.format(fid))
__cache_plan_context(fid, graph)
log.info('BGP context of fragment {} has been cached'.format(fid))
with r.pipeline(transaction=True) as p:
p.multi()
sync_key = 'fragments:{}:sync'.format(fid)
demand_key = 'fragments:{}:on_demand'.format(fid)
# Fragment is now synced
p.set(sync_key, True)
# If the fragment collection time has not exceeded the threshold, switch to on-demand mode
if elapsed < ON_DEMAND_TH and elapsed * random.random() < ON_DEMAND_TH / 4:
p.set(demand_key, True)
log.info('Fragment {} has been switched to on-demand mode'.format(fid))
else:
p.delete(demand_key)
min_durability = int(max(MIN_SYNC, elapsed))
durability = random.randint(min_durability, min_durability * 2)
p.expire(sync_key, durability)
log.info('Fragment {} is considered synced for {} s'.format(fid, durability))
p.set('fragments:{}:updated'.format(fid), dt.now())
p.delete('fragments:{}:pulling'.format(fid))
p.execute()
c_lock.release()
__notify_completion(fid, r_sinks)
lock.release()
def __collect_fragments():
registered_fragments = r.scard('fragments')
synced_fragments = len(r.keys('fragments:*:sync'))
log.info("""Collector daemon started:
- Fragments: {}
- Synced: {}""".format(registered_fragments, synced_fragments))
futures = {}
while True:
for fid in filter(
lambda x: r.get('fragments:{}:sync'.format(x)) is None and r.get(
'fragments:{}:pulling'.format(x)) is None,
r.smembers('fragments')):
if fid in futures:
if futures[fid].done():
del futures[fid]
if fid not in futures:
futures[fid] = thp.submit(__pull_fragment, fid)
time.sleep(1)
th = Thread(target=__collect_fragments)
th.daemon = True
th.start()
|
|
# Copyright (c) 2011 Martin Vilcans
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
from __future__ import with_statement
import os
import os.path
from screenplain.types import *
from screenplain.richstring import plain
class tag(object):
"""Handler for automatically opening and closing a tag.
E.g.
>>> import sys
>>> from __future__ import with_statement
>>> with tag(sys.stdout, 'div'):
... print('hello')
...
<div>hello
</div>
Adding classes to the element is possible:
>>> with tag(sys.stdout, 'div', classes=['action']):
... print('hello')
<div class="action">hello
</div>
>>> with tag(sys.stdout, 'div', classes=['action', 'centered']):
... print('hello')
<div class="action centered">hello
</div>
"""
def __init__(self, out, tag, classes=None):
self.out = out
self.tag = tag
self.classes = classes
def __enter__(self):
if self.classes:
self.out.write('<%s class="%s">' % (
self.tag,
' '.join(self.classes)
))
else:
self.out.write('<%s>' % self.tag)
def __exit__(self, exception_type, value, traceback):
if not exception_type:
self.out.write('</%s>' % self.tag)
return False
def to_html(text):
html = text.to_html()
    if html == '':
        return '&nbsp;'
else:
return html
class Formatter(object):
"""Class for converting paragraphs into HTML."""
def __init__(self, out):
"""Initializes the formatter.
`out` is a file-like object to write to.
After initializing, call the convert function to convert
any number of paragraphs.
"""
self.out = out
self._format_functions = {
Slug: self.format_slug,
Action: self.format_action,
Dialog: self.format_dialog,
DualDialog: self.format_dual,
Transition: self.format_transition,
Section: self.format_section,
PageBreak: self.format_page_break,
}
def convert(self, screenplay):
"""Converts a number of paragraphs into HTML and writes
it to the output stream.
`screenplay` is a sequence of paragraphs.
"""
self.page_break_before_next = False
for para in screenplay:
format_function = self._format_functions.get(type(para), None)
if format_function:
format_function(para)
self.out.write('\n')
def format_dialog(self, dialog):
with self._tag('div', classes=['dialog']):
self._write_dialog_block(dialog)
def format_dual(self, dual):
with self._tag('div', classes=['dual']):
with self._tag('div', classes=['left']):
self._write_dialog_block(dual.left)
with self._tag('div', classes=['right']):
self._write_dialog_block(dual.right)
self.out.write('<br />')
def _write_dialog_block(self, dialog):
with self._tag('p', classes=['character']):
self.out.write(to_html(dialog.character))
for parenthetical, text in dialog.blocks:
classes = ['parenthetical'] if parenthetical else None
with self._tag('p', classes=classes):
self.out.write(to_html(text))
def format_slug(self, slug):
num = slug.scene_number
with self._tag('h6'):
if num:
with self._tag('span', classes=['scnuml']):
self.out.write(to_html(slug.scene_number))
self.out.write(to_html(slug.line))
if num:
with self._tag('span', classes=['scnumr']):
self.out.write(to_html(slug.scene_number))
if slug.synopsis:
with self._tag('span', classes=['h6-synopsis']):
self.out.write(to_html(plain(slug.synopsis)))
def format_section(self, section):
with self._tag('h%d' % section.level):
self.out.write(to_html(section.text))
if section.synopsis:
with self._tag('span', classes=['h%d-synopsis' % section.level]):
self.out.write(to_html(plain(section.synopsis)))
def format_action(self, para):
classes = ['action']
if para.centered:
classes.append('centered')
with self._tag('div', classes=classes):
with self._tag('p'):
for number, line in enumerate(para.lines):
if number != 0:
self.out.write('<br/>')
self.out.write(to_html(line))
def format_transition(self, para):
with self._tag('div', classes=['transition']):
self.out.write(to_html(para.line))
def format_page_break(self, para):
self.page_break_before_next = True
def _tag(self, tag_name, classes=[]):
if self.page_break_before_next:
self.page_break_before_next = False
classes = set(classes).union(('page-break',))
return tag(self.out, tag_name, classes)
def _read_file(filename):
    with open(filename) as stream:
return stream.read()
def convert(screenplay, out, css_file=None, bare=False):
"""Convert the screenplay into HTML, written to the file-like object `out`.
The output will be a complete HTML document unless `bare` is true.
"""
if bare:
convert_bare(screenplay, out)
else:
convert_full(
screenplay, out,
css_file or os.path.join(os.path.dirname(__file__), 'default.css')
)
def convert_full(screenplay, out, css_file):
"""Convert the screenplay into a complete HTML document,
written to the file-like object `out`.
"""
with open(css_file, 'r') as stream:
css = stream.read()
out.write(
'<!DOCTYPE html>\n'
'<html>'
'<head>'
'<title>Screenplay</title>'
'<style type="text/css">'
)
out.write(css)
out.write(
'</style>'
'</head>'
'<body>'
'<div id="wrapper" class="screenplay">\n'
)
convert_bare(screenplay, out)
out.write(
'</div>'
'</body>'
'</html>\n'
)
def convert_bare(screenplay, out):
"""Convert the screenplay into HTML, written to the file-like object `out`.
Does not create a complete HTML document, as it doesn't include
<html>, <body>, etc.
"""
formatter = Formatter(out)
formatter.convert(screenplay)
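# Illustrative usage sketch (not part of the original module): converting an
# already-parsed screenplay (a sequence of paragraph objects, as convert()
# expects) to a bare HTML fragment in memory::
#
#     from StringIO import StringIO
#     out = StringIO()
#     convert(screenplay, out, bare=True)
#     html_fragment = out.getvalue()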
|
|
#!/usr/bin/env python
#sidgan
from pylab import *
import pylab
import scipy
import os
import numpy
import sys
import sklearn
from sklearn.kernel_approximation import RBFSampler
import sklearn.cluster
import optparse
from sklearn.decomposition import RandomizedPCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn import linear_model
from sklearn.linear_model import SGDClassifier
from sklearn.decomposition import PCA
from sklearn import tree
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import GaussianNB
from sklearn import cross_validation
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from sklearn import svm
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.cross_validation import cross_val_score
import random
import csv as csv
import pandas as pd
import numpy as np
import warnings
warnings.simplefilter('ignore', DeprecationWarning)
import matplotlib.pyplot as plt
import Image
from sklearn.ensemble import AdaBoostClassifier
import pylab as pl
from PIL import Image
#setup a standard image size; this will distort some images but will get everything into the same shape
STANDARD_SIZE = (300, 200)
def img_to_matrix(filename, verbose=False):
"""
takes a filename and turns it into a numpy array of RGB pixels
"""
img = Image.open(filename)
if verbose==True:
print "changing size from %s to %s" % (str(img.size), str(STANDARD_SIZE))
img = img.resize(STANDARD_SIZE)
img = list(img.getdata())
img = map(list, img)
img = np.array(img)
return img
def flatten_image(img):
"""
takes in an (m, n) numpy array and flattens it
into an array of shape (1, m * n)
"""
s = img.shape[0] * img.shape[1]
img_wide = img.reshape(1, s)
return img_wide[0]
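#Illustrative sketch (not part of the original script): with STANDARD_SIZE =
#(300, 200), img_to_matrix() yields a (60000, 3) array of RGB pixels and
#flatten_image() turns that into a single (180000,) feature vector, e.g.:
#
#   example = img_to_matrix("some_image.png")  # hypothetical filename
#   example = flatten_image(example)           # shape: (180000,)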
#LOCATION OF DATA AND IMPORT
img_dir = "/images/"
images = [img_dir+ f for f in os.listdir(img_dir)]
labels = ["alpha" if "a" in f.split('/')[-1] else "muon" for f in images]
data = []
for image in images:
img = img_to_matrix(image)
img = flatten_image(img)
data.append(img)
data = np.array(data)
is_train = np.random.uniform(0, 1, len(data)) <= 0.7
y = np.where(np.array(labels)=="alpha", 1, 0)  # 1 = "alpha" image, 0 = "muon"
train_x, train_y = data[is_train], y[is_train]
test_x, test_y = data[is_train==False], y[is_train==False]
pca = RandomizedPCA(n_components=2)
X = pca.fit_transform(data)
df = pd.DataFrame({"x": X[:, 0], "y": X[:, 1], "label":np.where(y==1, "ALPHA", "MUON")})
colors = ["red", "yellow"]
for label, color in zip(df['label'].unique(), colors):
mask = df['label']==label
pl.scatter(df[mask]['x'], df[mask]['y'], c=color, label=label)
pl.legend()
pl.show()
pca = RandomizedPCA(n_components=5)
train_x = pca.fit_transform(train_x)
test_x = pca.transform(test_x)
#PERFORMS CROSS VALIDATION
def cal_score(method, clf, features_test, target_test):
scores = cross_val_score(clf, features_test, target_test)
print method + " : %f " % scores.max()
#print scores.max()
knn = KNeighborsClassifier()
knn.fit(train_x, train_y)
#print predictions
print pd.crosstab(test_y, knn.predict(test_x), rownames=["Actual"], colnames =["Predicted"])
clf_ada = AdaBoostClassifier(n_estimators=100)
params = {
'learning_rate': [.05, .1,.2,.3,2,3, 5],
'max_features': [.25,.50,.75,1],
'max_depth': [3,4,5],
}
gs = GridSearchCV(clf_ada, params, cv=5, scoring ='accuracy', n_jobs=4)
clf_ada.fit(train_x, train_y)
cal_score("ADABOOST",clf_ada, test_x, test_y)
features_test = test_x
target_test = test_y
features_train = train_x
target_train = train_y
prob = 1
#Naive Bayes
nb_estimator = GaussianNB()
nb_estimator.fit(features_train, target_train)
cal_score("NAIVE BAYES CLASSIFICATION",nb_estimator, features_test, target_test)
#predictions = nb_estimator.predict(test)
#SVC Ensemble
rf = RandomForestClassifier(n_estimators=100)
rf = rf.fit(features_train, target_train)
cal_score("RANDOM FOREST CLASSIFIER",rf, features_test, target_test)
predictions = rf.predict_proba(features_test)
print predictions
#Gradient Boosting
gb = GradientBoostingClassifier(n_estimators=100, subsample=.8)
params = {
'learning_rate': [.05, .1,.2,.3,2,3, 5],
'max_features': [.25,.50,.75,1],
'max_depth': [3,4,5],
}
gs = GridSearchCV(gb, params, cv=5, scoring ='accuracy', n_jobs=4)
gs.fit(features_train, target_train)
#predictions = gs.predict_proba(test)
#print predictions
cal_score("GRADIENT BOOSTING",gs, features_test, target_test)
#sorted(gs.grid_scores_, key = lambda x: x.mean_validation_score)
#print gs.best_score_
#print gs.best_params_
#predictions = gs.predict_proba(test)
#KERNEL APPROXIMATIONS - RBF
rbf_feature = RBFSampler(gamma=1, random_state=1)
X_features = rbf_feature.fit_transform(data)
#SGD CLASSIFIER
clf = SGDClassifier(alpha=0.0001, class_weight=None, epsilon=0.1, eta0=0.0,
fit_intercept=True, l1_ratio=0.15, learning_rate='optimal',
loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5,
random_state=None, shuffle=True, verbose=0,
warm_start=False)
clf.fit(features_train, target_train)
cal_score("SGD Regression",clf, features_test, target_test)
#KN Classifier
neigh = KNeighborsClassifier(n_neighbors = 1)
neigh.fit(features_train, target_train)
cal_score("KN CLASSIFICATION",neigh, features_test, target_test)
clf_tree = tree.DecisionTreeClassifier(max_depth=10)
clf_tree.fit(features_train, target_train)
cal_score("DECISION TREE CLASSIFIER",clf_tree, features_test, target_test)
#LOGISTIC REGRESSION
logreg = LogisticRegression(C=3)
logreg.fit(features_train, target_train)
cal_score("LOGISTIC REGRESSION",logreg, features_test, target_test)
#predictions = logreg.predict(test)
# SUPPORT VECTOR MACHINES
clf = svm.SVC(kernel = 'linear')
clf.fit(features_train, target_train)
cal_score("LINEAR KERNEL",clf, features_test, target_test)
#print clf.kernel
#for sigmoid kernel
clf= svm.SVC(kernel='rbf', C=2).fit(features_train, target_train)
cal_score("SVM RBF KERNEL",clf, features_test, target_test)
#predictions = clf.predict(test)
#Lasso
clf = linear_model.Lasso(alpha=.1)
clf.fit(features_train, target_train)
cal_score("LASSO",clf, features_test, target_test)
#elastic net
clf = linear_model.ElasticNet(alpha=.1, l1_ratio=.5, fit_intercept=True, normalize=False, precompute='auto',max_iter=1000, copy_X=True, tol =.0001, warm_start=False, positive=False)
clf.fit(features_train, target_train)
cal_score("ELASTIC NET",clf, features_test, target_test)
#SGD REGRESSION
clf = SGDClassifier(alpha=0.0001, class_weight=None, epsilon=0.1, eta0=0.0,
fit_intercept=True, l1_ratio=0.15, learning_rate='optimal',
loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5,
random_state=None, shuffle=True, verbose=0,
warm_start=False)
clf.fit(features_train, target_train)
cal_score("SGD Regression",clf, features_test, target_test)
prob = 3
#MINI BATCH K MEANS CLUSTERING
clf = sklearn.cluster.MiniBatchKMeans(init='k-means++', max_iter=100, batch_size=100, verbose=0, compute_labels=True, random_state=None, tol=0.0, max_no_improvement=10, init_size=None, n_init=3, reassignment_ratio=0.01)
clf.fit(features_train, target_train)
#MEAN SHIFT
clf = sklearn.cluster.MeanShift(bandwidth=None, seeds=[features_train, target_train], bin_seeding=False, min_bin_freq=1, cluster_all=True)
#clf.fit([features_train, target_train])
#clf.fit(data, target)
#if options.cross_validation == 'True':
# cal_score("MEAN SHIFT CLUSTERING",clf, features_test, target_test)
#K MEANS CLUSTERING
clf = sklearn.cluster.KMeans( init='k-means++', n_init=10, max_iter=300, tol=0.0001, precompute_distances=True, verbose=0, random_state=None, copy_x=True, n_jobs=1)
clf.fit(data)
#if options.cross_validation == 'True':
# cal_score("K MEANS CLUSTERING",clf, features_test, target_test)
prob = 4
#PCA
pca = PCA(n_components=1)
pca_train = pca.fit(data)
pca_test = pca.transform(data[is_train==False])
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class csvserver_binding(base_resource):
""" Binding class showing the resources that can be bound to csvserver_binding.
"""
def __init__(self) :
self._name = ""
self.csvserver_spilloverpolicy_binding = []
self.csvserver_auditnslogpolicy_binding = []
self.csvserver_filterpolicy_binding = []
self.csvserver_cmppolicy_binding = []
self.csvserver_lbvserver_binding = []
self.csvserver_appflowpolicy_binding = []
self.csvserver_responderpolicy_binding = []
self.csvserver_transformpolicy_binding = []
self.csvserver_feopolicy_binding = []
self.csvserver_authorizationpolicy_binding = []
self.csvserver_rewritepolicy_binding = []
self.csvserver_cachepolicy_binding = []
self.csvserver_cspolicy_binding = []
self.csvserver_auditsyslogpolicy_binding = []
self.csvserver_tmtrafficpolicy_binding = []
self.csvserver_appfwpolicy_binding = []
@property
def name(self) :
"""Name of a content switching virtual server for which to display information, including the policies bound to the virtual server. To display a list of all configured Content Switching virtual servers, do not specify a value for this parameter.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of a content switching virtual server for which to display information, including the policies bound to the virtual server. To display a list of all configured Content Switching virtual servers, do not specify a value for this parameter.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def csvserver_auditnslogpolicy_bindings(self) :
"""auditnslogpolicy that can be bound to csvserver.
"""
try :
return self._csvserver_auditnslogpolicy_binding
except Exception as e:
raise e
@property
def csvserver_tmtrafficpolicy_bindings(self) :
"""tmtrafficpolicy that can be bound to csvserver.
"""
try :
return self._csvserver_tmtrafficpolicy_binding
except Exception as e:
raise e
@property
def csvserver_lbvserver_bindings(self) :
"""lbvserver that can be bound to csvserver.
"""
try :
return self._csvserver_lbvserver_binding
except Exception as e:
raise e
@property
def csvserver_responderpolicy_bindings(self) :
"""responderpolicy that can be bound to csvserver.
"""
try :
return self._csvserver_responderpolicy_binding
except Exception as e:
raise e
@property
def csvserver_cachepolicy_bindings(self) :
"""cachepolicy that can be bound to csvserver.
"""
try :
return self._csvserver_cachepolicy_binding
except Exception as e:
raise e
@property
def csvserver_filterpolicy_bindings(self) :
"""filterpolicy that can be bound to csvserver.
"""
try :
return self._csvserver_filterpolicy_binding
except Exception as e:
raise e
@property
def csvserver_transformpolicy_bindings(self) :
"""transformpolicy that can be bound to csvserver.
"""
try :
return self._csvserver_transformpolicy_binding
except Exception as e:
raise e
@property
def csvserver_appflowpolicy_bindings(self) :
"""appflowpolicy that can be bound to csvserver.
"""
try :
return self._csvserver_appflowpolicy_binding
except Exception as e:
raise e
@property
def csvserver_authorizationpolicy_bindings(self) :
"""authorizationpolicy that can be bound to csvserver.
"""
try :
return self._csvserver_authorizationpolicy_binding
except Exception as e:
raise e
@property
def csvserver_appfwpolicy_bindings(self) :
"""appfwpolicy that can be bound to csvserver.
"""
try :
return self._csvserver_appfwpolicy_binding
except Exception as e:
raise e
@property
def csvserver_auditsyslogpolicy_bindings(self) :
"""auditsyslogpolicy that can be bound to csvserver.
"""
try :
return self._csvserver_auditsyslogpolicy_binding
except Exception as e:
raise e
@property
def csvserver_cmppolicy_bindings(self) :
"""cmppolicy that can be bound to csvserver.
"""
try :
return self._csvserver_cmppolicy_binding
except Exception as e:
raise e
@property
def csvserver_rewritepolicy_bindings(self) :
"""rewritepolicy that can be bound to csvserver.
"""
try :
return self._csvserver_rewritepolicy_binding
except Exception as e:
raise e
@property
def csvserver_spilloverpolicy_bindings(self) :
"""spilloverpolicy that can be bound to csvserver.
"""
try :
return self._csvserver_spilloverpolicy_binding
except Exception as e:
raise e
@property
def csvserver_cspolicy_bindings(self) :
"""cspolicy that can be bound to csvserver.
"""
try :
return self._csvserver_cspolicy_binding
except Exception as e:
raise e
@property
def csvserver_feopolicy_bindings(self) :
"""feopolicy that can be bound to csvserver.
"""
try :
return self._csvserver_feopolicy_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(csvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.csvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, name) :
""" Use this API to fetch csvserver_binding resource.
"""
try :
if type(name) is not list :
obj = csvserver_binding()
obj.name = name
response = obj.get_resource(service)
else :
if name and len(name) > 0 :
                    response = [None for _ in range(len(name))]
                    obj = [csvserver_binding() for _ in range(len(name))]
for i in range(len(name)) :
obj[i].name = name[i];
response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
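# Illustrative usage sketch (not part of the generated SDK file): given an
# authenticated nitro_service session (here a hypothetical ``client``), the
# bindings of one content switching virtual server are fetched by name::
#
#     bindings = csvserver_binding.get(client, "cs_vs_example")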
class csvserver_binding_response(base_response) :
def __init__(self, length=1) :
self.csvserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.csvserver_binding = [csvserver_binding() for _ in range(length)]
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ApplicationSecurityGroupsOperations(object):
"""ApplicationSecurityGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
application_security_group_name=application_security_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationSecurityGroup"
"""Gets information about the specified application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationSecurityGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.ApplicationSecurityGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
parameters, # type: "_models.ApplicationSecurityGroup"
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationSecurityGroup"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ApplicationSecurityGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
parameters, # type: "_models.ApplicationSecurityGroup"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ApplicationSecurityGroup"]
"""Creates or updates an application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:param parameters: Parameters supplied to the create or update ApplicationSecurityGroup
operation.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.ApplicationSecurityGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ApplicationSecurityGroup or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_12_01.models.ApplicationSecurityGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
application_security_group_name=application_security_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationSecurityGroup"
"""Updates an application security group's tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:param parameters: Parameters supplied to update application security group tags.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationSecurityGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.ApplicationSecurityGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ApplicationSecurityGroupListResult"]
"""Gets all application security groups in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.ApplicationSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationSecurityGroups'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ApplicationSecurityGroupListResult"]
"""Gets all the application security groups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.ApplicationSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ApplicationSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups'} # type: ignore
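# ---------------------------------------------------------------------------
# Hypothetical usage sketch (illustrative only): these operations are normally
# reached through NetworkManagementClient rather than instantiated directly.
# The subscription id, resource group and ASG name below are placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    network_client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")

    poller = network_client.application_security_groups.begin_create_or_update(
        "example-rg",
        "example-asg",
        {"location": "westus"},
    )
    asg = poller.result()  # block until the long-running operation completes
    print(asg.name, asg.provisioning_state)

    # Page through the groups in the same resource group.
    for group in network_client.application_security_groups.list("example-rg"):
        print(group.id)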
|
|
from django.conf.urls import patterns, include, url
from citizengrid import cg_api_views
import os
from . import settings
# Enable the admin:
from django.contrib import admin
admin.autodiscover()
# Routers provide an easy way of automatically determining the URL conf.
router = cg_api_views.HybridRouter()
# Only admin can see this view
router.register(r'users', cg_api_views.UserViewSet)
router.register(r'groups', cg_api_views.GroupViewSet) # All registered users
router.add_api_view("apps",
url(r'^apps/$',
cg_api_views.ApplicationListPublicView.as_view(),
name='ApplicationListPublicView'))
router.add_api_view("myapps",
url(r'^myapps/$',
cg_api_views.MyApplicationListView.as_view(),
name='MyApplicationListView'))
router.add_api_view("branches",
url(r'^branches/$',
cg_api_views.BranchListView.as_view(),
name='BranchListView'))
router.add_api_view("categories",
url(r'^categories/$',
cg_api_views.CategoryListView.as_view(),
name='CategoryListView'))
router.add_api_view("subcategories",
url(r'^subcategories/$',
cg_api_views.SubCategoryListView.as_view(),
name='SubCategoryListView'))
router.add_api_view("usercredentials",
url(r'^usercredentials/$',
cg_api_views.UserCloudCredentialsListView.as_view(),
name='UserCloudCredentialsListView'))
urlpatterns = patterns('',
# Examples:
url(r'^$', 'citizengrid.views.home', name='home'),
url(r'^about', 'citizengrid.views.about', name='about'),
url(r'^contact',
'citizengrid.views.contact',
name='contact'),
url(r'^doc',
'citizengrid.views.userdoc',
name='userdoc'),
url(r'^media/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
url(r'^static/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': settings.STATIC_ROOT}),
url(r'^cg$',
'citizengrid.secure_views.cg',
name='cg_home'),
url(r'^js/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': os.path.join(settings.STATIC_ROOT,
'js')}),
url(r'^css/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': os.path.join(settings.STATIC_ROOT,
'css')}),
url(r'^img/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': os.path.join(settings.STATIC_ROOT,
'img')}),
url(r'^font/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': os.path.join(settings.STATIC_ROOT,
'fonts')}),
url(r'^fonts/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': os.path.join(settings.STATIC_ROOT,
'fonts')}),
url(r'^cg/upload$',
'citizengrid.secure_views.upload',
name='jfu_upload'),
url(r'^cg/delupload/(?P<uploadid>\d+)$',
'citizengrid.secure_views.delupload',
name='del_file'),
url(r'^cg/launchapp/(?P<appid>\d+)/(?P<launchtype>\w+)/(?P<apptag>.*)$',
'citizengrid.launch_views.launchapp',
name='launch_app'),
url(r'^cg/launchserver/(?P<appid>\d+)/(?P<launchtype>\w*)$',
'citizengrid.launch_views.launchserver',
name='launch_server'),
url(
r'^cg/manage/instances/(?P<task>\w+)/(?P<appid>\w*)/(?P<instancerecord>[-\w]*)$',
'citizengrid.launch_views.manage_instances',
name='manage_instances'),
url(r'^cg/manage/cred$',
'citizengrid.secure_views.manage_credentials',
name='manage_credentials'),
url(r'^cg/manage/group/applist$',
'citizengrid.secure_views.application_list',
name='application_list'),
url(r'^cg/manage/group$',
'citizengrid.secure_views.manage_groups',
name='manage_groups'),
url(r'^cg/manage/group/edit/(?P<id>\d+)$',
'citizengrid.secure_views.edit_group',
name='edit_group'),
url(r'^cg/manage/group/create/$',
'citizengrid.secure_views.add_group',
name='add_group'),
url(r'^cg/manage/groups/$',
'citizengrid.secure_views.groups',
        name='groups'),
url(r'^cg/manage/group/delete/(?P<id>\d+)$',
'citizengrid.secure_views.delete_group',
name='delete_group'),
url(r'^cg/manage/group/detail/(?P<id>\d+)$',
'citizengrid.secure_views.detail_group',
        name='detail_group'),
url(r'^cg/manage/group/leave/(?P<id>\d+)$',
'citizengrid.secure_views.leave_group',
name='leave_group'),
url(r'^cg/manage/group/join/$',
'citizengrid.secure_views.join_group',
name='join_group'),
url(r'^cg/manage/group/attachapptogrp/(?P<id>\d+)$',
'citizengrid.secure_views.attach_app_to_group',
name='attach_app_to_group'),
url(r'^cg/manage/group/detachfromgroup/(?P<id>\d+)$',
'citizengrid.secure_views.detach_app_from_group',
name='detach_app_from_group'),
url(r'^cg/manage/group/applicationgrouptagdetail/$',
'citizengrid.secure_views.application_grp_tag_detail',
name='application_grp_tag_detail'),
url(r'^cg/manage/applicationgrouptag/(?P<id>\d+)$',
'citizengrid.secure_views.application_grp_tag',
name='application_grp_tag'),
url(r'^cg/manage/updateaccount$',
'citizengrid.secure_views.update_user_account',
name='update_user_account'),
url(r'^cg/manage/cred/cloud$',
'citizengrid.secure_views.add_update_credentials',
name='add_update_credentials'),
url(r'^cg/manage/images/(?P<app>\w+)$',
'citizengrid.secure_views.manage_images',
name='manage_images'),
url(r'^cg/info/servers$',
'citizengrid.secure_views.get_running_servers',
name='get_running_servers'),
url(r'^cg/info/cloudcredentials$',
'citizengrid.secure_views.get_cloud_credentials',
name='get_cloud_credentials'),
url(r'^accounts/login/$',
'django.contrib.auth.views.login'),
url(r'^accounts/logout/$',
'django.contrib.auth.views.logout'),
url(r'^accounts/register/$',
'citizengrid.views.register'),
url(r'^accounts/confirmation/$',
'citizengrid.views.register_confirmation'),
url(r'^accounts/', include('password_reset.urls')),
url(r'^gettabledata/$',
'citizengrid.secure_views.getTableData'),
url(r'^getuserapps/$',
'citizengrid.secure_views.getUserApps'),
url(r'^cg/appdetail/(?P<appid>\w+)$',
'citizengrid.secure_views.application_detail'),
url(r'^cg/myappdetail/(?P<appid>\w+)$',
'citizengrid.secure_views.my_application'),
url(r'^cg/delete/(?P<id>[\w-]+)$',
'citizengrid.secure_views.cg_delete'),
(r'^branch/(?P<branch>\d+)/all_json_category/$',
'citizengrid.views.all_json_category'),
(r'^category/(?P<category>\d+)/all_json_subcategory/$',
'citizengrid.views.all_json_subcategory'),
url(r'^cg/app/wizard$',
'citizengrid.secure_views.wrapped_wizard_view',
name='wrapped_wizard_view'),
# url(r'^api/citizengrid/apps/(?P<appid>\d+)/$',
# cg_api_views.ApplicationDetailListView.as_view(),
# name='ApplicationDetailListView'),
url(r'^api/citizengrid/myapps/(?P<appid>\d+)/$',
cg_api_views.MyApplicationDetailListView.as_view(),
name='MyApplicationDetailListView'),
# start the application locally
url(r'^api/citizengrid/apps/(?P<appid>\d+)/startapp/$',
'citizengrid.cg_api_views.startapp_locally',
name='start'),
url(r'^api/citizengrid/apps/(?P<appid>\d+)/files/$',
cg_api_views.ApplicationFileList.as_view(),
name='ApplicationFileList'),
url(
r'^api/citizengrid/apps/(?P<appid>\d+)/files/(?P<fileid>\d+)/$',
cg_api_views.ApplicationFileDetail.as_view(),
name='ApplicationFileDetail'),
# OSImagesList
url(r'^api/citizengrid/apps/(?P<appid>\d+)/osimages/$',
cg_api_views.ApplicationOpenstackImagesList.as_view(),
name='ApplicationOpenstackImagesList'),
# OsImage Detail url
url(
r'^api/citizengrid/apps/(?P<appid>\d+)/osimages/(?P<fileid>\d+)/$',
cg_api_views.ApplicationOpenstackImageDetail.as_view(),
name='ApplicationOpenstackImageDetail'),
# EC2ImagesList
url(r'^api/citizengrid/apps/(?P<appid>\d+)/ec2images/$',
cg_api_views.ApplicationEc2ImagesList.as_view(),
name='ApplicationEc2ImagesList'),
# EC2Image Detail url
url(
r'^api/citizengrid/apps/(?P<appid>\d+)/ec2images/(?P<fileid>\d+)/$',
cg_api_views.ApplicationEc2ImageDetail.as_view(),
name='ApplicationEc2ImageDetail'),
# start EC2 app on cloud
url(
r'^api/citizengrid/apps/(?P<appid>\d+)/ec2images/(?P<fileid>\d+)/startserver/$',
cg_api_views.start_Ec2server,
name='start_Ec2server'),
# start OpenStack app on cloud
url(
r'^api/citizengrid/apps/(?P<appid>\d+)/osimages/(?P<fileid>\d+)/startserver/$',
cg_api_views.start_OpenstackServer,
name='start_OpenstackServer'),
# Credentials list
url(r'^api/citizengrid/usercredentials/(?P<credid>\d+)/$',
cg_api_views.UserCloudCredentialsDetailView.as_view(),
name='UserCloudCredentialsDetailView'),
# Openstack Instances
url(r'^api/citizengrid/apps/(?P<appid>\d+)/osinstances/$',
cg_api_views.CloudInstancesList.as_view(),
name='CloudInstancesList'),
url(
r'^api/citizengrid/apps/(?P<appid>\d+)/osinstances/(?P<instanceid>([a-zA-Z])-([0-9a-zA-Z])+)/$',
cg_api_views.CloudInstancesDetail.as_view(),
name='CloudInstancesDetail'),
# AWS Instances
url(r'^api/citizengrid/apps/(?P<appid>\d+)/awsinstances/$',
cg_api_views.AWSCloudInstancesList.as_view(),
name='AWSCloudInstancesList'),
url(
r'^api/citizengrid/apps/(?P<appid>\d+)/awsinstances/(?P<instanceid>([a-zA-Z])-([0-9a-zA-Z])+)/$',
cg_api_views.AWSCloudInstancesDetail.as_view(),
name='AWSCloudInstancesDetail'),
# stop instance
url(
r'^api/citizengrid/apps/(?P<appid>\d+)/instances/(?P<instanceid>([a-zA-Z])-([0-9a-zA-Z])+)/stop$',
cg_api_views.stopinstance,
name='stopinstance'),
# Group web api urls
# List all groups
url(r'^api/citizengrid/manage/group$',
cg_api_views.MyGroupList.as_view(),
name='MyGroupList'),
url(r'^api/citizengrid/manage/group/(?P<pk>\d+)$',
cg_api_views.MyGroupDetailView.as_view(),
name='MyGroupDetailView'),
url(r'^api/citizengrid/manage/group/(?P<groupid>\d+)/leave$',
cg_api_views.leave_group,
name='leave_group'),
url(r'^api/citizengrid/manage/group/join',
cg_api_views.join_group,
name='join_group'),
url(r'^api/citizengrid/manage/group/(?P<groupid>\d+)/attachapp$',
cg_api_views.attachapp,
name='attachapp'),
url(r'^api/citizengrid/manage/group/detachapp/(?P<appid>\d+)$',
cg_api_views.detachapp,
name='detachapp'),
url(r'^api/citizengrid/', include(router.urls)),
url(r'^api-auth/citizengrid/',
include('rest_framework.urls',
namespace='rest_framework')),
# admin
url(r'^admin/', include(admin.site.urls)),
)
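# ---------------------------------------------------------------------------
# Hypothetical helper (illustrative only, not referenced by the patterns
# above): because the routes are named, other code should resolve them with
# reverse() rather than hard-coding paths. The ids below are examples.
# ---------------------------------------------------------------------------
def _example_reverse_urls():
    from django.core.urlresolvers import reverse
    return {
        'launch': reverse('launch_app',
                          kwargs={'appid': 1, 'launchtype': 'local', 'apptag': 'demo'}),
        'group_detail': reverse('MyGroupDetailView', kwargs={'pk': 1}),
    }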
|
|
#!/usr/bin/env python
from abc import ABCMeta, abstractmethod
from os import listdir, path
from re import findall
from urllib2 import urlopen
class Parser:
__metaclass__ = ABCMeta
def __init__(self, url, regex):
self.url = url
self.regex = regex
def newest(self):
if self.url is None:
return False
else:
request = urlopen(self.url)
content = self.content(request.read(), request.geturl())
matches = findall(self.regex, content)
result = matches[0][0] if (len(matches) > 0 and len(matches[0]) > 0) else None
if result is None:
raise Exception('Failed to parse content for version')
else:
return result
@abstractmethod
def content(self, content, url):
pass
class AdColony(Parser):
def __init__(self):
Parser.__init__(
self,
'https://raw.githubusercontent.com/AdColony/AdColony-Android-SDK-3/master/CHANGELOG.md',
r'\#\#\s+((\d+\.)+\d+)')
def content(self, content, url):
return content
class AdMob(Parser):
def __init__(self):
Parser.__init__(
self,
'https://firebase.google.com/docs/admob/android/quick-start',
r'compile \'com\.google\.firebase:firebase-ads:((\d+\.)+\d+)\'')
def content(self, content, url):
return content
class Amazon(Parser):
def __init__(self):
Parser.__init__(
self,
'https://developer.amazon.com/sdk-download',
r'Amazon Mobile Ads \(((\d+\.)+\d+)\)')
def content(self, content, url):
return content
class AppLovin(Parser):
def __init__(self):
Parser.__init__(self, None, None)
def content(self, content, url):
return None
class Chartboost(Parser):
def __init__(self):
Parser.__init__(
self,
'http://www.chartboo.st/sdk/android',
r'((\d+\.)+\d+)')
def content(self, content, url):
return url
class Facebook(Parser):
def __init__(self):
Parser.__init__(
self,
'http://central.maven.org/maven2/com/facebook/android/audience-network-sdk/maven-metadata.xml',
r'<latest>((\d+\.)+\d+)')
def content(self, content, url):
return content
class Flurry(Parser):
def __init__(self):
Parser.__init__(
self,
'https://bintray.com/yahoo/maven/com.flurry/_latestVersion',
r'((\d+\.)+\d+)')
def content(self, content, url):
return url
class HyprMx(Parser):
def __init__(self):
Parser.__init__(
self,
None,
None)
def content(self, content, url):
return None
class InMobi(Parser):
def __init__(self):
Parser.__init__(
self,
'https://bintray.com/inmobi/maven/inmobi-ads/_latestVersion',
r'((\d+\.)+\d+)')
def content(self, content, url):
return url
class Ironsource(Parser):
def __init__(self):
Parser.__init__(
self,
'https://bintray.com/ironsource-mobile/android-sdk/mediation/_latestVersion',
r'((\d+\.)+\d+)')
def content(self, content, url):
return url
class LoopMe(Parser):
def __init__(self):
Parser.__init__(
self,
'https://bintray.com/loopme/maven/loopme-sdk/_latestVersion',
r'((\d+\.)+\d+)')
def content(self, content, url):
return url
class MachineZone(Parser):
def __init__(self):
Parser.__init__(
self,
None,
None)
def content(self, content, url):
return None
class MobFox(Parser):
def __init__(self):
Parser.__init__(
self,
'https://api.github.com/repos/mobfox/MobFox-Android-SDK/releases/latest',
r'((\d+\.)+\d+)')
def content(self, content, url):
return content
class MoPub(Parser):
def __init__(self):
Parser.__init__(
self,
'https://bintray.com/mopub/mopub-android-sdk/mopub-android-sdk/_latestVersion',
r'((\d+\.)+\d+)')
def content(self, content, url):
return url
class Tapjoy(Parser):
def __init__(self):
Parser.__init__(
self,
'https://bintray.com/tapjoy/tapjoy-sdk/android-sdk/_latestVersion',
r'((\d+\.)+\d+)')
def content(self, content, url):
return url
class Thirdpresence(Parser):
def __init__(self):
Parser.__init__(
self,
'https://bintray.com/thirdpresence/thirdpresence-ad-sdk-android/com.thirdpresence.adsdk.sdk/_latestVersion',
r'((\d+\.)+\d+)')
def content(self, content, url):
return url
class Unity(Parser):
def __init__(self):
Parser.__init__(
self,
'https://api.github.com/repos/Unity-Technologies/unity-ads-android/releases/latest',
r'((\d+\.)+\d+)')
def content(self, content, url):
return content
class Vungle(Parser):
def __init__(self):
Parser.__init__(
self,
'https://v.vungle.com/dashboard/api/1/sdk/android',
r'((\d+\.)+\d+)')
def content(self, content, url):
return url
providers = {
'adcolony': AdColony(),
'admob': AdMob(),
'amazon': Amazon(),
'applovin': AppLovin(),
'chartboost': Chartboost(),
'facebook': Facebook(),
'flurry': Flurry(),
'hyprmx': HyprMx(),
'inmobi': InMobi(),
'ironsource': Ironsource(),
'loopme': LoopMe(),
'machinezone': MachineZone(),
'mobfox': MobFox(),
'mopub': MoPub(),
'tapjoy': Tapjoy(),
'thirdpresence': Thirdpresence(),
'unity': Unity(),
'vungle': Vungle()
}
def newer(new, old):
    """Return True when dotted version string `new` is strictly greater than `old`."""
    newl = new.split('.')
    oldl = old.split('.')
    # Zero-pad the shorter version so components line up.
    if len(newl) > len(oldl):
        oldl.extend(['0' for _ in xrange(len(newl) - len(oldl))])
    elif len(oldl) > len(newl):
        newl.extend(['0' for _ in xrange(len(oldl) - len(newl))])
    # Compare component-wise from the most significant part; the first
    # difference decides the result.
    for o, n in zip(oldl, newl):
        if int(n) > int(o):
            return True
        if int(n) < int(o):
            return False
    return False
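# Quick illustration of newer() (values chosen here, not taken from any SDK):
#   newer('2.1.1', '2.1')  -> True   (patch bump; '2.1' is padded to '2.1.0')
#   newer('2.1.0', '2.1')  -> False  (equal after zero-padding)
#   newer('1.9', '2.0')    -> False  (strictly older)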
def main():
directories = [ x for x in listdir('.') if x.startswith('provider-') ]
for directory in directories:
provider = directory[len('provider-'):]
properties = dict(line.strip().split('=') for line in open(path.join('.', directory, 'gradle.properties'), 'r'))
current = properties['PROVIDER_VERSION']
try:
newest = providers[provider].newest()
        except Exception as e:
print("# %s has failed checking for a new version (%s)" % (provider, e))
continue
if newest is False:
print("# %s is not supported" % provider)
elif newer(newest, current):
print("# %s has a new version %s (%s)" % (provider, newest, current))
if __name__ == "__main__":
main()
|
|
"""
homeassistant.bootstrap
~~~~~~~~~~~~~~~~~~~~~~~
Provides methods to bootstrap a Home Assistant instance.
Each bootstrap method returns a fully set-up HomeAssistant instance.
After bootstrapping you can add your own components or
start the instance by calling hass.start().
"""
import os
import logging
from collections import defaultdict
import homeassistant.core as core
import homeassistant.util.dt as date_util
import homeassistant.util.package as pkg_util
import homeassistant.util.location as loc_util
import homeassistant.config as config_util
import homeassistant.loader as loader
import homeassistant.components as core_components
import homeassistant.components.group as group
from homeassistant.helpers.entity import Entity
from homeassistant.const import (
EVENT_COMPONENT_LOADED, CONF_LATITUDE, CONF_LONGITUDE,
CONF_TEMPERATURE_UNIT, CONF_NAME, CONF_TIME_ZONE, CONF_CUSTOMIZE,
TEMP_CELCIUS, TEMP_FAHRENHEIT)
_LOGGER = logging.getLogger(__name__)
ATTR_COMPONENT = 'component'
PLATFORM_FORMAT = '{}.{}'
def setup_component(hass, domain, config=None):
""" Setup a component and all its dependencies. """
if domain in hass.config.components:
return True
_ensure_loader_prepared(hass)
if config is None:
config = defaultdict(dict)
components = loader.load_order_component(domain)
# OrderedSet is empty if component or dependencies could not be resolved
if not components:
return False
for component in components:
if component in hass.config.components:
continue
if not _setup_component(hass, component, config):
return False
return True
def _handle_requirements(component, name):
""" Installs requirements for component. """
if not hasattr(component, 'REQUIREMENTS'):
return True
for req in component.REQUIREMENTS:
if not pkg_util.install_package(req):
_LOGGER.error('Not initializing %s because could not install '
'dependency %s', name, req)
return False
return True
def _setup_component(hass, domain, config):
""" Setup a component for Home Assistant. """
component = loader.get_component(domain)
missing_deps = [dep for dep in component.DEPENDENCIES
if dep not in hass.config.components]
if missing_deps:
_LOGGER.error(
'Not initializing %s because not all dependencies loaded: %s',
domain, ", ".join(missing_deps))
return False
if not _handle_requirements(component, domain):
return False
try:
if not component.setup(hass, config):
_LOGGER.error('component %s failed to initialize', domain)
return False
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Error during setup of component %s', domain)
return False
hass.config.components.append(component.DOMAIN)
# Assumption: if a component does not depend on groups
# it communicates with devices
if group.DOMAIN not in component.DEPENDENCIES:
hass.pool.add_worker()
hass.bus.fire(
EVENT_COMPONENT_LOADED, {ATTR_COMPONENT: component.DOMAIN})
return True
def prepare_setup_platform(hass, config, domain, platform_name):
""" Loads a platform and makes sure dependencies are setup. """
_ensure_loader_prepared(hass)
platform_path = PLATFORM_FORMAT.format(domain, platform_name)
platform = loader.get_component(platform_path)
# Not found
if platform is None:
return None
# Already loaded
elif platform_path in hass.config.components:
return platform
# Load dependencies
if hasattr(platform, 'DEPENDENCIES'):
for component in platform.DEPENDENCIES:
if not setup_component(hass, component, config):
_LOGGER.error(
'Unable to prepare setup for platform %s because '
'dependency %s could not be initialized', platform_path,
component)
return None
if not _handle_requirements(platform, platform_path):
return None
return platform
# pylint: disable=too-many-branches, too-many-statements
def from_config_dict(config, hass=None):
"""
Tries to configure Home Assistant from a config dict.
Dynamically loads required components and its dependencies.
"""
if hass is None:
hass = core.HomeAssistant()
process_ha_core_config(hass, config.get(core.DOMAIN, {}))
enable_logging(hass)
_ensure_loader_prepared(hass)
# Make a copy because we are mutating it.
# Convert it to defaultdict so components can always have config dict
# Convert values to dictionaries if they are None
config = defaultdict(
dict, {key: value or {} for key, value in config.items()})
# Filter out the repeating and common config section [homeassistant]
components = (key for key in config.keys()
if ' ' not in key and key != core.DOMAIN)
if not core_components.setup(hass, config):
_LOGGER.error('Home Assistant core failed to initialize. '
'Further initialization aborted.')
return hass
_LOGGER.info('Home Assistant core initialized')
# Setup the components
for domain in loader.load_order_components(components):
_setup_component(hass, domain, config)
return hass
def from_config_file(config_path, hass=None):
"""
Reads the configuration file and tries to start all the required
functionality. Will add functionality to 'hass' parameter if given,
instantiates a new Home Assistant object if 'hass' is not given.
"""
if hass is None:
hass = core.HomeAssistant()
# Set config dir to directory holding config file
hass.config.config_dir = os.path.abspath(os.path.dirname(config_path))
config_dict = config_util.load_config_file(config_path)
return from_config_dict(config_dict, hass)
def enable_logging(hass):
""" Setup the logging for home assistant. """
logging.basicConfig(level=logging.INFO)
fmt = ("%(log_color)s%(asctime)s %(levelname)s (%(threadName)s) "
"[%(name)s] %(message)s%(reset)s")
try:
from colorlog import ColoredFormatter
logging.getLogger().handlers[0].setFormatter(ColoredFormatter(
fmt,
datefmt='%y-%m-%d %H:%M:%S',
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
))
except ImportError:
_LOGGER.warning(
"Colorlog package not found, console coloring disabled")
# Log errors to a file if we have write access to file or config dir
err_log_path = hass.config.path('home-assistant.log')
err_path_exists = os.path.isfile(err_log_path)
# Check if we can write to the error log if it exists or that
# we can create files in the containing directory if not.
if (err_path_exists and os.access(err_log_path, os.W_OK)) or \
(not err_path_exists and os.access(hass.config.config_dir, os.W_OK)):
err_handler = logging.FileHandler(
err_log_path, mode='w', delay=True)
err_handler.setLevel(logging.WARNING)
err_handler.setFormatter(
logging.Formatter('%(asctime)s %(name)s: %(message)s',
datefmt='%y-%m-%d %H:%M:%S'))
logging.getLogger('').addHandler(err_handler)
else:
_LOGGER.error(
'Unable to setup error log %s (access denied)', err_log_path)
def process_ha_core_config(hass, config):
""" Processes the [homeassistant] section from the config. """
hac = hass.config
def set_time_zone(time_zone_str):
""" Helper method to set time zone in HA. """
if time_zone_str is None:
return
time_zone = date_util.get_time_zone(time_zone_str)
if time_zone:
hac.time_zone = time_zone
date_util.set_default_time_zone(time_zone)
else:
_LOGGER.error('Received invalid time zone %s', time_zone_str)
for key, attr in ((CONF_LATITUDE, 'latitude'),
(CONF_LONGITUDE, 'longitude'),
(CONF_NAME, 'location_name')):
if key in config:
setattr(hac, attr, config[key])
set_time_zone(config.get(CONF_TIME_ZONE))
customize = config.get(CONF_CUSTOMIZE)
if isinstance(customize, dict):
for entity_id, attrs in config.get(CONF_CUSTOMIZE, {}).items():
if not isinstance(attrs, dict):
continue
Entity.overwrite_attribute(entity_id, attrs.keys(), attrs.values())
if CONF_TEMPERATURE_UNIT in config:
unit = config[CONF_TEMPERATURE_UNIT]
if unit == 'C':
hac.temperature_unit = TEMP_CELCIUS
elif unit == 'F':
hac.temperature_unit = TEMP_FAHRENHEIT
# If we miss some of the needed values, auto detect them
if None not in (
hac.latitude, hac.longitude, hac.temperature_unit, hac.time_zone):
return
_LOGGER.info('Auto detecting location and temperature unit')
info = loc_util.detect_location_info()
if info is None:
_LOGGER.error('Could not detect location information')
return
if hac.latitude is None and hac.longitude is None:
hac.latitude = info.latitude
hac.longitude = info.longitude
if hac.temperature_unit is None:
if info.use_fahrenheit:
hac.temperature_unit = TEMP_FAHRENHEIT
else:
hac.temperature_unit = TEMP_CELCIUS
if hac.location_name is None:
hac.location_name = info.city
if hac.time_zone is None:
set_time_zone(info.time_zone)
def _ensure_loader_prepared(hass):
""" Ensure Home Assistant loader is prepared. """
if not loader.PREPARED:
loader.prepare(hass)
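# ---------------------------------------------------------------------------
# Hypothetical bootstrap sketch (illustrative only): configure a minimal
# instance from an in-memory dict. The coordinates and component set below
# are placeholders; a real setup normally goes through from_config_file.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    example_config = {
        'homeassistant': {
            'name': 'Home',
            'latitude': 52.37,
            'longitude': 4.89,
            'temperature_unit': 'C',
            'time_zone': 'UTC',
        },
        'demo': {},
    }
    hass = from_config_dict(example_config)
    hass.start()  # begin processing events; shutdown handling is omitted here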
|
|
from django import forms
from django.conf import settings
from django.template import loader
from django.template import RequestContext
from django.utils.translation import ugettext as _
from satchmo.configuration import config_value, config_choice_values
from satchmo.contact.forms import ContactInfoForm
from satchmo.contact.models import Contact
from satchmo.discount.models import Discount
from satchmo.discount.utils import find_best_auto_discount
from satchmo.l10n.utils import moneyfmt
from satchmo.payment import signals
from satchmo.payment.config import labelled_payment_choices
from satchmo.payment.models import CreditCardDetail
from satchmo.payment.utils import create_pending_payment, get_or_create_order, pay_ship_save
from satchmo.shipping.config import shipping_methods, shipping_method_by_key
from satchmo.shop.models import Cart
from satchmo.shop.views.utils import CreditCard
from satchmo.tax.templatetags.satchmo_tax import _get_taxprocessor
from satchmo.utils.dynamic import lookup_template
import calendar
import datetime
import sys
MONTHS = [(month,'%02d'%month) for month in range(1,13)]
def _get_shipping_choices(request, paymentmodule, cart, contact, default_view_tax=False):
"""Iterate through legal shipping modules, building the list for display to the user.
Returns the shipping choices list, along with a dictionary of shipping choices, useful
for building javascript that operates on shipping choices.
"""
shipping_options = []
shipping_dict = {}
if not cart.is_shippable:
methods = [shipping_method_by_key('NoShipping'),]
else:
methods = shipping_methods()
for method in methods:
method.calculate(cart, contact)
if method.valid():
template = lookup_template(paymentmodule, 'shipping_options.html')
t = loader.get_template(template)
shipcost = method.cost()
shipping_tax = None
taxed_shipping_price = None
if config_value('TAX','TAX_SHIPPING'):
shipping_tax = config_value('TAX', 'TAX_CLASS')
taxer = _get_taxprocessor(request)
total = shipcost + taxer.by_price(shipping_tax, shipcost)
taxed_shipping_price = moneyfmt(total)
c = RequestContext(request, {
'amount': shipcost,
'description' : method.description(),
'method' : method.method(),
'expected_delivery' : method.expectedDelivery(),
'default_view_tax' : default_view_tax,
'shipping_tax': shipping_tax,
'taxed_shipping_price': taxed_shipping_price})
shipping_options.append((method.id, t.render(c)))
shipping_dict[method.id] = shipcost
return shipping_options, shipping_dict
class CustomChargeForm(forms.Form):
orderitem = forms.IntegerField(required=True, widget=forms.HiddenInput())
amount = forms.DecimalField(label=_('New price'), required=False)
shipping = forms.DecimalField(label=_('Shipping adjustment'), required=False)
    notes = forms.CharField(label=_("Notes"), required=False, initial="Your custom item is ready.")
class PaymentMethodForm(forms.Form):
paymentmethod = forms.ChoiceField(
label=_('Payment method'),
choices=labelled_payment_choices(),
widget=forms.RadioSelect,
required=True
)
def __init__(self, *args, **kwargs):
try:
cart = kwargs['cart']
del kwargs['cart']
except KeyError:
cart = None
try:
order = kwargs['order']
del kwargs['order']
except KeyError:
order = None
super(forms.Form, self).__init__(*args, **kwargs)
# Send a signal to perform additional filtering of available payment methods.
        # Receivers are passed the cart/order so they can inspect the contents and
        # modify the methods list if necessary.
payment_choices = labelled_payment_choices()
signals.payment_methods_query.send(
PaymentMethodForm,
methods=payment_choices,
cart=cart,
order=order
)
if len(payment_choices) == 1:
self.fields['paymentmethod'].widget = forms.HiddenInput(attrs={'value' : payment_choices[0][0]})
else:
self.fields['paymentmethod'].widget = forms.RadioSelect(attrs={'value' : payment_choices[0][0]})
self.fields['paymentmethod'].choices = payment_choices
class PaymentContactInfoForm(ContactInfoForm, PaymentMethodForm):
def __init__(self, *args, **kwargs):
super(PaymentContactInfoForm, self).__init__(*args, **kwargs)
signals.payment_form_init.send(PaymentContactInfoForm, form=self)
def save(self, *args, **kwargs):
contactid = super(PaymentContactInfoForm, self).save(*args, **kwargs)
signals.form_save.send(PaymentContactInfoForm, form=self)
return contactid
class SimplePayShipForm(forms.Form):
shipping = forms.ChoiceField(widget=forms.RadioSelect(), required=False)
discount = forms.CharField(max_length=30, required=False)
def __init__(self, request, paymentmodule, *args, **kwargs):
super(SimplePayShipForm, self).__init__(*args, **kwargs)
self.order = None
self.orderpayment = None
try:
self.tempCart = Cart.objects.from_request(request)
if self.tempCart.numItems > 0:
products = [item.product for item in self.tempCart.cartitem_set.all()]
sale = find_best_auto_discount(products)
if sale:
self.fields['discount'].initial = sale.code
except Cart.DoesNotExist:
self.tempCart = None
try:
self.tempContact = Contact.objects.from_request(request)
except Contact.DoesNotExist:
self.tempContact = None
        if 'default_view_tax' in kwargs:
default_view_tax = kwargs['default_view_tax']
else:
default_view_tax = config_value('TAX', 'TAX_SHIPPING')
shipping_choices, shipping_dict = _get_shipping_choices(request, paymentmodule, self.tempCart, self.tempContact, default_view_tax=default_view_tax)
self.fields['shipping'].choices = shipping_choices
self.shipping_dict = shipping_dict
signals.payment_form_init.send(SimplePayShipForm, form=self)
def clean_shipping(self):
shipping = self.cleaned_data['shipping']
if not shipping and self.tempCart.is_shippable:
raise forms.ValidationError(_('This field is required.'))
return shipping
def clean_discount(self):
""" Check if discount exists and is valid. """
data = self.cleaned_data['discount']
if data:
try:
discount = Discount.objects.get(code=data, active=True)
except Discount.DoesNotExist:
raise forms.ValidationError(_('Invalid discount.'))
valid, msg = discount.isValid(self.tempCart)
if not valid:
raise forms.ValidationError(msg)
# TODO: validate that it can work with these products
return data
def save(self, request, cart, contact, payment_module):
self.order = get_or_create_order(request, cart, contact, self.cleaned_data)
self.orderpayment = create_pending_payment(self.order, payment_module)
signals.form_save.send(SimplePayShipForm, form=self)
class CreditPayShipForm(SimplePayShipForm):
credit_type = forms.ChoiceField()
credit_number = forms.CharField(max_length=20)
month_expires = forms.ChoiceField(choices=MONTHS)
year_expires = forms.ChoiceField()
ccv = forms.CharField(max_length=4, label='Sec code')
def __init__(self, request, paymentmodule, *args, **kwargs):
creditchoices = paymentmodule.CREDITCHOICES.choice_values
super(CreditPayShipForm, self).__init__(request, paymentmodule, *args, **kwargs)
self.cc = None
self.fields['credit_type'].choices = creditchoices
year_now = datetime.date.today().year
self.fields['year_expires'].choices = [(year, year) for year in range(year_now, year_now+6)]
self.tempCart = Cart.objects.from_request(request)
try:
self.tempContact = Contact.objects.from_request(request)
except Contact.DoesNotExist:
self.tempContact = None
#shipping_choices, shipping_dict = _get_shipping_choices(paymentmodule, self.tempCart, self.tempContact)
#self.fields['shipping'].choices = shipping_choices
#self.shipping_dict = shipping_dict
def clean_credit_number(self):
""" Check if credit card is valid. """
credit_number = self.cleaned_data['credit_number']
card = CreditCard(credit_number, self.cleaned_data['credit_type'])
results, msg = card.verifyCardTypeandNumber()
if not results:
raise forms.ValidationError(msg)
return credit_number
def clean_month_expires(self):
return int(self.cleaned_data['month_expires'])
def clean_year_expires(self):
""" Check if credit card has expired. """
month = self.cleaned_data['month_expires']
year = int(self.cleaned_data['year_expires'])
max_day = calendar.monthrange(year, month)[1]
if datetime.date.today() > datetime.date(year=year, month=month, day=max_day):
raise forms.ValidationError(_('Your card has expired.'))
return year
def clean_ccv(self):
""" Validate a proper CCV is entered. Remember it can have a leading 0 so don't convert to int and return it"""
try:
check = int(self.cleaned_data['ccv'])
return self.cleaned_data['ccv']
except ValueError:
raise forms.ValidationError(_('Invalid ccv.'))
def save(self, request, cart, contact, payment_module):
"""Save the order and the credit card information for this orderpayment"""
super(CreditPayShipForm, self).save(request, cart, contact, payment_module)
data = self.cleaned_data
cc = CreditCardDetail(orderpayment=self.orderpayment,
expire_month=data['month_expires'],
expire_year=data['year_expires'],
credit_type=data['credit_type'])
cc.storeCC(data['credit_number'])
cc.save()
# set ccv into cache
cc.ccv = data['ccv']
self.cc = cc
signals.form_save.send(CreditPayShipForm, form=self)
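# ---------------------------------------------------------------------------
# Hypothetical view-level sketch (illustrative only, not used elsewhere):
# how a checkout step would typically drive CreditPayShipForm. The
# 'payment_module' argument stands for the active payment settings group.
# ---------------------------------------------------------------------------
def _example_credit_pay_ship_view(request, payment_module):
    if request.method == "POST":
        form = CreditPayShipForm(request, payment_module, request.POST)
        if form.is_valid():
            cart = Cart.objects.from_request(request)
            contact = Contact.objects.from_request(request)
            form.save(request, cart, contact, payment_module)
            return form.order
    else:
        form = CreditPayShipForm(request, payment_module)
    return form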
|
|
# coding=utf-8
# Copyright 2020 The SimCLR Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model specification for SimCLR."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import data_util as data_util
import model_util as model_util
import objective as obj_lib
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
FLAGS = flags.FLAGS
def build_model_fn(model, num_classes, num_train_examples):
"""Build model function."""
def model_fn(features, labels, mode, params=None):
"""Build model and optimizer."""
is_training = mode == tf.estimator.ModeKeys.TRAIN
# Check training mode.
if FLAGS.train_mode == 'pretrain':
num_transforms = 2
if FLAGS.fine_tune_after_block > -1:
        raise ValueError('Does not support layer freezing during pretraining, '
'should set fine_tune_after_block<=-1 for safety.')
elif FLAGS.train_mode == 'finetune':
num_transforms = 1
else:
raise ValueError('Unknown train_mode {}'.format(FLAGS.train_mode))
# Split channels, and optionally apply extra batched augmentation.
features_list = tf.split(
features, num_or_size_splits=num_transforms, axis=-1)
if FLAGS.use_blur and is_training and FLAGS.train_mode == 'pretrain':
features_list = data_util.batch_random_blur(
features_list, FLAGS.image_size, FLAGS.image_size)
features = tf.concat(features_list, 0) # (num_transforms * bsz, h, w, c)
# Base network forward pass.
with tf.variable_scope('base_model'):
if FLAGS.train_mode == 'finetune' and FLAGS.fine_tune_after_block >= 4:
# Finetune just supervised (linear) head will not update BN stats.
model_train_mode = False
else:
# Pretrain or finetune anything else will update BN stats.
model_train_mode = is_training
hiddens = model(features, is_training=model_train_mode)
# Add head and loss.
if FLAGS.train_mode == 'pretrain':
tpu_context = params['context'] if 'context' in params else None
hiddens_proj = model_util.projection_head(hiddens, is_training)
contrast_loss, logits_con, labels_con = obj_lib.add_contrastive_loss(
hiddens_proj,
hidden_norm=FLAGS.hidden_norm,
temperature=FLAGS.temperature,
tpu_context=tpu_context if is_training else None)
logits_sup = tf.zeros([params['batch_size'], num_classes])
else:
contrast_loss = tf.zeros([])
logits_con = tf.zeros([params['batch_size'], 10])
labels_con = tf.zeros([params['batch_size'], 10])
hiddens = model_util.projection_head(hiddens, is_training)
logits_sup = model_util.supervised_head(
hiddens, num_classes, is_training)
obj_lib.add_supervised_loss(
labels=labels['labels'],
logits=logits_sup,
weights=labels['mask'])
# Add weight decay to loss, for non-LARS optimizers.
model_util.add_weight_decay(adjust_per_optimizer=True)
loss = tf.losses.get_total_loss()
if FLAGS.train_mode == 'pretrain':
variables_to_train = tf.trainable_variables()
else:
collection_prefix = 'trainable_variables_inblock_'
variables_to_train = []
for j in range(FLAGS.fine_tune_after_block + 1, 6):
variables_to_train += tf.get_collection(collection_prefix + str(j))
assert variables_to_train, 'variables_to_train shouldn\'t be empty!'
tf.logging.info('===============Variables to train (begin)===============')
tf.logging.info(variables_to_train)
tf.logging.info('================Variables to train (end)================')
learning_rate = model_util.learning_rate_schedule(
FLAGS.learning_rate, num_train_examples)
if is_training:
if FLAGS.train_summary_steps > 0:
# Compute stats for the summary.
prob_con = tf.nn.softmax(logits_con)
entropy_con = - tf.reduce_mean(
tf.reduce_sum(prob_con * tf.math.log(prob_con + 1e-8), -1))
summary_writer = tf2.summary.create_file_writer(FLAGS.model_dir)
# TODO(iamtingchen): remove this control_dependencies in the future.
with tf.control_dependencies([summary_writer.init()]):
with summary_writer.as_default():
should_record = tf.math.equal(
tf.math.floormod(tf.train.get_global_step(),
FLAGS.train_summary_steps), 0)
with tf2.summary.record_if(should_record):
contrast_acc = tf.equal(
tf.argmax(labels_con, 1), tf.argmax(logits_con, axis=1))
contrast_acc = tf.reduce_mean(tf.cast(contrast_acc, tf.float32))
label_acc = tf.equal(
tf.argmax(labels['labels'], 1), tf.argmax(logits_sup, axis=1))
label_acc = tf.reduce_mean(tf.cast(label_acc, tf.float32))
tf2.summary.scalar(
'train_contrast_loss',
contrast_loss,
step=tf.train.get_global_step())
tf2.summary.scalar(
'train_contrast_acc',
contrast_acc,
step=tf.train.get_global_step())
tf2.summary.scalar(
'train_label_accuracy',
label_acc,
step=tf.train.get_global_step())
tf2.summary.scalar(
'contrast_entropy',
entropy_con,
step=tf.train.get_global_step())
tf2.summary.scalar(
'learning_rate', learning_rate,
step=tf.train.get_global_step())
optimizer = model_util.get_optimizer(learning_rate)
control_deps = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if FLAGS.train_summary_steps > 0:
control_deps.extend(tf.summary.all_v2_summary_ops())
with tf.control_dependencies(control_deps):
train_op = optimizer.minimize(
loss, global_step=tf.train.get_or_create_global_step(),
var_list=variables_to_train)
if FLAGS.checkpoint:
def scaffold_fn():
"""Scaffold function to restore non-logits vars from checkpoint."""
tf.train.init_from_checkpoint(
FLAGS.checkpoint,
{v.op.name: v.op.name
for v in tf.global_variables(FLAGS.variable_schema)})
if FLAGS.zero_init_logits_layer:
# Init op that initializes output layer parameters to zeros.
output_layer_parameters = [
var for var in tf.trainable_variables() if var.name.startswith(
'head_supervised')]
tf.logging.info('Initializing output layer parameters %s to zero',
[x.op.name for x in output_layer_parameters])
with tf.control_dependencies([tf.global_variables_initializer()]):
init_op = tf.group([
tf.assign(x, tf.zeros_like(x))
for x in output_layer_parameters])
return tf.train.Scaffold(init_op=init_op)
else:
return tf.train.Scaffold()
else:
scaffold_fn = None
return tf.estimator.tpu.TPUEstimatorSpec(
mode=mode, train_op=train_op, loss=loss, scaffold_fn=scaffold_fn)
else:
def metric_fn(logits_sup, labels_sup, logits_con, labels_con, mask,
**kws):
"""Inner metric function."""
metrics = {k: tf.metrics.mean(v, weights=mask)
for k, v in kws.items()}
metrics['label_top_1_accuracy'] = tf.metrics.accuracy(
tf.argmax(labels_sup, 1), tf.argmax(logits_sup, axis=1),
weights=mask)
metrics['label_top_5_accuracy'] = tf.metrics.recall_at_k(
tf.argmax(labels_sup, 1), logits_sup, k=5, weights=mask)
metrics['contrastive_top_1_accuracy'] = tf.metrics.accuracy(
tf.argmax(labels_con, 1), tf.argmax(logits_con, axis=1),
weights=mask)
metrics['contrastive_top_5_accuracy'] = tf.metrics.recall_at_k(
tf.argmax(labels_con, 1), logits_con, k=5, weights=mask)
return metrics
metrics = {
'logits_sup': logits_sup,
'labels_sup': labels['labels'],
'logits_con': logits_con,
'labels_con': labels_con,
'mask': labels['mask'],
'contrast_loss': tf.fill((params['batch_size'],), contrast_loss),
'regularization_loss': tf.fill((params['batch_size'],),
tf.losses.get_regularization_loss()),
}
return tf.estimator.tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, metrics),
scaffold_fn=None)
return model_fn
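# Illustrative sketch only (not part of the original file): one way the returned
# model_fn could be wired into a TPUEstimator. The `resnet_model`, `num_classes`
# and `num_train_examples` names are placeholders supplied by the caller, and the
# FLAGS referenced below (model_dir, train_batch_size, eval_batch_size, use_tpu)
# are assumed to be defined elsewhere in the training binary.
#
#   run_config = tf.estimator.tpu.RunConfig(model_dir=FLAGS.model_dir)
#   estimator = tf.estimator.tpu.TPUEstimator(
#       model_fn=build_model_fn(resnet_model, num_classes, num_train_examples),
#       config=run_config,
#       train_batch_size=FLAGS.train_batch_size,
#       eval_batch_size=FLAGS.eval_batch_size,
#       use_tpu=FLAGS.use_tpu)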
|
|
import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
#
# GraphCutSegmentSelfTest
#
class GraphCutSegmentSelfTest(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "GraphCutSegmentSelfTest" # TODO make this more human readable by adding spaces
self.parent.categories = ["Testing.TestCases"]
self.parent.dependencies = []
self.parent.contributors = ["Danfeng Chen (Western University)"] # replace with "Firstname Lastname (Organization)"
self.parent.helpText = """
This is an example of scripted loadable module bundled in an extension.
It performs a simple thresholding on the input volume and optionally captures a screenshot.
"""
self.parent.acknowledgementText = """
This file was originally developed by Jean-Christophe Fillion-Robin, Kitware Inc.
and Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1.
""" # replace with organization, grant and thanks.
#
# GraphCutSegmentSelfTestWidget
#
class GraphCutSegmentSelfTestWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
#
# GraphCutSegmentSelfTestLogic
#
class GraphCutSegmentSelfTestLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def hasImageData(self,volumeNode):
"""This is an example logic method that
returns true if the passed in volume
node has valid image data
"""
if not volumeNode:
logging.debug('hasImageData failed: no volume node')
return False
    if volumeNode.GetImageData() is None:
logging.debug('hasImageData failed: no image data in volume node')
return False
return True
def isValidInputOutputData(self, inputVolumeNode, outputVolumeNode):
"""Validates if the output is not the same as input
"""
if not inputVolumeNode:
logging.debug('isValidInputOutputData failed: no input volume node defined')
return False
if not outputVolumeNode:
logging.debug('isValidInputOutputData failed: no output volume node defined')
return False
if inputVolumeNode.GetID()==outputVolumeNode.GetID():
logging.debug('isValidInputOutputData failed: input and output volume is the same. Create a new volume for output to avoid this error.')
return False
return True
def takeScreenshot(self,name,description,type=-1):
# show the message even if not taking a screen shot
slicer.util.delayDisplay('Take screenshot: '+description+'.\nResult is available in the Annotations module.', 3000)
lm = slicer.app.layoutManager()
# switch on the type to get the requested window
widget = 0
if type == slicer.qMRMLScreenShotDialog.FullLayout:
# full layout
widget = lm.viewport()
elif type == slicer.qMRMLScreenShotDialog.ThreeD:
# just the 3D window
widget = lm.threeDWidget(0).threeDView()
elif type == slicer.qMRMLScreenShotDialog.Red:
# red slice window
widget = lm.sliceWidget("Red")
elif type == slicer.qMRMLScreenShotDialog.Yellow:
# yellow slice window
widget = lm.sliceWidget("Yellow")
elif type == slicer.qMRMLScreenShotDialog.Green:
# green slice window
widget = lm.sliceWidget("Green")
else:
# default to using the full window
widget = slicer.util.mainWindow()
# reset the type so that the node is set correctly
type = slicer.qMRMLScreenShotDialog.FullLayout
# grab and convert to vtk image data
qpixMap = qt.QPixmap().grabWidget(widget)
qimage = qpixMap.toImage()
imageData = vtk.vtkImageData()
slicer.qMRMLUtils().qImageToVtkImageData(qimage,imageData)
annotationLogic = slicer.modules.annotations.logic()
annotationLogic.CreateSnapShot(name, description, type, 1, imageData)
def run(self, inputVolume, outputVolume, imageThreshold, enableScreenshots=0):
"""
Run the actual algorithm
"""
if not self.isValidInputOutputData(inputVolume, outputVolume):
slicer.util.errorDisplay('Input volume is the same as output volume. Choose a different output volume.')
return False
logging.info('Processing started')
# Compute the thresholded output volume using the Threshold Scalar Volume CLI module
cliParams = {'InputVolume': inputVolume.GetID(), 'OutputVolume': outputVolume.GetID(), 'ThresholdValue' : imageThreshold, 'ThresholdType' : 'Above'}
cliNode = slicer.cli.run(slicer.modules.thresholdscalarvolume, None, cliParams, wait_for_completion=True)
# Capture screenshot
if enableScreenshots:
self.takeScreenshot('GraphCutSegmentSelfTestTest-Start','MyScreenshot',-1)
logging.info('Processing completed')
return True
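# Illustrative sketch only (not part of the original module): invoking the logic
# from the Slicer Python console. The node names 'InputVolume' and 'OutputVolume'
# are hypothetical and assumed to already exist in the scene.
#
#   logic = GraphCutSegmentSelfTestLogic()
#   inputVolume = slicer.util.getNode('InputVolume')
#   outputVolume = slicer.util.getNode('OutputVolume')
#   logic.run(inputVolume, outputVolume, imageThreshold=100, enableScreenshots=0)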
class GraphCutSegmentSelfTestTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_GraphCutSegmentSelfTest1()
def test_GraphCutSegmentSelfTest1(self):
""" Ideally you should have several levels of tests. At the lowest level
tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
One of the most important features of the tests is that it should alert other
developers when their changes will have an impact on the behavior of your
module. For example, if a developer removes a feature that you depend on,
your test should break so they know that the feature is needed.
"""
#
# first, get some data
#
# import urllib
# downloads = (('http://www.slicer.org/slicerWiki/images/5/59/RegLib_C01_1.nrrd', 'Tumor.nrrd', slicer.util.loadVolume),)
#export http_proxy=http://proxyhost:proxyport
# for url,name,loader in downloads:
# filePath = slicer.app.temporaryPath + '/' + name
# if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
# logging.info('Requesting download %s from %s...\n' % (name, url))
# urllib.urlretrieve(url, filePath)
# if loader:
# logging.info('Loading %s...' % (name,))
# loader(filePath)
# self.delayDisplay('Finished with download and loading')
self.delayDisplay("Starting the test")
mainWindow = slicer.util.mainWindow()
    mainWindow.moduleSelector().selectModule('GraphCutInteractiveSegmenter')
volumeNode = self.downloadMRHead()
# slicer.util.loadVolume(slicer.app.slicerHome+"/RegLib_C01_1.nrrd")
# volumeNode = slicer.util.getNode('RegLib_C01_1')
cropVolumeNode = slicer.vtkMRMLCropVolumeParametersNode()
cropVolumeNode.SetScene(slicer.mrmlScene)
slicer.mrmlScene.AddNode(cropVolumeNode)
fiducial = slicer.vtkMRMLMarkupsFiducialNode()
fiducial.SetScene(slicer.mrmlScene)
fiducial.AddFiducial(-1.8,31.6,11.9)
fiducial.AddFiducial(15.7,45.0,30.1)
fiducial.AddFiducial(-18.5,12.0,30.1)
fiducial.AddFiducial(-2.6,28.7,45.5)
slicer.mrmlScene.AddNode(fiducial)
graphCutLogic = slicer.modules.graphcutinteractivesegmenter.logic()
graphCutLogic.checkMarkups(volumeNode,fiducial)
graphCutLogic.crop(volumeNode,cropVolumeNode)
graphCutLogic.apply(slicer.mrmlScene.GetNodeByID(cropVolumeNode.GetOutputVolumeNodeID()),True,True)
self.delayDisplay('Test passed!')
def downloadMRHead(self):
import SampleData
sampleDataLogic = SampleData.SampleDataLogic()
self.delayDisplay('Getting MR Head Volume')
mrHeadVolume = sampleDataLogic.downloadMRBrainTumor1()
return mrHeadVolume
|
|
#===============================================================================
# Copyright 2007 Matt Chaput
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""
This module contains classes that allow reading from an index.
"""
from bisect import bisect_right
from heapq import heapify, heapreplace, heappop, nlargest
from whoosh.fields import UnknownFieldError
from whoosh.util import ClosableMixin
from whoosh.postings import MultiPostingReader
# Exceptions
class TermNotFound(Exception):
pass
# Base class
class IndexReader(ClosableMixin):
"""Do not instantiate this object directly. Instead use Index.reader().
"""
def __contains__(self, term):
"""Returns True if the given term tuple (fieldid, text) is
in this reader.
"""
raise NotImplementedError
def close(self):
"""Closes the open files associated with this reader.
"""
raise NotImplementedError
def has_deletions(self):
"""Returns True if the underlying index/segment has deleted
documents.
"""
raise NotImplementedError
def is_deleted(self, docnum):
"""Returns True if the given document number is marked deleted.
"""
raise NotImplementedError
def stored_fields(self, docnum):
"""Returns the stored fields for the given document number.
"""
raise NotImplementedError
def all_stored_fields(self):
"""Yields the stored fields for all documents.
"""
raise NotImplementedError
def doc_count_all(self):
"""Returns the total number of documents, DELETED OR UNDELETED,
in this reader.
"""
raise NotImplementedError
def doc_count(self):
"""Returns the total number of UNDELETED documents in this reader.
"""
raise NotImplementedError
def scorable(self, fieldid):
"""Returns true if the given field stores field lengths.
"""
return self.schema[fieldid].scorable
def fieldname_to_num(self, fieldname):
return self.schema.name_to_number(fieldname)
def field_length(self, fieldid):
"""Returns the total number of terms in the given field. This is used
by some scoring algorithms.
"""
raise NotImplementedError
def doc_field_length(self, docnum, fieldid):
"""Returns the number of terms in the given field in the
given document. This is used by some scoring algorithms.
"""
raise NotImplementedError
def doc_field_lengths(self, docnum):
"""Returns an array corresponding to the lengths of the
scorable fields in the given document. It's up to the
caller to correlate the positions of the numbers in the
array with the scorable fields in the schema.
"""
raise NotImplementedError
def has_vector(self, docnum, fieldid):
"""Returns True if the given document has a term vector for
the given field.
"""
raise NotImplementedError
def postings(self, fieldid, text, exclude_docs = None):
"""Returns a :class:`~whoosh.postings.PostingReader` for the postings
of the given term.
>>> pr = searcher.postings("content", "render")
>>> pr.skip_to(10)
>>> pr.id
12
:param fieldid: the field name or field number of the term.
:param text: the text of the term.
        :param exclude_docs: an optional BitVector of documents to exclude from the
results, or None to not exclude any documents.
:rtype: :class:`whoosh.postings.PostingReader`
"""
raise NotImplementedError
def vector(self, docnum, fieldid):
"""Returns a :class:`~whoosh.postings.PostingReader` object for the given
term vector.
>>> docnum = searcher.document_number(path=u'/a/b/c')
>>> v = searcher.vector(docnum, "content")
>>> v.all_as("frequency")
[(u"apple", 3), (u"bear", 2), (u"cab", 2)]
:param docnum: the document number of the document for which you want
the term vector.
:param fieldid: the field name or field number of the field for which
you want the term vector.
:rtype: :class:`whoosh.postings.PostingReader`
"""
raise NotImplementedError
def vector_as(self, astype, docnum, fieldid):
"""Returns an iterator of (termtext, value) pairs for the terms in the
given term vector. This is a convenient shortcut to calling vector()
and using the PostingReader object when all you want are the terms
and/or values.
>>> docnum = searcher.document_number(path=u'/a/b/c')
>>> searcher.vector_as("frequency", docnum, "content")
[(u"apple", 3), (u"bear", 2), (u"cab", 2)]
:param docnum: the document number of the document for which you want
the term vector.
:param fieldid: the field name or field number of the field for which
you want the term vector.
:param astype: a string containing the name of the format you
want the term vector's data in, for example "weights".
"""
vec = self.vector(docnum, fieldid)
return vec.all_as(astype)
def format(self, fieldid):
"""Returns the Format object corresponding to the given field name.
"""
if fieldid in self.schema:
return self.schema[fieldid].format
else:
raise UnknownFieldError(fieldid)
def __iter__(self):
"""Yields (fieldnum, text, docfreq, indexfreq) tuples for
each term in the reader, in lexical order.
"""
raise NotImplementedError
def doc_frequency(self, fieldid, text):
"""Returns how many documents the given term appears in.
"""
raise NotImplementedError
def frequency(self, fieldid, text):
"""Returns the total number of instances of the given term
in the collection.
"""
raise NotImplementedError
def iter_from(self, fieldnum, text):
"""Yields (field_num, text, doc_freq, index_freq) tuples
for all terms in the reader, starting at the given term.
"""
raise NotImplementedError
def expand_prefix(self, fieldid, prefix):
"""Yields terms in the given field that start with the given prefix.
"""
fieldid = self.schema.to_number(fieldid)
for fn, t, _, _ in self.iter_from(fieldid, prefix):
if fn != fieldid or not t.startswith(prefix):
return
yield t
def all_terms(self):
"""Yields (fieldname, text) tuples for every term in the index.
"""
num2name = self.schema.number_to_name
current_fieldnum = None
current_fieldname = None
for fn, t, _, _ in self:
# Only call self.schema.number_to_name when the
# field number changes.
if fn != current_fieldnum:
current_fieldnum = fn
current_fieldname = num2name(fn)
yield (current_fieldname, t)
def iter_field(self, fieldid, prefix = ''):
"""Yields (text, doc_freq, index_freq) tuples for all terms
in the given field.
"""
fieldid = self.schema.to_number(fieldid)
for fn, t, docfreq, freq in self.iter_from(fieldid, prefix):
if fn != fieldid:
return
yield t, docfreq, freq
def iter_prefix(self, fieldid, prefix):
"""Yields (field_num, text, doc_freq, index_freq) tuples
for all terms in the given field with a certain prefix.
"""
fieldid = self.schema.to_number(fieldid)
for fn, t, docfreq, colfreq in self.iter_from(fieldid, prefix):
if fn != fieldid or not t.startswith(prefix):
return
yield (t, docfreq, colfreq)
def most_frequent_terms(self, fieldid, number=5, prefix=''):
"""Returns the top 'number' most frequent terms in the given field as
a list of (frequency, text) tuples.
"""
return nlargest(number, ((tf, token)
for token, _, tf
in self.iter_prefix(fieldid, prefix)))
    def most_distinctive_terms(self, fieldid, number=5, prefix=''):
"""Returns the top 'number' terms with the highest ``tf*idf``
scores as a list of (score, text) tuples.
"""
return nlargest(number, ((tf * (1.0/df), token)
for token, df, tf
in self.iter_prefix(fieldid, prefix)))
def lexicon(self, fieldid):
"""Yields all terms in the given field."""
for t, _, _ in self.iter_field(fieldid):
yield t
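# Illustrative sketch only (not part of the original module): typical use of a
# reader obtained from an index, assuming `ix` is a whoosh index that defines a
# "content" field.
#
#   reader = ix.reader()
#   try:
#       print list(reader.expand_prefix("content", u"ren"))
#       print reader.most_frequent_terms("content", number=3)
#       print reader.doc_count(), "undeleted documents"
#   finally:
#       reader.close()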
# Multisegment reader class
class MultiReader(IndexReader):
"""Do not instantiate this object directly. Instead use Index.reader().
"""
def __init__(self, readers, doc_offsets, schema):
self.readers = readers
self.doc_offsets = doc_offsets
self.schema = schema
self._scorable_fields = self.schema.scorable_fields()
self.is_closed = False
def __contains__(self, term):
return any(r.__contains__(term) for r in self.readers)
def __iter__(self):
return self._merge_iters([iter(r) for r in self.readers])
def has_deletions(self):
return any(r.has_deletions() for r in self.readers)
    def is_deleted(self, docnum):
        segmentnum, segmentdoc = self._segment_and_docnum(docnum)
        return self.readers[segmentnum].is_deleted(segmentdoc)
def stored_fields(self, docnum):
segmentnum, segmentdoc = self._segment_and_docnum(docnum)
return self.readers[segmentnum].stored_fields(segmentdoc)
def all_stored_fields(self):
for reader in self.readers:
for result in reader.all_stored_fields():
yield result
def close(self):
"""Closes the open files associated with this reader.
"""
for d in self.readers:
d.close()
self.is_closed = True
def doc_count_all(self):
return sum(dr.doc_count_all() for dr in self.readers)
def doc_count(self):
return sum(dr.doc_count() for dr in self.readers)
def field_length(self, fieldnum):
return sum(dr.field_length(fieldnum) for dr in self.readers)
def doc_field_length(self, docnum, fieldid):
fieldid = self.schema.to_number(fieldid)
segmentnum, segmentdoc = self._segment_and_docnum(docnum)
return self.readers[segmentnum].doc_field_length(segmentdoc, fieldid)
def doc_field_lengths(self, docnum):
segmentnum, segmentdoc = self._segment_and_docnum(docnum)
return self.readers[segmentnum].doc_field_lengths(segmentdoc)
def unique_count(self, docnum):
segmentnum, segmentdoc = self._segment_and_docnum(docnum)
return self.readers[segmentnum].unique_count(segmentdoc)
def _document_segment(self, docnum):
return max(0, bisect_right(self.doc_offsets, docnum) - 1)
def _segment_and_docnum(self, docnum):
segmentnum = self._document_segment(docnum)
offset = self.doc_offsets[segmentnum]
return segmentnum, docnum - offset
def has_vector(self, docnum, fieldid):
segmentnum, segmentdoc = self._segment_and_docnum(docnum)
return self.readers[segmentnum].has_vector(segmentdoc, fieldid)
def postings(self, fieldid, text, exclude_docs = None):
format = self.schema[fieldid].format
postreaders = []
docoffsets = []
for i, r in enumerate(self.readers):
if (fieldid, text) in r:
postreaders.append(r.postings(fieldid, text, exclude_docs=exclude_docs))
docoffsets.append(self.doc_offsets[i])
if not postreaders:
raise TermNotFound(fieldid, text)
else:
return MultiPostingReader(format, postreaders, docoffsets)
def vector(self, docnum, fieldid):
segmentnum, segmentdoc = self._segment_and_docnum(docnum)
return self.readers[segmentnum].vector(segmentdoc, fieldid)
def vector_as(self, astype, docnum, fieldid):
segmentnum, segmentdoc = self._segment_and_docnum(docnum)
return self.readers[segmentnum].vector_as(astype, segmentdoc, fieldid)
def iter_from(self, fieldnum, text):
return self._merge_iters([r.iter_from(fieldnum, text) for r in self.readers])
def doc_frequency(self, fieldnum, text):
return sum(r.doc_frequency(fieldnum, text) for r in self.readers)
def frequency(self, fieldnum, text):
return sum(r.frequency(fieldnum, text) for r in self.readers)
def _merge_iters(self, iterlist):
# Merge-sorts terms coming from a list of
# term iterators (IndexReader.__iter__() or
# IndexReader.iter_from()).
# Fill in the list with the head term from each iterator.
# infos is a list of [headterm, iterator] lists.
current = []
for it in iterlist:
fnum, text, docfreq, termcount = it.next()
current.append((fnum, text, docfreq, termcount, it))
heapify(current)
# Number of active iterators
active = len(current)
while active > 0:
# Peek at the first term in the sorted list
fnum, text = current[0][:2]
docfreq = 0
termcount = 0
# Add together all terms matching the first term in the list.
while current and current[0][0] == fnum and current[0][1] == text:
docfreq += current[0][2]
termcount += current[0][3]
it = current[0][4]
try:
fn, t, df, tc = it.next()
heapreplace(current, (fn, t, df, tc, it))
except StopIteration:
heappop(current)
active -= 1
# Yield the term with the summed doc frequency and term count.
yield (fnum, text, docfreq, termcount)
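# Illustrative sketch only (not part of the original module): what the merge in
# _merge_iters() produces for two tiny, already-sorted term iterators (`reader`
# stands for any MultiReader instance). Terms that appear in both segments are
# emitted once, with their doc frequencies and term counts summed.
#
#   seg_a = iter([(0, u"apple", 2, 5), (0, u"bear", 1, 1)])
#   seg_b = iter([(0, u"apple", 1, 2), (0, u"cab", 4, 9)])
#   list(reader._merge_iters([seg_a, seg_b]))
#   # -> [(0, u"apple", 3, 7), (0, u"bear", 1, 1), (0, u"cab", 4, 9)]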
|
|
import socket
import select
from struct import *
import hashlib
import base64
import json
import gc
import Client
from urllib import quote
#properties:
# - The server will kick a client out if it breaks protocol, which indicates that the client tampered with the client-side code
# - Any special characters sent to the server will automatically be turned into HTML entities.
class server:
sock = None
clients = {}
deadClients = []
'''
Checks and handles sockets connecting
'''
def __init__(self):
self.startServer()
global server
cycle = 0
while True:
print '[Debug] Heartbeat #' + str(cycle) #for debugging purposes, counts each cycle
gc.collect()
# Handle the dead sockets
            if len(self.deadClients) != 0:
for (i, u) in enumerate(self.deadClients):
if self.deadClients[i] in self.clients:
self.clients[self.deadClients[i]].died()
del self.deadClients[i]
# Add all the clients to the socket list
_select = [self.sock]
for i in self.clients:
_select.append(i)
read, _, error = select.select(_select, [], _select, None)
if self.sock in error:
self.startServer()
continue
for _socket in read:
send = True
# New connection
if _socket == self.sock:
_client, _addr = self.sock.accept()
self.clients[_client] = Client.Client(_client, self)
# Handle all the users
else:
user = self.clients[_socket]
try:
data = _socket.recv(1024)
print '[Debug] length of data: ' + str(len(data))
#check if user has completed the websocket handshake
if not user.handshake_completed:
###websocket handshake
headers = data
                            key = headers[headers.index('Sec-WebSocket-Key: ') + len('Sec-WebSocket-Key: '):].split("\r\n")[0]
                            headers = "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: " + self.getSockKey(key) + "\r\n\r\n"
user.usersock.send(headers)
data = None
user.handshake_completed = True
except: pass
if not data or len(data) <=1:
#The tested case(s) when this happens:
#1) When a user just completes a handshake
print '[Debug] NO DATA PASSED: PASS'
pass
else:
try:
#NOTE FOR CLIENT REPO: The server will escape certain characters into HTML entities to prevent XSS.
#it will be the client's responsibility to escape the HTML entities SAFELY.
parsed = json.loads(str(self.parseMessage(bytearray(data))))
print '[Debug] '+ str(parsed)
for i in parsed:
#NOTE FOR CLIENT REPO: max message length is 256 characters while the avatar, names, etc. max length is 32 characters long
#if breaks rules, then the server kicks out the user, indicating that the client tampered with client-side code.
#if the user supplies a null user/avatar, then user is kicked
if parsed[i] == None or (len(parsed[i]) > 256 or (i == "message" and len(parsed[i]) > 256)):
print '[Debug] DISCONNECTED CLIENT FOR BREAKING PROTOCOL'
self.kick(_socket)
parsed['type'] = "ERROR"
break
if not (parsed['type'] == "ERROR"):
# Get the name from the user
if parsed['type'] == "join":
onlineUsers = []
#NOTE FOR CLIENT REPO: Client will NEED to parse the following user format in the "message" field...
#ex) "name,alice,bob"
self.clients[_socket].name = parsed['name']
for i in self.clients:
if not i == _socket:
                                            onlineUsers.append(self.clients[i].getName())
                                    data = {}
data['message'] = ",".join(onlineUsers)
data['type'] = 'onSet'
data['name'] = 'System'
data['avi'] = 'n/a'
json_data = json.dumps(data)
self.send_to_client(str(json_data), self.clients[_socket])
dataz = {}
for i in parsed:
ayy = parsed[i]
dataz[i] = quote(ayy.encode('utf-8')) #quote() to escape any HTML entities just incase
json_data = json.dumps(dataz)
print json_data
self.broadcast(str(json_data))
except ValueError:
#The cases where this will activate:
#1) unformatted JSON packets are sent
#2) a user disconnects
self.kick(_socket)
cycle +=1 #debugging purposes
'''
Kicks a user off the server
'''
def kick(self,c):
tempname = self.clients[c].getName()
self.clients[c].died()
self.broadcast(str(json.dumps({'message':tempname,
'type':'leave',
'name':'System',
'avi':'profile.png'})))
'''
Sends a message to a specific user
takes in a client handler and a message
'''
def send_to_client(self,msg,client):
for j in self.pack_message(msg):
if isinstance(j, (int, long)):
client.usersock.send(chr(j))
else:
client.usersock.send(j)
'''
Initialize all the socket stuff here
'''
def startServer(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
self.sock.bind(('0.0.0.0', 1337))
self.sock.listen(True)
'''
Used to encode the websocket key to complete the handshake
'''
def getSockKey(self,key):
MAGIC = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
m = hashlib.sha1(key+MAGIC)
return base64.b64encode(m.digest())
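    # Illustrative sketch only (not part of the original class): the well-known
    # test vector from RFC 6455. For a client key of "dGhlIHNhbXBsZSBub25jZQ==",
    # this method returns "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", the value the server
    # must echo back in the Sec-WebSocket-Accept header.
    #
    #   self.getSockKey("dGhlIHNhbXBsZSBub25jZQ==")
    #   # -> 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='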
'''
Broadcasts a message to every single client
'''
def broadcast(self, msg):
for i in self.clients:
for j in self.pack_message(msg):
if isinstance(j, (int, long)):
i.send(chr(j))
else:
i.send(j)
'''
Encode any messages to be sent to the client complying with websocket standards
'''
def pack_message(self,msg):
payload = []
payload.append(129)
if(len(msg) < 126):
payload.append(len(msg))
elif len(msg) >= 126:
payload.append((126))
payload.append(pack("!H",len(msg)))
payload.append(msg)
return payload
'''
Unmasks any messages sent from the client to the server to be read
'''
def parseMessage(self,msg):
startIndex = 2
if (msg[1]&127) == 126:
startIndex = 4
elif (msg[1]&127) == 127:
startIndex = 10
ns = msg[startIndex:startIndex+4]
a = 0
decoded = bytearray()
for i in range(startIndex+4,len(msg)):
            decoded.append(msg[i] ^ ns[a % 4])
a=a+1
return decoded
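# Illustrative sketch only (not part of the original class): how a short text
# message round-trips through pack_message()/parseMessage(). Outgoing frames are
# sent unmasked (0x81 opcode byte, a length byte, then the payload); incoming
# client frames carry a 4-byte mask starting at byte 2, which parseMessage()
# XORs away.
#
#   pack_message("hi")   -> [129, 2, 'hi']
#   parseMessage(bytearray([0x81, 0x82, 1, 2, 3, 4,
#                           ord('h') ^ 1, ord('i') ^ 2]))
#                        -> bytearray(b'hi')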
|
|
# -*- coding: utf-8 -*-
"""All classes and routines related to files."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import os.path
import re
import jinja2
import jinja2.meta
import six
import scd.utils
try:
from collections.abc import Hashable
except ImportError:
from collections import Hashable
DEFAULT_REPLACEMENTS = {
"base": "{{ base }}",
"full": "{{ full }}"
}
"""A mapping of default replacements."""
@six.python_2_unicode_compatible
class SearchReplace(Hashable):
"""Class, which presents a pair of single search and replacement.
:param regexp search: Search regular expression.
:param replace: Replacement template
:type replace: :py:class:`jinja2.Template`
"""
__slots__ = "search", "replace"
@staticmethod
@scd.utils.lru_cache()
def get_replacement(replace, version):
"""Return rendered template, taken context from version.
:param replace: Template for replacement.
:type replace: :py:class:`jinja2.Template`
:param version: Version instance, where template takes context.
:type version: :py:class:`scd.version.Version`
:return: Rendered template, ready to insert.
        :raises ValueError: if there is not enough context to render the template.
:rtype: str
"""
context = version.context
missed_names = replace.required_vars - set(context)
if missed_names:
logging.error("Cannot find replacement vars %s",
sorted(missed_names))
raise ValueError("Cannot find replacement vars")
return replace.render(**context)
def __init__(self, search, replace):
self.search = search
self.replace = replace
def __str__(self):
return (
"<{0.__class__.__name__}(search={0.search.pattern!r}, "
"replace={0.replace!r})>").format(self)
__repr__ = __str__
def __hash__(self):
return hash("|".join(
[str(hash(self.search)), str(hash(self.replace))]))
def process(self, version, text):
"""Process text according to given version.
        This does what is expected: searches the text (as a rule, a line from a
        file) and inserts the replacement where required.
:param version: Version instance to use.
:type version: :py:class:`scd.version.Version`
:param str text: Text to process.
:return: Processed line, after inserting replacement if needed.
Return original line otherwise.
:rtype: str
"""
replacement = self.get_replacement(self.replace, version)
modified_text = self.search.sub(replacement, text)
if text != modified_text:
logging.info("Modify %r to %r",
text.strip(), modified_text.strip())
return modified_text
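# Illustrative sketch only (not part of the original module): how a single
# SearchReplace pair behaves. The pattern and template below are made up for the
# example, and any object exposing a ``context`` mapping stands in for a real
# :py:class:`scd.version.Version` instance (``make_template`` is defined later in
# this module).
#
#   import re
#
#   class FakeVersion(object):
#       context = {"full": "1.2.3"}
#
#   pair = SearchReplace(re.compile(r"\d+\.\d+\.\d+"), make_template("{{ full }}"))
#   pair.process(FakeVersion(), "version = 0.0.1")   # -> 'version = 1.2.3'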
@six.python_2_unicode_compatible
class File(Hashable):
"""This is a wrapper for a file on FS which should be managed by scd.
    The same story as for :py:class:`scd.config.Config`: this wrapper exists
    mostly for convenience. It is also required when one needs to emit a list
    of :py:class:`SearchReplace` instances for a file.
:param str name: The name of the file from config (as is, not absolute
one)
:param config: Instance of used config.
:type config: :py:class:`scd.config.Config`
:param list data: A contents of search/replacement parts of the
config.
"""
__slots__ = "name", "config", "data"
def __init__(self, name, data, config):
self.name = name
self.config = config
self.data = data
def __hash__(self):
return hash(self.path)
def __str__(self):
return (
"<{0.__class__.__name__}(filename={0.filename!r}, "
"path={0.path!r}, patterns={0.patterns})>").format(self)
__repr__ = __str__
@property
def filename(self):
r"""Relative filename of the file.
        A nice property of this name is that it is platform independent: on
        Windows it might be :file:`docs\conf.py`, on Linux :file:`docs/conf.py`.
:return: Native platform filename
:rtype: str
"""
return os.path.join(*self.name.split("/"))
@property
def path(self):
"""Absolute path to the file for current platform.
:return: Native platform absolute path.
:rtype: str
"""
return os.path.join(self.config.project_directory, self.filename)
@property
def default_replacements(self):
"""Mapping of default replacements for a file.
Key is the name of the replacement, value is an instance of
:py:class:`jinja2.Template`.
:return: Mapping of replacements.
:rtype: dict[str, str]
"""
return {k: make_template(v) for k, v in DEFAULT_REPLACEMENTS.items()}
@property
def all_replacements(self):
"""Mapping of all known replacements for a file.
This mapping includes default replacements and those, defined
in config file.
Key is the name of the replacement, value is an instance of
:py:class:`jinja2.Template`.
:return: Mapping of replacements.
:rtype: dict[str, str]
"""
replacements = self.default_replacements.copy()
replacements.update(
(k, make_template(v))
for k, v in self.config.replacement_patterns.items())
return replacements
@property
def default_search_patterns(self):
"""Mapping of default search patterns for a file.
Key is the name of the replacement, value is compiled regular
expression.
:return: Mapping of patterns.
:rtype: dict[str, str]
"""
return {
name: make_pattern("{{ %s }}" % name, self.config)
for name in scd.utils.get_version_plugins()
}
@property
def all_search_patterns(self):
"""Mapping of all search patterns for a file.
This mapping includes default patterns and those, defined
in config file.
Key is the name of the replacement, value is compiled regular
expression.
:return: Mapping of patterns.
:rtype: dict[str, str]
"""
patterns = self.default_search_patterns.copy()
patterns.update(
(k, make_pattern(v, self.config))
for k, v in self.config.search_patterns.items())
return patterns
@property
def default_search_pattern(self):
"""Property, returns default search pattern from config.
:return: Default search pattern
:rtype: Regular expression
"""
return self.all_search_patterns[self.config.defaults["search"]]
@property
def default_replace_pattern(self):
"""Property, returns default replacement template from config.
:return: Default replacement pattern
:rtype: :py:class:`jinja2.Template`
"""
return self.all_replacements[self.config.defaults["replacement"]]
@property
def patterns(self):
"""A list of search/replacements for a file, based on config.
:return: List of instances for file management.
:rtype: list[:py:class:`SearchReplace`]
"""
patterns = []
for item in self.data:
if item == "default":
patterns.append(SearchReplace(
self.default_search_pattern,
self.default_replace_pattern))
continue
if "search_raw" in item:
search_pattern = make_pattern(item["search_raw"], self.config)
elif "search" in item:
search_pattern = self.all_search_patterns[item["search"]]
else:
search_pattern = self.default_search_pattern
if "replace_raw" in item:
replacement_pattern = make_template(item["replace_raw"])
elif "replace" in item:
replacement_pattern = self.all_replacements[item["replace"]]
else:
replacement_pattern = self.default_replace_pattern
patterns.append(SearchReplace(search_pattern, replacement_pattern))
return patterns
@scd.utils.lru_cache()
def make_template(template):
"""Function for creating template instance from text template.
:param str template: Text template to process.
:return: Correct template instance, based on given text.
:rtype: :py:class:`jinja2.Template`
"""
tpl = jinja2.Template(template)
tpl.required_vars = jinja2.meta.find_undeclared_variables(
tpl.environment.parse(template))
return tpl
@scd.utils.lru_cache()
def make_pattern(base_pattern, config):
"""Function, which creates regular expression based on given pattern.
Also, it injects all predefined search regexps like ``pep440`` etc.
:param str base_pattern: Pattern to transform to regular expression
instance.
:return: Regular expression pattern
:rtype: regexp
:raises ValueError: if pattern cannot be parsed.
"""
patterns = config.extra_context.copy()
for name, data in scd.utils.get_version_plugins().items():
if not hasattr(data, "REGEXP"):
logging.warning("Plugin %s has no regexp, skip.")
continue
if not hasattr(data.REGEXP, "pattern"):
logging.warning("Plugin %s regexp is not a pattern, skip.")
continue
patterns[name] = data.REGEXP.pattern
pattern = make_template(base_pattern)
missed_names = pattern.required_vars - set(patterns)
if missed_names:
logging.error("Cannot find required names %s in pattern",
sorted(missed_names))
raise ValueError("Missed pattern names")
pattern = pattern.render(**patterns)
try:
pattern = re.compile(pattern, re.VERBOSE | re.UNICODE)
except Exception as exc:
logging.error("Base pattern: %s, replaced %s, error: %s",
base_pattern, pattern, exc)
raise ValueError("Cannot parse pattern {0}".format(base_pattern))
return pattern
def validate_access(files):
"""Function, which validates access to the files.
:param files: A list of files to check
:type files: list[:py:class:`scd.files.File`]
    :return: Whether all files are accessible or not.
:rtype: bool
"""
ok = True
for fileobj in files:
if not os.path.isfile(fileobj.path):
logging.error("Path %s is not a file.", fileobj.path)
ok = False
elif not os.access(fileobj.path, os.R_OK | os.W_OK):
logging.error("File %s is not readable and writable.",
fileobj.path)
ok = False
else:
logging.debug("File %s is ok", fileobj.path)
return ok
|
|
# KVS_test.py 27/05/2016 D.J.Whale
#
# Tester for Key Value Store
import unittest
from lifecycle import *
from KVS import KVS, NotPersistableError
#---- DUMMY TEST CLASSES ------------------------------------------------------
class TV():
def __init__(self, id):
print("Creating TV %s" % id)
self.id = id
def __repr__(self):
return "TV(%s)" % self.id
def get_config(self):
return {
"id": self.id
}
class FACTORY():
@staticmethod
def get(name, **kwargs):
if name == "TV": return TV(**kwargs)
else:
raise ValueError("Unknown device name %s" % name)
#----- FILE HELPERS -----------------------------------------------------------
#TODO: This is repeated in Registry_test.py
def remove_file(filename):
import os
try:
os.unlink(filename)
except OSError:
pass # ignore
def show_file(filename):
"""Show the contents of a file on screen"""
with open(filename) as f:
for l in f.readlines():
l = l.strip() # remove nl
print(l)
def write_file(filename, contents):
with open(filename, "w") as f:
lines = contents.split("\n")
for line in lines:
f.write(line + '\n')
#----- TEST KVS MEMORY --------------------------------------------------------
#
# Test the KVS in-memory only configuration (no persistence to file)
class TestKVSMemory(unittest.TestCase):
@test_1
def test_create_blank(self):
"""Create a blank kvs, not bound to any external file"""
kvs = KVS()
# it should not fall over
@test_1
def test_add(self):
"""Add an object into the kvs store"""
kvs = KVS()
kvs["tv1"] = TV(1)
kvs["tv2"] = TV(2)
print(kvs.store)
@test_1
def test_change(self):
"""Change the value associated with an existing key"""
kvs = KVS()
kvs["tv1"] = TV(1)
kvs["tv1"] = TV(111) # change it
print(kvs.store)
@test_1
def test_get(self):
"""Get the object associated with a key in the store"""
kvs = KVS()
kvs["tv1"] = TV(1)
t = kvs["tv1"]
print(t)
@test_1
def test_delete(self):
"""Delete an existing key in the store, and a missing key for error"""
kvs = KVS()
kvs["tv1"] = TV(1)
del kvs["tv1"]
print(kvs.store)
try:
del kvs["tv1"] # expect error
self.fail("Did not get expected KeyError exception")
except KeyError:
pass # expected
@test_1
def test_size(self):
"""How big is the kvs"""
kvs = KVS()
kvs["tv1"] = TV(1)
print(len(kvs))
kvs["tv2"] = TV(2)
print(len(kvs))
@test_1
def test_keys(self):
"""Get out all keys of the kvs"""
kvs = KVS()
kvs["tv1"] = TV(1)
kvs["tv2"] = TV(2)
kvs["tv3"] = TV(3)
print(kvs.keys())
#----- TEST KVS PERSISTED -----------------------------------------------------
#
# Test the KVS persisted to a file
class TestKVSPersisted(unittest.TestCase):
KVS_FILENAME = "test.kvs"
@test_1
def test_write(self):
"""Write an in memory KVS to a file"""
remove_file(self.KVS_FILENAME)
kvs = KVS()
kvs["tv1"] = TV(1)
kvs.write(self.KVS_FILENAME)
show_file(self.KVS_FILENAME)
@test_1
def test_load_cache(self):
"""Load record from a kvs file into the kvs cache"""
# create a file to test against
remove_file(self.KVS_FILENAME)
kvs = KVS()
kvs["tv1"] = TV(1)
kvs.write(self.KVS_FILENAME)
kvs = KVS() # clear it out again
# load the file
kvs.load(self.KVS_FILENAME)
# check the state of the kvs memory
print(kvs.store)
# check state of the kvs file at end
show_file(self.KVS_FILENAME)
@test_1
def test_add(self):
"""Add a new record to a persisted KVS"""
remove_file(self.KVS_FILENAME)
kvs = KVS(self.KVS_FILENAME)
kvs["tv1"] = TV(1)
print(kvs.store)
show_file(self.KVS_FILENAME)
@test_1
def test_delete(self):
"""Delete an existing key from the persistent version"""
remove_file(self.KVS_FILENAME)
kvs = KVS(self.KVS_FILENAME)
kvs["tv1"] = TV(1)
kvs["tv2"] = TV(2)
kvs["tv3"] = TV(3)
kvs["tv4"] = TV(4)
show_file(self.KVS_FILENAME)
del kvs["tv1"]
@test_1
def test_change(self):
"""Change an existing record in a persisted KVS"""
remove_file(self.KVS_FILENAME)
kvs = KVS(self.KVS_FILENAME)
kvs["tv1"] = TV(1)
show_file(self.KVS_FILENAME)
kvs["tv1"] = TV(2)
show_file(self.KVS_FILENAME)
@test_1
def test_ADD_nofactory(self):
#NOTE: This is an under the bonnet test of parsing an ADD record from the file
# No factory callback provided, use ADD parse action
obj = {
"type": "MIHO005",
"id": 1234
}
kvs = KVS(self.KVS_FILENAME)
kvs.ADD("tv1", obj)
# expected result: object described as a kvp becomes a kvp in the store if no factory callback
print(kvs.store)
@test_1
def test_ADD_factory(self):
#NOTE: This is an under the bonnet test of parsing an ADD record from the file
obj = {
"type": "TV",
"id": 1234
}
kvs = KVS(self.KVS_FILENAME)
kvs.ADD("tv1", obj, create_fn=FACTORY.get)
# expected result: object described as a kvp becomes a configured object instance in store
print(kvs.store)
@test_1
def test_IGN(self):
#NOTE: This is an under the bonnet test of parsing an IGN record from the file
obj = {
"type": "TV",
"id": 1234
}
kvs = KVS(self.KVS_FILENAME)
kvs.IGN("tv1", obj)
# expected result: no change to the in memory data structures
print(kvs.store)
@test_1
def test_DEL(self):
#NOTE: This is an under the bonnet test of parsing a DEL record from the file
obj = {
"type": "TV",
"id": 1234
}
kvs = KVS(self.KVS_FILENAME)
kvs.ADD("tv1", obj)
kvs.DEL("tv1", obj)
# expected result: record is deleted from in memory store
print(kvs.store)
try:
kvs.DEL("tv1", obj)
self.fail("Did not get expected KeyError")
except KeyError:
pass # expected
# expected result: error if it was not in the store in the first place
print(kvs.store)
@test_1
def test_load_process(self):
"""Load and process a file with lots of records in it"""
CONTENTS = """\
ADD tv
type=TV
id=1
IGN fan
type=TV
id=2
DEL tv
ADD fridge
type=TV
id=99
"""
write_file(self.KVS_FILENAME, CONTENTS)
kvs = KVS(self.KVS_FILENAME)
kvs.load(create_fn=FACTORY.get)
print(kvs.store)
@test_1
def test_not_persistable(self):
class NPC():
pass
remove_file(self.KVS_FILENAME)
kvs = KVS(self.KVS_FILENAME)
try:
kvs["npc"] = NPC() # should throw NotPersistableError
self.fail("Did not get expected NotPersistableError")
except NotPersistableError:
pass # expected
if __name__ == "__main__":
unittest.main()
# END
|
|
"""Test the nuki config flow."""
from unittest.mock import patch
from pynuki.bridge import InvalidCredentialsException
from requests.exceptions import RequestException
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import dhcp
from homeassistant.components.nuki.const import DOMAIN
from homeassistant.const import CONF_TOKEN
from .mock import HOST, MAC, MOCK_INFO, NAME, setup_nuki_integration
async def test_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.nuki.config_flow.NukiBridge.info",
return_value=MOCK_INFO,
), patch(
"homeassistant.components.nuki.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"port": 8080,
"token": "test-token",
},
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == 123456789
assert result2["data"] == {
"host": "1.1.1.1",
"port": 8080,
"token": "test-token",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nuki.config_flow.NukiBridge.info",
side_effect=InvalidCredentialsException,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"port": 8080,
"token": "test-token",
},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nuki.config_flow.NukiBridge.info",
side_effect=RequestException,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"port": 8080,
"token": "test-token",
},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_exception(hass):
"""Test we handle unknown exceptions."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nuki.config_flow.NukiBridge.info",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"port": 8080,
"token": "test-token",
},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
async def test_form_already_configured(hass):
"""Test we get the form."""
await setup_nuki_integration(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nuki.config_flow.NukiBridge.info",
return_value=MOCK_INFO,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"port": 8080,
"token": "test-token",
},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result2["reason"] == "already_configured"
async def test_dhcp_flow(hass):
"""Test that DHCP discovery for new bridge works."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
data=dhcp.DhcpServiceInfo(hostname=NAME, ip=HOST, macaddress=MAC),
context={"source": config_entries.SOURCE_DHCP},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == config_entries.SOURCE_USER
with patch(
"homeassistant.components.nuki.config_flow.NukiBridge.info",
return_value=MOCK_INFO,
), patch(
"homeassistant.components.nuki.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"port": 8080,
"token": "test-token",
},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == 123456789
assert result2["data"] == {
"host": "1.1.1.1",
"port": 8080,
"token": "test-token",
}
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 1
async def test_dhcp_flow_already_configured(hass):
"""Test that DHCP doesn't setup already configured devices."""
await setup_nuki_integration(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
data=dhcp.DhcpServiceInfo(hostname=NAME, ip=HOST, macaddress=MAC),
context={"source": config_entries.SOURCE_DHCP},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_reauth_success(hass):
"""Test starting a reauthentication flow."""
entry = await setup_nuki_integration(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=entry.data
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
with patch(
"homeassistant.components.nuki.config_flow.NukiBridge.info",
return_value=MOCK_INFO,
), patch(
"homeassistant.components.nuki.async_setup_entry",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_TOKEN: "new-token"},
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result2["reason"] == "reauth_successful"
assert entry.data[CONF_TOKEN] == "new-token"
async def test_reauth_invalid_auth(hass):
"""Test starting a reauthentication flow with invalid auth."""
entry = await setup_nuki_integration(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=entry.data
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
with patch(
"homeassistant.components.nuki.config_flow.NukiBridge.info",
side_effect=InvalidCredentialsException,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_TOKEN: "new-token"},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "reauth_confirm"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_reauth_cannot_connect(hass):
"""Test starting a reauthentication flow with cannot connect."""
entry = await setup_nuki_integration(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=entry.data
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
with patch(
"homeassistant.components.nuki.config_flow.NukiBridge.info",
side_effect=RequestException,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_TOKEN: "new-token"},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "reauth_confirm"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_reauth_unknown_exception(hass):
"""Test starting a reauthentication flow with an unknown exception."""
entry = await setup_nuki_integration(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_REAUTH}, data=entry.data
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
with patch(
"homeassistant.components.nuki.config_flow.NukiBridge.info",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_TOKEN: "new-token"},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "reauth_confirm"
assert result2["errors"] == {"base": "unknown"}
|
|
""" Defines the RangeSelection controller class.
"""
# Major library imports
import numpy
# Chaco imports
from range_selection import RangeSelection
class RangeSelection2D(RangeSelection):
""" Selects a range along the index or value axis for plots on 2D data,
    such as image plots.
The user right-click-drags to select a region, which stays selected until
the user left-clicks to deselect.
"""
#------------------------------------------------------------------------
# Event handlers for the "selected" event state
#------------------------------------------------------------------------
def selected_left_down(self, event):
""" Handles the left mouse button being pressed when the tool is in
the 'selected' state.
If the user is allowed to resize the selection, and the event occurred
within the resize margin of an endpoint, then the tool switches to the
'selecting' state so that the user can resize the selection.
If the event is within the bounds of the selection region, then the tool
        switches to the 'moving' state.
Otherwise, the selection becomes deselected.
"""
screen_bounds = self._get_selection_screencoords()
if screen_bounds is None:
self.deselect(event)
return
low = min(screen_bounds)
high = max(screen_bounds)
tmp = (event.x, event.y)
ndx = self._determine_axis()
mouse_coord = tmp[ndx]
if self.enable_resize:
if (abs(mouse_coord - high) <= self.resize_margin) or \
(abs(mouse_coord - low) <= self.resize_margin):
return self.selected_right_down(event)
if tmp[self.axis_index] >= low and tmp[self.axis_index] <= high:
self.event_state = "moving"
self._down_point = numpy.array([event.x, event.y])
self._down_data_coord = self._map_data([self._down_point])[0][self.axis_index]
self._original_selection = numpy.array(self.selection)
elif self.allow_deselection:
self.deselect(event)
else:
# Treat this as a combination deselect + left down
self.deselect(event)
self.normal_left_down(event)
event.handled = True
return
def selected_right_down(self, event):
""" Handles the right mouse button being pressed when the tool is in
the 'selected' state.
If the user is allowed to resize the selection, and the event occurred
within the resize margin of an endpoint, then the tool switches to the
'selecting' state so that the user can resize the selection.
Otherwise, the selection becomes deselected, and a new selection is
        started.
"""
if self.enable_resize:
coords = self._get_selection_screencoords()
if coords is not None:
start, end = coords
tmp = (event.x, event.y)
ndx = self._determine_axis()
mouse_coord = tmp[ndx]
# We have to do a little swapping; the "end" point
# is always what gets updated, so if the user
# clicked on the starting point, we have to reverse
# the sense of the selection.
if abs(mouse_coord - end) <= self.resize_margin:
self.event_state = "selecting"
self._drag_edge = "high"
self.selecting_mouse_move(event)
elif abs(mouse_coord - start) <= self.resize_margin:
self.event_state = "selecting"
self._drag_edge = "low"
self.selecting_mouse_move(event)
elif self.allow_deselection:
self.deselect(event)
else:
# Treat this as a combination deselect + right down
self.deselect(event)
self.normal_right_down(event)
else:
# Treat this as a combination deselect + right down
self.deselect(event)
self.normal_right_down(event)
event.handled = True
return
def selected_mouse_move(self, event):
""" Handles the mouse moving when the tool is in the 'selected' state.
If the user is allowed to resize the selection, and the event
occurred within the resize margin of an endpoint, then the cursor
changes to indicate that the selection could be resized.
Otherwise, the cursor is set to an arrow.
"""
if self.enable_resize:
# Change the mouse cursor when the user moves within the resize margin
coords = self._get_selection_screencoords()
if coords is not None:
start, end = coords
tmp = (event.x, event.y)
ndx = self._determine_axis()
mouse_coord = tmp[ndx]
if abs(mouse_coord - end) <= self.resize_margin or \
abs(mouse_coord - start) <= self.resize_margin:
self._set_sizing_cursor(event)
return
event.window.set_pointer("arrow")
event.handled = True
return
#------------------------------------------------------------------------
# Event handlers for the "moving" event state
#------------------------------------------------------------------------
def moving_mouse_move(self, event):
""" Handles the mouse moving when the tool is in the 'moving' state.
Moves the selection range by an amount corresponding to the amount
that the mouse has moved since its button was pressed. If the new
selection range overlaps the endpoints of the data, it is truncated to
that endpoint.
"""
cur_point = numpy.array([event.x, event.y])
cur_data_point = self._map_data([cur_point])[0]
original_selection = self._original_selection
new_selection = original_selection + (cur_data_point[self.axis_index] \
- self._down_data_coord)
selection_data_width = original_selection[1] - original_selection[0]
range = self.mapper.range
range_low = range.low[self.axis_index]
range_high = range.high[self.axis_index]
if min(new_selection) < range_low:
new_selection = (range_low, range_low + selection_data_width)
elif max(new_selection) > range_high:
new_selection = (range_high - selection_data_width, range_high)
self.selection = new_selection
self.selection_completed = new_selection
self.component.request_redraw()
event.handled = True
return
#------------------------------------------------------------------------
# Event handlers for the "normal" event state
#------------------------------------------------------------------------
def normal_right_down(self, event):
""" Handles the right mouse button being pressed when the tool is in
the 'normal' state.
Puts the tool into 'selecting' mode, changes the cursor to show that it
is selecting, and starts defining the selection.
"""
x_pos = self._get_axis_coord(event, "index")
y_pos = self._get_axis_coord(event, "value")
self._down_point = numpy.array([x_pos, y_pos])
mapped_pos = self._map_data([(x_pos,y_pos)])[0][self.axis_index]
self.selection = (mapped_pos, mapped_pos)
self._set_sizing_cursor(event)
self.event_state = "selecting"
self.selecting_mouse_move(event)
return
#------------------------------------------------------------------------
# Event handlers for the "selecting" event state
#------------------------------------------------------------------------
def selecting_mouse_move(self, event):
""" Handles the mouse being moved when the tool is in the 'selecting'
state.
Expands the selection range at the appropriate end, based on the new
mouse position.
"""
if self.selection is not None:
axis_index = self.axis_index
low = self.plot.position[axis_index]
high = low + self.plot.bounds[axis_index] - 1
tmp = self._get_axis_coord(event)
if tmp >= low and tmp <= high:
x_pos = self._get_axis_coord(event, "index")
y_pos = self._get_axis_coord(event, "value")
new_edge = self._map_data([(x_pos,y_pos)])[0][self.axis_index]
if self._drag_edge == "high":
low_val = self.selection[0]
# the selection should be a range consisting of 2 points,
# if it appears that only 1 point is selected, move one
# edge over a pixel
if new_edge == low_val:
new_edge = self._map_data([(x_pos+1,y_pos+1)])[0][self.axis_index]
if new_edge > low_val:
self.selection = (low_val, new_edge)
else:
self.selection = (new_edge, low_val)
self._drag_edge = "low"
else:
high_val = self.selection[1]
# the selection should be a range consisting of 2 points,
# if it appears that only 1 point is selected, move one
# edge over a pixel
if new_edge == high_val:
new_edge = self._map_data([(x_pos-1,y_pos-1)])[0][self.axis_index]
if new_edge < high_val:
self.selection = (new_edge, high_val)
else:
self.selection = (high_val, new_edge)
self._drag_edge = "high"
self.component.request_redraw()
event.handled = True
return
def selecting_mouse_leave(self, event):
""" Handles the mouse leaving the plot when the tool is in the
'selecting' state.
Determines whether the event's position is outside the component's
bounds, and if so, clips the selection. Sets the cursor to an arrow.
"""
axis_index = self.axis_index
low = self.plot.position[axis_index]
high = low + self.plot.bounds[axis_index] - 1
old_selection = self.selection
selection_low = old_selection[0]
selection_high = old_selection[1]
pos = self._get_axis_coord(event)
if pos >= high:
if self.axis == 'index':
selection_high = self._map_data([(high, 0)])[0][self.axis_index]
else:
selection_high = self._map_data([(0, high)])[0][self.axis_index]
elif pos <= low:
if self.axis == 'index':
selection_low = self._map_data([(low, 0)])[0][self.axis_index]
else:
selection_low = self._map_data([(0, low)])[0][self.axis_index]
self.selection = (selection_low, selection_high)
event.window.set_pointer("arrow")
self.component.request_redraw()
return
#------------------------------------------------------------------------
# Private methods
#------------------------------------------------------------------------
def _map_data(self, screen_pts):
return self.mapper.map_data(screen_pts)
def _map_screen(self, data_pts):
return self.mapper.map_screen(data_pts)
def _get_selection_screencoords(self):
""" Returns a tuple of (x1, x2) screen space coordinates of the start
and end selection points.
If there is no current selection, then it returns None.
"""
selection = self.selection
if selection is not None and len(selection) == 2:
if self.axis == 'index':
return [x for x,y in self._map_screen([(x,0) for x in self.selection])]
else:
return [y for x,y in self._map_screen([(0,y) for y in self.selection])]
else:
return None
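#------------------------------------------------------------------------
# Usage sketch (not part of the original tool)
#------------------------------------------------------------------------
def _example_attach_range_selection(img_plot):
    """ Minimal, hedged example of wiring this tool onto an existing Chaco
    image plot.  The name `img_plot` and the overlay import path are
    assumptions and may differ between Chaco versions.
    """
    from range_selection_overlay import RangeSelectionOverlay  # assumed path
    tool = RangeSelection2D(img_plot)
    img_plot.active_tool = tool
    img_plot.overlays.append(RangeSelectionOverlay(component=img_plot))
    # The selected range (in data space) can then be read from tool.selection.
    return tool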
|
|
'''
Python wrapper for the Content Services API
'''
import json
import math
from datetime import datetime
from copy import deepcopy
from wsgiref.handlers import format_date_time
from time import mktime
from cache import NoCache
import utils
import logging
log = logging.getLogger('p2p')
import requests
from .adapters import TribAdapter
def get_connection():
"""
Get a connected p2p object. This function is meant to auto-discover
the settings from your shell environment or from Django.
We'll read these from your shell variables::
export P2P_API_KEY=your_p2p_api_key
export P2P_API_URL=url_of_p2p_endpoint
# Optional
export P2P_API_DEBUG=plz # display an http log
export P2P_IMAGE_SERVICES_URL=url_of_image_services_endpoint
Or those same settings from your Django settings::
P2P_API_KEY = your_p2p_api_key
P2P_API_URL = url_of_p2p_endpoint
P2P_API_DEBUG = plz # display an http log
# Optional
P2P_IMAGE_SERVICES_URL = url_of_image_services_endpoint
If you need to pass in your config, just create a new p2p object.
"""
# Try getting settings from Django
try:
from django.conf import settings
return P2P(
url=settings.P2P_API_URL,
auth_token=settings.P2P_API_KEY,
debug=settings.DEBUG,
image_services_url=getattr(
settings, 'P2P_IMAGE_SERVICES_URL', None)
)
except ImportError:
import os
# Try getting settings from environment variables
if 'P2P_API_KEY' in os.environ and 'P2P_API_URL' in os.environ:
return P2P(
url=os.environ['P2P_API_URL'],
auth_token=os.environ['P2P_API_KEY'],
debug=os.environ.get('P2P_API_DEBUG', False),
image_services_url=os.environ.get(
'P2P_IMAGE_SERVICES_URL', None)
)
raise P2PException("No connection settings available. Please put settings "
"in your environment variables or your Django config")
# API calls
class P2P(object):
"""
Get a connection to the P2P Content Services API::
p2p = P2P(my_p2p_url, my_auth_token)
You can send debug messages to stderr by using the keyword::
p2p = P2P(my_p2p_url, my_auth_token, debug=True)
A P2P object can cache the API calls you make. Pass a new Cache_
object with the cache keyword::
        p2p = P2P(my_p2p_url, my_auth_token, debug=True,
cache=DictionaryCache())
A DictionaryCache just caches in a python variable. If you're using
Django caching::
        p2p = P2P(my_p2p_url, my_auth_token, debug=True,
cache=DjangoCache())
"""
def __init__(self, url, auth_token,
debug=False, cache=NoCache(),
image_services_url=None,
product_affiliate_code='chinews',
source_code='chicagotribune',
webapp_name='tRibbit'):
self.config = {
'P2P_API_ROOT': url,
'P2P_API_KEY': auth_token,
'IMAGE_SERVICES_URL': image_services_url,
}
self.cache = cache
self.debug = debug
self.product_affiliate_code = product_affiliate_code
self.source_code = source_code
self.webapp_name = webapp_name
self.default_filter = {
'product_affiliate': self.product_affiliate_code,
'state': 'live'
}
self.default_content_item_query = {
'include': ['web_url', 'section', 'related_items', 'content_topics', 'embedded_items'],
'filter': self.default_filter
}
self.content_item_defaults = {
"content_item_type_code": "blurb",
"product_affiliate_code": self.product_affiliate_code,
"source_code": self.source_code,
"content_item_state_code": "live",
}
self.s = requests.Session()
self.s.mount('https://', TribAdapter())
def get_content_item(self, slug, query=None, force_update=False):
"""
Get a single content item by slug.
        Takes an optional `query` parameter which is a dictionary containing
parameters to pass along in the API call. See the P2P API docs
for details on parameters.
Use the parameter `force_update=True` to update the cache for this
item and query.
"""
if not query:
query = self.default_content_item_query
ci = self.cache.get_content_item(slug=slug, query=query)
if ci is None:
j = self.get("/content_items/%s.json" % (slug), query)
ci = j['content_item']
self.cache.save_content_item(ci, query=query)
elif force_update:
j = self.get("/content_items/%s.json" % (slug),
query, if_modified_since=ci['last_modified_time'])
if j:
ci = j['content_item']
self.cache.save_content_item(ci, query=query)
return ci
def get_multi_content_items(self, ids, query=None, force_update=False):
"""
        Get a bunch of content items at once. We need to use the content item
        ids for this API call.
The API only allows 25 items to be requested at once, so this function
breaks the list of ids into groups of 25 and makes multiple API calls.
        Takes an optional `query` parameter which is a dictionary containing
parameters to pass along in the API call. See the P2P API docs
for details on parameters.
"""
ret = list()
ids_query = list()
if_modified_since = format_date_time(
mktime(datetime(2000, 1, 1).utctimetuple()))
if not query:
query = self.default_content_item_query
# Pull as many items out of cache as possible
ret = [
self.cache.get_content_item(
id=i, query=query) for i in ids
]
assert len(ids) == len(ret)
# Go through what we had in cache and see if we need to
# retrieve anything
for i in range(len(ret)):
if ret[i] is None:
ids_query.append({
"id": ids[i],
"if_modified_since": if_modified_since,
})
elif force_update:
ids_query.append({
"id": ids[i],
"if_modified_since": format_date_time(
mktime(ret[i]['last_modified_time'].utctimetuple())),
})
if len(ids_query) > 0:
# We can only request 25 things at a time
# so we're gonna break up the list into batches
max_items = 25
# we have to use <gasp>MATH</gasp>
num_items = len(ids_query)
# how many batches of max_items do we have?
num_batches = int(
math.ceil(float(num_items) / float(max_items)))
# make a list of indices where we should break the item list
index_breaks = [j * max_items for j in range(num_batches)]
# break up the items into batches of 25
batches = [ids_query[i:i + max_items] for i in index_breaks]
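            # Worked example: 60 ids -> num_batches = 3,
            # index_breaks = [0, 25, 50] and batch sizes of 25, 25 and 10.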
resp = list()
for items in batches:
multi_query = query.copy()
multi_query['content_items'] = items
resp += self.post_json(
'/content_items/multi.json', multi_query)
new_items = list()
remove_ids = list()
for i in range(len(ret)):
if ret[i] is None or force_update:
new_item = resp.pop(0)
assert ids[i] == new_item['id']
if new_item['status'] == 200:
ret[i] = new_item['body']['content_item']
new_items.append(new_item['body']['content_item'])
elif new_item['status'] == 404:
ret[i] = None
remove_ids.append(ids[i])
elif new_item['status'] == 304:
continue
else:
raise P2PException(
'%(status)s fetching %(id)s' % new_item)
if len(new_items) > 0:
for i in new_items:
self.cache.save_content_item(i, query=query)
try:
if len(remove_ids) > 0:
for i in remove_ids:
self.cache.remove_content_item(id=i)
except NotImplementedError:
pass
return ret
def update_content_item(self, content_item, slug=None):
"""
Update a content item.
Takes a single dictionary representing the content_item to be updated.
Refer to the P2P API docs for the content item field names.
By default this function uses the value of the 'slug' key from the
dictionary to perform the API call. It takes an optional `slug`
parameter in case the dictionary does not contain a 'slug' key or if
the dictionary contains a changed slug.
"""
content = content_item.copy()
if slug is None:
slug = content.pop('slug')
d = {'content_item': content}
resp = self.put_json("/content_items/%s.json" % slug, d)
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return resp
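    # Illustrative call (field names other than 'slug' are placeholders; see
    # the P2P API docs for the real content item fields):
    #     p2p.update_content_item({'slug': 'example-slug',
    #                              'title': 'Updated headline'})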
def add_topic_to_content_item(self, content_and_topic_data):
"""
Add a topic to a content item.
        Takes a single dictionary containing the slug of the content item to be updated and the topic_id to be added, passed as "add_topic_ids". See the P2P API docs for further reference.
"""
if "slug" in content_and_topic_data.keys() and "topic_id" in content_and_topic_data.keys():
if content_and_topic_data["slug"] and content_and_topic_data["topic_id"]:
d = {'add_topic_ids': content_and_topic_data["topic_id"]}
resp = self.put_json("/content_items/%s.json" % content_and_topic_data["slug"], d)
try:
self.cache.remove_content_item(content_and_topic_data["slug"])
except NotImplementedError:
pass
else:
print "Missing slug or topic_id value"
else:
print "Missing slug or topic_id key"
def remove_topic_from_content_item(self, content_and_topic_data):
"""
Remove a topic from a content item.
        Takes a single dictionary containing the slug of the content item to be updated and the topic_id to be removed, passed as "remove_topic_ids". See the P2P API docs for further reference.
"""
if "slug" in content_and_topic_data.keys() and "topic_id" in content_and_topic_data.keys():
if content_and_topic_data["slug"] and content_and_topic_data["topic_id"]:
d = {'remove_topic_ids': content_and_topic_data["topic_id"]}
resp = self.put_json("/content_items/%s.json" % content_and_topic_data["slug"], d)
try:
self.cache.remove_content_item(content_and_topic_data["slug"])
except NotImplementedError:
pass
else:
print "Missing slug or topic_id value"
else:
print "Missing slug or topic_id key"
def create_content_item(self, content_item):
"""
Create a new content item.
Takes a single dictionary representing the new content item.
Refer to the P2P API docs for the content item field names.
"""
content = content_item.copy()
defaults = self.content_item_defaults.copy()
defaults.update(content)
data = {'content_item': defaults}
resp = self.post_json('/content_items.json', data)
return resp
def delete_content_item(self, slug):
"""
Delete the content item out of p2p
"""
result = self.delete(
'/content_items/%s.json' % slug)
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return True if "destroyed successfully" in result else False
def create_or_update_content_item(self, content_item):
"""
Attempts to update a content item, if it doesn't exist, attempts to
create it::
create, response = p2p.create_or_update_content_item(item_dict)
TODO: swap the tuple that is returned.
"""
create = False
try:
response = self.update_content_item(content_item)
except P2PException:
response = self.create_content_item(content_item)
create = True
return (create, response)
def junk_content_item(self, slug):
"""
Sets a content item to junk status.
"""
return self.update_content_item({
'slug': slug,
'content_item_state_code': 'junk'
})
def search(self, params):
resp = self.get("/content_items/search.json", params)
return resp
def get_collection(self, code, query=None, force_update=False):
"""
Get the data for this collection. To get the items in a collection,
use get_collection_layout.
"""
if query is None:
query = {'filter': self.default_filter}
if force_update:
data = self.get('/collections/%s.json' % code, query)
collection = data['collection']
self.cache.save_collection(collection, query=query)
else:
collection = self.cache.get_collection(code, query=query)
if collection is None:
data = self.get('/collections/%s.json' % code, query)
collection = data['collection']
self.cache.save_collection(collection, query=query)
return collection
def create_collection(self, data):
"""
Create a new collection. Takes a single argument which should be a
dictionary of collection data.
Example:
p2p.create_collection({
'code': 'my_new_collection',
'name': 'My new collection',
'section_path': '/news/local',
            # OPTIONAL PARAMS
'collection_type_code': 'misc', # default 'misc'
'last_modified_time': date, # defaults to now
'product_affiliate_code': 'chinews' # default to instance setting
})
"""
ret = self.post_json(
'/collections.json?id=%s' % data['code'],
{
'collection': {
'code': data['code'],
'name': data['name'],
'collection_type_code': data.get('collection_type_code',
'misc'),
'last_modified_time': data.get('last_modified_time',
datetime.utcnow()),
'sequence': 999
},
'product_affiliate_code': data.get(
'product_affiliate_code', self.product_affiliate_code),
'section_path': data['section_path']
})
if 'collection' in ret:
return ret['collection']
else:
raise P2PException(ret)
def delete_collection(self, code):
"""
Delete a collection
"""
ret = self.delete(
'/collections/%s.json' % code)
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
def push_into_collection(self, code, content_item_slugs):
"""
Push a list of content item slugs onto the top of a collection
"""
ret = self.put_json(
'/collections/prepend.json?id=%s' % code,
{'items': content_item_slugs})
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
    def suppress_in_collection(
            self, code, content_item_slugs, affiliates=None):
        """
        Suppress a list of slugs in the specified collection
        """
        # Use None instead of a mutable default argument so the affiliate
        # list does not persist between calls.
        if not affiliates:
            affiliates = [self.product_affiliate_code]
ret = self.put_json(
'/collections/suppress.json?id=%s' % code,
{'items': [{
'slug': slug, 'affiliates': affiliates
} for slug in content_item_slugs]})
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
    def insert_position_in_collection(
            self, code, slug, affiliates=None):
        """
        Insert a slug at the first position of the specified collection
        """
        # Use None instead of a mutable default argument, as above.
        if not affiliates:
            affiliates = [self.product_affiliate_code]
ret = self.put_json(
'/collections/insert.json?id=%s' % code,
{'items': [{
'slug': slug, 'position': 1
}]})
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
def push_into_content_item(self, slug, content_item_slugs):
"""
Push a list of content item slugs onto the top of the related
items list for a content item
"""
ret = self.put_json(
'/content_items/prepend_related_items.json?id=%s' % slug,
{'items': content_item_slugs})
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def insert_into_content_item(self, slug, content_item_slugs, position=1):
"""
Insert a list of content item slugs into the related items list for
a content item, starting at the specified position
"""
ret = self.put_json(
'/content_items/insert_related_items.json?id=%s' % slug,
{'items': [{
'slug': content_item_slugs[i], 'position': position + i
} for i in range(len(content_item_slugs))]})
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def append_into_content_item(self, slug, content_item_slugs):
"""
Convenience function to append a list of content item slugs to the end
of the related items list for a content item
"""
ci = self.get_content_item(slug)
ret = self.insert_into_content_item(
slug, content_item_slugs, position=(len(ci['related_items']) + 1))
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def get_collection_layout(self, code, query=None, force_update=False):
if not query:
query = {
'include': 'items',
'filter': self.default_filter
}
if force_update:
resp = self.get('/current_collections/%s.json' % code, query)
collection_layout = resp['collection_layout']
collection_layout['code'] = code # response is missing this
self.cache.save_collection_layout(collection_layout, query=query)
else:
collection_layout = self.cache.get_collection_layout(
code, query=query)
if collection_layout is None:
resp = self.get('/current_collections/%s.json' % code, query)
collection_layout = resp['collection_layout']
collection_layout['code'] = code # response is missing this
self.cache.save_collection_layout(
collection_layout, query=query)
return collection_layout
def get_fancy_collection(self, code, with_collection=False,
limit_items=25, content_item_query=None,
collection_query=None, include_suppressed=False,
force_update=False):
"""
Make a few API calls to fetch all possible data for a collection
and its content items. Returns a collection layout with
extra 'collection' key on the layout, and a 'content_item' key
on each layout item.
"""
collection_layout = self.get_collection_layout(
code, query=collection_query, force_update=force_update)
if with_collection:
# Do we want more detailed data about the collection?
collection = self.get_collection(
code, query=collection_query, force_update=force_update)
collection_layout['collection'] = collection
if limit_items:
# We're only going to fetch limit_items number of things
# so cut out the extra items in the content_layout
collection_layout['items'] = \
collection_layout['items'][:limit_items]
# Process the list of collection layout items to gather ids to fetch,
# and to remove suppressed items, if necessary.
content_item_ids = list()
remove_these = list()
for ci in collection_layout['items']:
if not include_suppressed and float(ci['suppressed']) > 0:
remove_these.append(ci)
else:
content_item_ids.append(ci['contentitem_id'])
# If we're not including suppressed items, remove them from the data
if not include_suppressed:
for ci in remove_these:
collection_layout['items'].remove(ci)
# Retrieve all the content_items, 25 at a time
content_items = self.get_multi_content_items(
content_item_ids, query=content_item_query,
force_update=force_update)
# Loop through the collection items and add the corresponding content
# item data.
for ci in collection_layout['items']:
for ci2 in content_items:
if ci['contentitem_id'] == ci2['id']:
ci['content_item'] = ci2
break
return collection_layout
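    # Illustrative call ('example_code' is a made-up collection code):
    #     layout = p2p.get_fancy_collection('example_code',
    #                                       with_collection=True)
    # layout['collection'] then holds the collection data and each
    # layout['items'] entry carries a 'content_item' key.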
def get_fancy_content_item(self, slug, query=None,
related_items_query=None,
force_update=False):
if query is None:
query = deepcopy(self.default_content_item_query)
query['include'].append('related_items')
if related_items_query is None:
related_items_query = self.default_content_item_query
content_item = self.get_content_item(
slug, query, force_update=force_update)
# We have our content item, now loop through the related
# items, build a list of content item ids, and retrieve them all
ids = [item_stub['relatedcontentitem_id']
for item_stub in content_item['related_items']]
related_items = self.get_multi_content_items(
ids, related_items_query, force_update=force_update)
# now that we've retrieved all the related items, embed them into
# the original content item dictionary to make it fancy
for item_stub in content_item['related_items']:
item_stub['content_item'] = None
for item in related_items:
if(item is not None
and item_stub['relatedcontentitem_id'] == item['id']):
item_stub['content_item'] = item
return content_item
def get_section(self, path, query=None, force_update=False):
if query is None:
query = {
'section_path': path,
'product_affiliate_code': self.product_affiliate_code,
'include': 'default_section_path_collections'
}
if force_update:
data = self.get('/sections/show_collections.json', query)
section = data
self.cache.save_section(path, section, query)
else:
section = self.cache.get_section(path, query)
if section is None:
data = self.get('/sections/show_collections.json', query)
section = data
self.cache.save_section(path, section, query)
return section
def get_section_configs(self, path, query=None, force_update=False):
if query is None:
query = {
'section_path': path,
'product_affiliate_code': self.product_affiliate_code,
'webapp_name': self.webapp_name
}
if force_update:
data = self.get('/sections/show_configs.json', query)
section = data
self.cache.save_section_configs(path, section, query)
else:
section = self.cache.get_section_configs(path, query)
if section is None:
data = self.get('/sections/show_configs.json', query)
section = data
self.cache.save_section_configs(path, section, query)
return section
def get_fancy_section(self, path, force_update=False):
        section = self.get_section(path, force_update=force_update)
        config = self.get_section_configs(path, force_update=force_update)
collections = list()
for c in section['results']['default_section_path_collections']:
collections.append({
'collection_type_code': c['collection_type_code'],
'name': c['name'],
'collection': self.get_fancy_collection(c['code'])
})
fancy_section = config['results']['section_config']
fancy_section['collections'] = collections
fancy_section['path'] = path
return fancy_section
def get_thumb_for_slug(self, slug, force_update=False):
"""
Get information on how to display images associated with this slug
"""
url = "%s/photos/turbine/%s.json" % (
self.config['IMAGE_SERVICES_URL'], slug)
thumb = None
if force_update:
resp = self.s.get(
url,
headers=self.http_headers(),
verify=False)
if resp.ok:
thumb = resp.json()
self.cache.save_thumb(thumb)
else:
thumb = self.cache.get_thumb(slug)
if not thumb:
resp = self.s.get(
url,
headers=self.http_headers(),
verify=False)
if resp.ok:
thumb = resp.json()
self.cache.save_thumb(thumb)
return thumb
def get_nav(self, collection_code, domain=None):
"""
get a simple dictionary of text and links for a navigation collection
"""
nav = list()
        # Guard against the default domain of None before normalizing it
        domain = (domain or '').replace('http://', '').replace('https://', '').replace('/', '')
top_level = self.get_collection_layout(collection_code)
for item in top_level['items']:
fancy_item = self.get_fancy_content_item(item['slug'])
if 'url' not in fancy_item:
print fancy_item
                raise P2PException("Navigation item %s has no 'url' field" % fancy_item['slug'])
sub_nav = list()
for sub_item in fancy_item['related_items']:
if 'url' in sub_item['content_item']:
url = sub_item['content_item']['url']
elif 'web_url' in sub_item['content_item']:
url = sub_item['content_item']['web_url']
else:
print sub_item['content_item']
                    raise P2PException("Related item %s has no 'url' or 'web_url' field" % sub_item['slug'])
if not url.startswith('http'):
url = 'http://' + domain + url
sub_nav.append({
'text': sub_item['headline'] or sub_item['content_item']['title'],
'url': url,
'slug': sub_item['slug']
})
if fancy_item['url'].startswith('http'):
url = fancy_item['url']
path = url[url.find('/') + 1:url.rfind('/')]
else:
url = 'http://' + domain + fancy_item['url']
path = url[url.find('/', 7) + 1:url.rfind('/')]
nav.append({
'text': fancy_item['title'],
'url': url,
'slug': fancy_item['slug'],
'nav': sub_nav,
'path': path
})
return nav
# Utilities
def http_headers(self, content_type=None, if_modified_since=None):
h = {'Authorization': 'Bearer %(P2P_API_KEY)s' % self.config}
if content_type is not None:
h['content-type'] = content_type
if type(if_modified_since) == datetime:
h['If-Modified-Since'] = format_date_time(
mktime(if_modified_since.utctimetuple()))
elif if_modified_since is not None:
h['If-Modified-Since'] = if_modified_since
return h
def _check_for_errors(self, resp, req_url):
request_log = {
'REQ_URL': req_url,
'REQ_HEADERS': self.http_headers(),
'RESP_URL': resp.url,
'STATUS': resp.status_code,
'RESP_BODY': resp.content,
'RESP_HEADERS': resp.headers,
}
if self.debug:
for k, v in request_log.items():
log.debug('%s: %s' % (k, v))
if resp.status_code >= 500:
try:
data = resp.json()
if 'errors' in data:
raise P2PException(data['errors'][0], request_log)
except ValueError:
pass
resp.raise_for_status()
elif resp.status_code == 404:
raise P2PNotFound(resp.url, request_log)
elif resp.status_code >= 400:
if u'{"errors":{"slug":["has already been taken"]}}' == resp.content:
raise P2PSlugTaken(resp.url, request_log)
elif u'{"code":["has already been taken"]}' in resp.content:
raise P2PSlugTaken(resp.url, request_log)
try:
resp.json()
except ValueError:
pass
raise P2PException(resp.content, request_log)
return request_log
def get(self, url, query=None, if_modified_since=None):
if query is not None:
url += '?' + utils.dict_to_qs(query)
resp = self.s.get(
self.config['P2P_API_ROOT'] + url,
headers=self.http_headers(if_modified_since=if_modified_since),
verify=False)
resp_log = self._check_for_errors(resp, url)
try:
ret = utils.parse_response(resp.json())
if 'ETag' in resp.headers:
ret['etag'] = resp.headers['ETag']
return ret
except ValueError:
log.error('JSON VALUE ERROR ON SUCCESSFUL RESPONSE %s' % resp_log)
raise
def delete(self, url):
resp = self.s.delete(
self.config['P2P_API_ROOT'] + url,
headers=self.http_headers(),
verify=False)
self._check_for_errors(resp, url)
return utils.parse_response(resp.content)
def post_json(self, url, data):
payload = json.dumps(utils.parse_request(data))
resp = self.s.post(
self.config['P2P_API_ROOT'] + url,
data=payload,
headers=self.http_headers('application/json'),
verify=False)
resp_log = self._check_for_errors(resp, url)
if resp.content == "" and resp.status_code < 400:
return {}
else:
try:
return utils.parse_response(resp.json())
except Exception:
log.error('THERE WAS AN EXCEPTION WHILE TRYING TO PARSE YOUR JSON: %s' % resp_log)
raise
def put_json(self, url, data):
payload = json.dumps(utils.parse_request(data))
resp = self.s.put(
self.config['P2P_API_ROOT'] + url,
data=payload,
headers=self.http_headers('application/json'),
verify=False)
resp_log = self._check_for_errors(resp, url)
if resp.content == "" and resp.status_code < 400:
return {}
else:
try:
return utils.parse_response(resp.json())
except Exception:
log.error('THERE WAS AN EXCEPTION WHILE TRYING TO PARSE YOUR JSON: %s' % resp_log)
raise
class P2PException(Exception):
pass
class P2PSlugTaken(P2PException):
pass
class P2PNotFound(P2PException):
pass
|
|
"""
/*-------------------------------------------------------------------*/
/* */
/* Copyright IBM Corp. 2013 All Rights Reserved */
/* */
/*-------------------------------------------------------------------*/
/* */
/* NOTICE TO USERS OF THE SOURCE CODE EXAMPLES */
/* */
/* The source code examples provided by IBM are only intended to */
/* assist in the development of a working software program. */
/* */
/* International Business Machines Corporation provides the source */
/* code examples, both individually and as one or more groups, */
/* "as is" without warranty of any kind, either expressed or */
/* implied, including, but not limited to the warranty of */
/* non-infringement and the implied warranties of merchantability */
/* and fitness for a particular purpose. The entire risk */
/* as to the quality and performance of the source code */
/* examples, both individually and as one or more groups, is with */
/* you. Should any part of the source code examples prove defective, */
/* you (and not IBM or an authorized dealer) assume the entire cost */
/* of all necessary servicing, repair or correction. */
/* */
/* IBM does not warrant that the contents of the source code */
/* examples, whether individually or as one or more groups, will */
/* meet your requirements or that the source code examples are */
/* error-free. */
/* */
/* IBM may make improvements and/or changes in the source code */
/* examples at any time. */
/* */
/* Changes may be made periodically to the information in the */
/* source code examples; these changes may be reported, for the */
/* sample code included herein, in new editions of the examples. */
/* */
/* References in the source code examples to IBM products, programs, */
/* or services do not imply that IBM intends to make these */
/* available in all countries in which IBM operates. Any reference */
/* to the IBM licensed program in the source code examples is not */
/* intended to state or imply that IBM's licensed program must be */
/* used. Any functionally equivalent program may be used. */
/*-------------------------------------------------------------------*/
"""
import bottle
from bottle import *
import os,sys,logging, traceback, json, string, urllib, urllib2
from BeautifulSoup import BeautifulSoup
import httplib2
import cloudant
import pprint
from twilio.rest import TwilioRestClient
# Configs from BlueMix
vcap_config = os.environ.get('VCAP_SERVICES')
decoded_config = json.loads(vcap_config)
dbname = "fabulous-price-finder"
account = None
for key, value in decoded_config.iteritems():
if decoded_config[key][0]['name'].startswith('Twilio'):
twilio_creds = decoded_config[key][0]['credentials']
twilio_authToken = twilio_creds['authToken']
twilio_accountSID = twilio_creds['accountSID']
twilioClient = TwilioRestClient(twilio_accountSID, twilio_authToken)
if key.startswith('cloudant'):
cloudant_creds = decoded_config[key][0]['credentials']
cloudant_host = cloudant_creds['host']
cloudant_port = int(cloudant_creds['port'])
cloudant_username = cloudant_creds['username']
cloudant_password = cloudant_creds['password']
cloudant_url = str(cloudant_creds['url'])
account = cloudant.Account(cloudant_username)
login = account.login(cloudant_username, cloudant_password)
assert login.status_code == 200
db = account.database(dbname)
response = db.put()
print response.json
def sendTextWithMessage(message):
message = twilioClient.messages.create(to="+16172836931", from_="+1857399-2773", body=message)
#Provide all the static css and js files under the static dir to browser
@route('/static/:filename#.*#')
def server_static(filename):
""" This is for JS files """
return static_file(filename, root='static')
# Displays the home page
@bottle.get("/")
def testFunc():
return bottle.template('home')
# Get the prices for all of the items stored in the database
@bottle.get('/getCurrentPrices')
def getCurrentPrices():
    view = db.all_docs()
    for doc in view.iter(params={'include_docs': True}):
        getCurrentPrice(doc['doc'])
    return bottle.template('currentPrice')
# Get the current price of a particular item
def getCurrentPrice(item):
try:
http = httplib2.Http()
status, page = http.request(urllib.unquote_plus(item["url"]))
soup = BeautifulSoup(page)
price = soup.find(id=item["idToCheck"]).string
if price is not None:
sendTextWithMessage("The current price of %s is %s" % (item["name"], price))
d = db.document(item["url"])
resp = d.merge({ 'url': item["url"], 'price': price})
return bottle.template('currentPrice', price=price)
else:
return bottle.template('currentPriceError')
    except Exception:
return bottle.template('currentPriceError')
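# Item documents stored in Cloudant look roughly like this (illustrative
# values; the element id is just an example):
#     {'url': 'http%3A%2F%2Fexample.com%2Fitem', 'name': 'Widget',
#      'idToCheck': 'price', 'price': '$9.99'}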
# Saves the item info in the database
@bottle.post('/recordItemInfo')
def recordItemInfo():
name = str(request.forms.get('name'))
url = urllib.quote_plus(request.forms.get('url'))
idToCheck = str(request.forms.get('idToCheck'))
# get document
d = db.document(url)
# merge updated information
resp = d.merge({ 'url': url, 'name': name, 'idToCheck': idToCheck})
bottle.redirect('/displayall')
# Displays all the records in the database
@bottle.get('/displayall')
def displayData():
z = []
view = db.all_docs()
for doc in view.iter(params={'include_docs': True}):
z.append(doc['doc'])
pass
cursor = list(z)
totinf = int(len(cursor))
return bottle.template ('dbdump',totinf=totinf,cursor=cursor)
# Removes all the records from the database
@bottle.post('/clearall')
def clearAll():
    global db
    # destroy DB
    del account[dbname]
    # recreate DB; db.put() issues the request that actually creates it, and
    # `global db` keeps the module-level handle pointing at the new database
    db = account.database(dbname)
    db.put()
    return bottle.template ('dbdump',totinf=0,cursor=[])
# Removes only the selected stuff from the database
@bottle.post('/delselected')
def removeSelected():
s = urllib.quote_plus(request.forms.get('url'))
# document we want to delete
del_doc = db.document(s)
# iterate over all documents to find revision # for one we want to delete
view = db.all_docs()
for doc in view.iter(params={'include_docs': True}):
if (doc['doc']['url'] == s):
rev = doc['doc']['_rev']
del_doc.delete(rev).raise_for_status()
bottle.redirect('/displayall')
debug(True)
# Error Methods
@bottle.error(404)
def error404(error):
return 'Nothing here--sorry!'
application = bottle.default_app()
if __name__ == '__main__':
port = int(os.getenv('PORT', '8000'))
bottle.run(host='0.0.0.0', port=port)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from os import getcwd
from os.path import abspath
from pyscaffold.log import (
DEFAULT_LOGGER,
ColoredReportFormatter,
ReportFormatter,
ReportLogger,
logger,
configure_logger
)
from .log_helpers import ansi_regex, last_log, make_record, match_last_report
def test_default_handler_registered():
# When the module is imported,
# Then a default handler should be registered.
raw_logger = logging.getLogger(DEFAULT_LOGGER)
assert raw_logger.handlers
assert raw_logger.handlers[0] == logger.handler
def test_pass_handler(reset_logger):
# When the report logger is created with a handler
new_logger = ReportLogger(handler=logging.NullHandler())
assert isinstance(new_logger.handler, logging.NullHandler)
def test_default_formatter_registered():
# When the module is imported,
# Then a default formatter should be registered.
raw_logger = logging.getLogger(DEFAULT_LOGGER)
handler = raw_logger.handlers[0]
assert isinstance(handler.formatter, ReportFormatter)
def test_pass_formatter(reset_logger):
# When the report logger is created with a handler
formatter = logging.Formatter('%(levelname)s')
new_logger = ReportLogger(formatter=formatter)
assert new_logger.formatter == formatter
def test_report(tmpfolder, caplog, reset_logger):
# Given the logger level is set to INFO,
logging.getLogger(DEFAULT_LOGGER).setLevel(logging.INFO)
# When the report method is called,
logger.report('make', str(tmpfolder) + '/some/report')
# Then the message should be formatted accordingly.
match = match_last_report(caplog)
assert match['activity'] == 'make'
assert match['content'] == 'some/report'
# And relative paths should be used
out = caplog.text
assert '/tmp' not in out
assert 'some/report' in out
def test_indent(caplog, reset_logger):
# Given the logger level is set to INFO,
logging.getLogger(DEFAULT_LOGGER).setLevel(logging.INFO)
# And the nesting level is not changed
assert logger.nesting == 0
# When the report method is called within an indentation context,
with logger.indent():
logger.report('make', '/some/report')
# Then the spacing should be increased.
match = match_last_report(caplog)
assert match['spacing'] == ReportFormatter.SPACING * 2
# When report is called within a multi level indentation context,
count = 5
with logger.indent(count):
logger.report('make', '/some/report')
# Then the spacing should be increased accordingly.
match = match_last_report(caplog)
assert match['spacing'] == ReportFormatter.SPACING * (count + 1)
# When any other method is called with indentation,
count = 3
with logger.indent(count):
logger.info('something')
# Then the spacing should be added in the beginning
assert (ReportFormatter.SPACING * count + 'something') in last_log(caplog)
def test_copy(caplog, reset_logger):
# Given the logger level is set to INFO,
logging.getLogger(DEFAULT_LOGGER).setLevel(logging.INFO)
# And the nesting level is not changed
assert logger.nesting == 0
    # And a copy of the logger is made within a context,
count = 3
with logger.indent(count):
logger2 = logger.copy()
# When the original logger indentation level is changed,
with logger.indent(7):
logger.report('make', '/some/report')
# And the report method is called in the clone logger
logger2.report('call', '/other/logger')
# Then the spacing should not be increased.
match = match_last_report(caplog)
assert match['spacing'] == ReportFormatter.SPACING * (count + 1)
def test_other_methods(caplog, reset_logger):
# Given the logger level is properly set,
logging.getLogger(DEFAULT_LOGGER).setLevel(logging.DEBUG)
# When conventional methods are called on logger,
logger.debug('some-info!')
# Then they should bypass `report`-specific formatting
match = match_last_report(caplog)
assert not match
assert caplog.records[-1].levelno == logging.DEBUG
assert caplog.records[-1].message == 'some-info!'
def test_create_padding():
formatter = ReportFormatter()
for text in ['abcd', 'abcdefg', 'ab']:
padding = formatter.create_padding(text)
        # Formatter should ensure activities are right padded
assert len(padding + text) == formatter.ACTIVITY_MAXLEN
def parent_dir():
return abspath('..')
def test_format_path():
formatter = ReportFormatter()
format = formatter.format_path
# Formatter should abbrev paths but keep other subjects unchanged
assert format('not a command') == 'not a command'
assert format('git commit') == 'git commit'
assert format('a random message') == 'a random message'
assert format(getcwd()) == '.'
assert format('../dir/../dir/..') == '..'
assert format('../dir/../dir/../foo') == '../foo'
assert format('/a') == '/a' # shorter absolute is better than relative
def test_format_target():
formatter = ReportFormatter()
format = formatter.format_target
assert format(None) == ''
assert format(getcwd()) == ''
assert format(parent_dir()) == "to '..'"
def test_format_context():
formatter = ReportFormatter()
format = formatter.format_context
assert format(None) == ''
assert format(getcwd()) == ''
assert format(parent_dir()) == "from '..'"
def test_format():
formatter = ReportFormatter()
def format(*args, **kwargs):
return formatter.format(make_record(*args, **kwargs)).lstrip()
assert format('run', 'ls -lf .') == 'run ls -lf .'
assert format('run', 'ls', context=parent_dir()) == "run ls from '..'"
assert (format('copy', getcwd(), target='../dir/../dir') ==
"copy . to '../dir'")
assert format('create', 'my/file', nesting=1) == 'create my/file'
def test_colored_format_target():
formatter = ColoredReportFormatter()
format = formatter.format_target
out = format(parent_dir())
assert ColoredReportFormatter.TARGET_PREFIX in out
assert ansi_regex('to').search(out)
def test_colored_format_context():
formatter = ColoredReportFormatter()
format = formatter.format_context
out = format(parent_dir())
assert ColoredReportFormatter.CONTEXT_PREFIX in out
assert ansi_regex('from').search(out)
def test_colored_activity():
formatter = ColoredReportFormatter()
format = formatter.format_activity
out = format('run')
assert ansi_regex('run').search(out)
def test_colored_format():
formatter = ColoredReportFormatter()
def format(*args, **kwargs):
return formatter.format(make_record(*args, **kwargs)).lstrip()
out = format('invoke', 'action')
assert ansi_regex('invoke').search(out)
assert ansi_regex('action').search(out)
def test_colored_report(tmpfolder, caplog, reset_logger):
# Given the logger is properly set,
logging.getLogger(DEFAULT_LOGGER).setLevel(logging.INFO)
logger.handler.setFormatter(ColoredReportFormatter())
# When the report method is called,
logger.report('make', str(tmpfolder) + '/some/report')
# Then the message should contain activity surrounded by ansi codes,
out = caplog.text
assert ansi_regex('make').search(out)
# And relative paths should be used
assert '/tmp' not in out
assert 'some/report' in out
def test_colored_others_methods(caplog, reset_logger):
# Given the logger is properly set,
logging.getLogger(DEFAULT_LOGGER).setLevel(logging.DEBUG)
logger.handler.setFormatter(ColoredReportFormatter())
# When conventional methods are called on logger,
logger.debug('some-info!')
# Then the message should be surrounded by ansi codes
out = caplog.text
assert ansi_regex('some-info!').search(out)
def test_configure_logger(monkeypatch, caplog, reset_logger):
# Given an environment that supports color,
monkeypatch.setattr('pyscaffold.termui.supports_color', lambda *_: True)
# when configure_logger in called,
opts = dict(log_level=logging.INFO)
configure_logger(opts)
# then the formatter should be changed to use colors,
logger.report('some', 'activity')
out = caplog.text
assert ansi_regex('some').search(out)
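# In application code the setup exercised above boils down to (sketch):
#     from pyscaffold.log import configure_logger, logger
#     configure_logger(dict(log_level=logging.INFO))
#     logger.report('create', 'path/to/file')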
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import scipy.stats
import matplotlib.lines
import matplotlib.pyplot as plt
import seaborn as sns
from antlia import filter as ff
from antlia import util
metrics_dtype = np.dtype([
('sinusoid amplitude', '<f8'),
('sinusoid period', '<f8'),
('starting velocity', '<f8'),
('starting steer range', '<i8', 2),
('filter cutoff', '<f8'),
('rider id', '<i8'),
('trial id', '<i8')
])
yfields = [('sinusoid amplitude', 'rad'),
('sinusoid period', 's'),
('starting velocity', 'm/s')]
ERROR_RATIO = 0.7
def plot_fft(rec, k_largest=None, max_freq=None):
util.check_valid_record(rec)
colors = sns.color_palette('Paired', 6)
base_color = colors[1]
k_color = colors[5]
dt = np.diff(rec['time']).mean()
# uses hamming window
freq, xf = ff.fft(rec['steer angle'], dt)
if max_freq is None:
max_index = len(rec)
else:
max_index = next(x for x in range(len(freq)) if freq[x] >= max_freq)
if k_largest is None:
k_largest_freq = None
else:
k_indices = sorted(np.argpartition(xf, -k_largest)[-k_largest:])
msg = '{}th largest element at freq {} Hz'.format(
k_largest, freq[k_indices[-1]])
assert k_indices[-1] <= max_index, msg
k_largest_freq = freq[k_indices]
indices = slice(0, max_index)
fig, ax = plt.subplots()
markerline, stemline, baseline = ax.stem(freq[indices],
xf[indices],
markerfmt=' ')
plt.setp(markerline, 'color', base_color)
plt.setp(stemline, 'color', base_color)
if k_largest is not None:
markerline, stemline, baseline = ax.stem(freq[k_indices],
xf[k_indices],
markerfmt=' ')
plt.setp(markerline, 'color', k_color)
plt.setp(stemline, 'color', k_color)
proxy = matplotlib.lines.Line2D([], [], color=k_color)
ax.legend([proxy],
['{} largest frequency components'.format(k_largest)])
ax.set_yscale('log')
ax.set_xlabel('frequency [Hz]')
ax.set_ylabel('amplitude')
return fig, ax, k_largest_freq
def filtered_steer(rec):
k_largest = 10
dt = np.diff(rec['time']).mean()
# uses hamming window
freq, xf = ff.fft(rec['steer angle'], dt)
max_index = len(rec)
k_indices = sorted(np.argpartition(xf, -k_largest)[-k_largest:])
msg = '{}th largest element at freq {} Hz'.format(
k_largest, freq[k_indices[-1]])
assert k_indices[-1] <= max_index, msg
k_largest_freq = freq[k_indices]
# sampling frequencies are inconsistent
#lowcut = k_largest_freq[2]
# use largest 'k largest frequency' smaller than 0.7 Hz
lowcut = k_largest_freq[next(x for x in reversed(range(k_largest))
if k_largest_freq[x] < 0.7)]
from scipy.signal import filtfilt, iirfilter
t = rec['time']
steer = rec['steer angle']
fs = np.round(1/np.diff(t).mean())
nyq = 0.5*fs
low = lowcut/nyq
order = 4
ripple_pass = 1
ripple_stop = 10
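    # Note: rp/rs only apply to Chebyshev and elliptic designs; with
    # ftype='butter' scipy ignores them, so they are kept mainly for the
    # labels produced in plot_filtered().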
b, a = iirfilter(order, low, rp=ripple_pass, rs=ripple_stop,
btype='lowpass', analog=False, ftype='butter')
return filtfilt(b, a, steer), order, lowcut, ripple_pass, ripple_stop
def plot_filtered(rec):
colors = sns.color_palette('Paired', 12)
fig, ax = plt.subplots()
t = rec['time']
steer = rec['steer angle']
filt_steer, order, lowcut, ripple_pass, ripple_stop = filtered_steer(rec)
filt_steer -= steer.mean()
mod_steer = steer - steer.mean()
ax.plot(t, mod_steer, color=colors[1], alpha=0.4,
label='steer, mean subtracted {:0.2f}'.format(steer.mean()))
ax.plot(t, filt_steer, color=colors[1],
label=('steer, lowpass butter '
'order {:d}, fc {:0.2f} Hz, rs {:0.2f}, rp {:0.2f}'
', mean subtracted'
).format(order, lowcut, ripple_pass, ripple_stop))
error = filt_steer - mod_steer
ax.plot(t, error, color=colors[3], alpha=0.2,
label='error between measured and filtered steer angle')
ax.set_xlabel('time [s]')
ax.set_ylabel('steer angle [rad]')
# FIXME remove repeated code
event_groups = get_steer_event_indices(filt_steer)
first_turn = True
for event_range in reversed(event_groups):
for r0, r1 in event_range:
# don't plot past the end of the data
if r1 >= len(t):
r1 = len(t) - 1
sum_error = sum(error[r0:r1])
sum_filt = sum(filt_steer[r0:r1])
# if error ratio is too high, discard steering event
if sum_error/sum_filt < ERROR_RATIO:
# the first turn should be adjacent to other ranges
if first_turn and len(event_range) > 1:
alpha = 0.4
first_turn = False
amplitude = np.abs(filt_steer[r0:r1]).max()
period = 2*(t[r1] - t[r0])
ax.plot(t[r0:r1],
(np.sign(filt_steer[int(r0 + r1)//2])*
amplitude*np.sin(2*np.pi/period*
(t[r0:r1] - t[r0]))),
color=colors[9],
label=(
'sinusoid fit, '
'amplitude {:0.2f}, period {:0.2f}'.format(
amplitude, period)))
else:
alpha = 0.2
ax.axvspan(t[r0], t[r1], color=colors[5], alpha=alpha)
else:
ax.axvspan(t[r0], t[r1], color=colors[7], alpha=0.1)
ax.legend()
return fig, ax
def get_metrics(rec, window_size=55):
t = rec['time']
steer = rec['steer angle']
filt_steer, _, lowcut, _, _ = filtered_steer(rec)
filt_steer -= steer.mean()
mod_steer = steer - steer.mean()
error = filt_steer - mod_steer
event_groups = get_steer_event_indices(filt_steer)
first_turn = True
for event_range in reversed(event_groups):
for r0, r1 in event_range:
sum_error = sum(error[r0:r1])
sum_filt = sum(filt_steer[r0:r1])
# if error ratio is too high, discard steering event
if sum_error/sum_filt < ERROR_RATIO:
if first_turn and len(event_range) > 1:
first_turn = False
amplitude = np.abs(filt_steer[r0:r1]).max()
period = 2*(t[r1] - t[r0])
vf = ff.moving_average(rec['speed'],
window_size,
window_size/2)
v0 = vf[r0]
assert v0 > 1.0, 'velocity is too low'
break
if first_turn:
break
assert first_turn, 'turn not detected'
return np.array([(amplitude,
period,
v0,
(r0, r1),
lowcut,
0,
0)], dtype=metrics_dtype)
def get_steer_event_indices(filt_steer):
"""merged_range contains a list of lists of tuples there the list of tuple
elements are contiguous.
example:
[
[(a, b), (b, c), (c, d)],
[(e, f), (f, g), (g, h), (h, i)]
]
"""
# identify steering event
sigma = filt_steer.std()
steer_event_indices = np.argwhere(np.abs(filt_steer) > sigma)
event_range = util.get_contiguous_numbers(steer_event_indices)
zero_crossings = np.insert(
np.array([0, len(filt_steer)]),
1,
np.squeeze(np.argwhere(np.diff(np.sign(filt_steer)))))
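    # zero_crossings = [0, c1, c2, ..., len(filt_steer)], where each c is an
    # index at which the filtered steer angle changes sign.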
# expand ranges to nearest zero crossing
merged_range = []
while event_range:
r0, r1 = event_range[0]
# may have the case where the event range is already contained
# within the merged range
if merged_range:
m0, m1 = merged_range[-1][-1]
if m0 <= r0 and m1 >= r1:
event_range.pop(0)
continue
z0, z1 = zero_crossings[:2]
assert r0 < r1, 'invalid range'
assert z0 < z1, 'zero crossings out of order'
if z0 <= r0 and z1 >= r1:
if merged_range and (merged_range[-1][-1][1] == z0):
merged_range[-1].append((z0, z1))
else:
merged_range.append([(z0, z1)])
event_range.pop(0)
zero_crossings = zero_crossings[1:]
elif z0 >= r0 and z0 <= r1:
assert False, 'should not have zero crossing within range'
elif z1 >= r0 and z1 <= r1:
assert False, 'should not have zero crossing within range'
elif z1 < r0:
zero_crossings = zero_crossings[1:]
elif z0 > r1:
zero_crossings = zero_crossings[2:]
else:
assert False, 'unhandled case'
return merged_range
def plot_histograms(stats):
colors = sns.husl_palette(6, s=.8, l=.5)
fig, axes = plt.subplots(1, 3)
    fig.suptitle('histograms of steering events')
axes = axes.ravel()
field = [('sinusoid amplitude [rad]', 'sinusoid amplitude', None),
('sinusoid period [s]', 'sinusoid period', None),
('starting velocity [m/s]', 'starting velocity', None)]
for ax, f, c in zip(axes, field, colors):
label, fieldname, func = f
x = stats[fieldname]
if func is not None:
x = func(x)
sns.distplot(x, ax=ax, color=c, label=label, kde=False)
ax.legend()
return fig, axes
def plot_bivariates(stats):
colors = sns.husl_palette(stats['rider id'].max() + 1, l=.7)
riders = np.unique(stats['rider id'])
proxy_lines = []
for rid in riders:
c = colors[rid - 1]
l = matplotlib.lines.Line2D([], [],
linestyle='', marker='o', markerfacecolor=c,
label='rider {}'.format(rid))
proxy_lines.append(l)
grids = []
for yf in yfields[:-1]:
name, unit = yf
x = stats['starting velocity']
y = stats[name]
g = sns.JointGrid(x=x, y=y)
g.plot_marginals(sns.distplot, kde=False,
color=sns.xkcd_palette(['charcoal'])[0])
g.plot_joint(plt.scatter,
color=list(map(lambda x: colors[x - 1], stats['rider id'])))
g.ax_joint.legend(handles=proxy_lines, ncol=2, title=
'pearson r = {:.2g}, p = {:.2g}'.format(
*scipy.stats.pearsonr(x, y)))
g.set_axis_labels('starting velocity [m/s]', '{} [{}]'.format(name, unit))
g.fig.suptitle('scatterplots of steering events')
g.fig.set_size_inches(12.76, 7.19) # fix size for pdf save
grids.append(g)
return grids
def plot_swarms(stats):
fig, axes = plt.subplots(3, 1, sharex=True)
fig.suptitle('swarm plot of steering metrics per rider')
axes = axes.ravel()
import pandas as pd
df = pd.DataFrame(stats[[
'sinusoid amplitude',
'sinusoid period',
'starting velocity',
#'starting steer range',
'rider id',
'trial id',
]])
for yf, ax in zip(yfields, axes):
y = yf[0]
sns.swarmplot(x='rider id', y=y, ax=ax, data=df, hue='rider id')
ax.set_ylabel('{} [{}]'.format(yf[0], yf[1]))
ax.legend().remove()
ax.set_xlabel('rider id')
return fig, axes
|
|
from test.lib.testing import eq_
from sqlalchemy import *
from test.lib import *
from test.lib.schema import Table, Column
from sqlalchemy.types import TypeDecorator
from test.lib import fixtures
class ReturningTest(fixtures.TestBase, AssertsExecutionResults):
__requires__ = 'returning',
def setup(self):
meta = MetaData(testing.db)
global table, GoofyType
class GoofyType(TypeDecorator):
impl = String
def process_bind_param(self, value, dialect):
if value is None:
return None
return "FOO" + value
def process_result_value(self, value, dialect):
if value is None:
return None
return value + "BAR"
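            # Round trip: a bound value 'v' is stored as 'FOOv' and read back
            # as 'FOOvBAR' (see the 'FOOsomegoofyBAR' assertion below).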
table = Table('tables', meta,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('persons', Integer),
Column('full', Boolean),
Column('goofy', GoofyType(50))
)
table.create(checkfirst=True)
def teardown(self):
table.drop()
@testing.exclude('firebird', '<', (2, 0), '2.0+ feature')
@testing.exclude('postgresql', '<', (8, 2), '8.2+ feature')
def test_column_targeting(self):
result = table.insert().returning(table.c.id, table.c.full).execute({'persons': 1, 'full': False})
row = result.first()
assert row[table.c.id] == row['id'] == 1
assert row[table.c.full] == row['full'] == False
result = table.insert().values(persons=5, full=True, goofy="somegoofy").\
returning(table.c.persons, table.c.full, table.c.goofy).execute()
row = result.first()
assert row[table.c.persons] == row['persons'] == 5
assert row[table.c.full] == row['full'] == True
eq_(row[table.c.goofy], row['goofy'])
eq_(row['goofy'], "FOOsomegoofyBAR")
@testing.fails_on('firebird', "fb can't handle returning x AS y")
@testing.exclude('firebird', '<', (2, 0), '2.0+ feature')
@testing.exclude('postgresql', '<', (8, 2), '8.2+ feature')
def test_labeling(self):
result = table.insert().values(persons=6).\
returning(table.c.persons.label('lala')).execute()
row = result.first()
assert row['lala'] == 6
@testing.fails_on('firebird', "fb/kintersbasdb can't handle the bind params")
@testing.fails_on('oracle+zxjdbc', "JDBC driver bug")
@testing.exclude('firebird', '<', (2, 0), '2.0+ feature')
@testing.exclude('postgresql', '<', (8, 2), '8.2+ feature')
def test_anon_expressions(self):
result = table.insert().values(goofy="someOTHERgoofy").\
returning(func.lower(table.c.goofy, type_=GoofyType)).execute()
row = result.first()
assert row[0] == "foosomeothergoofyBAR"
result = table.insert().values(persons=12).\
returning(table.c.persons + 18).execute()
row = result.first()
assert row[0] == 30
@testing.exclude('firebird', '<', (2, 1), '2.1+ feature')
@testing.exclude('postgresql', '<', (8, 2), '8.2+ feature')
def test_update_returning(self):
table.insert().execute([{'persons': 5, 'full': False}, {'persons': 3, 'full': False}])
result = table.update(table.c.persons > 4, dict(full=True)).returning(table.c.id).execute()
eq_(result.fetchall(), [(1,)])
result2 = select([table.c.id, table.c.full]).order_by(table.c.id).execute()
eq_(result2.fetchall(), [(1,True),(2,False)])
@testing.exclude('firebird', '<', (2, 0), '2.0+ feature')
@testing.exclude('postgresql', '<', (8, 2), '8.2+ feature')
def test_insert_returning(self):
result = table.insert().returning(table.c.id).execute({'persons': 1, 'full': False})
eq_(result.fetchall(), [(1,)])
@testing.fails_on('postgresql', '')
@testing.fails_on('oracle+cx_oracle', '')
@testing.crashes('mssql+mxodbc', 'Raises an error')
def test_executemany(self):
# return value is documented as failing with psycopg2/executemany
result2 = table.insert().returning(table).execute(
[{'persons': 2, 'full': False}, {'persons': 3, 'full': True}])
if testing.against('mssql+zxjdbc'):
# jtds apparently returns only the first row
eq_(result2.fetchall(), [(2, 2, False, None)])
elif testing.against('firebird', 'mssql', 'oracle'):
# Multiple inserts only return the last row
eq_(result2.fetchall(), [(3, 3, True, None)])
else:
# nobody does this as far as we know (pg8000?)
eq_(result2.fetchall(), [(2, 2, False, None), (3, 3, True, None)])
@testing.exclude('firebird', '<', (2, 1), '2.1+ feature')
@testing.exclude('postgresql', '<', (8, 2), '8.2+ feature')
@testing.fails_on_everything_except('postgresql', 'firebird')
def test_literal_returning(self):
if testing.against("postgresql"):
literal_true = "true"
else:
literal_true = "1"
result4 = testing.db.execute('insert into tables (id, persons, "full") '
'values (5, 10, %s) returning persons' % literal_true)
eq_([dict(row) for row in result4], [{'persons': 10}])
@testing.exclude('firebird', '<', (2, 1), '2.1+ feature')
@testing.exclude('postgresql', '<', (8, 2), '8.2+ feature')
def test_delete_returning(self):
table.insert().execute([{'persons': 5, 'full': False}, {'persons': 3, 'full': False}])
result = table.delete(table.c.persons > 4).returning(table.c.id).execute()
eq_(result.fetchall(), [(1,)])
result2 = select([table.c.id, table.c.full]).order_by(table.c.id).execute()
eq_(result2.fetchall(), [(2,False),])
class SequenceReturningTest(fixtures.TestBase):
__requires__ = 'returning',
def setup(self):
meta = MetaData(testing.db)
global table, seq
seq = Sequence('tid_seq')
table = Table('tables', meta,
Column('id', Integer, seq, primary_key=True),
Column('data', String(50))
)
table.create(checkfirst=True)
def teardown(self):
table.drop()
def test_insert(self):
r = table.insert().values(data='hi').returning(table.c.id).execute()
assert r.first() == (1, )
assert seq.execute() == 2
class KeyReturningTest(fixtures.TestBase, AssertsExecutionResults):
"""test returning() works with columns that define 'key'."""
__requires__ = 'returning',
def setup(self):
meta = MetaData(testing.db)
global table
table = Table('tables', meta,
Column('id', Integer, primary_key=True, key='foo_id', test_needs_autoincrement=True),
Column('data', String(20)),
)
table.create(checkfirst=True)
def teardown(self):
table.drop()
@testing.exclude('firebird', '<', (2, 0), '2.0+ feature')
@testing.exclude('postgresql', '<', (8, 2), '8.2+ feature')
def test_insert(self):
result = table.insert().returning(table.c.foo_id).execute(data='somedata')
row = result.first()
assert row[table.c.foo_id] == row['id'] == 1
result = table.select().execute().first()
assert row[table.c.foo_id] == row['id'] == 1
class ImplicitReturningFlag(fixtures.TestBase):
def test_flag_turned_off(self):
e = engines.testing_engine(options={'implicit_returning':False})
assert e.dialect.implicit_returning is False
c = e.connect()
assert e.dialect.implicit_returning is False
def test_flag_turned_on(self):
e = engines.testing_engine(options={'implicit_returning':True})
assert e.dialect.implicit_returning is True
c = e.connect()
assert e.dialect.implicit_returning is True
def test_flag_turned_default(self):
supports = [False]
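# Probe RETURNING support: go() is run through the requirements decorator,
# so supports[0] only ends up True when the current backend supports RETURNING.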
def go():
supports[0] = True
testing.requires.returning(go)()
e = engines.testing_engine()
# starts as False. This is because all of Firebird,
# Postgresql, Oracle, SQL Server started supporting RETURNING
# as of a certain version, and the flag is not set until
# version detection occurs. If some DB comes along that has
# RETURNING in all cases, this test can be adjusted.
assert e.dialect.implicit_returning is False
# version detection on connect sets it
c = e.connect()
assert e.dialect.implicit_returning is supports[0]
|
|
#!/usr/bin/env python3
# Copyright 2013-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, struct
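# Constants from the ELF specification: the string-table section type and the
# dynamic-section tag values this script needs (see elf.h).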
SHT_STRTAB = 3
DT_NEEDED = 1
DT_RPATH = 15
DT_RUNPATH = 29
DT_STRTAB = 5
DT_SONAME = 14
DT_MIPS_RLD_MAP_REL = 1879048245
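# Precomputes struct format strings and byte sizes for the ELF primitive types
# (Half, Word, Sword, Addr, Off, ...) for the given pointer size and endianness
# ('<' little-endian, '>' big-endian).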
class DataSizes():
def __init__(self, ptrsize, is_le):
if is_le:
p = '<'
else:
p = '>'
self.Half = p+'h'
self.HalfSize = 2
self.Word = p+'I'
self.WordSize = 4
self.Sword = p+'i'
self.SwordSize = 4
if ptrsize == 64:
self.Addr = p+'Q'
self.AddrSize = 8
self.Off = p+'Q'
self.OffSize = 8
self.XWord = p+'Q'
self.XWordSize = 8
self.Sxword = p+'q'
self.SxwordSize = 8
else:
self.Addr = p+'I'
self.AddrSize = 4
self.Off = p+'I'
self.OffSize = 4
class DynamicEntry(DataSizes):
def __init__(self, ifile, ptrsize, is_le):
super().__init__(ptrsize, is_le)
self.ptrsize = ptrsize
if ptrsize == 64:
self.d_tag = struct.unpack(self.Sxword, ifile.read(self.SxwordSize))[0]
self.val = struct.unpack(self.XWord, ifile.read(self.XWordSize))[0]
else:
self.d_tag = struct.unpack(self.Sword, ifile.read(self.SwordSize))[0]
self.val = struct.unpack(self.Word, ifile.read(self.WordSize))[0]
def write(self, ofile):
if self.ptrsize == 64:
ofile.write(struct.pack(self.Sxword, self.d_tag))
ofile.write(struct.pack(self.XWord, self.val))
else:
ofile.write(struct.pack(self.Sword, self.d_tag))
ofile.write(struct.pack(self.Word, self.val))
class SectionHeader(DataSizes):
def __init__(self, ifile, ptrsize, is_le):
super().__init__(ptrsize, is_le)
if ptrsize == 64:
is_64 = True
else:
is_64 = False
#Elf64_Word
self.sh_name = struct.unpack(self.Word, ifile.read(self.WordSize))[0]
#Elf64_Word
self.sh_type = struct.unpack(self.Word, ifile.read(self.WordSize))[0]
#Elf64_Xword
if is_64:
self.sh_flags = struct.unpack(self.XWord, ifile.read(self.XWordSize))[0]
else:
self.sh_flags = struct.unpack(self.Word, ifile.read(self.WordSize))[0]
#Elf64_Addr
self.sh_addr = struct.unpack(self.Addr, ifile.read(self.AddrSize))[0]
#Elf64_Off
self.sh_offset = struct.unpack(self.Off, ifile.read(self.OffSize))[0]
#Elf64_Xword
if is_64:
self.sh_size = struct.unpack(self.XWord, ifile.read(self.XWordSize))[0]
else:
self.sh_size = struct.unpack(self.Word, ifile.read(self.WordSize))[0]
#Elf64_Word
self.sh_link = struct.unpack(self.Word, ifile.read(self.WordSize))[0]
#Elf64_Word
self.sh_info = struct.unpack(self.Word, ifile.read(self.WordSize))[0]
#Elf64_Xword
if is_64:
self.sh_addralign = struct.unpack(self.XWord, ifile.read(self.XWordSize))[0]
else:
self.sh_addralign = struct.unpack(self.Word, ifile.read(self.WordSize))[0]
#Elf64_Xword
if is_64:
self.sh_entsize = struct.unpack(self.XWord, ifile.read(self.XWordSize))[0]
else:
self.sh_entsize = struct.unpack(self.Word, ifile.read(self.WordSize))[0]
class Elf(DataSizes):
def __init__(self, bfile, verbose=True):
self.bfile = bfile
self.verbose = verbose
self.bf = open(bfile, 'r+b')
try:
(self.ptrsize, self.is_le) = self.detect_elf_type()
super().__init__(self.ptrsize, self.is_le)
self.parse_header()
self.parse_sections()
self.parse_dynamic()
except:
self.bf.close()
raise
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.bf.close()
def detect_elf_type(self):
data = self.bf.read(6)
if data[1:4] != b'ELF':
# This script gets called on non-ELF targets too,
# so just ignore them.
if self.verbose:
print('File "%s" is not an ELF file.' % self.bfile)
sys.exit(0)
if data[4] == 1:
ptrsize = 32
elif data[4] == 2:
ptrsize = 64
else:
sys.exit('File "%s" has unknown ELF class.' % self.bfile)
if data[5] == 1:
is_le = True
elif data[5] == 2:
is_le = False
else:
sys.exit('File "%s" has unknown ELF endianness.' % self.bfile)
return (ptrsize, is_le)
def parse_header(self):
self.bf.seek(0)
self.e_ident = struct.unpack('16s', self.bf.read(16))[0]
self.e_type = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
self.e_machine = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
self.e_version = struct.unpack(self.Word, self.bf.read(self.WordSize))[0]
self.e_entry = struct.unpack(self.Addr, self.bf.read(self.AddrSize))[0]
self.e_phoff = struct.unpack(self.Off, self.bf.read(self.OffSize))[0]
self.e_shoff = struct.unpack(self.Off, self.bf.read(self.OffSize))[0]
self.e_flags = struct.unpack(self.Word, self.bf.read(self.WordSize))[0]
self.e_ehsize = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
self.e_phentsize = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
self.e_phnum = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
self.e_shentsize = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
self.e_shnum = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
self.e_shstrndx = struct.unpack(self.Half, self.bf.read(self.HalfSize))[0]
def parse_sections(self):
self.bf.seek(self.e_shoff)
self.sections = []
for i in range(self.e_shnum):
self.sections.append(SectionHeader(self.bf, self.ptrsize, self.is_le))
def read_str(self):
arr = []
x = self.bf.read(1)
while x != b'\0':
arr.append(x)
x = self.bf.read(1)
if x == b'':
raise RuntimeError('Tried to read past the end of the file')
return b''.join(arr)
def find_section(self, target_name):
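# Section names are offsets into the section-header string table (the section
# at index e_shstrndx); resolve each name and compare it with target_name.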
section_names = self.sections[self.e_shstrndx]
for i in self.sections:
self.bf.seek(section_names.sh_offset + i.sh_name)
name = self.read_str()
if name == target_name:
return i
def parse_dynamic(self):
sec = self.find_section(b'.dynamic')
self.dynamic = []
self.bf.seek(sec.sh_offset)
while True:
e = DynamicEntry(self.bf, self.ptrsize, self.is_le)
self.dynamic.append(e)
if e.d_tag == 0:
break
def print_section_names(self):
section_names = self.sections[self.e_shstrndx]
for i in self.sections:
self.bf.seek(section_names.sh_offset + i.sh_name)
name = self.read_str()
print(name.decode())
def print_soname(self):
soname = None
strtab = None
for i in self.dynamic:
if i.d_tag == DT_SONAME:
soname = i
if i.d_tag == DT_STRTAB:
strtab = i
self.bf.seek(strtab.val + soname.val)
print(self.read_str())
def get_entry_offset(self, entrynum):
sec = self.find_section(b'.dynstr')
for i in self.dynamic:
if i.d_tag == entrynum:
return sec.sh_offset + i.val
return None
def print_rpath(self):
offset = self.get_entry_offset(DT_RPATH)
if offset is None:
print("This file does not have an rpath.")
else:
self.bf.seek(offset)
print(self.read_str())
def print_runpath(self):
offset = self.get_entry_offset(DT_RUNPATH)
if offset is None:
print("This file does not have a runpath.")
else:
self.bf.seek(offset)
print(self.read_str())
def print_deps(self):
sec = self.find_section(b'.dynstr')
deps = []
for i in self.dynamic:
if i.d_tag == DT_NEEDED:
deps.append(i)
for i in deps:
offset = sec.sh_offset + i.val
self.bf.seek(offset)
name = self.read_str()
print(name)
def fix_deps(self, prefix):
sec = self.find_section(b'.dynstr')
deps = []
for i in self.dynamic:
if i.d_tag == DT_NEEDED:
deps.append(i)
for i in deps:
offset = sec.sh_offset + i.val
self.bf.seek(offset)
name = self.read_str()
if name.startswith(prefix):
basename = name.split(b'/')[-1]
padding = b'\0'*(len(name) - len(basename))
newname = basename + padding
assert(len(newname) == len(name))
self.bf.seek(offset)
self.bf.write(newname)
def fix_rpath(self, new_rpath):
# The path to search for can be either rpath or runpath.
# Fix both of them to be sure.
self.fix_rpathtype_entry(new_rpath, DT_RPATH)
self.fix_rpathtype_entry(new_rpath, DT_RUNPATH)
def fix_rpathtype_entry(self, new_rpath, entrynum):
if isinstance(new_rpath, str):
new_rpath = new_rpath.encode('utf8')
rp_off = self.get_entry_offset(entrynum)
if rp_off is None:
if self.verbose:
print('File does not have rpath. It should be a fully static executable.')
return
self.bf.seek(rp_off)
old_rpath = self.read_str()
if len(old_rpath) < len(new_rpath):
sys.exit("New rpath must not be longer than the old one.")
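# Overwrite the old rpath string in place and NUL-pad the remainder, so the
# layout of the string table (and every other offset in the file) is unchanged.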
self.bf.seek(rp_off)
self.bf.write(new_rpath)
self.bf.write(b'\0'*(len(old_rpath) - len(new_rpath) + 1))
if len(new_rpath) == 0:
self.remove_rpath_entry(entrynum)
def remove_rpath_entry(self, entrynum):
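# Blank the matching entry (d_tag = 0), move it behind the terminating null
# entry, then rewrite the whole .dynamic section in place.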
sec = self.find_section(b'.dynamic')
for (i, entry) in enumerate(self.dynamic):
if entry.d_tag == entrynum:
rpentry = self.dynamic[i]
rpentry.d_tag = 0
self.dynamic = self.dynamic[:i] + self.dynamic[i+1:] + [rpentry]
break
# DT_MIPS_RLD_MAP_REL is relative to the offset of the tag. Adjust it accordingly.
for entry in self.dynamic[i:]:
if entry.d_tag == DT_MIPS_RLD_MAP_REL:
entry.val += 2 * (self.ptrsize // 8)
break
self.bf.seek(sec.sh_offset)
for entry in self.dynamic:
entry.write(self.bf)
return None
def run(args):
if len(args) < 1 or len(args) > 2:
print('This application resets target rpath.')
print('Don\'t run this unless you know what you are doing.')
print('%s: <binary file> <prefix>' % sys.argv[0])
sys.exit(1)
with Elf(args[0]) as e:
if len(args) == 1:
e.print_rpath()
e.print_runpath()
else:
new_rpath = args[1]
e.fix_rpath(new_rpath)
return 0
if __name__ == '__main__':
run(sys.argv[1:])
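# Example usage (hypothetical paths; run as a script):
#   <this script> build/libfoo.so              -> print the current rpath and runpath
#   <this script> build/libfoo.so '$ORIGIN/..' -> overwrite rpath/runpath in place
# The replacement string must not be longer than the existing one
# (see fix_rpathtype_entry above).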
|
|
#!/usr/bin/env python
"""Bootstrap setuptools installation
To use setuptools in your package's setup.py, include this
file in the same directory and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
To require a specific version of setuptools, set a download
mirror, or use an alternate download directory, simply supply
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import zipfile
import optparse
import subprocess
import platform
import textwrap
import contextlib
from distutils import log
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "3.1"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
def _python_cmd(*args):
"""
Return True if the command succeeded.
"""
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(archive_filename, install_args=()):
with archive_context(archive_filename):
# installing
log.warn('Installing Setuptools')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
# exitcode will be 2
return 2
def _build_egg(egg, archive_filename, to_dir):
with archive_context(archive_filename):
# building an egg
log.warn('Building a Setuptools egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
@contextlib.contextmanager
def archive_context(filename):
# extracting the archive
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
with ContextualZipFile(filename) as archive:
archive.extractall()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
yield
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
archive = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, archive, to_dir)
sys.path.insert(0, egg)
# Remove previously-imported pkg_resources if present (see
# https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
if 'pkg_resources' in sys.modules:
del sys.modules['pkg_resources']
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15):
to_dir = os.path.abspath(to_dir)
rep_modules = 'pkg_resources', 'setuptools'
imported = set(sys.modules).intersection(rep_modules)
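# If pkg_resources/setuptools are already imported, a conflicting version cannot
# be swapped out in this process; the VersionConflict branch below then aborts
# with instructions instead of downloading.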
try:
import pkg_resources
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("setuptools>=" + version)
return
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir, download_delay)
except pkg_resources.VersionConflict as VC_err:
if imported:
msg = textwrap.dedent("""
The required version of setuptools (>={version}) is not available,
and can't be installed while this script is running. Please
install a more recent version first, using
'easy_install -U setuptools'.
(Currently using {VC_err.args[0]!r})
""").format(VC_err=VC_err, version=version)
sys.stderr.write(msg)
sys.exit(2)
# otherwise, reload ok
del pkg_resources, sys.modules['pkg_resources']
return _do_download(version, download_base, to_dir, download_delay)
def _clean_check(cmd, target):
"""
Run the command to download target. If the command fails, clean up before
re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
"""
Download the file at url to target using Powershell (which will validate
trust). Raise an exception if the command cannot complete.
"""
target = os.path.abspath(target)
ps_cmd = (
"[System.Net.WebRequest]::DefaultWebProxy.Credentials = "
"[System.Net.CredentialCache]::DefaultCredentials; "
"(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)"
% vars()
)
cmd = [
'powershell',
'-Command',
ps_cmd,
]
_clean_check(cmd, target)
def has_powershell():
if platform.system() != 'Windows':
return False
cmd = ['powershell', '-Command', 'echo test']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
cmd = ['curl', url, '--silent', '--output', target]
_clean_check(cmd, target)
def has_curl():
cmd = ['curl', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
cmd = ['wget', url, '--quiet', '--output-document', target]
_clean_check(cmd, target)
def has_wget():
cmd = ['wget', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
"""
Use Python to download the file, even though it cannot authenticate the
connection.
"""
src = urlopen(url)
try:
# Read all the data in one block.
data = src.read()
finally:
src.close()
# Write all the data in one block to avoid creating a partial file.
with open(target, "wb") as dst:
dst.write(data)
download_file_insecure.viable = lambda: True
def get_best_downloader():
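# Return the first downloader whose viability probe succeeds, in order of
# preference: PowerShell, curl, wget, then the non-validating urllib fallback.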
downloaders = (
download_file_powershell,
download_file_curl,
download_file_wget,
download_file_insecure,
)
viable_downloaders = (dl for dl in downloaders if dl.viable())
return next(viable_downloaders, None)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15, downloader_factory=get_best_downloader):
"""
Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an sdist for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
``downloader_factory`` should be a function taking no arguments and
returning a function for downloading a URL to a target.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
zip_name = "setuptools-%s.zip" % version
url = download_base + zip_name
saveto = os.path.join(to_dir, zip_name)
if not os.path.exists(saveto): # Avoid repeated downloads
log.warn("Downloading %s", url)
downloader = downloader_factory()
downloader(url, saveto)
return os.path.realpath(saveto)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package
"""
return ['--user'] if options.user_install else []
def _parse_args():
"""
Parse the command line for options
"""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package (requires Python 2.6 or later)')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
parser.add_option(
'--insecure', dest='downloader_factory', action='store_const',
const=lambda: download_file_insecure, default=get_best_downloader,
help='Use internal, non-validating downloader'
)
parser.add_option(
'--version', help="Specify which version to download",
default=DEFAULT_VERSION,
)
options, args = parser.parse_args()
# positional arguments are ignored
return options
def main():
"""Install or upgrade setuptools and EasyInstall"""
options = _parse_args()
archive = download_setuptools(
version=options.version,
download_base=options.download_base,
downloader_factory=options.downloader_factory,
)
return _install(archive, _build_install_args(options))
if __name__ == '__main__':
sys.exit(main())
|
|
import errno
import json
import logging
import os
import select
import subprocess
import threading
from io import BytesIO
from typing import Any, Dict, List, Mapping, Text, Tuple, Union
from pkg_resources import resource_stream
class JavascriptException(Exception):
pass
_logger = logging.getLogger("cwltool")
JSON = Union[Dict[Text, Any], List[Any], Text, int, float, bool, None]
localdata = threading.local()
have_node_slim = False
# minimum acceptable version of nodejs engine
minimum_node_version_str = '0.10.26'
def check_js_threshold_version(working_alias):
# type: (str) -> bool
"""Checks if the nodeJS engine version on the system
meets the allowed minimum version.
https://github.com/nodejs/node/blob/master/CHANGELOG.md#nodejs-changelog
"""
# parse nodejs version into a list of ints: 'v4.2.6\n' -> [4, 2, 6]
current_version_str = subprocess.check_output(
[working_alias, "-v"]).decode('ascii')
current_version = [int(v) for v in current_version_str.strip().strip('v').split('.')]
minimum_node_version = [int(v) for v in minimum_node_version_str.split('.')]
if current_version >= minimum_node_version:
return True
else:
return False
def new_js_proc():
# type: () -> subprocess.Popen
res = resource_stream(__name__, 'cwlNodeEngine.js')
nodecode = res.read()
required_node_version, docker = (False,)*2
nodejs = None
trynodes = ("nodejs", "node")
for n in trynodes:
try:
if subprocess.check_output([n, "--eval", "process.stdout.write('t')"]) != "t":
continue
else:
nodejs = subprocess.Popen([n, "--eval", nodecode],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
required_node_version = check_js_threshold_version(n)
break
except subprocess.CalledProcessError:
pass
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
raise
if nodejs is None or required_node_version is False:
try:
nodeimg = "node:slim"
global have_node_slim
if not have_node_slim:
dockerimgs = subprocess.check_output(["docker", "images", nodeimg])
if len(dockerimgs.split("\n")) <= 1:
nodejsimg = subprocess.check_output(["docker", "pull", nodeimg])
_logger.info("Pulled Docker image %s %s", nodeimg, nodejsimg)
have_node_slim = True
nodejs = subprocess.Popen(["docker", "run",
"--attach=STDIN", "--attach=STDOUT", "--attach=STDERR",
"--sig-proxy=true", "--interactive",
"--rm", nodeimg, "node", "--eval", nodecode],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
docker = True
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
raise
except subprocess.CalledProcessError:
pass
# docker failed and nodejs not on system
if nodejs is None:
raise JavascriptException(
u"cwltool requires Node.js engine to evaluate Javascript "
"expressions, but couldn't find it. Tried %s, docker run "
"node:slim" % u", ".join(trynodes))
# nodejs is installed on the system but its version is below the required minimum,
# and the docker fallback was not used
if docker is False and required_node_version is False:
raise JavascriptException(
u'cwltool requires minimum v{} version of Node.js engine.'.format(minimum_node_version_str),
u'Try updating: https://docs.npmjs.com/getting-started/installing-node')
return nodejs
def execjs(js, jslib, timeout=None, debug=False): # type: (Union[Mapping, Text], Any, int, bool) -> JSON
if not hasattr(localdata, "proc") or localdata.proc.poll() is not None:
localdata.proc = new_js_proc()
nodejs = localdata.proc
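# Wrap the expression in an immediately-invoked function: a body that already
# starts with '{' is used verbatim, anything else is wrapped as '{return (...);}'.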
fn = u"\"use strict\";\n%s\n(function()%s)()" %\
(jslib, js if isinstance(js, basestring) and len(js) > 1 and js[0] == '{' else ("{return (%s);}" % js))
killed = []
def term():
try:
killed.append(True)
nodejs.kill()
except OSError:
pass
if timeout is None:
timeout = 20
tm = threading.Timer(timeout, term)
tm.start()
stdin_buf = BytesIO(json.dumps(fn) + "\n")
stdout_buf = BytesIO()
stderr_buf = BytesIO()
rselect = [nodejs.stdout, nodejs.stderr] # type: List[BytesIO]
wselect = [nodejs.stdin] # type: List[BytesIO]
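# Pump stdin/stdout/stderr through select() to avoid deadlocking on full pipes;
# reading stops once the node process has emitted a full line of JSON output.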
while (len(wselect) + len(rselect)) > 0:
rready, wready, _ = select.select(rselect, wselect, [])
try:
if nodejs.stdin in wready:
b = stdin_buf.read(select.PIPE_BUF)
if b:
os.write(nodejs.stdin.fileno(), b)
else:
wselect = []
for pipes in ((nodejs.stdout, stdout_buf), (nodejs.stderr, stderr_buf)):
if pipes[0] in rready:
b = os.read(pipes[0].fileno(), select.PIPE_BUF)
if b:
pipes[1].write(b)
else:
rselect.remove(pipes[0])
if stdout_buf.getvalue().endswith("\n"):
rselect = []
except OSError as e:
break
tm.cancel()
stdin_buf.close()
stdoutdata = stdout_buf.getvalue()
stderrdata = stderr_buf.getvalue()
def fn_linenum(): # type: () -> Text
lines = fn.splitlines()
ofs = 0
maxlines = 99
if len(lines) > maxlines:
ofs = len(lines) - maxlines
lines = lines[-maxlines:]
return u"\n".join(u"%02i %s" % (i + ofs + 1, b) for i, b in enumerate(lines))
def stdfmt(data): # type: (unicode) -> unicode
if "\n" in data:
return "\n" + data.strip()
return data
nodejs.poll()
if debug:
info = u"returncode was: %s\nscript was:\n%s\nstdout was: %s\nstderr was: %s\n" %\
(nodejs.returncode, fn_linenum(), stdfmt(stdoutdata), stdfmt(stderrdata))
else:
info = stdfmt(stderrdata)
if nodejs.poll() not in (None, 0):
if killed:
raise JavascriptException(u"Long-running script killed after %s seconds: %s" % (timeout, info))
else:
raise JavascriptException(info)
else:
try:
return json.loads(stdoutdata)
except ValueError as e:
raise JavascriptException(u"%s\nscript was:\n%s\nstdout was: '%s'\nstderr was: '%s'\n" %
(e, fn_linenum(), stdoutdata, stderrdata))
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for trackable object SavedModel loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
import tempfile
from absl.testing import parameterized
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.lib.io import file_io
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import monitored_session
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import tf_inspect
@parameterized.named_parameters(
dict(testcase_name="ReloadOnce", cycles=1),
dict(testcase_name="ReloadTwice", cycles=2),
dict(testcase_name="ReloadThrice", cycles=3))
class LoadTest(test.TestCase, parameterized.TestCase):
def cycle(self, obj, cycles, signatures=None):
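# Save and reload `obj` the requested number of times; each pass exports to a
# fresh temporary directory and loads it back, returning the final object.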
to_save = obj
# TODO(vbardiovsky): It would be nice if exported protos reached a fixed
# point w.r.t. saving/restoring, ideally after 2nd saving.
for _ in range(cycles):
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(to_save, path, signatures)
loaded = load.load(path)
to_save = loaded
return loaded
def test_structure_import(self, cycles):
root = tracking.AutoTrackable()
root.dep_one = tracking.AutoTrackable()
root.dep_two = tracking.AutoTrackable()
root.dep_two.dep = tracking.AutoTrackable()
root.dep_three = root.dep_two.dep
imported = self.cycle(root, cycles)
self.assertIs(imported.dep_three, imported.dep_two.dep)
self.assertIsNot(imported.dep_one, imported.dep_two)
def test_variables(self, cycles):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(1., trainable=True)
root.v2 = variables.Variable(2., trainable=False)
imported = self.cycle(root, cycles)
self.assertEqual(imported.v1.numpy(), 1.0)
self.assertTrue(imported.v1.trainable)
self.assertEqual(imported.v2.numpy(), 2.0)
self.assertFalse(imported.v2.trainable)
def test_capture_variables(self, cycles):
root = tracking.AutoTrackable()
root.weights = variables.Variable(2.)
root.f = def_function.function(
lambda x: root.weights * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
imported = self.cycle(root, cycles)
self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())
imported.weights.assign(4.0)
self.assertEqual(8., imported.f(constant_op.constant(2.)).numpy())
def test_control_outputs(self, cycles):
exported = tracking.AutoTrackable()
exported.v = variables.Variable(1.)
exported.f = def_function.function(
lambda: exported.v.assign(2., name="should_be_control_output"))
exported_graph = exported.f.get_concrete_function().graph
self.assertIn(
exported_graph.get_operation_by_name("should_be_control_output"),
exported_graph.control_outputs)
imported = self.cycle(exported, cycles)
# Calling get_concrete_function wraps in a second call operation; we want to
# inspect the original function body for the control output; digging into
# graph.as_graph_def() and its FunctionDefLibrary is another option.
imported_concrete, = imported.f._concrete_functions
imported_graph = imported_concrete.graph
self.assertIn(
imported_graph.get_operation_by_name("should_be_control_output"),
imported_graph.control_outputs)
def _make_asset(self, contents):
filename = tempfile.mktemp(prefix=self.get_temp_dir())
with open(filename, "w") as f:
f.write(contents)
return filename
def test_assets(self, cycles):
file1 = self._make_asset("contents 1")
file2 = self._make_asset("contents 2")
root = tracking.AutoTrackable()
root.asset1 = tracking.TrackableAsset(file1)
root.asset2 = tracking.TrackableAsset(file2)
save_dir = os.path.join(self.get_temp_dir(), "save_dir")
save.save(root, save_dir)
file_io.delete_file(file1)
file_io.delete_file(file2)
load_dir = os.path.join(self.get_temp_dir(), "load_dir")
file_io.rename(save_dir, load_dir)
imported = load.load(load_dir)
with open(imported.asset1.asset_path.numpy(), "r") as f:
self.assertEqual("contents 1", f.read())
with open(imported.asset2.asset_path.numpy(), "r") as f:
self.assertEqual("contents 2", f.read())
def test_capture_assets(self, cycles):
root = tracking.AutoTrackable()
root.vocab = tracking.TrackableAsset(self._make_asset("contents"))
root.f = def_function.function(
lambda: root.vocab.asset_path,
input_signature=[])
imported = self.cycle(root, cycles)
original_output = root.f().numpy()
imported_output = imported.f().numpy()
self.assertNotEqual(original_output, imported_output)
with open(imported_output, "r") as f:
self.assertEqual("contents", f.read())
def test_capture_assets_in_graph(self, cycles):
root = tracking.AutoTrackable()
root.vocab = tracking.TrackableAsset(self._make_asset("contents"))
root.f = def_function.function(
lambda: root.vocab.asset_path,
input_signature=[])
original_output = root.f().numpy()
if cycles > 1:
root = self.cycle(root, cycles - 1)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
with ops.Graph().as_default():
imported = load.load(path)
imported_tensor = imported.f()
with monitored_session.MonitoredSession() as sess:
imported_output = sess.run(imported_tensor)
self.assertNotEqual(original_output, imported_output)
with open(imported_output, "r") as f:
self.assertEqual("contents", f.read())
def test_dedup_assets(self, cycles):
vocab = self._make_asset("contents")
root = tracking.AutoTrackable()
root.asset1 = tracking.TrackableAsset(vocab)
root.asset2 = tracking.TrackableAsset(vocab)
imported = self.cycle(root, cycles)
self.assertEqual(imported.asset1.asset_path.numpy(),
imported.asset2.asset_path.numpy())
def test_implicit_input_signature(self, cycles):
@def_function.function
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func
# Add two traces.
root.f(constant_op.constant(1.))
root.f(constant_op.constant(1))
imported = self.cycle(root, cycles)
self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())
self.assertEqual(14, imported.f(constant_op.constant(7)).numpy())
def test_explicit_input_signature(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func
imported = self.cycle(root, cycles)
self.assertEqual(4., imported.f(constant_op.constant(2.0)).numpy())
def test_explicit_save_signature(self, cycles):
@def_function.function
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func
imported = self.cycle(
root, cycles, {
"f":
root.f.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))
})
self.assertEqual(4., imported.f(constant_op.constant(2.0)).numpy())
def test_nested_functions(self, cycles):
f = def_function.function(
lambda x: x*2.0,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
g = def_function.function(
lambda x: f(x) + 1.0,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root = tracking.AutoTrackable()
root.g = g
imported = self.cycle(root, cycles)
imported.g(constant_op.constant([1.0]))
def test_function_with_default_bool_input(self, cycles):
def func(x, training=False):
if training:
return 2 * x
else:
return 7
root = tracking.AutoTrackable()
root.f = def_function.function(func)
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
imported = self.cycle(root, cycles)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())
def test_function_with_default_none_input(self, cycles):
def func(x, dtype=None):
if dtype:
return array_ops.zeros(shape=x.shape, dtype=dtype)
else:
return array_ops.zeros(shape=x.shape, dtype=dtypes.float32)
root = tracking.AutoTrackable()
root.f = def_function.function(func)
self.assertAllEqual([0.0, 0.0, 0.0],
root.f(constant_op.constant([1, 2, 3])).numpy())
self.assertAllEqual([0.0, 0.0, 0.0],
root.f(constant_op.constant([1.0, 2.0, 3.0])).numpy())
self.assertAllEqual([0.0, 0.0, 0.0, 0.0],
root.f(constant_op.constant([1, 2, 3, 4])).numpy())
self.assertAllEqual([0, 0, 0],
root.f(
constant_op.constant([1.0, 2.0, 3.0]),
dtype=dtypes.int32).numpy())
concrete_functions = root.f._list_all_concrete_functions_for_serialization() # pylint: disable=protected-access
self.assertEqual(4, len(concrete_functions))
imported = self.cycle(root, cycles)
self.assertAllEqual([0.0, 0.0, 0.0],
imported.f(constant_op.constant([1, 2, 3]),
None).numpy())
self.assertAllEqual([0.0, 0.0, 0.0],
imported.f(constant_op.constant([1.0, 2.0,
3.0])).numpy())
self.assertAllEqual([0.0, 0.0, 0.0, 0.0],
imported.f(constant_op.constant([1, 2, 3, 4])).numpy())
self.assertAllEqual([0, 0, 0],
imported.f(
constant_op.constant([1.0, 2.0, 3.0]),
dtype=dtypes.int32).numpy())
def test_function_no_return(self, cycles):
class TrackableWithOneVariable(tracking.AutoTrackable):
def __init__(self, initial_value=0.0):
super(TrackableWithOneVariable, self).__init__()
self.variable = variables.Variable(initial_value)
@def_function.function
def increase(self, by=1.0):
self.variable.assign_add(by)
obj = TrackableWithOneVariable(5.0)
obj.increase(constant_op.constant(10.0))
self.assertEqual(15.0, obj.variable.numpy())
obj.increase()
self.assertEqual(16.0, obj.variable.numpy())
imported = self.cycle(obj, cycles)
imported.increase(constant_op.constant(10.0))
self.assertEqual(26.0, imported.variable.numpy())
imported.increase(constant_op.constant(1.0))
self.assertEqual(27.0, imported.variable.numpy())
def test_structured_inputs(self, cycles):
def func(x, training=True):
# x is a nested structure; we care about one particular tensor.
_, (a, b) = x
if training:
return 2 * a["a"] + b
else:
return 7
root = tracking.AutoTrackable()
root.f = def_function.function(func)
x = constant_op.constant(10)
y = constant_op.constant(11)
input1 = [6, ({"a": x}, y)]
input2 = [7, ({"a": x}, y)] # Not compatible with input1 signature.
input3 = [6, ({"a": y}, x)] # Compatible with input1 signature.
# Note: by only calling f(input1) before serialization, only inputs with
# matching signature will be valid on the loaded model.
self.assertEqual(31, root.f(input1).numpy())
imported = self.cycle(root, cycles)
with self.assertRaisesRegexp(ValueError,
"Could not find matching function to call"):
imported.f(input2)
self.assertEqual(31, imported.f(input1).numpy())
self.assertEqual(32, imported.f(input3).numpy())
def test_structured_output(self, cycles):
# Use fields with non-alphabetical order
named_tuple_type = collections.namedtuple("NamedTupleHello", ["b", "a"])
def func(input1, input2):
named_tuple = named_tuple_type(a=input1 + input2, b=input1 * input2)
return [named_tuple, input2, {"x": 0.5}]
root = tracking.AutoTrackable()
root.f = def_function.function(func)
result = root.f(constant_op.constant(2), constant_op.constant(3))
self.assertEqual(5, result[0].a.numpy())
self.assertEqual(6, result[0].b.numpy())
self.assertEqual(["b", "a"], list(result[0]._asdict().keys()))
self.assertEqual(3, result[1].numpy())
self.assertEqual(0.5, result[2]["x"].numpy())
imported = self.cycle(root, cycles)
result = imported.f(constant_op.constant(2), constant_op.constant(5))
self.assertEqual(7, result[0].a.numpy())
self.assertEqual(10, result[0].b.numpy())
self.assertEqual(["b", "a"], list(result[0]._asdict().keys()))
self.assertEqual(5, result[1].numpy())
self.assertEqual(0.5, result[2]["x"].numpy())
def test_optimizer(self, cycles):
class _HasOptimizer(module.Module):
def __init__(self):
super(_HasOptimizer, self).__init__()
self.layer = core.Dense(1)
self.optimizer = adam.Adam(0.01)
@def_function.function
def __call__(self, x):
return self.layer(x)
@def_function.function
def train(self, x, y):
with backprop.GradientTape() as tape:
predicted = self(x)
loss = math_ops.reduce_sum(math_ops.abs(y - predicted))
train_vars = self.layer.trainable_variables
grads = tape.gradient(loss, train_vars)
self.optimizer.apply_gradients(zip(grads, train_vars))
root = _HasOptimizer()
train_input = dict(x=constant_op.constant([[1.]]),
y=constant_op.constant([[2.]]))
root.train(**train_input)
imported = self.cycle(root, cycles)
self.assertAllClose(root.optimizer.learning_rate.numpy(),
imported.optimizer.learning_rate.numpy())
self.assertAllClose(root(constant_op.constant([[-0.5]])),
imported(constant_op.constant([[-0.5]])))
root.train(**train_input)
imported.train(**train_input)
self.assertAllClose(root(constant_op.constant([[-0.5]])),
imported(constant_op.constant([[-0.5]])))
def test_positional_arguments(self, cycles):
def func(x, training=False, abc=7.1, defg=7.7):
del abc
if training:
return 2 * x
if defg == 7:
return 6
else:
return 7
root = tracking.AutoTrackable()
root.f = def_function.function(func)
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
self.assertEqual(6, root.f(constant_op.constant(1), defg=7.0).numpy())
imported = self.cycle(root, cycles)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())
self.assertEqual(6, imported.f(constant_op.constant(1), defg=7.0).numpy())
def test_additional_kwargs(self, cycles):
def func(x, training=False, **options):
del options
if training:
return 2 * x
else:
return 7
root = tracking.AutoTrackable()
root.f = def_function.function(func)
x = constant_op.constant(10)
self.assertEqual(7, root.f(x, learning_rate=0.5, epochs=3).numpy())
imported = self.cycle(root, cycles)
with self.assertRaisesRegexp(ValueError,
"Could not find matching function to call.*"):
imported.f(x, learning_rate=0.5, epochs=4)
self.assertEqual(7, imported.f(x, learning_rate=0.5, epochs=3).numpy())
def test_member_function(self, cycles):
class TrackableWithMember(tracking.AutoTrackable):
def __init__(self):
super(TrackableWithMember, self).__init__()
self._some_value = 20
@def_function.function
def f(self, x, training=False):
if training:
return 2 * x
else:
return 7 + self._some_value
root = TrackableWithMember()
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(27, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
imported = self.cycle(root, cycles)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(27, imported.f(constant_op.constant(2)).numpy())
def test_side_effect_listing(self, cycles):
class M(tracking.AutoTrackable):
def __init__(self):
super(M, self).__init__()
self.var = None
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def f(self, x):
if self.var is None:
self.var = variables.Variable(2.)
return x * self.var
m = M()
self.cycle(m, cycles)
self.assertEqual(4.0, m.f(constant_op.constant(2.0)).numpy())
def test_basic_backprop(self, cycles):
weight = variables.Variable(1., trainable=True)
bias = variables.Variable(0., trainable=True)
g = def_function.function(
lambda x: x*weight + bias,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root = tracking.AutoTrackable()
root.weight = weight
root.bias = bias
root.g = g
imported = self.cycle(root, cycles)
with backprop.GradientTape() as t:
x = constant_op.constant([3.5])
loss = imported.g(x)
grad = t.gradient(loss, [imported.weight, imported.bias])
self.assertAllClose(grad, [3.5, 1.0])
def test_nested_backprop(self, cycles):
weight = variables.Variable(1., trainable=True)
bias = variables.Variable(0., trainable=True)
# Note: this function gets called from other function defs via a
# "PartitionedCall" op node.
@def_function.function(input_signature=[
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32)])
def mul(x, y):
return x * y
# Note: this function gets called from other function defs via a
# "StatefulPartitionedCall" op node.
@def_function.function(input_signature=[
tensor_spec.TensorSpec(None, dtypes.float32)])
def f(x):
return mul(weight.read_value(), x)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(None, dtypes.float32)])
def g(x):
return f(x) + bias
@def_function.function(input_signature=[
tensor_spec.TensorSpec(None, dtypes.float32)])
def h(x):
return g(x) + bias
root = tracking.AutoTrackable()
root.weight = weight
root.bias = bias
root.g = h
imported = self.cycle(root, cycles)
with backprop.GradientTape() as t:
x = constant_op.constant([3.5])
loss = imported.g(x)
grad = t.gradient(loss, [imported.weight, imported.bias])
self.assertAllClose(grad, [3.5, 2.0])
def test_callable(self, cycles):
class M1(tracking.AutoTrackable):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def __call__(self, x):
return x
root = tracking.AutoTrackable()
root.m1 = M1()
root.m2 = tracking.AutoTrackable()
root.m2.__call__ = def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])(
lambda x: x*3.0)
imported = self.cycle(root, cycles)
x = constant_op.constant(1.0)
self.assertTrue(callable(imported.m1))
self.assertAllEqual(root.m1(x), imported.m1(x))
# Note: `root.m2` was not callable since the `__call__` attribute was set
# on the instance and not on the class. But after a serialization cycle
# that starts to work.
self.assertTrue(callable(imported.m2))
self.assertAllEqual(root.m2.__call__(x), imported.m2(x))
# Verify that user objects without `__call__` attribute are not callable.
self.assertFalse(callable(imported))
def test_chain_callable(self, cycles):
func = def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])(
lambda x: x*3.0)
root = tracking.AutoTrackable()
root.__call__ = tracking.AutoTrackable()
root.__call__.__call__ = tracking.AutoTrackable()
root.__call__.__call__.__call__ = func
imported = self.cycle(root, cycles)
self.assertTrue(callable(imported))
x = constant_op.constant(1.0)
self.assertAllEqual(imported(x).numpy(), 3.0)
def test_load_in_graph_mode(self, cycles):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(1.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(
lambda x: root.v2 * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
if cycles > 1:
root = self.cycle(root, cycles - 1)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
with ops.Graph().as_default():
imported = load.load(path)
var_v1 = imported.v1
output = imported.f(constant_op.constant(2.))
with monitored_session.MonitoredSession() as sess:
self.assertEqual(1.0, sess.run(var_v1))
self.assertEqual(4.0, sess.run(output))
def test_load_in_func_graph(self, cycles):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(1.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(
lambda x: root.v2 * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
if cycles > 1:
root = self.cycle(root, cycles - 1)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
closure = tracking.AutoTrackable()
@def_function.function
def func(x):
if not hasattr(closure, "model"):
closure.model = load.load(path)
return closure.model.f(x)
inputs = constant_op.constant(2.)
self.assertEqual(4.0, func(inputs).numpy())
def test_soft_matching(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)])
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy())
self.assertAllEqual([2, 4], root.f(constant_op.constant([1, 2])).numpy())
concrete_functions = root.f._list_all_concrete_functions_for_serialization() # pylint: disable=protected-access
self.assertEqual(1, len(concrete_functions))
imported = self.cycle(root, cycles)
with self.assertRaisesRegexp(ValueError, "Python inputs incompatible"):
# We cannot call the function with a constant of shape ().
imported.f(constant_op.constant(2)).numpy()
# TODO(vbardiovsky): When classes are revived with input_signatures, we
# should also check that the calls below are not generating any more
# concrete functions.
self.assertAllEqual([2, 4, 6, 8],
imported.f(constant_op.constant([1, 2, 3, 4])).numpy())
self.assertAllEqual([2, 4, 6],
imported.f(constant_op.constant([1, 2, 3])).numpy())
def test_get_concrete_function(self, cycles):
@def_function.function
def func(x, training=False):
if training:
return 2 * x
else:
return 3 * x
func.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32), True)
func.get_concrete_function(tensor_spec.TensorSpec([None], dtypes.float32))
root = tracking.AutoTrackable()
root.f = func
imported = self.cycle(root, cycles)
concrete = imported.f.get_concrete_function(
training=True, x=tensor_spec.TensorSpec([None], dtypes.int32))
self.assertAllEqual([2, 4, 6, 8],
concrete(x=constant_op.constant([1, 2, 3, 4])).numpy())
with self.assertRaisesRegexp(ValueError,
"Could not find matching function to call"):
imported.f.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32))
imported.f.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32), True)
def test_concrete_function(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)])
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func.get_concrete_function()
self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy())
self.assertAllEqual([2, 4], root.f(constant_op.constant([1, 2])).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = self.cycle(root, cycles, signatures={})
self.assertAllEqual([2, 4, 6, 8],
imported.f(constant_op.constant([1, 2, 3, 4])).numpy())
self.assertAllEqual([2, 4, 6],
imported.f(constant_op.constant([1, 2, 3])).numpy())
def test_concrete_function_arg_names(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)])
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func.get_concrete_function()
self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = self.cycle(root, cycles, signatures={})
self.assertAllEqual([2, 4, 6],
imported.f(x=constant_op.constant([1, 2, 3])).numpy())
def test_concrete_function_no_signature(self, cycles):
@def_function.function
def func(x):
return 2 * x
root = tracking.AutoTrackable()
root.f = func.get_concrete_function(constant_op.constant([1]))
self.assertAllEqual([4], root.f(constant_op.constant([2])).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = self.cycle(root, cycles, signatures={})
self.assertAllEqual([6],
imported.f(constant_op.constant([3])).numpy())
def test_concrete_function_backprop(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.float32)])
def func(x):
return x ** 2.
root = tracking.AutoTrackable()
root.f = func.get_concrete_function()
def _compute_gradient(function):
with backprop.GradientTape() as tape:
inp = constant_op.constant(1.)
tape.watch(inp)
output = function(inp)
return tape.gradient(output, inp)
self.assertEqual(2., _compute_gradient(root.f).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = self.cycle(root, cycles, signatures={})
self.assertEqual(2., _compute_gradient(imported.f).numpy())
def test_revived_concrete_function_kwargs(self, cycles):
@def_function.function
def func(x, y):
return x * (y + 1.)
root = tracking.AutoTrackable()
root.f = func.get_concrete_function(
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.float32))
self.assertEqual(8., root.f(y=constant_op.constant(3.),
x=constant_op.constant(2.)).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = self.cycle(root, cycles, signatures={})
self.assertEqual(8., imported.f(y=constant_op.constant(3.),
x=constant_op.constant(2.)).numpy())
def test_revived_concrete_function_tensorspec_kwargs(self, cycles):
@def_function.function
def func(*args):
x, y = args
return x * (y + 1.)
root = tracking.AutoTrackable()
root.f = func.get_concrete_function(
tensor_spec.TensorSpec([], dtypes.float32, name="x"),
tensor_spec.TensorSpec([], dtypes.float32, name="y"))
self.assertEqual(8., root.f(y=constant_op.constant(3.),
x=constant_op.constant(2.)).numpy())
imported = self.cycle(root, cycles, signatures={})
self.assertEqual(8., imported.f(y=constant_op.constant(3.),
x=constant_op.constant(2.)).numpy())
def test_concrete_function_variable_argument(self, cycles):
# TODO(allenl): Fix variables in input signatures.
self.skipTest("Need to fix encoding of variables in inputs signatures")
capture = variables.Variable(0)
@def_function.function
def func(v):
v.assign_add(1)
capture.assign_sub(1)
vsave = variables.Variable(1)
root = tracking.AutoTrackable()
root.f = func.get_concrete_function(vsave)
root.capture = capture
self.assertEqual(1, vsave.numpy())
root.f(vsave)
self.assertEqual(2, vsave.numpy())
self.assertEqual(-1, capture.numpy())
imported = self.cycle(root, cycles)
vload = variables.Variable(1)
imported.f(vload)
self.assertEqual(2, vload.numpy())
imported.f(v=vload)
self.assertEqual(3, vload.numpy())
self.assertEqual(-3, imported.capture.numpy())
self.assertEqual(-1, capture.numpy())
def test_function_and_component(self, cycles):
@def_function.function
def func(v):
return v + 1
root = tracking.AutoTrackable()
root.func = func
root.concrete_func = func.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.int32))
one = constant_op.constant(1)
self.assertEqual(2, root.func(one).numpy())
self.assertEqual(2, root.concrete_func(one).numpy())
imported = self.cycle(root, cycles)
self.assertEqual(2, imported.func(one).numpy())
self.assertEqual(2, imported.concrete_func(one).numpy())
def test_dict(self, cycles):
root = tracking.AutoTrackable()
root.variables = dict(a=variables.Variable(1.))
root.variables["b"] = variables.Variable(2.)
root.variables["c"] = 1
root.funcs = dict(
a=def_function.function(lambda: constant_op.constant(100.)))
root.funcs["conc"] = root.funcs["a"].get_concrete_function()
imported = self.cycle(root, cycles)
self.assertEqual(1., imported.variables["a"].numpy())
self.assertEqual(2., imported.variables["b"].numpy())
self.assertEqual(set(["a", "b"]), set(imported.variables.keys()))
self.assertEqual(100., imported.funcs["a"]().numpy())
self.assertEqual(100., imported.funcs["conc"]().numpy())
def test_list(self, cycles):
root = tracking.AutoTrackable()
root.variables = [variables.Variable(1.)]
root.variables.append(1)
root.variables.append(variables.Variable(3.))
imported = self.cycle(root, cycles)
self.assertEqual(1., imported.variables[0].numpy())
self.assertEqual(3., imported.variables[2].numpy())
self.assertIs(None, imported.variables[1])
self.assertEqual(3, len(imported.variables))
def test_functions_list(self, cycles):
root = tracking.AutoTrackable()
v1 = variables.Variable(1.)
root.losses = [def_function.function(lambda: math_ops.reduce_sum(v1 ** 2))]
root.variables = [v1]
@def_function.function
def _v2_loss():
if len(root.variables) == 1:
v2 = variables.Variable(2.)
root.variables.append(v2)
return math_ops.reduce_sum(root.variables[1] ** 2)
root.losses.append(_v2_loss)
self.assertAllClose([1., 4.], [loss() for loss in root.losses])
imported = self.cycle(root, cycles)
self.assertAllClose([1., 4.], [loss() for loss in imported.losses])
imported.variables[0].assign(3.)
imported.variables[1].assign(4.)
self.assertAllClose([9., 16.], [loss() for loss in imported.losses])
def test_captured_constant(self, cycles):
const = array_ops.zeros([100])
root = tracking.AutoTrackable()
root.f = def_function.function(lambda: const + 1.)
root.g = def_function.function(lambda: const + 2.)
self.assertAllClose(array_ops.ones([100]), root.f())
self.assertAllClose(2. * array_ops.ones([100]), root.g())
imported = self.cycle(root, cycles)
self.assertAllClose(array_ops.ones([100]), imported.f())
self.assertAllClose(2. * array_ops.ones([100]), imported.g())
# TODO(b/123408994): Use the public get_concrete_function.
f_concrete = imported.f._list_all_concrete_functions_for_serialization()[0]
g_concrete = imported.g._list_all_concrete_functions_for_serialization()[0]
self.assertLen(f_concrete.captured_inputs, 1)
self.assertLen(g_concrete.captured_inputs, 1)
# We should be using the same captured EagerTensor in both functions, not
# duplicating the constant.
self.assertIs(f_concrete.captured_inputs[0],
g_concrete.captured_inputs[0])
def test_functions_accessed_once(self, cycles):
class Exported(tracking.AutoTrackable):
def __init__(self):
self._counter = 0
@property
def make_func(self):
@def_function.function
def f():
return constant_op.constant(self._counter)
f.get_concrete_function() # force a trace
self._counter += 1
return f
exported = Exported()
imported = self.cycle(exported, cycles)
self.assertEqual(0, imported.make_func().numpy())
self.assertEqual(1, exported.make_func().numpy())
def test_overwritten_signatures_error(self, cycles):
exported = tracking.AutoTrackable()
exported.f = def_function.function(lambda: constant_op.constant(1.))
imported = self.cycle(
exported, cycles,
signatures={"key": exported.f.get_concrete_function()})
self.assertEqual(1., imported.signatures["key"]()["output_0"].numpy())
imported.signatures = {"key1": imported.signatures["key"]}
with self.assertRaisesRegexp(ValueError, "signatures"):
save.save(imported, tempfile.mkdtemp(prefix=self.get_temp_dir()))
def test_signature_loading(self, cycles):
class Exported(tracking.AutoTrackable):
def __init__(self):
self.v = variables.Variable(3.)
@def_function.function
def do(self, x):
return self.v * x
exported = Exported()
imported = self.cycle(
exported,
cycles=1,
signatures=exported.do.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32)))
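# Re-save the already-loaded object for any remaining cycles, carrying its
# signatures forward so they survive every save/load round trip.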
for _ in range(cycles - 1):
imported = self.cycle(imported, cycles=1, signatures=imported.signatures)
self.assertEqual(["serving_default"], list(imported.signatures.keys()))
imported_function = imported.signatures["serving_default"]
two = constant_op.constant(2.)
self.assertEqual(6., imported_function(x=two)["output_0"].numpy())
imported.v.assign(4.)
self.assertEqual(8., imported_function(x=two)["output_0"].numpy())
self.assertEqual(8., imported_function(two)["output_0"].numpy())
with self.assertRaises(TypeError):
# The signatures mapping is immutable
imported.signatures["random_key"] = 3
def test_multiple_argument_signatures_no_positional(self, cycles):
class Exported(tracking.AutoTrackable):
@def_function.function
def do(self, x, y):
return x + y
exported = Exported()
imported = self.cycle(
exported, cycles=1, signatures=exported.do.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32)))
for _ in range(cycles - 1):
imported = self.cycle(imported, cycles=1, signatures=imported.signatures)
with self.assertRaises(TypeError):
imported.signatures["serving_default"](
constant_op.constant(1.),
y=constant_op.constant(2.))
self.assertEqual(
{"output_0": 3.},
self.evaluate(imported.signatures["serving_default"](
x=constant_op.constant(1.),
y=constant_op.constant(2.))))
def _make_model_with_tables(self):
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table1_initializer = lookup_ops.KeyValueTensorInitializer(keys, values)
table1 = lookup_ops.HashTable(table1_initializer, default_val)
table2_file = self._make_asset("test\nfoo\nbrain\n")
table2_initializer = lookup_ops.TextFileIdTableInitializer(table2_file)
table2 = lookup_ops.HashTable(table2_initializer, default_val)
def _make_lookup_function(table):
signature = [tensor_spec.TensorSpec(None, dtypes.string)]
return def_function.function(input_signature=signature)(
lambda x: table.lookup(x)) # pylint: disable=unnecessary-lambda
root = tracking.AutoTrackable()
root.table1 = table1
root.lookup1 = _make_lookup_function(table1)
root.table2 = table2
root.lookup2 = _make_lookup_function(table2)
return root
def test_table(self, cycles):
root = self._make_model_with_tables()
imported = self.cycle(root, cycles, signatures={})
keys = constant_op.constant(["brain", "test", "foo", "surgery"])
self.assertAllEqual([0, -1, -1, 2], imported.lookup1(keys).numpy())
self.assertAllEqual([2, 0, 1, -1], imported.lookup2(keys).numpy())
def test_table_in_graph(self, cycles):
root = self._make_model_with_tables()
if cycles > 1:
root = self.cycle(root, cycles - 1)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
imported = self.cycle(root, 1)
with ops.Graph().as_default():
imported = load.load(path)
keys = constant_op.constant(["brain", "test", "foo", "surgery"])
output1 = imported.lookup1(keys)
output2 = imported.lookup2(keys)
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual([0, -1, -1, 2], sess.run(output1))
self.assertAllEqual([2, 0, 1, -1], sess.run(output2))
def test_preserve_argspec(self, cycles):
def f(a, b, c): # pylint: disable=unused-argument
return None
original_fullargspec = tf_inspect.getfullargspec(f)
root = tracking.AutoTrackable()
root.f = def_function.function(f)
imported = self.cycle(root, cycles)
restored_fullargspec = tf_inspect.getfullargspec(imported.f)
self.assertEqual(original_fullargspec, restored_fullargspec)
def test_canonicalize_inputs(self, cycles):
@def_function.function(autograph=False)
def func(a=1, b=2, c=3, training=True):
if training:
return [a, b, c, training]
else:
return [c, b, a, training]
# TODO(b/123501567): Work-around to trigger generic traces of a function
# with extra non-tensor args.
signature = 3*[tensor_spec.TensorSpec(None, dtypes.float32)]
@def_function.function(input_signature=signature)
def trigger(a, b, c):
func(a, b, c, True)
func(a, b, c, False)
trigger.get_concrete_function()
root = tracking.AutoTrackable()
root.f = func
root = self.cycle(root, cycles)
self.assertAllEqual(root.f(), [1.0, 2.0, 3.0, True])
self.assertAllEqual(root.f(-1.0, training=False), [3.0, 2.0, -1.0, False])
with self.assertRaisesRegexp(ValueError,
"Could not find matching function"):
root.f(["hello", 1.0])
def test_prefer_specific_trace(self, cycles):
@def_function.function(autograph=False)
def func(a):
if isinstance(a, int):
return a
else:
return a + 1
self.assertAllEqual(2, func(2).numpy())
self.assertAllEqual(3, func(constant_op.constant(2)).numpy())
root = tracking.AutoTrackable()
root.f = func
root = self.cycle(root, cycles)
self.assertAllEqual(2, root.f(2).numpy())
self.assertAllEqual(4, root.f(3).numpy())
self.assertAllEqual(3, root.f(constant_op.constant(2)).numpy())
self.assertAllEqual(4, root.f(constant_op.constant(3)).numpy())
def test_partial(self, cycles):
# TODO(b/124441704): Figure out the story for FunctionSpec vs partial.
self.skipTest("Partial does not work for serialization.")
def f(x, y):
return x + y
func = def_function.function(
functools.partial(f, x=array_ops.zeros([1]), y=array_ops.ones([1])))
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(), [1.0])
root = self.cycle(root, cycles)
self.assertAllEqual(root.f(), [1.0])
def test_partial_with_non_tensor_defaults(self, cycles):
def f(x, y):
return x + y
func = def_function.function(functools.partial(f, y=5))
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(1), 6)
root = self.cycle(root, cycles)
self.assertAllEqual(root.f(1), 6)
def test_partial_with_positional(self, cycles):
# TODO(b/124441704): Figure out the story for FunctionSpec vs partial.
self.skipTest("Partial does not work for serialization.")
def f(x, y):
return x + y
func = def_function.function(functools.partial(f, constant_op.constant(5)))
root = tracking.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(1), 6)
root = self.cycle(root, cycles)
self.assertAllEqual(root.f(1), 6)
def test_partial_with_passed_fn_as_default(self, cycles):
# TODO(b/124441704): Figure out the story for FunctionSpec vs partial.
self.skipTest("Partial does not work for serialization.")
def f(x, y):
return x(3) + y
def my_func(a):
return 2 * a
func = def_function.function(functools.partial(f, my_func))
root = tracking.AutoTrackable()
root.f = func
self.assertEqual(root.f(constant_op.constant(3)).numpy(), 9)
root = self.cycle(root, cycles)
self.assertEqual(root.f(constant_op.constant(3)).numpy(), 9)
def test_convert_to_input_signature(self, cycles):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)])
def func(x):
return x
root = tracking.AutoTrackable()
root.f = func
root = self.cycle(root, cycles)
self.assertEqual([2], root.f([2]).numpy())
def test_named_tuple(self, cycles):
class NamedTupleType(collections.namedtuple("NamedTupleType", ["a", "b"])):
pass
@def_function.function
def f(x):
return x.a + x.b
f.get_concrete_function(
NamedTupleType(
a=tensor_spec.TensorSpec(None, dtypes.float32, name="a"),
b=tensor_spec.TensorSpec(None, dtypes.float32, name="b")))
obj = tracking.AutoTrackable()
obj.__call__ = f
imported = self.cycle(obj, cycles)
self.assertAllClose(3.,
imported(NamedTupleType(a=constant_op.constant(1.),
b=constant_op.constant(2.))))
def test_extra_args(self, cycles):
@def_function.function
def f(x):
return math_ops.add(x["a"], 1.)
# Trigger a trace.
f({"a": constant_op.constant(2.0)})
obj = tracking.AutoTrackable()
obj.__call__ = f
imported = self.cycle(obj, cycles)
self.assertEqual(4.0, imported({"a": 3.0}).numpy())
with self.assertRaisesRegexp(ValueError,
"Could not find matching function to call"):
imported({"a": 2.0, "b": 3.0})
def test_shapes_available(self, cycles):
@def_function.function(input_signature=[
tensor_spec.TensorSpec([None, 3], dtypes.int32),
tensor_spec.TensorSpec([None, 2], dtypes.int32)
])
def func(x, y):
return array_ops.concat([x, y], axis=1)
root = tracking.AutoTrackable()
root.f = func
root = self.cycle(root, cycles)
imported_graph = root.f.get_concrete_function().graph
input_x, input_y = imported_graph.inputs
self.assertEqual([None, 3], input_x.shape.as_list())
self.assertEqual([None, 2], input_y.shape.as_list())
output, = imported_graph.outputs
self.assertEqual([None, 5], output.shape.as_list())
signature = root.signatures["serving_default"]
self.assertEqual(
[None, 3], signature.inputs[0].shape.as_list())
self.assertEqual(
[None, 2], signature.inputs[1].shape.as_list())
self.assertEqual(
[None, 5], signature.outputs[0].shape.as_list())
def test_dense_features_layer(self, cycles):
columns = [feature_column_v2.numeric_column("x"),
feature_column_v2.numeric_column("y")]
layer = feature_column_v2.DenseFeatures(columns)
model = sequential.Sequential([layer])
model_input = {"x": constant_op.constant([[1.]]),
"y": constant_op.constant([[2.]])}
self.assertAllClose([[1., 2.]], model.predict(model_input))
loaded = self.cycle(model, cycles)
output, = loaded._default_save_signature(model_input).values()
self.assertAllClose([[1., 2.]], output)
signature_output, = loaded.signatures["serving_default"](
**model_input).values()
self.assertAllClose([[1., 2.]], signature_output)
def test_dense_features_layer_fit(self, cycles):
columns = [feature_column_v2.numeric_column("x")]
model = sequential.Sequential(
[feature_column_v2.DenseFeatures(columns),
core.Dense(1)])
model_input = {"x": constant_op.constant([[1.]])}
model.compile(optimizer="adam", loss="mse")
model.fit(model_input, constant_op.constant([[3.]]))
loaded = self.cycle(model, cycles)
loaded._default_save_signature(model_input)
loaded.signatures["serving_default"](**model_input)
class SingleCycleTests(test.TestCase, parameterized.TestCase):
def test_load_with_tags(self):
root = tracking.AutoTrackable()
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
with self.assertRaises(ValueError):
load.load(path, tags=[tag_constants.EVAL])
load.load(path, tags=[tag_constants.SERVING])
load.load(path, tags=tag_constants.SERVING)
load.load(path, tags=set([tag_constants.SERVING]))
def test_docstring_examples(self):
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
exported = util.Checkpoint(v=variables.Variable(3.))
exported.f = def_function.function(
lambda x: exported.v * x,
input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)])
save.save(exported, path)
imported = load.load(path)
self.assertEqual(3., imported.v.numpy())
self.assertEqual(6., imported.f(x=constant_op.constant(2.)).numpy())
save.save(exported, path, exported.f.get_concrete_function())
imported = load.load(path)
f = imported.signatures["serving_default"]
self.assertAllEqual(
[[-3.]],
f(x=constant_op.constant([[-1.]]))["output_0"].numpy())
if __name__ == "__main__":
test.main()
|
|
# -*- coding: utf-8 -*-
"""
test_django-watchman
--------------------
Tests for `django-watchman` views module.
"""
from __future__ import unicode_literals
import json
import sys
import unittest
from importlib import reload
from unittest.mock import patch
import django
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core import mail
from django.test import TestCase as DjangoTestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from watchman import checks, views
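# Test double for an authenticated user: is_authenticated must behave both as
# a property (Django >= 1.10) and as a callable method (older Django), so the
# property returns an object that is truthy and also returns True when called.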
class AuthenticatedUser(AnonymousUser):
@property
def is_authenticated(self):
class CallableTrue(object):
def __call__(self, *args, **kwargs):
return True
def __bool__(self):
return True
__nonzero__ = __bool__
return CallableTrue()
# Initialize Django
django.setup()
# Silence MIDDLEWARE_CLASSES warning as this is not an actual Django project
settings.SILENCED_SYSTEM_CHECKS = ["1_7.W001"]
def reload_settings():
# Reload settings - and all dependent modules - from scratch
reload(sys.modules["watchman.settings"])
reload(sys.modules["watchman.decorators"])
reload(sys.modules["watchman.views"])
class TestWatchman(unittest.TestCase):
def setUp(self):
# Ensure that every test executes with separate settings
reload_settings()
def test_response_content_type_json(self):
request = RequestFactory().get("/")
response = views.status(request)
self.assertEqual(response["Content-Type"], "application/json")
def test_response_contains_expected_checks(self):
expected_checks = [
"caches",
"databases",
"storage",
]
request = RequestFactory().get("/")
response = views.status(request)
content = json.loads(response.content.decode("utf-8"))
self.assertCountEqual(expected_checks, content.keys())
def test_check_database_handles_exception(self):
response = checks._check_database("foo")
self.assertFalse(response["foo"]["ok"])
self.assertEqual(
response["foo"]["error"], "The connection 'foo' doesn't exist."
)
def test_check_cache_handles_exception(self):
response = checks._check_cache("foo")
self.assertFalse(response["foo"]["ok"])
self.assertIn(
response["foo"]["error"],
"The connection 'foo' doesn't exist.",
)
def test_response_skipped_checks(self):
expected_checks = [
"caches",
"storage",
]
request = RequestFactory().get(
"/",
data={
"skip": "watchman.checks.databases",
},
)
response = views.status(request)
content = json.loads(response.content.decode("utf-8"))
self.assertCountEqual(expected_checks, content.keys())
def test_response_is_404_for_checked_and_skipped_check(self):
# Edge case: if you explicitly include and skip the same check, they cancel
# each other out and no checks remain, so the view returns a 404.
request = RequestFactory().get(
"/",
data={
"check": "watchman.checks.email",
"skip": "watchman.checks.email",
},
)
response = views.status(request)
self.assertEqual(response.status_code, 404)
@patch("watchman.checks._check_databases")
def test_response_only_single_check(self, patched_check_databases):
patched_check_databases.return_value = []
request = RequestFactory().get(
"/",
data={
"check": "watchman.checks.databases",
},
)
response = views.status(request)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertCountEqual({"databases": []}, content)
def test_response_404_when_none_specified(self):
request = RequestFactory().get(
"/",
data={
"check": "",
},
)
response = views.status(request)
self.assertEqual(response.status_code, 404)
content = json.loads(response.content.decode("utf-8"))
self.assertCountEqual({"message": "No checks found", "error": 404}, content)
@override_settings(WATCHMAN_TOKEN="ABCDE")
@override_settings(WATCHMAN_AUTH_DECORATOR="watchman.decorators.token_required")
def test_login_not_required_with_get_param(self):
# Have to manually reload settings here because override_settings
# happens after self.setUp(), but before self.tearDown()
reload_settings()
request = RequestFactory().get(
"/",
data={
"watchman-token": "ABCDE",
},
)
response = views.status(request)
self.assertEqual(response.status_code, 200)
@override_settings(WATCHMAN_TOKEN="ABCDE")
def test_version_header_not_included_when_token_auth_fails(self):
# Have to manually reload settings here because override_settings
# happens after self.setUp(), but before self.tearDown()
reload_settings()
request = RequestFactory().get("/")
response = views.status(request)
self.assertEqual(response.status_code, 403)
self.assertFalse(response.has_header("X-Watchman-Version"))
@override_settings(WATCHMAN_TOKEN="ABCDE")
@override_settings(WATCHMAN_AUTH_DECORATOR="watchman.decorators.token_required")
def test_login_not_required_with_authorization_header(self):
# Have to manually reload settings here because override_settings
# happens after self.setUp(), but before self.tearDown()
reload_settings()
request = RequestFactory().get(
"/", HTTP_AUTHORIZATION='WATCHMAN-TOKEN Token="ABCDE"'
)
response = views.status(request)
self.assertEqual(response.status_code, 200)
@override_settings(WATCHMAN_TOKEN="123-456-ABCD")
@override_settings(WATCHMAN_AUTH_DECORATOR="watchman.decorators.token_required")
def test_login_not_required_with_authorization_header_dashes_in_token(self):
# Have to manually reload settings here because override_settings
# happens after self.setUp(), but before self.tearDown()
reload_settings()
request = RequestFactory().get(
"/", HTTP_AUTHORIZATION='WATCHMAN-TOKEN Token="123-456-ABCD"'
)
response = views.status(request)
self.assertEqual(response.status_code, 200)
@override_settings(WATCHMAN_TOKEN="ABCDE")
@override_settings(WATCHMAN_AUTH_DECORATOR="watchman.decorators.token_required")
def test_login_fails_with_invalid_get_param(self):
# Have to manually reload settings here because override_settings
# happens after self.setUp(), but before self.tearDown()
reload_settings()
request = RequestFactory().get(
"/",
data={
"watchman-token": "12345",
},
)
response = views.status(request)
self.assertEqual(response.status_code, 403)
@override_settings(WATCHMAN_TOKEN="ABCDE")
@override_settings(WATCHMAN_AUTH_DECORATOR="watchman.decorators.token_required")
def test_login_fails_with_invalid_authorization_header(self):
# Have to manually reload settings here because override_settings
# happens after self.setUp(), but before self.tearDown()
reload_settings()
request = RequestFactory().get(
"/", HTTP_AUTHORIZATION='WATCHMAN-TOKEN Token="12345"'
)
response = views.status(request)
self.assertEqual(response.status_code, 403)
@override_settings(
WATCHMAN_AUTH_DECORATOR="django.contrib.auth.decorators.login_required"
)
def test_response_when_login_required_is_redirect(self):
# Have to manually reload settings here because override_settings
# happens after self.setUp()
reload_settings()
request = RequestFactory().get("/")
request.user = AnonymousUser()
response = views.status(request)
self.assertEqual(response.status_code, 302)
@override_settings(
WATCHMAN_AUTH_DECORATOR="django.contrib.auth.decorators.login_required"
)
def test_response_when_login_required(self):
# Have to manually reload settings here because override_settings
# happens after self.setUp()
reload_settings()
request = RequestFactory().get("/")
request.user = AuthenticatedUser()
response = views.status(request)
self.assertEqual(response.status_code, 200)
def test_response_version_header_missing_by_default(self):
request = RequestFactory().get("/")
response = views.status(request)
self.assertFalse(response.has_header("X-Watchman-Version"))
@override_settings(EXPOSE_WATCHMAN_VERSION=True)
def test_response_version_header(self):
# Have to manually reload settings here because override_settings
# happens after self.setUp()
reload_settings()
request = RequestFactory().get("/")
response = views.status(request)
self.assertTrue(response.has_header("X-Watchman-Version"))
@patch("watchman.checks._check_databases")
@override_settings(WATCHMAN_ERROR_CODE=503)
def test_custom_error_code(self, patched_check_databases):
reload_settings()
# Fake a DB error, ensure we get our error code
patched_check_databases.return_value = [
{
"foo": {
"ok": False,
"error": "Fake DB Error",
"stacktrace": "Fake DB Stack Trace",
},
}
]
request = RequestFactory().get(
"/",
data={
"check": "watchman.checks.databases",
},
)
response = views.status(request)
self.assertEqual(response.status_code, 503)
@patch("watchman.checks._check_databases")
def test_default_error_code(self, patched_check_databases):
reload_settings()
# Fake a DB error, ensure we get our error code
patched_check_databases.return_value = [
{
"foo": {
"ok": False,
"error": "Fake DB Error",
"stacktrace": "Fake DB Stack Trace",
},
}
]
request = RequestFactory().get(
"/",
data={
"check": "watchman.checks.databases",
},
)
response = views.status(request)
self.assertEqual(response.status_code, 500)
class TestWatchmanDashboard(unittest.TestCase):
def setUp(self):
# Ensure that every test executes with separate settings
reload_settings()
def test_dashboard_response_code(self):
request = RequestFactory().get("/")
response = views.dashboard(request)
self.assertEqual(response.status_code, 200)
def test_response_version_header_and_html_missing_by_default(self):
request = RequestFactory().get("/")
response = views.dashboard(request)
self.assertFalse(response.has_header("X-Watchman-Version"))
self.assertNotIn("Watchman version:", response.content.decode())
@override_settings(EXPOSE_WATCHMAN_VERSION=True)
def test_response_has_version_header_and_html(self):
# Have to manually reload settings here because override_settings
# happens after self.setUp()
reload_settings()
request = RequestFactory().get("/")
response = views.dashboard(request)
self.assertTrue(response.has_header("X-Watchman-Version"))
self.assertIn("Watchman version:", response.content.decode())
class TestPing(unittest.TestCase):
def setUp(self):
# Ensure that every test executes with separate settings
reload_settings()
def test_returns_pong(self):
request = RequestFactory().get("/")
response = views.ping(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.decode(), "pong")
self.assertEqual(response["Content-Type"], "text/plain")
class TestBareStatus(unittest.TestCase):
def setUp(self):
# Ensure that every test executes with separate settings
reload_settings()
def test_bare_status_success(self):
request = RequestFactory().get("/")
response = views.bare_status(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.decode(), "")
@patch("watchman.checks._check_databases")
@override_settings(WATCHMAN_ERROR_CODE=503)
def test_bare_status_error(self, patched_check_databases):
reload_settings()
# Fake a DB error, ensure we get our error code
patched_check_databases.return_value = [
{
"foo": {
"ok": False,
"error": "Fake DB Error",
"stacktrace": "Fake DB Stack Trace",
},
}
]
request = RequestFactory().get(
"/",
data={
"check": "watchman.checks.databases",
},
)
response = views.bare_status(request)
self.assertEqual(response.status_code, 503)
self.assertEqual(response.content.decode(), "")
@patch("watchman.checks._check_databases")
def test_bare_status_default_error(self, patched_check_databases):
reload_settings()
# Fake a DB error, ensure we get our error code
patched_check_databases.return_value = [
{
"foo": {
"ok": False,
"error": "Fake DB Error",
"stacktrace": "Fake DB Stack Trace",
},
}
]
request = RequestFactory().get(
"/",
data={
"check": "watchman.checks.databases",
},
)
response = views.bare_status(request)
self.assertEqual(response.status_code, 500)
self.assertEqual(response.content.decode(), "")
class TestEmailCheck(DjangoTestCase):
def setUp(self):
# Ensure that every test executes with separate settings
reload_settings()
def test_email_with_default_recipient(self):
checks._check_email()
# Test that one message has been sent.
self.assertEqual(len(mail.outbox), 1)
sent_email = mail.outbox[0]
expected_recipients = ["[email protected]"]
self.assertEqual(sent_email.to, expected_recipients)
@override_settings(WATCHMAN_EMAIL_RECIPIENTS=["[email protected]"])
def test_email_with_custom_recipient(self):
# Have to manually reload settings here because override_settings
# happens after self.setUp()
reload_settings()
checks._check_email()
# Test that one message has been sent.
self.assertEqual(len(mail.outbox), 1)
sent_email = mail.outbox[0]
expected_recipients = ["[email protected]"]
self.assertEqual(sent_email.to, expected_recipients)
@override_settings(WATCHMAN_EMAIL_RECIPIENTS=["[email protected]", "[email protected]"])
def test_email_with_multiple_recipients(self):
# Have to manually reload settings here because override_settings
# happens after self.setUp()
reload_settings()
checks._check_email()
# Test that one message has been sent.
self.assertEqual(len(mail.outbox), 1)
sent_email = mail.outbox[0]
expected_recipients = ["[email protected]", "[email protected]"]
self.assertEqual(sent_email.to, expected_recipients)
def test_email_check_with_default_headers(self):
checks._check_email()
# Test that one message has been sent.
self.assertEqual(len(mail.outbox), 1)
sent_email = mail.outbox[0]
expected_headers = {
"X-DJANGO-WATCHMAN": True,
}
self.assertEqual(sent_email.extra_headers, expected_headers)
@override_settings(WATCHMAN_EMAIL_HEADERS={"foo": "bar"})
def test_email_check_with_custom_headers(self):
# Have to manually reload settings here because override_settings
# happens after self.setUp()
reload_settings()
checks._check_email()
# Test that one message has been sent.
self.assertEqual(len(mail.outbox), 1)
sent_email = mail.outbox[0]
expected_headers = {
"X-DJANGO-WATCHMAN": True,
"foo": "bar",
}
self.assertEqual(sent_email.extra_headers, expected_headers)
def test_email_with_default_sender(self):
checks._check_email()
# Test that one message has been sent.
self.assertEqual(len(mail.outbox), 1)
sent_email = mail.outbox[0]
expected_sender = "[email protected]"
self.assertEqual(sent_email.from_email, expected_sender)
@override_settings(WATCHMAN_EMAIL_SENDER="[email protected]")
def test_email_with_custom_sender(self):
# Have to manually reload settings here because override_settings
# happens after self.setUp()
reload_settings()
checks._check_email()
# Test that one message has been sent.
self.assertEqual(len(mail.outbox), 1)
sent_email = mail.outbox[0]
expected_sender = "[email protected]"
self.assertEqual(sent_email.from_email, expected_sender)
|
|
#!/usr/bin/env python
# Copyright (C) 2015 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage Jenkins plugin module registry.
import logging
import operator
import pkg_resources
import re
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.formatter import deep_format
logger = logging.getLogger(__name__)
class ModuleRegistry(object):
entry_points_cache = {}
def __init__(self, config, plugins_list=None):
self.modules = []
self.modules_by_component_type = {}
self.handlers = {}
self.global_config = config
if plugins_list is None:
self.plugins_dict = {}
else:
self.plugins_dict = self._get_plugins_info_dict(plugins_list)
for entrypoint in pkg_resources.iter_entry_points(
group='jenkins_jobs.modules'):
Mod = entrypoint.load()
mod = Mod(self)
self.modules.append(mod)
self.modules.sort(key=operator.attrgetter('sequence'))
if mod.component_type is not None:
self.modules_by_component_type[mod.component_type] = mod
@staticmethod
def _get_plugins_info_dict(plugins_list):
def mutate_plugin_info(plugin_info):
"""
We perform mutations on a single member of plugin_info here, then
return a dictionary with the longName and shortName of the plugin
mapped to its plugin info dictionary.
"""
version = plugin_info.get('version', '0')
plugin_info['version'] = re.sub(r'(.*)-(?:SNAPSHOT|BETA)',
r'\g<1>.preview', version)
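# e.g. a version of '1.2-SNAPSHOT' becomes '1.2.preview', presumably so that
# pre-release versions still parse and compare as plain version strings.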
aliases = []
for key in ['longName', 'shortName']:
value = plugin_info.get(key, None)
if value is not None:
aliases.append(value)
plugin_info_dict = {}
for name in aliases:
plugin_info_dict[name] = plugin_info
return plugin_info_dict
list_of_dicts = [mutate_plugin_info(v) for v in plugins_list]
plugins_info_dict = {}
for d in list_of_dicts:
plugins_info_dict.update(d)
return plugins_info_dict
def get_plugin_info(self, plugin_name):
""" This method is intended to provide information about plugins within
a given module's implementation of Base.gen_xml. The return value is a
dictionary with data obtained directly from a running Jenkins instance.
This allows module authors to differentiate generated XML output based
on information such as specific plugin versions.
:arg string plugin_name: Either the shortName or longName of a plugin
as seen in a query that looks like:
``http://<jenkins-hostname>/pluginManager/api/json?pretty&depth=2``
During a 'test' run, it is possible to override JJB's query to a live
Jenkins instance by passing it a path to a file containing a YAML list
of dictionaries that mimics the plugin properties you want your test
output to reflect::
jenkins-jobs test -p /path/to/plugins-info.yaml
Below is example YAML that might be included in
/path/to/plugins-info.yaml.
.. literalinclude:: /../../tests/cmd/fixtures/plugins-info.yaml
"""
return self.plugins_dict.get(plugin_name, {})
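# Example (hypothetical plugin data): if plugins_list contained
# {'shortName': 'git', 'longName': 'Jenkins Git plugin', 'version': '2.0'},
# then get_plugin_info('git') and get_plugin_info('Jenkins Git plugin') both
# return that dictionary, while an unknown name returns {}.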
def registerHandler(self, category, name, method):
cat_dict = self.handlers.get(category, {})
if not cat_dict:
self.handlers[category] = cat_dict
cat_dict[name] = method
def getHandler(self, category, name):
return self.handlers[category][name]
def dispatch(self, component_type,
parser, xml_parent,
component, template_data={}):
"""This is a method that you can call from your implementation of
Base.gen_xml or component. It allows modules to define a type
of component, and benefit from extensibility via Python
entry points and Jenkins Job Builder :ref:`Macros <macro>`.
:arg string component_type: the name of the component
(e.g., `builder`)
:arg YAMLParser parser: the global YAML Parser
:arg Element xml_parent: the parent XML element
:arg component: the component definition, either a plain name string or a
singleton dict mapping the component name to its arguments
:arg dict template_data: values that should be interpolated into
the component definition
See :py:class:`jenkins_jobs.modules.base.Base` for how to register
components of a module.
See the Publishers module for a simple example of how to use
this method.
"""
if component_type not in self.modules_by_component_type:
raise JenkinsJobsException("Unknown component type: "
"'{0}'.".format(component_type))
component_list_type = self.modules_by_component_type[component_type] \
.component_list_type
if isinstance(component, dict):
# The component is a singleton dictionary of name: dict(args)
name, component_data = next(iter(component.items()))
if template_data:
# Template data contains values that should be interpolated
# into the component definition
allow_empty_variables = self.global_config \
and self.global_config.has_section('job_builder') \
and self.global_config.has_option(
'job_builder', 'allow_empty_variables') \
and self.global_config.getboolean(
'job_builder', 'allow_empty_variables')
component_data = deep_format(
component_data, template_data, allow_empty_variables)
else:
# The component is a simple string name, eg "run-tests"
name = component
component_data = {}
# Look for a component function defined in an entry point
eps = ModuleRegistry.entry_points_cache.get(component_list_type)
if eps is None:
module_eps = list(pkg_resources.iter_entry_points(
group='jenkins_jobs.{0}'.format(component_list_type)))
eps = {}
for module_ep in module_eps:
if module_ep.name in eps:
raise JenkinsJobsException(
"Duplicate entry point found for component type: "
"'{0}', '{0}',"
"name: '{1}'".format(component_type, name))
eps[module_ep.name] = module_ep
ModuleRegistry.entry_points_cache[component_list_type] = eps
logger.debug("Cached entry point group %s = %s",
component_list_type, eps)
if name in eps:
func = eps[name].load()
func(parser, xml_parent, component_data)
else:
# Otherwise, see if it's defined as a macro
component = parser.data.get(component_type, {}).get(name)
if component:
for b in component[component_list_type]:
# Pass component_data in as template data to this function
# so that if the macro is invoked with arguments,
# the arguments are interpolated into the real defn.
self.dispatch(component_type,
parser, xml_parent, b, component_data)
else:
raise JenkinsJobsException("Unknown entry point or macro '{0}'"
" for component type: '{1}'.".
format(name, component_type))
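# Rough dispatch walk-through (hypothetical component): a builder written in
# YAML as {'shell': {'command': 'make test'}} is split into name='shell' and
# component_data={'command': 'make test'}; 'shell' is first resolved against
# the 'jenkins_jobs.builders' entry point group, and only if no entry point
# matches is it treated as a macro defined elsewhere in the parsed YAML.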
|
|
# -*- coding: utf-8 -*-
import httplib as http
import pkgutil
import mock
from nose import SkipTest
from nose.tools import * # flake8: noqa
from tests.base import ApiTestCase
from osf_tests import factories
from framework.auth.oauth_scopes import CoreScopes
from api.base.settings.defaults import API_BASE
from api.search.permissions import IsAuthenticatedOrReadOnlyForSearch
from api.wb.views import MoveFileMetadataView, CopyFileMetadataView
from api.crossref.views import ParseCrossRefConfirmation
from rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAuthenticated
from api.base.permissions import TokenHasScope
from website.settings import DEBUG_MODE
from website import maintenance
import importlib
URLS_MODULES = []
for loader, name, _ in pkgutil.iter_modules(['api']):
if name != 'base' and name != 'test':
try:
URLS_MODULES.append(
importlib.import_module('api.{}.urls'.format(name))
)
except ImportError:
pass
VIEW_CLASSES = []
for mod in URLS_MODULES:
urlpatterns = mod.urlpatterns
for patt in urlpatterns:
if hasattr(patt, 'url_patterns'):
# Namespaced list of patterns
for subpatt in patt.url_patterns:
VIEW_CLASSES.append(subpatt.callback.cls)
else:
VIEW_CLASSES.append(patt.callback.cls)
class TestApiBaseViews(ApiTestCase):
def setUp(self):
super(TestApiBaseViews, self).setUp()
self.EXCLUDED_VIEWS = [MoveFileMetadataView, CopyFileMetadataView, ParseCrossRefConfirmation]
def test_root_returns_200(self):
res = self.app.get('/{}'.format(API_BASE))
assert_equal(res.status_code, 200)
def test_does_not_exist_returns_404(self):
res = self.app.get(
'/{}{}'.format(API_BASE, "notapage"),
expect_errors=True
)
assert_equal(res.status_code, 404)
def test_does_not_exist_formatting(self):
if DEBUG_MODE:
raise SkipTest
else:
url = '/{}{}/'.format(API_BASE, 'notapage')
res = self.app.get(url, expect_errors=True)
errors = res.json['errors']
assert(isinstance(errors, list))
assert_equal(errors[0], {'detail': 'Not found.'})
def test_view_classes_have_minimal_set_of_permissions_classes(self):
base_permissions = [
TokenHasScope,
(IsAuthenticated, IsAuthenticatedOrReadOnly, IsAuthenticatedOrReadOnlyForSearch)
]
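# A tuple entry means a view needs at least one of the listed permission
# classes; a bare class entry must be present on every view.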
for view in VIEW_CLASSES:
if view in self.EXCLUDED_VIEWS:
continue
for cls in base_permissions:
if isinstance(cls, tuple):
has_cls = any([c in view.permission_classes for c in cls])
assert_true(
has_cls,
"{0} lacks the appropriate permission classes".format(view)
)
else:
assert_in(
cls,
view.permission_classes,
"{0} lacks the appropriate permission classes".format(view)
)
for key in ['read', 'write']:
scopes = getattr(view, 'required_{}_scopes'.format(key), None)
assert_true(bool(scopes))
for scope in scopes:
assert_is_not_none(scope)
if key == 'write':
assert_not_in(CoreScopes.ALWAYS_PUBLIC, scopes)
def test_view_classes_support_embeds(self):
for view in VIEW_CLASSES:
if view in self.EXCLUDED_VIEWS:
continue
assert_true(
hasattr(view, '_get_embed_partial'),
"{0} lacks embed support".format(view)
)
def test_view_classes_define_or_override_serializer_class(self):
for view in VIEW_CLASSES:
has_serializer_class = getattr(view, 'serializer_class', None) or \
getattr(view, 'get_serializer_class', None)
assert_true(
has_serializer_class,
"{0} should include serializer class or override get_serializer_class()".format(view)
)
@mock.patch(
'osf.models.OSFUser.is_confirmed',
mock.PropertyMock(return_value=False)
)
def test_unconfirmed_user_gets_error(self):
user = factories.AuthUserFactory()
res = self.app.get(
'/{}nodes/'.format(API_BASE),
auth=user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
@mock.patch(
'osf.models.OSFUser.is_disabled',
mock.PropertyMock(return_value=True)
)
def test_disabled_user_gets_error(self):
user = factories.AuthUserFactory()
res = self.app.get(
'/{}nodes/'.format(API_BASE),
auth=user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
class TestStatusView(ApiTestCase):
def test_status_view(self):
url = '/{}status/'.format(API_BASE)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_in('maintenance', res.json)
assert_equal(res.json['maintenance'], None)
def test_status_view_with_maintenance(self):
maintenance.set_maintenance(message='test')
url = '/{}status/'.format(API_BASE)
res = self.app.get(url)
m = maintenance.get_maintenance()
assert_equal(res.status_code, 200)
assert_equal(res.json['maintenance']['level'], 1)
assert_equal(res.json['maintenance']['start'], m['start'])
assert_equal(res.json['maintenance']['end'], m['end'])
assert_equal(res.json['maintenance']['message'], 'test')
class TestJSONAPIBaseView(ApiTestCase):
def setUp(self):
super(TestJSONAPIBaseView, self).setUp()
self.user = factories.AuthUserFactory()
self.node = factories.ProjectFactory(creator=self.user)
self.url = '/{0}nodes/{1}/'.format(API_BASE, self.node._id)
for i in range(5):
factories.ProjectFactory(parent=self.node, creator=self.user)
for i in range(5):
factories.ProjectFactory(parent=self.node)
@mock.patch(
'api.base.serializers.JSONAPISerializer.to_representation',
autospec=True
)
def test_request_added_to_serializer_context(self, mock_to_representation):
self.app.get(self.url, auth=self.user.auth)
assert_in('request', mock_to_representation.call_args[0][0].context)
def test_reverse_sort_possible(self):
response = self.app.get(
'http://localhost:8000/v2/users/me/nodes/?sort=-title',
auth=self.user.auth
)
assert_equal(response.status_code, 200)
class TestSwaggerDocs(ApiTestCase):
def test_swagger_docs_redirect_to_root(self):
res = self.app.get('/v2/docs/')
assert_equal(res.status_code, 302)
assert_equal(res.location, '/v2/')
|
|
#
# upip - Package manager for MicroPython
#
# Copyright (c) 2015-2018 Paul Sokolovsky
#
# Licensed under the MIT license.
#
import sys
import gc
import uos as os
import uerrno as errno
import ujson as json
import uzlib
import upip_utarfile as tarfile
gc.collect()
debug = False
index_urls = ["https://micropython.org/pi", "https://pypi.org/pypi"]
install_path = None
cleanup_files = []
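# zlib/uzlib window parameter: 16 selects the gzip wrapper, 15 a 32 KiB
# window; install() shrinks this on boards with little free RAM.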
gzdict_sz = 16 + 15
file_buf = bytearray(512)
class NotFoundError(Exception):
pass
def op_split(path):
if path == "":
return ("", "")
r = path.rsplit("/", 1)
if len(r) == 1:
return ("", path)
head = r[0]
if not head:
head = "/"
return (head, r[1])
def op_basename(path):
return op_split(path)[1]
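# e.g. op_split("lib/foo.py") == ("lib", "foo.py") and
# op_basename("lib/foo.py") == "foo.py"; a bare "foo.py" splits to ("", "foo.py").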
# Expects a *file* name: creates every missing parent directory of the path,
# but not the final component itself.
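# e.g. _makedirs("lib/foo/bar.py") creates lib/ and lib/foo/ but never bar.py;
# already-existing directories are tolerated.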
def _makedirs(name, mode=0o777):
ret = False
s = ""
comps = name.rstrip("/").split("/")[:-1]
if comps[0] == "":
s = "/"
for c in comps:
if s and s[-1] != "/":
s += "/"
s += c
try:
os.mkdir(s)
ret = True
except OSError as e:
if e.errno != errno.EEXIST and e.errno != errno.EISDIR:
raise e
ret = False
return ret
def save_file(fname, subf):
global file_buf
with open(fname, "wb") as outf:
while True:
sz = subf.readinto(file_buf)
if not sz:
break
outf.write(file_buf, sz)
def install_tar(f, prefix):
meta = {}
for info in f:
# print(info)
fname = info.name
try:
fname = fname[fname.index("/") + 1 :]
except ValueError:
fname = ""
save = True
for p in ("setup.", "PKG-INFO", "README"):
# print(fname, p)
if fname.startswith(p) or ".egg-info" in fname:
if fname.endswith("/requires.txt"):
meta["deps"] = f.extractfile(info).read()
save = False
if debug:
print("Skipping", fname)
break
if save:
outfname = prefix + fname
if info.type != tarfile.DIRTYPE:
if debug:
print("Extracting " + outfname)
_makedirs(outfname)
subf = f.extractfile(info)
save_file(outfname, subf)
return meta
def expandhome(s):
if "~/" in s:
h = os.getenv("HOME")
s = s.replace("~/", h + "/")
return s
import ussl
import usocket
warn_ussl = True
def url_open(url):
global warn_ussl
if debug:
print(url)
proto, _, host, urlpath = url.split("/", 3)
try:
port = 443
if ":" in host:
host, port = host.split(":")
port = int(port)
ai = usocket.getaddrinfo(host, port, 0, usocket.SOCK_STREAM)
except OSError as e:
fatal("Unable to resolve %s (no Internet?)" % host, e)
# print("Address infos:", ai)
ai = ai[0]
s = usocket.socket(ai[0], ai[1], ai[2])
try:
# print("Connect address:", addr)
s.connect(ai[-1])
if proto == "https:":
s = ussl.wrap_socket(s, server_hostname=host)
if warn_ussl:
print("Warning: %s SSL certificate is not validated" % host)
warn_ussl = False
# MicroPython rawsocket module supports file interface directly
s.write("GET /%s HTTP/1.0\r\nHost: %s:%s\r\n\r\n" % (urlpath, host, port))
l = s.readline()
protover, status, msg = l.split(None, 2)
if status != b"200":
if status == b"404" or status == b"301":
raise NotFoundError("Package not found")
raise ValueError(status)
while 1:
l = s.readline()
if not l:
raise ValueError("Unexpected EOF in HTTP headers")
if l == b"\r\n":
break
except Exception as e:
s.close()
raise e
return s
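# Example (hypothetical package name):
# url_open("https://micropython.org/pi/micropython-logging/json") returns a
# socket-like object positioned at the start of the response body (the status
# line and headers have already been consumed).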
def get_pkg_metadata(name):
for url in index_urls:
try:
f = url_open("%s/%s/json" % (url, name))
except NotFoundError:
continue
try:
return json.load(f)
finally:
f.close()
raise NotFoundError("Package not found")
def fatal(msg, exc=None):
print("Error:", msg)
if exc and debug:
raise exc
sys.exit(1)
def install_pkg(pkg_spec, install_path):
data = get_pkg_metadata(pkg_spec)
latest_ver = data["info"]["version"]
packages = data["releases"][latest_ver]
del data
gc.collect()
assert len(packages) == 1
package_url = packages[0]["url"]
print("Installing %s %s from %s" % (pkg_spec, latest_ver, package_url))
package_fname = op_basename(package_url)
f1 = url_open(package_url)
try:
f2 = uzlib.DecompIO(f1, gzdict_sz)
f3 = tarfile.TarFile(fileobj=f2)
meta = install_tar(f3, install_path)
finally:
f1.close()
del f3
del f2
gc.collect()
return meta
def install(to_install, install_path=None):
# Calculate gzip dictionary size to use
global gzdict_sz
sz = gc.mem_free() + gc.mem_alloc()
if sz <= 65536:
gzdict_sz = 16 + 12
if install_path is None:
install_path = get_install_path()
if install_path[-1] != "/":
install_path += "/"
if not isinstance(to_install, list):
to_install = [to_install]
print("Installing to: " + install_path)
# sets would be perfect here, but don't depend on them
installed = []
try:
while to_install:
if debug:
print("Queue:", to_install)
pkg_spec = to_install.pop(0)
if pkg_spec in installed:
continue
meta = install_pkg(pkg_spec, install_path)
installed.append(pkg_spec)
if debug:
print(meta)
deps = meta.get("deps", "").rstrip()
if deps:
deps = deps.decode("utf-8").split("\n")
to_install.extend(deps)
except Exception as e:
print(
"Error installing '{}': {}, packages may be partially installed".format(pkg_spec, e),
file=sys.stderr,
)
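# Example (package name is illustrative): install("micropython-logging")
# installs one package into the default path, while
# install(["micropython-logging"], "/lib/") installs a list into /lib/.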
def get_install_path():
global install_path
if install_path is None:
# sys.path[0] is current module's path
install_path = sys.path[1]
install_path = expandhome(install_path)
return install_path
def cleanup():
for fname in cleanup_files:
try:
os.unlink(fname)
except OSError:
print("Warning: Cannot delete " + fname)
def help():
print(
"""\
upip - Simple PyPI package manager for MicroPython
Usage: micropython -m upip install [-p <path>] <package>... | -r <requirements.txt>
import upip; upip.install(package_or_list, [<path>])
If <path> is not given, packages will be installed into sys.path[1]
(can be set from MICROPYPATH environment variable, if current system
supports that)."""
)
print("Current value of sys.path[1]:", sys.path[1])
print(
"""\
Note: only MicroPython packages (usually, named micropython-*) are supported
for installation, upip does not support arbitrary code in setup.py.
"""
)
def main():
global debug
global index_urls
global install_path
install_path = None
if len(sys.argv) < 2 or sys.argv[1] == "-h" or sys.argv[1] == "--help":
help()
return
if sys.argv[1] != "install":
fatal("Only 'install' command supported")
to_install = []
i = 2
while i < len(sys.argv) and sys.argv[i][0] == "-":
opt = sys.argv[i]
i += 1
if opt == "-h" or opt == "--help":
help()
return
elif opt == "-p":
install_path = sys.argv[i]
i += 1
elif opt == "-r":
list_file = sys.argv[i]
i += 1
with open(list_file) as f:
while True:
l = f.readline()
if not l:
break
if l[0] == "#":
continue
to_install.append(l.rstrip())
elif opt == "-i":
index_urls = [sys.argv[i]]
i += 1
elif opt == "--debug":
debug = True
else:
fatal("Unknown/unsupported option: " + opt)
to_install.extend(sys.argv[i:])
if not to_install:
help()
return
install(to_install)
if not debug:
cleanup()
if __name__ == "__main__":
main()
|
|
import csv
import logging.config
from datetime import datetime
import numpy as np
import requests
from django.core.exceptions import ValidationError
from django.db import close_old_connections
from django.db.utils import OperationalError
from lxml import html
from scipy.sparse import dok_matrix
from sklearn.metrics import pairwise_distances
from crawler.models import App, AppDescription, Category, \
CategoryDescription, Developer, AppCategory, SimilarApp
logger = logging.getLogger(__name__)
DETAILS_URL = 'http://play.google.com/store/apps/details?id={}&hl={}'
DATE_MASK = {'en': '%B %d, %Y', 'pt_BR': '%d de %B de %Y'}
def save_developer(developer):
(developer, developer_created) = \
Developer.objects.get_or_create(name=developer.name)
return developer
def save_app(app):
(app, app_created) = \
App.objects.get_or_create(package_name=app.package_name,
defaults={
'icon_url': app.icon_url,
'size': app.size,
'publication_date': app.publication_date,
'rating': app.rating,
'version': app.version,
'content_rating': app.content_rating,
'developer': app.developer,
})
return app
def save_app_description(app_description):
(app_description, app_description_created) = \
AppDescription.objects.get_or_create(app=app_description.app,
locale=app_description.locale,
defaults={'name': app_description.name,
'description': app_description.description})
return app_description
def save_category(category):
(category, category_created) = \
Category.objects.get_or_create(key=category.key)
return category
def save_category_description(category_description):
(category_description, category_description_created) = \
CategoryDescription.objects.get_or_create(category=category_description.category,
locale=category_description.locale,
defaults={'name': category_description.name})
return category_description
def save_app_category(app_category):
(app_category, app_category_created) = \
AppCategory.objects.get_or_create(app=app_category.app,
category=app_category.category)
return app_category
class Crawler:
def __init__(self):
pass
def populate_category_description(
self,
content,
category,
loc,
):
category_desc = CategoryDescription()
category_desc.name = self.extract_category_desc(content)
category_desc.locale = loc
category_desc.category = category
return category_desc
def populate_category(self, content):
category = Category()
category.key = self.extract_category_key(content)
return category
def populate_app_description(
self,
content,
app,
loc,
):
app_description = AppDescription()
app_description.name = self.extract_name(content)
app_description.description = self.extract_description(content)
app_description.locale = loc
app_description.app = app
return app_description
def populate_app(
self,
content,
app_package,
developer,
loc,
):
app = App()
app.package_name = app_package
app.icon_url = self.extract_icon_url(content)
app.size = self.extract_file_size(content)
app.publication_date = self.extract_update_date(content, loc)
app.version = self.extract_version(content)
app.rating = self.extract_rating(content)
app.content_rating = self.extract_content_rating(content)
app.developer = developer
return app
def populate_developer(self, content):
developer = Developer()
developer.name = self.extract_developer(content)
return developer
def populate_app_category(self, app, category):
app_category = AppCategory()
app_category.app = app
app_category.category = category
return app_category
@staticmethod
def extract_name(content):
# return content.xpath('//div[@class="id-app-title"]')[0].text_content()
name = ''
names = content.xpath('//div[@class="id-app-title"]')
if names:
name = names[0].text_content().strip()
return name.encode('utf-8')
@staticmethod
def extract_description(content):
# return content.xpath('//div[@itemprop="description"]')[0].text_content().strip()
description = ''
descriptions = content.xpath('//div[@itemprop="description"]')
if descriptions:
description = descriptions[0].text_content().strip()
return description.encode('utf-8')
@staticmethod
def extract_icon_url(content):
return 'http:{}'.format(content.xpath('//img[@class="cover-image"]/@src'
)[0]).encode('utf-8')
@staticmethod
def extract_update_date(content, loc):
# return datetime.strptime(content.xpath('//div[@itemprop="datePublished"]')[0].text_content(), DATE_MASK[loc])
update_date = ''
update_dates = content.xpath('//div[@itemprop="datePublished"]')
if update_dates:
update_date_str = update_dates[0].text_content().strip()
update_date = datetime.strptime(update_date_str,
DATE_MASK[loc])
return update_date
@staticmethod
def extract_content_rating(content):
# return content.xpath('//div[@itemprop="contentRating"]')[0].text_content()
content_rating = ''
content_ratings = \
content.xpath('//div[@itemprop="contentRating"]')
if content_ratings:
content_rating = content_ratings[0].text_content().strip()
return content_rating
@staticmethod
def extract_file_size(content):
# return content.xpath('//div[@itemprop="fileSize"]')[0].text_content().strip()
size = 0
sizes = content.xpath('//div[@itemprop="fileSize"]')
if sizes:
size = sizes[0].text_content().strip()
return size
@staticmethod
def extract_version(content):
# return content.xpath('//div[@itemprop="softwareVersion"]')[0].text_content().strip()
version = 0
versions = content.xpath('//div[@itemprop="softwareVersion"]')
if versions:
version = versions[0].text_content().strip()
return version
@staticmethod
def extract_rating(content):
# return content.xpath('//div[@class="score"]')[0].text_content()
rating = ''
ratings = content.xpath('//div[@class="score"]')
if ratings:
rating = ratings[0].text_content().strip()
return rating
@staticmethod
def extract_developer(content):
# return content.xpath('//a[@class="document-subtitle primary"]/span[@itemprop="name"]')[0].text_content()
developer = ''
developers = \
content.xpath('//a[@class="document-subtitle primary"]/span[@itemprop="name"]'
)
if developers:
developer = developers[0].text_content().strip()
return developer.encode('utf-8')
@staticmethod
def extract_category_desc(content):
# return content.xpath('//span[@itemprop="genre"]')[0].text_content()
category = ''
categories = content.xpath('//span[@itemprop="genre"]')
if categories:
category = categories[0].text_content().strip()
return category.encode('utf-8')
@staticmethod
def extract_category_key(content):
# return content.xpath('//a[@class="document-subtitle category"]/@href')[0].split('/')[-1]
category_key = ''
category_urls = \
content.xpath('//a[@class="document-subtitle category"]/@href'
)
if category_urls:
category_url = category_urls[0]
category_key = category_url.split('/')[-1].strip()
return category_key.encode('utf-8')
@staticmethod
def extract_similars(content):
similars = \
content.xpath(
'//div[@class="rec-cluster"]//div[@class="card no-rationale square-cover apps small"]/@data-docid'
)
return similars
def get_details(self, app_package, loc):
url = DETAILS_URL.format(app_package, loc)
try:
response = requests.get(url, timeout=1.0)
if response.status_code != 200:
return
except requests.exceptions.Timeout, e:
print 'Timeout Error\n' + str(e)
return
except requests.exceptions.ConnectionError, e:
print 'Connection Err\n' + str(e)
return
content = html.fromstring(response.text)
developer = self.populate_developer(content)
developer = save_developer(developer)
app = self.populate_app(content, app_package, developer, loc)
app = save_app(app)
app_description = self.populate_app_description(content, app,
loc)
app_description = save_app_description(app_description)
category = self.populate_category(content)
category = save_category(category)
category_description = \
self.populate_category_description(content, category, loc)
category_description = \
save_category_description(category_description)
app_category = self.populate_app_category(app, category)
save_app_category(app_category)
similars = self.extract_similars(content)
return {
'developer': developer,
'app': app,
'app_description': app_description,
'category': category,
'category_description': category_description,
'similars': similars,
}
def crawl(self, app_packages, date):
with open('error-apps-id-list-{}.txt'.format(date), 'w') as \
error_file:
with open('similar-apps-{}.csv'.format(date), 'wb') as \
similar_file:
crawled_count = 0
total_count = 0
for app_package in app_packages:
package = app_package.rstrip()
try:
total_count += 1
app_details_map = self.get_details(package, 'en'
)
if not app_details_map:
error_file.write(package + '\n')
print '{} Not Found'.format(package)
continue
if app_details_map.get('similars'):
for similar in \
app_details_map.get('similars'):
self.write_in_csv(package, similar,
similar_file)
crawled_count += 1
except IOError, e:
print 'Error on parsing'
error_file.write(package + '\n')
pass
except ValidationError, ve:
error_file.write(package + '\n')
print 'Error on validation'
pass
return (crawled_count, total_count)
@staticmethod
def write_in_csv(package, similar, similar_file):
csv_writer = csv.writer(similar_file, delimiter=';',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow([package, similar])
def get_features_total_count(features):
count = 0
for feature_key in features.keys():
count += len(features[feature_key])
return count
class AppClassifier:
similar_apps = []
apps_list = []
features = dict()
should_persist = False
offset = 0
target_area = None
def __init__(
self,
apps,
features=None,
boundary=0.5,
should_persist=False,
offset=0,
target_area=None,
):
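# Assumed shape of `features`: a dict mapping feature type to a dict of
# feature value -> utility-matrix column index, e.g.
# {'category': {'GAME': 0, 'TOOLS': 1}, 'developer': {'Some Dev': 2}}.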
if len(apps) < 2:
raise ValueError('Invalid list of apps. It should have more than 1 element.'
)
self.apps_list = apps
# Use per-instance state so results and features do not leak between
# instances via the shared class-level attributes above.
self.similar_apps = []
self.features = features or {}
self.similarity_boundary = boundary
self.should_persist = should_persist
self.offset = offset
self.target_area = target_area
def create_utility_matrix(self):
app_count = len(self.apps_list)
total_col = get_features_total_count(self.features)
utility_matrix = dok_matrix((app_count, total_col),
dtype=np.int)
for (mat_row, app) in enumerate(self.apps_list):
self.evaluate_category(app, mat_row, utility_matrix)
self.evaluate_developer(app, mat_row, utility_matrix)
return utility_matrix
def evaluate_developer(
self,
app,
mat_row,
utility_matrix,
):
dev_name = app.developer_name()
mat_col2 = self.features['developer'][dev_name]
utility_matrix[mat_row, mat_col2] = 1
def evaluate_category(
self,
app,
mat_row,
utility_matrix,
):
cat_key = app.category_key()
        if cat_key.startswith('GAME') and 'GAME' in self.features['category']:
game_col = self.features['category']['GAME']
utility_matrix[mat_row, game_col] = 1
cat_col = self.features['category'][cat_key]
utility_matrix[mat_row, cat_col] = 1
def is_similar(self, u, v):
cos_dist = self.cosine_distance(u, v)
return cos_dist < self.similarity_boundary
def is_close_enough(self, cos_dist):
return cos_dist < self.similarity_boundary
def find_similar_apps(self):
logger.debug('Starting find_similar_apps at {}'.format(datetime.now()))
if not self.target_area:
similar_apps = \
self.find_similar_apps_with_offset(self.offset)
else:
similar_apps = \
self.find_similar_apps_in_area(self.target_area)
logger.debug('Finished find_similar_apps at {}'.format(datetime.now()))
return similar_apps
def find_similar_apps_with_offset(self, offset):
logger.debug('Starting find_similar_apps_with_offset with {}'.format(offset))
apps_count = len(self.apps_list)
utility_matrix = self.create_utility_matrix()
for row in range(offset, apps_count - 1):
for column in range(row + 1, apps_count):
self.calculate_similarity(self.apps_list[row],
self.apps_list[column],
utility_matrix.getrow(row),
utility_matrix.getrow(column))
logger.debug('Finished row {}'.format(row))
logger.debug('Finished find_similar_apps_with_offset')
return self.similar_apps
def find_similar_apps_in_area(self, area):
logger.debug('Starting in ({}, {}) to ({}, {})'.format(area[0][0],
area[0][1], area[1][0], area[1][1]))
utility_matrix = self.create_utility_matrix()
starting_row = area[0][0]
ending_row = area[1][0]
starting_column = area[0][1]
ending_column = area[1][1]
for row in range(starting_row, ending_row):
actual_starting_column = starting_column
if starting_column <= row:
actual_starting_column = row + 1
for column in range(actual_starting_column, ending_column):
self.calculate_similarity(self.apps_list[row],
self.apps_list[column],
utility_matrix.getrow(row),
utility_matrix.getrow(column))
logger.debug('Finished row {}'.format(row))
return self.similar_apps
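    # Hedged note (not part of the original class): `area` is a pair of matrix
    # coordinates, ((start_row, start_col), (end_row, end_col)); the loop above walks
    # that rectangle but skips the lower triangle so each unordered pair of apps is
    # compared once, e.g. area ((0, 0), (3, 3)) yields the pairs (0, 1), (0, 2), (1, 2).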
def calculate_similarity(
self,
one_app,
another_app,
one_app_features,
another_app_features,
):
cos_dist = self.cosine_distance(another_app_features,
one_app_features)
if self.is_close_enough(cos_dist):
logger.debug('{} and {} - distance: {}'.format(one_app,
another_app, cos_dist))
self.similar_apps.append((one_app, another_app, cos_dist))
if self.should_persist:
similar = SimilarApp()
similar.source_package = one_app.package_name
similar.similar_package = another_app.package_name
similar.distance = cos_dist
try:
close_old_connections()
similar.save()
except OperationalError:
                    logger.debug('Failed to save;{};{};{}'.format(one_app,
another_app, cos_dist))
@staticmethod
def cosine_distance(other_row, row):
return pairwise_distances(row, other_row, 'cosine')[0][0]
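# Hedged usage sketch (not part of the original module): how the cosine distance above
# behaves on two one-hot feature rows built the same way as create_utility_matrix().
# Assumes scipy and scikit-learn are importable, as they already are for this module.
if __name__ == '__main__':
    from scipy.sparse import dok_matrix as _demo_dok
    from sklearn.metrics import pairwise_distances as _demo_dist
    demo = _demo_dok((2, 3), dtype=int)
    demo[0, 0] = 1  # app A: category column
    demo[0, 2] = 1  # app A: developer column
    demo[1, 0] = 1  # app B: same category
    demo[1, 2] = 1  # app B: same developer
    print _demo_dist(demo.getrow(0), demo.getrow(1), 'cosine')[0][0]  # 0.0, well below a 0.5 boundary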
|
|
import subprocess
import binascii
from zope.interface import implements
from lbrynet.lbryfile.StreamDescriptor import save_sd_info
from lbrynet.cryptstream.client.CryptStreamDownloader import CryptStreamDownloader
from lbrynet.core.client.StreamProgressManager import FullStreamProgressManager
from lbrynet.core.StreamDescriptor import StreamMetadata
from lbrynet.interfaces import IStreamDownloaderFactory
from lbrynet.lbryfile.client.EncryptedFileMetadataHandler import EncryptedFileMetadataHandler
import os
from twisted.internet import defer, threads, reactor
from twisted.python.procutils import which
import logging
import traceback
log = logging.getLogger(__name__)
class EncryptedFileDownloader(CryptStreamDownloader):
"""Classes which inherit from this class download LBRY files"""
def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager,
stream_info_manager, payment_rate_manager, wallet, upload_allowed):
CryptStreamDownloader.__init__(self, peer_finder, rate_limiter, blob_manager,
payment_rate_manager, wallet, upload_allowed)
self.stream_hash = stream_hash
self.stream_info_manager = stream_info_manager
self.suggested_file_name = None
self._calculated_total_bytes = None
def set_stream_info(self):
if self.key is None:
d = self.stream_info_manager.get_stream_info(self.stream_hash)
def set_stream_info(stream_info):
key, stream_name, suggested_file_name = stream_info
self.key = binascii.unhexlify(key)
self.stream_name = binascii.unhexlify(stream_name)
self.suggested_file_name = binascii.unhexlify(suggested_file_name)
d.addCallback(set_stream_info)
return d
else:
return defer.succeed(True)
def delete_data(self):
d1 = self.stream_info_manager.get_blobs_for_stream(self.stream_hash)
def get_blob_hashes(blob_infos):
return [b[0] for b in blob_infos if b[0] is not None]
d1.addCallback(get_blob_hashes)
d2 = self.stream_info_manager.get_sd_blob_hashes_for_stream(self.stream_hash)
def combine_blob_hashes(results):
blob_hashes = []
for success, result in results:
if success is True:
blob_hashes.extend(result)
return blob_hashes
def delete_blobs(blob_hashes):
self.blob_manager.delete_blobs(blob_hashes)
return True
dl = defer.DeferredList([d1, d2], fireOnOneErrback=True)
dl.addCallback(combine_blob_hashes)
dl.addCallback(delete_blobs)
return dl
def stop(self, err=None):
d = self._close_output()
d.addCallback(lambda _: CryptStreamDownloader.stop(self, err=err))
return d
def _get_progress_manager(self, download_manager):
return FullStreamProgressManager(self._finished_downloading, self.blob_manager, download_manager)
def _start(self):
d = self._setup_output()
d.addCallback(lambda _: CryptStreamDownloader._start(self))
return d
def _setup_output(self):
pass
def _close_output(self):
pass
def get_total_bytes(self):
d = self.stream_info_manager.get_blobs_for_stream(self.stream_hash)
def calculate_size(blobs):
return sum([b[3] for b in blobs])
d.addCallback(calculate_size)
return d
def get_total_bytes_cached(self):
if self._calculated_total_bytes is None or self._calculated_total_bytes == 0:
if self.download_manager is None:
return 0
else:
self._calculated_total_bytes = self.download_manager.calculate_total_bytes()
return self._calculated_total_bytes
def get_bytes_left_to_output(self):
if self.download_manager is not None:
return self.download_manager.calculate_bytes_left_to_output()
else:
return 0
def get_bytes_left_to_download(self):
if self.download_manager is not None:
return self.download_manager.calculate_bytes_left_to_download()
else:
return 0
def _get_metadata_handler(self, download_manager):
return EncryptedFileMetadataHandler(self.stream_hash, self.stream_info_manager, download_manager)
class EncryptedFileDownloaderFactory(object):
implements(IStreamDownloaderFactory)
def __init__(self, peer_finder, rate_limiter, blob_manager, stream_info_manager,
wallet):
self.peer_finder = peer_finder
self.rate_limiter = rate_limiter
self.blob_manager = blob_manager
self.stream_info_manager = stream_info_manager
self.wallet = wallet
def can_download(self, sd_validator):
return True
def make_downloader(self, metadata, options, payment_rate_manager, **kwargs):
payment_rate_manager.min_blob_data_payment_rate = options[0]
upload_allowed = options[1]
def save_source_if_blob(stream_hash):
if metadata.metadata_source == StreamMetadata.FROM_BLOB:
d = self.stream_info_manager.save_sd_blob_hash_to_stream(stream_hash, metadata.source_blob_hash)
else:
d = defer.succeed(True)
d.addCallback(lambda _: stream_hash)
return d
def create_downloader(stream_hash):
downloader = self._make_downloader(stream_hash, payment_rate_manager,
metadata.validator.raw_info, upload_allowed)
d = downloader.set_stream_info()
d.addCallback(lambda _: downloader)
return d
d = save_sd_info(self.stream_info_manager, metadata.validator.raw_info)
d.addCallback(save_source_if_blob)
d.addCallback(create_downloader)
return d
def _make_downloader(self, stream_hash, payment_rate_manager, stream_info, upload_allowed):
pass
class EncryptedFileSaver(EncryptedFileDownloader):
def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, stream_info_manager,
payment_rate_manager, wallet, download_directory, upload_allowed, file_name=None):
EncryptedFileDownloader.__init__(self, stream_hash, peer_finder, rate_limiter, blob_manager,
stream_info_manager, payment_rate_manager, wallet, upload_allowed)
self.download_directory = download_directory
self.file_name = file_name
self.file_written_to = None
self.file_handle = None
def __str__(self):
if self.file_written_to is not None:
return str(self.file_written_to)
else:
return str(self.file_name)
def set_stream_info(self):
d = EncryptedFileDownloader.set_stream_info(self)
def set_file_name():
if self.file_name is None:
if self.suggested_file_name:
self.file_name = os.path.basename(self.suggested_file_name)
else:
self.file_name = os.path.basename(self.stream_name)
d.addCallback(lambda _: set_file_name())
return d
def stop(self, err=None):
d = EncryptedFileDownloader.stop(self, err=err)
d.addCallback(lambda _: self._delete_from_info_manager())
return d
def _get_progress_manager(self, download_manager):
return FullStreamProgressManager(self._finished_downloading, self.blob_manager, download_manager,
delete_blob_after_finished=not self.upload_allowed)
def _setup_output(self):
def open_file():
if self.file_handle is None:
file_name = self.file_name
if not file_name:
file_name = "_"
if os.path.exists(os.path.join(self.download_directory, file_name)):
ext_num = 1
                    def _get_file_name(ext):
                        # Only treat the last dot-separated piece as an extension when
                        # the name actually contains a dot.
                        if len(file_name.split(".")) > 1:
                            fn = '.'.join(file_name.split(".")[:-1])
                            file_ext = file_name.split(".")[-1]
                            return fn + "-" + str(ext) + "." + file_ext
                        else:
                            return file_name + "_" + str(ext)
while os.path.exists(os.path.join(self.download_directory,
_get_file_name(ext_num))):
ext_num += 1
file_name = _get_file_name(ext_num)
try:
self.file_handle = open(os.path.join(self.download_directory, file_name), 'wb')
self.file_written_to = os.path.join(self.download_directory, file_name)
except IOError:
log.error(traceback.format_exc())
raise ValueError("Failed to open %s. Make sure you have permission to save files to that"
" location." % str(os.path.join(self.download_directory,
file_name)))
return threads.deferToThread(open_file)
def _close_output(self):
self.file_handle, file_handle = None, self.file_handle
def close_file():
if file_handle is not None:
name = file_handle.name
file_handle.close()
if self.completed is False:
os.remove(name)
return threads.deferToThread(close_file)
def _get_write_func(self):
def write_func(data):
if self.stopped is False and self.file_handle is not None:
self.file_handle.write(data)
return write_func
def _delete_from_info_manager(self):
return self.stream_info_manager.delete_stream(self.stream_hash)
class EncryptedFileSaverFactory(EncryptedFileDownloaderFactory):
def __init__(self, peer_finder, rate_limiter, blob_manager, stream_info_manager,
wallet, download_directory):
EncryptedFileDownloaderFactory.__init__(self, peer_finder, rate_limiter, blob_manager,
stream_info_manager, wallet)
self.download_directory = download_directory
def _make_downloader(self, stream_hash, payment_rate_manager, stream_info, upload_allowed):
return EncryptedFileSaver(stream_hash, self.peer_finder, self.rate_limiter, self.blob_manager,
self.stream_info_manager, payment_rate_manager, self.wallet,
self.download_directory, upload_allowed)
@staticmethod
def get_description():
return "Save"
class EncryptedFileOpener(EncryptedFileDownloader):
def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, stream_info_manager,
payment_rate_manager, wallet, upload_allowed):
EncryptedFileDownloader.__init__(self, stream_hash, peer_finder, rate_limiter, blob_manager,
stream_info_manager, payment_rate_manager, wallet, upload_allowed)
self.process = None
self.process_log = None
def stop(self, err=None):
d = EncryptedFileDownloader.stop(self, err=err)
d.addCallback(lambda _: self._delete_from_info_manager())
return d
def _get_progress_manager(self, download_manager):
return FullStreamProgressManager(self._finished_downloading, self.blob_manager, download_manager,
delete_blob_after_finished=not self.upload_allowed)
def _setup_output(self):
def start_process():
if os.name == "nt":
paths = [r'C:\Program Files\VideoLAN\VLC\vlc.exe',
r'C:\Program Files (x86)\VideoLAN\VLC\vlc.exe']
for p in paths:
if os.path.exists(p):
vlc_path = p
break
else:
raise ValueError("You must install VLC media player to stream files")
else:
vlc_path = 'vlc'
self.process_log = open("vlc.out", 'a')
try:
self.process = subprocess.Popen([vlc_path, '-'], stdin=subprocess.PIPE,
stdout=self.process_log, stderr=self.process_log)
except OSError:
raise ValueError("VLC media player could not be opened")
d = threads.deferToThread(start_process)
return d
def _close_output(self):
if self.process is not None:
self.process.stdin.close()
self.process = None
return defer.succeed(True)
def _get_write_func(self):
def write_func(data):
if self.stopped is False and self.process is not None:
try:
self.process.stdin.write(data)
except IOError:
reactor.callLater(0, self.stop)
return write_func
def _delete_from_info_manager(self):
return self.stream_info_manager.delete_stream(self.stream_hash)
class EncryptedFileOpenerFactory(EncryptedFileDownloaderFactory):
def can_download(self, sd_validator):
if which('vlc'):
return True
elif os.name == "nt":
paths = [r'C:\Program Files\VideoLAN\VLC\vlc.exe',
r'C:\Program Files (x86)\VideoLAN\VLC\vlc.exe']
for p in paths:
if os.path.exists(p):
return True
return False
def _make_downloader(self, stream_hash, payment_rate_manager, stream_info, upload_allowed):
return EncryptedFileOpener(stream_hash, self.peer_finder, self.rate_limiter, self.blob_manager,
self.stream_info_manager, payment_rate_manager, self.wallet, upload_allowed)
@staticmethod
def get_description():
return "Stream"
|
|
import json
import logging
import markdown
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Q
from django.template.defaultfilters import slugify
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from ception.activities.models import Activity
from ception.articles.content_parser import get_mapping_array
from ception.articles.diff_parser import DiffParser
from ception.articles.merge import summary_edit
from ception.articles.simple_parser import SimpleParser, CleanParser
class Article(models.Model):
DRAFT = 'D'
PUBLISHED = 'P'
STATUS = (
(DRAFT, 'Draft'),
(PUBLISHED, 'Published'),
)
title = models.CharField(max_length=255)
slug = models.SlugField(max_length=255, null=True, blank=True)
content = models.TextField(max_length=60000)
description = models.TextField(max_length=5000)
status = models.CharField(max_length=1, choices=STATUS, default=DRAFT)
sentence_count = models.IntegerField(default=-1)
create_user = models.ForeignKey(User)
create_date = models.DateTimeField(auto_now_add=True)
update_date = models.DateTimeField(blank=True, null=True)
update_user = models.ForeignKey(User, null=True, blank=True, related_name="+")
class Meta:
verbose_name = _("Article")
verbose_name_plural = _("Articles")
# ordering = ("-update_date",)
ordering = ("title",)
def __unicode__(self):
return self.title
def save(self, *args, **kwargs):
if not self.pk:
super(Article, self).save(*args, **kwargs)
else:
self.update_date = timezone.now()
if not self.slug:
slug_str = "%s %s" % (self.pk, self.title.lower())
self.slug = slugify(slug_str)
super(Article, self).save(*args, **kwargs)
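    # Hedged note (comments only, not part of the original model): on the first save the
    # row is written once to obtain a pk, then the slug is derived from
    # "<pk> <lowercased title>" via slugify() and the row is saved again; for example
    # slugify("42 a sample article title") gives u"42-a-sample-article-title".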
def get_content_as_markdown(self):
return markdown.markdown(self.content, safe_mode='escape')
@staticmethod
def get_published():
articles = Article.objects.filter(status=Article.PUBLISHED)
return articles
def create_tags(self, tags):
tags = tags.strip()
tag_list = tags.split(' ')
for tag in tag_list:
if tag:
t, created = Tag.objects.get_or_create(tag=tag.lower(), article=self)
def get_tags(self):
return Tag.objects.filter(article=self)
def get_summary(self):
if len(self.content) > 255:
return u'{0}...'.format(self.content[:255])
else:
return self.content
def get_summary_as_markdown(self):
return markdown.markdown(self.get_summary(), safe_mode='escape')
def get_comments(self):
return ArticleComment.objects.filter(article=self)
def get_sentences(self):
parser = SimpleParser()
parser.feed(self.content)
return parser.sentence_array
def get_or_create_version_by_user(self, user):
version_set = ArticleVersion.objects.filter(edit_user=user, origin=self)
if len(version_set) == 0:
version = ArticleVersion()
version.edit_user = user
version.content = self.content
version.origin = self
version.save()
else:
version = version_set[0]
return version
def compute_summary(self, user):
version_set = ArticleVersion.get_versions(self, user).exclude(edit_user=user)
origin_sentences = self.get_sentences()
version_info_array = []
for v in version_set:
try:
version_info_array.append(json.loads(v.info_array_json))
except Exception, e:
print repr(e)
print 'Load json failed:', repr(v.info_array_json)
version_info_array.append([{'single': '', 'edited': ''}] * (self.sentence_count + 1))
summary_list = [{'Error': 'Error'}]
for i in xrange(1, self.sentence_count + 1):
sentence_list = [origin_sentences[i]]
for version_info in version_info_array:
if len(version_info) != self.sentence_count + 1:
logging.error("Invalid version_info: " + repr(version_info))
continue
else:
sentence = version_info[i]
if sentence and sentence["single"] and sentence["edited"]:
sentence_list.append(CleanParser.get_clean_text(sentence["sentence"]))
if len(sentence_list) <= 2:
html_str = ""
data = {}
conflicted = False
else:
html_str, data, conflicted = summary_edit(sentence_list)
summary_list.append({'html_str': html_str, 'data': data, 'conflicted': conflicted})
return summary_list
class ArticleVersion(models.Model):
origin = models.ForeignKey(Article)
content = models.TextField(max_length=60000)
diff_content = models.TextField(max_length=60000)
info_array_json = models.TextField(max_length=100000, null=True)
edit_date = models.DateTimeField(auto_now_add=True)
edit_user = models.ForeignKey(User)
slug = models.SlugField(max_length=255, null=True, blank=True)
class Meta:
verbose_name = _("Version")
verbose_name_plural = _("Versions")
ordering = ("-edit_date",)
def __unicode__(self):
return self.origin.title + " Edited by " + self.edit_user.username
def save(self, *args, **kwargs):
if not self.pk:
super(ArticleVersion, self).save(*args, **kwargs)
else:
self.edit_date = timezone.now()
if not self.slug:
slug_str = "%s %s Edited" % (self.pk, self.origin.title.lower())
self.slug = slugify(slug_str)
            self.preprocess()
            print "Saved:", self.__unicode__(), self.edit_date
super(ArticleVersion, self).save(*args, **kwargs)
def get_votes(self, sentence_id):
up_votes = Activity.objects.filter(activity_type=Activity.UP_VOTE, version_id=self.pk,
sentence_id=sentence_id).count()
down_votes = Activity.objects.filter(activity_type=Activity.DOWN_VOTE, version_id=self.pk,
sentence_id=sentence_id).count()
return up_votes - down_votes
    def preprocess(self):
self.content = self.content.replace('class=\"current\"', '').replace('class="previous"', '')
self.compute_diff()
self.info_array_json = json.dumps(get_mapping_array(self.content))
def compute_diff(self):
sp = SimpleParser()
sp.feed(self.origin.content)
parser = DiffParser(sp.sentence_array)
parser.feed(self.content)
self.diff_content = parser.diff_content
def get_sentence_comments(self):
sentence_comments_array = [{'html': u'Error', 'count': -1}]
for i in range(1, self.origin.sentence_count + 1):
comment_dict = {
'html': u'',
'count': 0
}
for comment in ArticleSentenceComment.objects.filter(parent=self, sentence_id=i):
comment_dict['html'] += render_to_string('articles/partial/partial_sentence_comment.html',
{'comment': comment})
comment_dict['count'] += 1
sentence_comments_array.append(comment_dict)
return sentence_comments_array
def get_sentence_vote(self, user):
sentence_comment_array = [{'count': 0, 'state': "N"}]
for i in range(1, self.origin.sentence_count + 1):
activity = Activity.objects.filter(Q(activity_type=Activity.UP_VOTE) | Q(activity_type=Activity.DOWN_VOTE),
user=user, sentence_id=i, version_id=self.pk)
user_state = "N"
if activity:
user_state = activity.first().activity_type
sentence_comment_array.append({'count': self.get_votes(i), 'state': user_state})
return sentence_comment_array
@staticmethod
def get_versions(article, user):
versions = ArticleVersion.objects.filter(origin=article)
# versions = ArticleVersion.objects.filter(origin=article) if user.is_staff else ArticleVersion.objects.filter(origin=article, edit_user__is_staff=True)
# versions = versions | ArticleVersion.objects.filter(origin=article, edit_user=user)
versions = versions.order_by('edit_user__username')
return versions
# def get_comments(self):
# return ArticleComment.objects.filter(article=self)
class Tag(models.Model):
tag = models.CharField(max_length=50)
article = models.ForeignKey(Article)
class Meta:
verbose_name = _('Tag')
verbose_name_plural = _('Tags')
unique_together = (('tag', 'article'),)
index_together = [['tag', 'article'],]
def __unicode__(self):
return self.tag
@staticmethod
def get_popular_tags():
tags = Tag.objects.all()
count = {}
for tag in tags:
if tag.article.status == Article.PUBLISHED:
if tag.tag in count:
count[tag.tag] = count[tag.tag] + 1
else:
count[tag.tag] = 1
sorted_count = sorted(count.items(), key=lambda t: t[1], reverse=True)
return sorted_count[:20]
class ArticleSentenceComment(models.Model):
parent = models.ForeignKey(ArticleVersion)
sentence_id = models.IntegerField(null=True, blank=True)
date = models.DateTimeField(auto_now_add=True)
comment = models.CharField(max_length=500, blank=False, null=False)
user = models.ForeignKey(User)
class Meta:
verbose_name = _("Article Sentence Comment")
verbose_name_plural = _("Article Sentence Comments")
ordering = ("date",)
def __unicode__(self):
return u'{0} - "{1}" - "{2}"'.format(self.user.username, self.parent, self.sentence_id)
class ArticleComment(models.Model):
article = models.ForeignKey(Article)
comment = models.CharField(max_length=500)
date = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(User)
class Meta:
verbose_name = _("Article Comment")
verbose_name_plural = _("Article Comments")
ordering = ("-date",)
def __unicode__(self):
return u'{0} - {1}'.format(self.user.username, self.article.title)
class ArticleSentenceSummary(models.Model):
article = models.ForeignKey(Article)
sid = models.IntegerField(null=True, blank=True)
content = models.TextField(max_length=60000)
class Meta:
verbose_name = _("Article Sentence Summary")
verbose_name_plural = _("Article Sentence Summarys")
def __unicode__(self):
return u'{0} - {1}'.format(self.article.title, self.sid)
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from lxml import etree
import six
from testtools import matchers
from keystone import auth
from keystone.common import authorization
from keystone.common import cache
from keystone.common import serializer
from keystone import config
from keystone import middleware
from keystone.openstack.common import timeutils
from keystone.policy.backends import rules
from keystone import tests
from keystone.tests import rest
CONF = config.CONF
DEFAULT_DOMAIN_ID = 'default'
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
class RestfulTestCase(tests.SQLDriverOverrides, rest.RestfulTestCase):
def config_files(self):
config_files = super(RestfulTestCase, self).config_files()
config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
return config_files
def setup_database(self):
tests.setup_database()
def teardown_database(self):
tests.teardown_database()
def generate_paste_config(self):
new_paste_file = None
try:
new_paste_file = tests.generate_paste_config(self.EXTENSION_TO_ADD)
except AttributeError:
# no need to report this error here, as most tests will not have
# EXTENSION_TO_ADD defined.
pass
finally:
return new_paste_file
def remove_generated_paste_config(self):
try:
tests.remove_generated_paste_config(self.EXTENSION_TO_ADD)
except AttributeError:
pass
def setUp(self, app_conf='keystone'):
"""Setup for v3 Restful Test Cases.
"""
new_paste_file = self.generate_paste_config()
self.addCleanup(self.remove_generated_paste_config)
if new_paste_file:
app_conf = 'config:%s' % (new_paste_file)
super(RestfulTestCase, self).setUp(app_conf=app_conf)
self.empty_context = {'environment': {}}
#drop the policy rules
self.addCleanup(rules.reset)
self.addCleanup(self.teardown_database)
def load_backends(self):
self.setup_database()
# ensure the cache region instance is setup
cache.configure_cache_region(cache.REGION)
super(RestfulTestCase, self).load_backends()
def load_fixtures(self, fixtures):
self.load_sample_data()
def load_sample_data(self):
self.domain_id = uuid.uuid4().hex
self.domain = self.new_domain_ref()
self.domain['id'] = self.domain_id
self.assignment_api.create_domain(self.domain_id, self.domain)
self.project_id = uuid.uuid4().hex
self.project = self.new_project_ref(
domain_id=self.domain_id)
self.project['id'] = self.project_id
self.assignment_api.create_project(self.project_id, self.project)
self.user_id = uuid.uuid4().hex
self.user = self.new_user_ref(domain_id=self.domain_id)
self.user['id'] = self.user_id
self.identity_api.create_user(self.user_id, self.user)
self.default_domain_project_id = uuid.uuid4().hex
self.default_domain_project = self.new_project_ref(
domain_id=DEFAULT_DOMAIN_ID)
self.default_domain_project['id'] = self.default_domain_project_id
self.assignment_api.create_project(self.default_domain_project_id,
self.default_domain_project)
self.default_domain_user_id = uuid.uuid4().hex
self.default_domain_user = self.new_user_ref(
domain_id=DEFAULT_DOMAIN_ID)
self.default_domain_user['id'] = self.default_domain_user_id
self.identity_api.create_user(self.default_domain_user_id,
self.default_domain_user)
# create & grant policy.json's default role for admin_required
self.role_id = uuid.uuid4().hex
self.role = self.new_role_ref()
self.role['id'] = self.role_id
self.role['name'] = 'admin'
self.assignment_api.create_role(self.role_id, self.role)
self.assignment_api.add_role_to_user_and_project(
self.user_id, self.project_id, self.role_id)
self.assignment_api.add_role_to_user_and_project(
self.default_domain_user_id, self.default_domain_project_id,
self.role_id)
self.assignment_api.add_role_to_user_and_project(
self.default_domain_user_id, self.project_id,
self.role_id)
self.region_id = uuid.uuid4().hex
self.region = self.new_region_ref()
self.region['id'] = self.region_id
self.catalog_api.create_region(
self.region.copy())
self.service_id = uuid.uuid4().hex
self.service = self.new_service_ref()
self.service['id'] = self.service_id
self.catalog_api.create_service(
self.service_id,
self.service.copy())
self.endpoint_id = uuid.uuid4().hex
self.endpoint = self.new_endpoint_ref(service_id=self.service_id)
self.endpoint['id'] = self.endpoint_id
self.catalog_api.create_endpoint(
self.endpoint_id,
self.endpoint.copy())
# The server adds 'enabled' and defaults to True.
self.endpoint['enabled'] = True
def new_ref(self):
"""Populates a ref with attributes common to all API entities."""
return {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True}
def new_region_ref(self):
ref = self.new_ref()
# Region doesn't have name or enabled.
del ref['name']
del ref['enabled']
ref['parent_region_id'] = None
return ref
def new_service_ref(self):
ref = self.new_ref()
ref['type'] = uuid.uuid4().hex
return ref
def new_endpoint_ref(self, service_id, **kwargs):
ref = self.new_ref()
del ref['enabled'] # enabled is optional
ref['interface'] = uuid.uuid4().hex[:8]
ref['service_id'] = service_id
ref['url'] = uuid.uuid4().hex
ref['region'] = uuid.uuid4().hex
ref.update(kwargs)
return ref
def new_domain_ref(self):
ref = self.new_ref()
return ref
def new_project_ref(self, domain_id):
ref = self.new_ref()
ref['domain_id'] = domain_id
return ref
def new_user_ref(self, domain_id, project_id=None):
ref = self.new_ref()
ref['domain_id'] = domain_id
ref['email'] = uuid.uuid4().hex
ref['password'] = uuid.uuid4().hex
if project_id:
ref['default_project_id'] = project_id
return ref
def new_group_ref(self, domain_id):
ref = self.new_ref()
ref['domain_id'] = domain_id
return ref
def new_credential_ref(self, user_id, project_id=None):
ref = self.new_ref()
ref['user_id'] = user_id
ref['blob'] = uuid.uuid4().hex
ref['type'] = uuid.uuid4().hex
if project_id:
ref['project_id'] = project_id
return ref
def new_role_ref(self):
ref = self.new_ref()
return ref
def new_policy_ref(self):
ref = self.new_ref()
ref['blob'] = uuid.uuid4().hex
ref['type'] = uuid.uuid4().hex
return ref
def new_trust_ref(self, trustor_user_id, trustee_user_id, project_id=None,
impersonation=None, expires=None, role_ids=None,
role_names=None, remaining_uses=None):
ref = self.new_ref()
ref['trustor_user_id'] = trustor_user_id
ref['trustee_user_id'] = trustee_user_id
ref['impersonation'] = impersonation or False
ref['project_id'] = project_id
ref['remaining_uses'] = remaining_uses
if isinstance(expires, six.string_types):
ref['expires_at'] = expires
elif isinstance(expires, dict):
ref['expires_at'] = timeutils.strtime(
timeutils.utcnow() + datetime.timedelta(**expires),
fmt=TIME_FORMAT)
elif expires is None:
pass
else:
raise NotImplementedError('Unexpected value for "expires"')
role_ids = role_ids or []
role_names = role_names or []
if role_ids or role_names:
ref['roles'] = []
for role_id in role_ids:
ref['roles'].append({'id': role_id})
for role_name in role_names:
ref['roles'].append({'name': role_name})
return ref
def create_new_default_project_for_user(self, user_id, domain_id,
enable_project=True):
ref = self.new_project_ref(domain_id=domain_id)
ref['enabled'] = enable_project
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
# set the user's preferred project
body = {'user': {'default_project_id': project['id']}}
r = self.patch('/users/%(user_id)s' % {
'user_id': user_id},
body=body)
self.assertValidUserResponse(r)
return project
def admin_request(self, *args, **kwargs):
"""Translates XML responses to dicts.
This implies that we only have to write assertions for JSON.
"""
r = super(RestfulTestCase, self).admin_request(*args, **kwargs)
if r.headers.get('Content-Type') == 'application/xml':
r.result = serializer.from_xml(etree.tostring(r.result))
return r
def get_scoped_token(self):
"""Convenience method so that we can test authenticated requests."""
r = self.admin_request(
method='POST',
path='/v3/auth/tokens',
body={
'auth': {
'identity': {
'methods': ['password'],
'password': {
'user': {
'name': self.user['name'],
'password': self.user['password'],
'domain': {
'id': self.user['domain_id']
}
}
}
},
'scope': {
'project': {
'id': self.project['id'],
}
}
}
})
return r.headers.get('X-Subject-Token')
def get_requested_token(self, auth):
"""Request the specific token we want."""
r = self.admin_request(
method='POST',
path='/v3/auth/tokens',
body=auth)
return r.headers.get('X-Subject-Token')
def v3_request(self, path, **kwargs):
# Check if the caller has passed in auth details for
# use in requesting the token
auth_arg = kwargs.pop('auth', None)
if auth_arg:
token = self.get_requested_token(auth_arg)
else:
token = kwargs.pop('token', None)
if not token:
token = self.get_scoped_token()
path = '/v3' + path
return self.admin_request(path=path, token=token, **kwargs)
def get(self, path, **kwargs):
r = self.v3_request(method='GET', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 200)
return r
def head(self, path, **kwargs):
r = self.v3_request(method='HEAD', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 204)
return r
def post(self, path, **kwargs):
r = self.v3_request(method='POST', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 201)
return r
def put(self, path, **kwargs):
r = self.v3_request(method='PUT', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 204)
return r
def patch(self, path, **kwargs):
r = self.v3_request(method='PATCH', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 200)
return r
def delete(self, path, **kwargs):
r = self.v3_request(method='DELETE', path=path, **kwargs)
if 'expected_status' not in kwargs:
self.assertResponseStatus(r, 204)
return r
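    # Hedged note (not part of the original tests): unless a caller passes
    # expected_status, the helpers above assert the usual v3 defaults -- GET and PATCH
    # expect 200, POST expects 201, and HEAD, PUT and DELETE expect 204.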
def assertValidErrorResponse(self, r):
if r.headers.get('Content-Type') == 'application/xml':
resp = serializer.from_xml(etree.tostring(r.result))
else:
resp = r.result
self.assertIsNotNone(resp.get('error'))
self.assertIsNotNone(resp['error'].get('code'))
self.assertIsNotNone(resp['error'].get('title'))
self.assertIsNotNone(resp['error'].get('message'))
self.assertEqual(int(resp['error']['code']), r.status_code)
def assertValidListLinks(self, links):
self.assertIsNotNone(links)
self.assertIsNotNone(links.get('self'))
self.assertThat(links['self'], matchers.StartsWith('http://localhost'))
self.assertIn('next', links)
if links['next'] is not None:
self.assertThat(links['next'],
matchers.StartsWith('http://localhost'))
self.assertIn('previous', links)
if links['previous'] is not None:
self.assertThat(links['previous'],
matchers.StartsWith('http://localhost'))
def assertValidListResponse(self, resp, key, entity_validator, ref=None,
expected_length=None, keys_to_check=None):
"""Make assertions common to all API list responses.
        If a reference is provided, its ID will be searched for in the
        response, and asserted to be equal.
"""
entities = resp.result.get(key)
self.assertIsNotNone(entities)
if expected_length is not None:
self.assertEqual(len(entities), expected_length)
elif ref is not None:
# we're at least expecting the ref
self.assertNotEmpty(entities)
# collections should have relational links
self.assertValidListLinks(resp.result.get('links'))
for entity in entities:
self.assertIsNotNone(entity)
self.assertValidEntity(entity, keys_to_check=keys_to_check)
entity_validator(entity)
if ref:
entity = [x for x in entities if x['id'] == ref['id']][0]
self.assertValidEntity(entity, ref=ref,
keys_to_check=keys_to_check)
entity_validator(entity, ref)
return entities
def assertValidResponse(self, resp, key, entity_validator, *args,
**kwargs):
"""Make assertions common to all API responses."""
entity = resp.result.get(key)
self.assertIsNotNone(entity)
keys = kwargs.pop('keys_to_check', None)
self.assertValidEntity(entity, keys_to_check=keys, *args, **kwargs)
entity_validator(entity, *args, **kwargs)
return entity
def assertValidEntity(self, entity, ref=None, keys_to_check=None):
"""Make assertions common to all API entities.
If a reference is provided, the entity will also be compared against
the reference.
"""
if keys_to_check is not None:
keys = keys_to_check
else:
keys = ['name', 'description', 'enabled']
for k in ['id'] + keys:
msg = '%s unexpectedly None in %s' % (k, entity)
self.assertIsNotNone(entity.get(k), msg)
self.assertIsNotNone(entity.get('links'))
self.assertIsNotNone(entity['links'].get('self'))
self.assertThat(entity['links']['self'],
matchers.StartsWith('http://localhost'))
self.assertIn(entity['id'], entity['links']['self'])
if ref:
for k in keys:
msg = '%s not equal: %s != %s' % (k, ref[k], entity[k])
self.assertEqual(ref[k], entity[k])
return entity
# auth validation
def assertValidISO8601ExtendedFormatDatetime(self, dt):
try:
return timeutils.parse_strtime(dt, fmt=TIME_FORMAT)
except Exception:
msg = '%s is not a valid ISO 8601 extended format date time.' % dt
raise AssertionError(msg)
self.assertIsInstance(dt, datetime.datetime)
def assertValidTokenResponse(self, r, user=None):
self.assertTrue(r.headers.get('X-Subject-Token'))
token = r.result['token']
self.assertIsNotNone(token.get('expires_at'))
expires_at = self.assertValidISO8601ExtendedFormatDatetime(
token['expires_at'])
self.assertIsNotNone(token.get('issued_at'))
issued_at = self.assertValidISO8601ExtendedFormatDatetime(
token['issued_at'])
self.assertTrue(issued_at < expires_at)
self.assertIn('user', token)
self.assertIn('id', token['user'])
self.assertIn('name', token['user'])
self.assertIn('domain', token['user'])
self.assertIn('id', token['user']['domain'])
if user is not None:
self.assertEqual(user['id'], token['user']['id'])
self.assertEqual(user['name'], token['user']['name'])
self.assertEqual(user['domain_id'], token['user']['domain']['id'])
return token
def assertValidUnscopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidTokenResponse(r, *args, **kwargs)
self.assertNotIn('roles', token)
self.assertNotIn('catalog', token)
self.assertNotIn('project', token)
self.assertNotIn('domain', token)
return token
def assertValidScopedTokenResponse(self, r, *args, **kwargs):
require_catalog = kwargs.pop('require_catalog', True)
endpoint_filter = kwargs.pop('endpoint_filter', False)
ep_filter_assoc = kwargs.pop('ep_filter_assoc', 0)
token = self.assertValidTokenResponse(r, *args, **kwargs)
if require_catalog:
self.assertIn('catalog', token)
if isinstance(token['catalog'], list):
# only test JSON
for service in token['catalog']:
for endpoint in service['endpoints']:
self.assertNotIn('enabled', endpoint)
self.assertNotIn('legacy_endpoint_id', endpoint)
self.assertNotIn('service_id', endpoint)
# sub test for the OS-EP-FILTER extension enabled
if endpoint_filter:
                    # verify the catalog has no more than the endpoints
# associated in the catalog using the ep filter assoc
self.assertTrue(len(token['catalog']) < ep_filter_assoc + 1)
else:
self.assertNotIn('catalog', token)
self.assertIn('roles', token)
self.assertTrue(token['roles'])
for role in token['roles']:
self.assertIn('id', role)
self.assertIn('name', role)
return token
def assertValidProjectScopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidScopedTokenResponse(r, *args, **kwargs)
self.assertIn('project', token)
self.assertIn('id', token['project'])
self.assertIn('name', token['project'])
self.assertIn('domain', token['project'])
self.assertIn('id', token['project']['domain'])
self.assertIn('name', token['project']['domain'])
self.assertEqual(self.role_id, token['roles'][0]['id'])
return token
def assertValidProjectTrustScopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidProjectScopedTokenResponse(r, *args, **kwargs)
trust = token.get('OS-TRUST:trust')
self.assertIsNotNone(trust)
self.assertIsNotNone(trust.get('id'))
self.assertIsInstance(trust.get('impersonation'), bool)
self.assertIsNotNone(trust.get('trustor_user'))
self.assertIsNotNone(trust.get('trustee_user'))
self.assertIsNotNone(trust['trustor_user'].get('id'))
self.assertIsNotNone(trust['trustee_user'].get('id'))
def assertValidDomainScopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidScopedTokenResponse(r, *args, **kwargs)
self.assertIn('domain', token)
self.assertIn('id', token['domain'])
self.assertIn('name', token['domain'])
return token
def assertEqualTokens(self, a, b):
"""Assert that two tokens are equal.
Compare two tokens except for their ids. This also truncates
the time in the comparison.
"""
def normalize(token):
del token['token']['expires_at']
del token['token']['issued_at']
return token
a_expires_at = self.assertValidISO8601ExtendedFormatDatetime(
a['token']['expires_at'])
b_expires_at = self.assertValidISO8601ExtendedFormatDatetime(
b['token']['expires_at'])
self.assertCloseEnoughForGovernmentWork(a_expires_at, b_expires_at)
a_issued_at = self.assertValidISO8601ExtendedFormatDatetime(
a['token']['issued_at'])
b_issued_at = self.assertValidISO8601ExtendedFormatDatetime(
b['token']['issued_at'])
self.assertCloseEnoughForGovernmentWork(a_issued_at, b_issued_at)
return self.assertDictEqual(normalize(a), normalize(b))
# region validation
def assertValidRegionListResponse(self, resp, *args, **kwargs):
#NOTE(jaypipes): I have to pass in a blank keys_to_check parameter
# below otherwise the base assertValidEntity method
# tries to find a "name" and an "enabled" key in the
# returned ref dicts. The issue is, I don't understand
# how the service and endpoint entity assertions below
# actually work (they don't raise assertions), since
# AFAICT, the service and endpoint tables don't have
# a "name" column either... :(
return self.assertValidListResponse(
resp,
'regions',
self.assertValidRegion,
keys_to_check=[],
*args,
**kwargs)
def assertValidRegionResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'region',
self.assertValidRegion,
keys_to_check=[],
*args,
**kwargs)
def assertValidRegion(self, entity, ref=None):
self.assertIsNotNone(entity.get('description'))
if ref:
self.assertEqual(ref['description'], entity['description'])
return entity
# service validation
def assertValidServiceListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'services',
self.assertValidService,
*args,
**kwargs)
def assertValidServiceResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'service',
self.assertValidService,
*args,
**kwargs)
def assertValidService(self, entity, ref=None):
self.assertIsNotNone(entity.get('type'))
self.assertIsInstance(entity.get('enabled'), bool)
if ref:
self.assertEqual(ref['type'], entity['type'])
return entity
# endpoint validation
def assertValidEndpointListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'endpoints',
self.assertValidEndpoint,
*args,
**kwargs)
def assertValidEndpointResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'endpoint',
self.assertValidEndpoint,
*args,
**kwargs)
def assertValidEndpoint(self, entity, ref=None):
self.assertIsNotNone(entity.get('interface'))
self.assertIsNotNone(entity.get('service_id'))
self.assertIsInstance(entity['enabled'], bool)
# this is intended to be an unexposed implementation detail
self.assertNotIn('legacy_endpoint_id', entity)
if ref:
self.assertEqual(ref['interface'], entity['interface'])
self.assertEqual(ref['service_id'], entity['service_id'])
return entity
# domain validation
def assertValidDomainListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'domains',
self.assertValidDomain,
*args,
**kwargs)
def assertValidDomainResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'domain',
self.assertValidDomain,
*args,
**kwargs)
def assertValidDomain(self, entity, ref=None):
if ref:
pass
return entity
# project validation
def assertValidProjectListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'projects',
self.assertValidProject,
*args,
**kwargs)
def assertValidProjectResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'project',
self.assertValidProject,
*args,
**kwargs)
def assertValidProject(self, entity, ref=None):
self.assertIsNotNone(entity.get('domain_id'))
if ref:
self.assertEqual(ref['domain_id'], entity['domain_id'])
return entity
# user validation
def assertValidUserListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'users',
self.assertValidUser,
*args,
**kwargs)
def assertValidUserResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'user',
self.assertValidUser,
*args,
**kwargs)
def assertValidUser(self, entity, ref=None):
self.assertIsNotNone(entity.get('domain_id'))
self.assertIsNotNone(entity.get('email'))
self.assertIsNone(entity.get('password'))
self.assertNotIn('tenantId', entity)
if ref:
self.assertEqual(ref['domain_id'], entity['domain_id'])
self.assertEqual(ref['email'], entity['email'])
if 'default_project_id' in ref:
self.assertIsNotNone(ref['default_project_id'])
self.assertEqual(ref['default_project_id'],
entity['default_project_id'])
return entity
# group validation
def assertValidGroupListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'groups',
self.assertValidGroup,
*args,
**kwargs)
def assertValidGroupResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'group',
self.assertValidGroup,
*args,
**kwargs)
def assertValidGroup(self, entity, ref=None):
self.assertIsNotNone(entity.get('name'))
if ref:
self.assertEqual(ref['name'], entity['name'])
return entity
# credential validation
def assertValidCredentialListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'credentials',
self.assertValidCredential,
*args,
**kwargs)
def assertValidCredentialResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'credential',
self.assertValidCredential,
*args,
**kwargs)
def assertValidCredential(self, entity, ref=None):
self.assertIsNotNone(entity.get('user_id'))
self.assertIsNotNone(entity.get('blob'))
self.assertIsNotNone(entity.get('type'))
if ref:
self.assertEqual(ref['user_id'], entity['user_id'])
self.assertEqual(ref['blob'], entity['blob'])
self.assertEqual(ref['type'], entity['type'])
self.assertEqual(ref.get('project_id'), entity.get('project_id'))
return entity
# role validation
def assertValidRoleListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'roles',
self.assertValidRole,
keys_to_check=['name'],
*args,
**kwargs)
def assertValidRoleResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'role',
self.assertValidRole,
keys_to_check=['name'],
*args,
**kwargs)
def assertValidRole(self, entity, ref=None):
self.assertIsNotNone(entity.get('name'))
if ref:
self.assertEqual(ref['name'], entity['name'])
return entity
def assertValidRoleAssignmentListResponse(self, resp, ref=None,
expected_length=None):
entities = resp.result.get('role_assignments')
if expected_length is not None:
self.assertEqual(len(entities), expected_length)
elif ref is not None:
# we're at least expecting the ref
self.assertNotEmpty(entities)
# collections should have relational links
self.assertValidListLinks(resp.result.get('links'))
for entity in entities:
self.assertIsNotNone(entity)
self.assertValidRoleAssignment(entity)
if ref:
self.assertValidRoleAssignment(entity, ref)
return entities
def assertValidRoleAssignment(self, entity, ref=None, url=None):
self.assertIsNotNone(entity.get('role'))
self.assertIsNotNone(entity.get('scope'))
# Only one of user or group should be present
self.assertIsNotNone(entity.get('user') or
entity.get('group'))
self.assertIsNone(entity.get('user') and
entity.get('group'))
# Only one of domain or project should be present
self.assertIsNotNone(entity['scope'].get('project') or
entity['scope'].get('domain'))
self.assertIsNone(entity['scope'].get('project') and
entity['scope'].get('domain'))
if entity['scope'].get('project'):
self.assertIsNotNone(entity['scope']['project'].get('id'))
else:
self.assertIsNotNone(entity['scope']['domain'].get('id'))
self.assertIsNotNone(entity.get('links'))
self.assertIsNotNone(entity['links'].get('assignment'))
if ref:
if ref.get('user'):
self.assertEqual(ref['user']['id'], entity['user']['id'])
if ref.get('group'):
self.assertEqual(ref['group']['id'], entity['group']['id'])
if ref.get('role'):
self.assertEqual(ref['role']['id'], entity['role']['id'])
if ref['scope'].get('project'):
self.assertEqual(ref['scope']['project']['id'],
entity['scope']['project']['id'])
if ref['scope'].get('domain'):
self.assertEqual(ref['scope']['domain']['id'],
entity['scope']['domain']['id'])
if url:
self.assertIn(url, entity['links']['assignment'])
def assertRoleAssignmentInListResponse(
self, resp, ref, link_url=None, expected=1):
found_count = 0
for entity in resp.result.get('role_assignments'):
try:
self.assertValidRoleAssignment(
entity, ref=ref, url=link_url)
except Exception:
# It doesn't match, so let's go onto the next one
pass
else:
found_count += 1
self.assertEqual(found_count, expected)
def assertRoleAssignmentNotInListResponse(
self, resp, ref, link_url=None):
self.assertRoleAssignmentInListResponse(
resp, ref=ref, link_url=link_url, expected=0)
# policy validation
def assertValidPolicyListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'policies',
self.assertValidPolicy,
*args,
**kwargs)
def assertValidPolicyResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'policy',
self.assertValidPolicy,
*args,
**kwargs)
def assertValidPolicy(self, entity, ref=None):
self.assertIsNotNone(entity.get('blob'))
self.assertIsNotNone(entity.get('type'))
if ref:
self.assertEqual(ref['blob'], entity['blob'])
self.assertEqual(ref['type'], entity['type'])
return entity
# trust validation
def assertValidTrustListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'trusts',
self.assertValidTrustSummary,
*args,
**kwargs)
def assertValidTrustResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'trust',
self.assertValidTrust,
*args,
**kwargs)
def assertValidTrustSummary(self, entity, ref=None):
return self.assertValidTrust(entity, ref, summary=True)
def assertValidTrust(self, entity, ref=None, summary=False):
self.assertIsNotNone(entity.get('trustor_user_id'))
self.assertIsNotNone(entity.get('trustee_user_id'))
self.assertIn('expires_at', entity)
if entity['expires_at'] is not None:
self.assertValidISO8601ExtendedFormatDatetime(entity['expires_at'])
if summary:
# Trust list contains no roles, but getting a specific
# trust by ID provides the detailed response containing roles
self.assertNotIn('roles', entity)
self.assertIn('project_id', entity)
else:
for role in entity['roles']:
self.assertIsNotNone(role)
self.assertValidEntity(role)
self.assertValidRole(role)
self.assertValidListLinks(entity.get('roles_links'))
# always disallow role xor project_id (neither or both is allowed)
has_roles = bool(entity.get('roles'))
has_project = bool(entity.get('project_id'))
self.assertFalse(has_roles ^ has_project)
if ref:
self.assertEqual(ref['trustor_user_id'], entity['trustor_user_id'])
self.assertEqual(ref['trustee_user_id'], entity['trustee_user_id'])
self.assertEqual(ref['project_id'], entity['project_id'])
if entity.get('expires_at') or ref.get('expires_at'):
entity_exp = self.assertValidISO8601ExtendedFormatDatetime(
entity['expires_at'])
ref_exp = self.assertValidISO8601ExtendedFormatDatetime(
ref['expires_at'])
self.assertCloseEnoughForGovernmentWork(entity_exp, ref_exp)
else:
self.assertEqual(ref.get('expires_at'),
entity.get('expires_at'))
return entity
def build_auth_scope(self, project_id=None, project_name=None,
project_domain_id=None, project_domain_name=None,
domain_id=None, domain_name=None, trust_id=None):
scope_data = {}
if project_id or project_name:
scope_data['project'] = {}
if project_id:
scope_data['project']['id'] = project_id
else:
scope_data['project']['name'] = project_name
if project_domain_id or project_domain_name:
project_domain_json = {}
if project_domain_id:
project_domain_json['id'] = project_domain_id
else:
project_domain_json['name'] = project_domain_name
scope_data['project']['domain'] = project_domain_json
if domain_id or domain_name:
scope_data['domain'] = {}
if domain_id:
scope_data['domain']['id'] = domain_id
else:
scope_data['domain']['name'] = domain_name
if trust_id:
scope_data['OS-TRUST:trust'] = {}
scope_data['OS-TRUST:trust']['id'] = trust_id
return scope_data
def build_password_auth(self, user_id=None, username=None,
user_domain_id=None, user_domain_name=None,
password=None):
password_data = {'user': {}}
if user_id:
password_data['user']['id'] = user_id
else:
password_data['user']['name'] = username
if user_domain_id or user_domain_name:
password_data['user']['domain'] = {}
if user_domain_id:
password_data['user']['domain']['id'] = user_domain_id
else:
password_data['user']['domain']['name'] = user_domain_name
password_data['user']['password'] = password
return password_data
def build_token_auth(self, token):
return {'id': token}
def build_authentication_request(self, token=None, user_id=None,
username=None, user_domain_id=None,
user_domain_name=None, password=None,
**kwargs):
"""Build auth dictionary.
It will create an auth dictionary based on all the arguments
that it receives.
"""
auth_data = {}
auth_data['identity'] = {'methods': []}
if token:
auth_data['identity']['methods'].append('token')
auth_data['identity']['token'] = self.build_token_auth(token)
if user_id or username:
auth_data['identity']['methods'].append('password')
auth_data['identity']['password'] = self.build_password_auth(
user_id, username, user_domain_id, user_domain_name, password)
if kwargs:
auth_data['scope'] = self.build_auth_scope(**kwargs)
return {'auth': auth_data}
def build_external_auth_request(self, remote_user,
remote_domain=None, auth_data=None):
context = {'environment': {'REMOTE_USER': remote_user}}
if remote_domain:
context['environment']['REMOTE_DOMAIN'] = remote_domain
if not auth_data:
auth_data = self.build_authentication_request()['auth']
no_context = None
auth_info = auth.controllers.AuthInfo.create(no_context, auth_data)
auth_context = {'extras': {}, 'method_names': []}
return context, auth_info, auth_context
class VersionTestCase(RestfulTestCase):
def test_get_version(self):
pass
#NOTE(gyee): test AuthContextMiddleware here instead of test_middleware.py
# because we need the token
class AuthContextMiddlewareTestCase(RestfulTestCase):
def _mock_request_object(self, token_id):
class fake_req:
headers = {middleware.AUTH_TOKEN_HEADER: token_id}
environ = {}
return fake_req()
def test_auth_context_build_by_middleware(self):
        # test to make sure AuthContextMiddleware successfully builds the auth
        # context from the incoming auth token
admin_token = self.get_scoped_token()
req = self._mock_request_object(admin_token)
application = None
middleware.AuthContextMiddleware(application).process_request(req)
self.assertEqual(
req.environ.get(authorization.AUTH_CONTEXT_ENV)['user_id'],
self.user['id'])
def test_auth_context_override(self):
overridden_context = 'OVERRIDDEN_CONTEXT'
# this token should not be used
token = uuid.uuid4().hex
req = self._mock_request_object(token)
req.environ[authorization.AUTH_CONTEXT_ENV] = overridden_context
application = None
middleware.AuthContextMiddleware(application).process_request(req)
        # make sure the overridden context takes precedence
self.assertEqual(req.environ.get(authorization.AUTH_CONTEXT_ENV),
overridden_context)
def test_admin_token_auth_context(self):
# test to make sure AuthContextMiddleware does not attempt to build
# auth context if the incoming auth token is the special admin token
req = self._mock_request_object(CONF.admin_token)
application = None
middleware.AuthContextMiddleware(application).process_request(req)
self.assertDictEqual(req.environ.get(authorization.AUTH_CONTEXT_ENV),
{})
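# Hedged sketch (not part of the original tests): the request body shape produced by
# build_authentication_request(user_id=..., password=..., project_id=...) above. The
# literal values are placeholders, not fixtures from these test cases.
EXAMPLE_PASSWORD_SCOPED_AUTH = {
    'auth': {
        'identity': {
            'methods': ['password'],
            'password': {'user': {'id': 'USER_ID', 'password': 'PASSWORD'}},
        },
        'scope': {'project': {'id': 'PROJECT_ID'}},
    },
}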
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"""Pythonic simple SOAP Server implementation"""
from __future__ import unicode_literals
import sys
if sys.version > '3':
unicode = str
import datetime
import logging
import warnings
import re
import traceback
try:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
except ImportError:
from http.server import BaseHTTPRequestHandler, HTTPServer
from . import __author__, __copyright__, __license__, __version__
from .simplexml import SimpleXMLElement, TYPE_MAP, Date, Decimal
log = logging.getLogger(__name__)
# Deprecated?
NS_RX = re.compile(r'xmlns:(\w+)="(.+?)"')
class SoapDispatcher(object):
"""Simple Dispatcher for SOAP Server"""
def __init__(self, name, documentation='', action='', location='',
namespace=None, prefix=False,
soap_uri="http://schemas.xmlsoap.org/soap/envelope/",
soap_ns='soap',
namespaces={},
pretty=False,
debug=False,
**kwargs):
"""
:param namespace: Target namespace; xmlns=targetNamespace
:param prefix: Prefix for target namespace; xmlns:prefix=targetNamespace
:param namespaces: Specify additional namespaces; example: {'external': 'http://external.mt.moboperator'}
:param pretty: Prettifies generated xmls
:param debug: Use to add tracebacks in generated xmls.
Multiple namespaces
===================
It is possible to support multiple namespaces.
You need to specify additional namespaces by passing the `namespaces` parameter.
>>> dispatcher = SoapDispatcher(
... name = "MTClientWS",
... location = "http://localhost:8008/ws/MTClientWS",
... action = 'http://localhost:8008/ws/MTClientWS', # SOAPAction
... namespace = "http://external.mt.moboperator", prefix="external",
... documentation = 'moboperator MTClientWS',
... namespaces = {
... 'external': 'http://external.mt.moboperator',
... 'model': 'http://model.common.mt.moboperator'
... },
... ns = True)
Now the registered method must return node names with namespaces' prefixes.
>>> def _multi_ns_func(self, serviceMsisdn):
... ret = {
... 'external:activateSubscriptionsReturn': [
... {'model:code': '0'},
... {'model:description': 'desc'},
... ]}
... return ret
Our prefixes will be changed to those used by the client.
"""
self.methods = {}
self.name = name
self.documentation = documentation
self.action = action # base SoapAction
self.location = location
self.namespace = namespace # targetNamespace
self.prefix = prefix
self.soap_ns = soap_ns
self.soap_uri = soap_uri
self.namespaces = namespaces
self.pretty = pretty
self.debug = debug
@staticmethod
def _extra_namespaces(xml, ns):
"""Extends xml with extra namespaces.
:param ns: dict with namespaceUrl:prefix pairs
:param xml: XML node to modify
"""
if ns:
_tpl = 'xmlns:%s="%s"'
_ns_str = " ".join([_tpl % (prefix, uri) for uri, prefix in ns.items() if uri not in xml])
xml = xml.replace('/>', ' ' + _ns_str + '/>')
return xml
def register_function(self, name, fn, returns=None, args=None, doc=None):
self.methods[name] = fn, returns, args, doc or getattr(fn, "__doc__", "")
def dispatch(self, xml, action=None, fault=None):
"""Receive and process SOAP call, returns the xml"""
# a dict can be sent in fault to expose it to the caller
# default values:
prefix = self.prefix
ret = None
if fault is None:
fault = {}
soap_ns, soap_uri = self.soap_ns, self.soap_uri
soap_fault_code = 'VersionMismatch'
name = None
# namespaces = [('model', 'http://model.common.mt.moboperator'), ('external', 'http://external.mt.moboperator')]
_ns_reversed = dict(((v, k) for k, v in self.namespaces.items())) # Switch keys-values
# _ns_reversed = {'http://external.mt.moboperator': 'external', 'http://model.common.mt.moboperator': 'model'}
try:
request = SimpleXMLElement(xml, namespace=self.namespace)
# detect soap prefix and uri (xmlns attributes of Envelope)
for k, v in request[:]:
if v in ("http://schemas.xmlsoap.org/soap/envelope/",
"http://www.w3.org/2003/05/soap-env",):
soap_ns = request.attributes()[k].localName
soap_uri = request.attributes()[k].value
# If the value from attributes on Envelope is in additional namespaces
elif v in self.namespaces.values():
_ns = request.attributes()[k].localName
_uri = request.attributes()[k].value
_ns_reversed[_uri] = _ns # update with received alias
# Now we change 'external' and 'model' to the received forms i.e. 'ext' and 'mod'
# After that we know how the client has prefixed additional namespaces
ns = NS_RX.findall(xml)
for k, v in ns:
if v in self.namespaces.values():
_ns_reversed[v] = k
soap_fault_code = 'Client'
# parse request message and get local method
method = request('Body', ns=soap_uri).children()(0)
if action:
# method name = action
name = action[len(self.action)+1:-1]
prefix = self.prefix
if not action or not name:
# method name = input message name
name = method.get_local_name()
prefix = method.get_prefix()
log.debug('dispatch method: %s', name)
function, returns_types, args_types, doc = self.methods[name]
log.debug('returns_types %s', returns_types)
# de-serialize parameters (if type definitions given)
if args_types:
args = method.children().unmarshall(args_types)
elif args_types is None:
args = {'request': method} # send raw request
else:
args = {} # no parameters
soap_fault_code = 'Server'
# execute function
ret = function(**args)
log.debug('dispatched method returns: %s', ret)
except Exception: # This shouldn't be one huge try/except
import sys
etype, evalue, etb = sys.exc_info()
log.error(traceback.format_exc())
if self.debug:
detail = ''.join(traceback.format_exception(etype, evalue, etb))
detail += '\n\nXML REQUEST\n\n' + xml
else:
detail = None
fault.update({'faultcode': "%s.%s" % (soap_fault_code, etype.__name__),
'faultstring': evalue,
'detail': detail})
# build response message
if not prefix:
xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s"/>"""
else:
xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s"
xmlns:%(prefix)s="%(namespace)s"/>"""
xml %= { # a %= {} is a shortcut for a = a % {}
'namespace': self.namespace,
'prefix': prefix,
'soap_ns': soap_ns,
'soap_uri': soap_uri
}
# Now we add extra namespaces
xml = SoapDispatcher._extra_namespaces(xml, _ns_reversed)
# Change our namespace alias to that given by the client.
# We put [('model', 'http://model.common.mt.moboperator'), ('external', 'http://external.mt.moboperator')]
# mix it with {'http://external.mt.moboperator': 'ext', 'http://model.common.mt.moboperator': 'mod'}
mapping = dict(((k, _ns_reversed[v]) for k, v in self.namespaces.items())) # Switch keys-values and change value
# and get {'model': u'mod', 'external': u'ext'}
response = SimpleXMLElement(xml,
namespace=self.namespace,
namespaces_map=mapping,
prefix=prefix)
response['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
response['xmlns:xsd'] = "http://www.w3.org/2001/XMLSchema"
body = response.add_child("%s:Body" % soap_ns, ns=False)
if fault:
# generate a Soap Fault (with the python exception)
body.marshall("%s:Fault" % soap_ns, fault, ns=False)
else:
# return normal value
res = body.add_child("%sResponse" % name, ns=self.namespace)
if not prefix:
res['xmlns'] = self.namespace # add target namespace
# serialize returned values (response) if type definition available
if returns_types:
# TODO: full sanity check of type structure (recursive)
complex_type = isinstance(ret, dict)
if complex_type:
# check if type mapping correlates with return value
types_ok = all([k in returns_types for k in ret.keys()])
if not types_ok:
warnings.warn("Return value doesn't match type structure: "
"%s vs %s" % (str(returns_types), str(ret)))
if not complex_type or not types_ok:
# backward compatibility for scalar and simple types
res.marshall(list(returns_types.keys())[0], ret)
else:
# new style for complex classes
for k, v in ret.items():
res.marshall(k, v)
elif returns_types is None:
# merge xmlelement returned
res.import_node(ret)
elif returns_types == {}:
log.warning('Given returns_types is an empty dict.')
return response.as_xml(pretty=self.pretty)
# Introspection functions:
def list_methods(self):
"""Return a list of aregistered operations"""
return [(method, doc) for method, (function, returns, args, doc) in self.methods.items()]
def help(self, method=None):
"""Generate sample request and response messages"""
(function, returns, args, doc) = self.methods[method]
xml = """
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body><%(method)s xmlns="%(namespace)s"/></soap:Body>
</soap:Envelope>""" % {'method': method, 'namespace': self.namespace}
request = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix)
if args:
items = args.items()
elif args is None:
items = [('value', None)]
else:
items = []
for k, v in items:
request(method).marshall(k, v, add_comments=True, ns=False)
xml = """
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body><%(method)sResponse xmlns="%(namespace)s"/></soap:Body>
</soap:Envelope>""" % {'method': method, 'namespace': self.namespace}
response = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix)
if returns:
items = returns.items()
elif returns is None:
items = [('value', None)]
else:
items = []
for k, v in items:
response('%sResponse' % method).marshall(k, v, add_comments=True, ns=False)
return request.as_xml(pretty=True), response.as_xml(pretty=True), doc
def wsdl(self):
"""Generate Web Service Description v1.1"""
xml = """<?xml version="1.0"?>
<wsdl:definitions name="%(name)s"
targetNamespace="%(namespace)s"
xmlns:tns="%(namespace)s"
xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<wsdl:documentation xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/">%(documentation)s</wsdl:documentation>
<wsdl:types>
<xsd:schema targetNamespace="%(namespace)s"
elementFormDefault="qualified"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
</xsd:schema>
</wsdl:types>
</wsdl:definitions>
""" % {'namespace': self.namespace, 'name': self.name, 'documentation': self.documentation}
wsdl = SimpleXMLElement(xml)
for method, (function, returns, args, doc) in self.methods.items():
# create elements:
def parse_element(name, values, array=False, complex=False):
if not complex:
element = wsdl('wsdl:types')('xsd:schema').add_child('xsd:element')
complex = element.add_child("xsd:complexType")
else:
complex = wsdl('wsdl:types')('xsd:schema').add_child('xsd:complexType')
element = complex
element['name'] = name
if values:
items = values
elif values is None:
items = [('value', None)]
else:
items = []
if not array and items:
all = complex.add_child("xsd:all")
elif items:
all = complex.add_child("xsd:sequence")
for k, v in items:
e = all.add_child("xsd:element")
e['name'] = k
if array:
e[:] = {'minOccurs': "0", 'maxOccurs': "unbounded"}
if v in TYPE_MAP.keys():
t = 'xsd:%s' % TYPE_MAP[v]
elif v is None:
t = 'xsd:anyType'
elif isinstance(v, list):
n = "ArrayOf%s%s" % (name, k)
l = []
for d in v:
l.extend(d.items())
parse_element(n, l, array=True, complex=True)
t = "tns:%s" % n
elif isinstance(v, dict):
n = "%s%s" % (name, k)
parse_element(n, v.items(), complex=True)
t = "tns:%s" % n
else:
raise TypeError("unknonw type v for marshalling" % str(v))
e.add_attribute('type', t)
parse_element("%s" % method, args and args.items())
parse_element("%sResponse" % method, returns and returns.items())
# create messages:
for m, e in ('Input', ''), ('Output', 'Response'):
message = wsdl.add_child('wsdl:message')
message['name'] = "%s%s" % (method, m)
part = message.add_child("wsdl:part")
part[:] = {'name': 'parameters',
'element': 'tns:%s%s' % (method, e)}
# create ports
portType = wsdl.add_child('wsdl:portType')
portType['name'] = "%sPortType" % self.name
for method, (function, returns, args, doc) in self.methods.items():
op = portType.add_child('wsdl:operation')
op['name'] = method
if doc:
op.add_child("wsdl:documentation", doc)
input = op.add_child("wsdl:input")
input['message'] = "tns:%sInput" % method
output = op.add_child("wsdl:output")
output['message'] = "tns:%sOutput" % method
# create bindings
binding = wsdl.add_child('wsdl:binding')
binding['name'] = "%sBinding" % self.name
binding['type'] = "tns:%sPortType" % self.name
soapbinding = binding.add_child('soap:binding')
soapbinding['style'] = "document"
soapbinding['transport'] = "http://schemas.xmlsoap.org/soap/http"
for method in self.methods.keys():
op = binding.add_child('wsdl:operation')
op['name'] = method
soapop = op.add_child('soap:operation')
soapop['soapAction'] = self.action + method
soapop['style'] = 'document'
input = op.add_child("wsdl:input")
##input.add_attribute('name', "%sInput" % method)
soapbody = input.add_child("soap:body")
soapbody["use"] = "literal"
output = op.add_child("wsdl:output")
##output.add_attribute('name', "%sOutput" % method)
soapbody = output.add_child("soap:body")
soapbody["use"] = "literal"
service = wsdl.add_child('wsdl:service')
service["name"] = "%sService" % self.name
service.add_child('wsdl:documentation', text=self.documentation)
port = service.add_child('wsdl:port')
port["name"] = "%s" % self.name
port["binding"] = "tns:%sBinding" % self.name
soapaddress = port.add_child('soap:address')
soapaddress["location"] = self.location
return wsdl.as_xml(pretty=True)
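# A minimal, hedged sketch of using SoapDispatcher without an HTTP server:
# build a dispatcher, register one typed method, then render the generated
# WSDL plus the sample request/response envelopes produced by help(). The
# service name, URLs and the Dummy signature are assumptions for illustration.
def _dispatcher_wsdl_example():
    d = SoapDispatcher(
        "ExampleWS",
        location="http://localhost:8008/",
        action="http://localhost:8008/",
        namespace="http://example.com/examplews/",
    )
    d.register_function('Dummy', lambda in0: in0,
                        returns={'out0': str}, args={'in0': str})
    wsdl_xml = d.wsdl()              # WSDL 1.1 document as a string
    req, res, doc = d.help('Dummy')  # sample request/response envelopes
    return wsdl_xml, req, res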
class SOAPHandler(BaseHTTPRequestHandler):
def do_GET(self):
"""User viewable help information and wsdl"""
args = self.path[1:].split("?")
if self.path != "/" and args[0] not in self.server.dispatcher.methods.keys():
self.send_error(404, "Method not found: %s" % args[0])
else:
if self.path == "/":
# return wsdl if no method supplied
response = self.server.dispatcher.wsdl()
else:
# return supplied method help (?request or ?response messages)
req, res, doc = self.server.dispatcher.help(args[0])
if len(args) == 1 or args[1] == "request":
response = req
else:
response = res
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.end_headers()
self.wfile.write(response)
def do_POST(self):
"""SOAP POST gateway"""
request = self.rfile.read(int(self.headers.get('content-length')))  # .get() works on both Python 2 and 3 header objects
fault = {}
# execute the method
response = self.server.dispatcher.dispatch(request, fault=fault)
# check if fault dict was completed (faultcode, faultstring, detail)
if fault:
self.send_response(500)
else:
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.end_headers()
self.wfile.write(response)
class WSGISOAPHandler(object):
def __init__(self, dispatcher):
self.dispatcher = dispatcher
def __call__(self, environ, start_response):
return self.handler(environ, start_response)
def handler(self, environ, start_response):
if environ['REQUEST_METHOD'] == 'GET':
return self.do_get(environ, start_response)
elif environ['REQUEST_METHOD'] == 'POST':
return self.do_post(environ, start_response)
else:
start_response('405 Method not allowed', [('Content-Type', 'text/plain')])
return ['Method not allowed']
def do_get(self, environ, start_response):
path = environ.get('PATH_INFO').lstrip('/')
query = environ.get('QUERY_STRING', '')
if path != "" and path not in self.dispatcher.methods.keys():
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ["Method not found: %s" % path]
elif path == "":
# return wsdl if no method supplied
response = self.dispatcher.wsdl()
else:
# return supplied method help (?request or ?response messages)
req, res, doc = self.dispatcher.help(path)
if len(query) == 0 or query == "request":
response = req
else:
response = res
start_response('200 OK', [('Content-Type', 'text/xml'), ('Content-Length', str(len(response)))])
return [response]
def do_post(self, environ, start_response):
length = int(environ['CONTENT_LENGTH'])
request = environ['wsgi.input'].read(length)
response = self.dispatcher.dispatch(request)
start_response('200 OK', [('Content-Type', 'text/xml'), ('Content-Length', str(len(response)))])
return [response]
if __name__ == "__main__":
dispatcher = SoapDispatcher(
name="PySimpleSoapSample",
location="http://localhost:8008/",
action='http://localhost:8008/', # SOAPAction
namespace="http://example.com/pysimplesoapsamle/", prefix="ns0",
documentation='Example soap service using PySimpleSoap',
trace=True, debug=True,
ns=True)
def adder(p, c, dt=None):
"""Add several values"""
dt = dt + datetime.timedelta(365)
return {'ab': p['a'] + p['b'], 'dd': c[0]['d'] + c[1]['d'], 'dt': dt}
def dummy(in0):
"""Just return input"""
return in0
def echo(request):
"""Copy request->response (generic, any type)"""
return request.value
dispatcher.register_function(
'Adder', adder,
returns={'AddResult': {'ab': int, 'dd': unicode, 'dt': datetime.date}},
args={'p': {'a': int, 'b': int}, 'dt': Date, 'c': [{'d': Decimal}]}
)
dispatcher.register_function(
'Dummy', dummy,
returns={'out0': str},
args={'in0': str}
)
dispatcher.register_function('Echo', echo)
if '--local' in sys.argv:
wsdl = dispatcher.wsdl()
for method, doc in dispatcher.list_methods():
request, response, doc = dispatcher.help(method)
if '--serve' in sys.argv:
log.info("Starting server...")
httpd = HTTPServer(("", 8008), SOAPHandler)
httpd.dispatcher = dispatcher
httpd.serve_forever()
if '--wsgi-serve' in sys.argv:
log.info("Starting wsgi server...")
from wsgiref.simple_server import make_server
application = WSGISOAPHandler(dispatcher)
wsgid = make_server('', 8008, application)
wsgid.serve_forever()
if '--consume' in sys.argv:
from .client import SoapClient
client = SoapClient(
location="http://localhost:8008/",
action='http://localhost:8008/', # SOAPAction
namespace="http://example.com/sample.wsdl",
soap_ns='soap',
trace=True,
ns=False
)
p = {'a': 1, 'b': 2}
c = [{'d': '1.20'}, {'d': '2.01'}]
response = client.Adder(p=p, dt='2010-07-24', c=c)
result = response.AddResult
log.info(int(result.ab))
log.info(str(result.dd))
if '--consume-wsdl' in sys.argv:
from .client import SoapClient
client = SoapClient(
wsdl="http://localhost:8008/",
)
p = {'a': 1, 'b': 2}
c = [{'d': '1.20'}, {'d': '2.01'}]
dt = datetime.date.today()
response = client.Adder(p=p, dt=dt, c=c)
result = response['AddResult']
log.info(int(result['ab']))
log.info(str(result['dd']))
|
|
"""
This module was taken from the PBSUITE (v 14.9.9) available at http://sourceforge.net/projects/pb-jelly/
It has been published with the following licensing:
##################################################
Copyright (c) '2013 Baylor College of Medicine
Contributors: Adam English ([email protected])
Affiliation: Human Genome Sequencing Center
URL: http://www.hgsc.bcm.tmc.edu/
https://sourceforge.net/projects/pb-jelly/
http://www.plosone.org/article/info%3Adoi%2F10.1371%2Fjournal.pone.0047768
http://www.biomedcentral.com/1471-2105/15/180
Citation: English, Adam C., Stephen Richards, Yi Han, Min Wang,
Vanesa Vee, Jiaxin Qu, Xiang Qin, et al. "Mind the
Gap: Upgrading Genomes with Pacific Biosciences RS
Long-Read Sequencing Technology." PLoS ONE 7, no. 11
(November 21, 2012): e47768.
doi:10.1371/journal.pone.0047768.
Citation: English, Adam C., William J. Salerno, Jeffery G.
Reid. "PBHoney: identyfying genomic variants via
long-read discordance and interrupted mapping."
BMC Bioinformatics 2014, 15:180 (June 10, 2014).
doi:10.1186/1471-2105-15-180
'
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
##################################################
"""
from string import Template
import tempfile
import subprocess, signal, logging, os, stat, sys
class Alarm(Exception):
pass
def alarm_handler(signum, frame):
raise Alarm
def exe(cmd, timeout=-1):
"""
Executes a command through the shell.
timeout is in minutes, so 1440 means 24 hours.
-1 means never
"""
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, \
stderr=subprocess.STDOUT, close_fds=True)
signal.signal(signal.SIGALRM, alarm_handler)
if timeout > 0:
signal.alarm(int(timeout*60))
try:
stdoutVal, stderrVal = proc.communicate()
print(cmd)
logging.debug("Executing {}".format(cmd))
logging.debug("STDERR:\n{}".format(stderrVal))
logging.debug("STDOUT:\n{}".format(stdoutVal))
signal.alarm(0) # reset the alarm
except Alarm:
logging.error(("Command was taking too long. "
"Automatic Timeout Initiated after %d minutes") \
% (timeout))
proc.kill()
return 214,None,None
retCode = proc.returncode
return retCode,stdoutVal,stderrVal
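# Hedged usage sketch for exe(): run a short shell command with a timeout.
# Remember the timeout is expressed in minutes (0.1 is six seconds); the
# command string is an assumption and requires a POSIX shell.
def _exe_example():
    ret_code, stdout_val, stderr_val = exe("echo hello", timeout=0.1)
    return ret_code == 0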
class Command():
def __init__(self, cmd, jobname, stdout, stderr):
self.cmd = cmd
self.jobname = jobname
self.stdout = stdout
self.stderr = stderr
def asDict(self):
return {"CMD":self.cmd, "JOBNAME":self.jobname, \
"STDOUT":self.stdout, "STDERR":self.stderr}
class CommandRunner():
"""
Uses a command template to run stuff. This is helpful for cluster commands
and chunking several commands together
"""
def __init__(self, template=None, njobs=0):
"""
template: a string that will become the template for submitting to your cluster:
#you can also go ahead and specify a string.Template
default is to not submit to your cluster
${CMD} > ${STDOUT} 2> ${STDERR}
njobs: (0)
for clumping commands together and submitting them in a script
"""
if template is None:
template = "${CMD} > ${STDOUT} 2> ${STDERR}"
self.runType = "Running"
else:
self.runType = "Submitting"
self.template = Template(template)
self.njobs = njobs
def __call__(self, cmds, wDir = None, id = None):
"""
Executes Commands - can either be a list or a single Command
wDir is the working directory where chunk scripts will be written
if id is None a random identifier will be applied when chunking
"""
if wDir is None:
wDir = "./"
if type(cmds) != list:
cmd = self.buildCommand(cmds)
return exe(cmd)
if self.njobs == 0:
outRet = []
for c in cmds:
outRet.append(exe(self.buildCommand(c)))
return outRet
if id is None:
id = tempfile.mkstemp(dir=wDir)[1]
outputRet =[]
for chunk, commands in enumerate( partition(cmds, self.njobs) ):
outScript = open(os.path.join(wDir, "%s_chunk%d.sh" % (id, chunk)),'w')
outScript.write("#!/bin/bash\n\n")
for c in commands:
outScript.write(c.cmd+"\n")
outScript.close()
# Add executable permission to the chunk script
existing_permissions = stat.S_IMODE(os.stat(outScript.name).st_mode)
if not os.access(outScript.name, os.X_OK):
new_permissions = existing_permissions | stat.S_IXUSR
os.chmod(outScript.name, new_permissions)
submit = Command(outScript.name, \
id + "_chunk%d" % chunk, \
os.path.join(wDir, id + ("_chunk%d.out" % chunk)), \
os.path.join(wDir, id + ("_chunk%d.err" % chunk)))
cmd = self.buildCommand(submit)
outputRet.append(exe(cmd))
return outputRet
def checkTemplate(self):
"""
Checks that my template works okay
"""
temp = {"CMD": "test",
"STDOUT": "testo",
"STDERR": "teste",
"JOBNAME": "testn"}
try:
w = self.template.substitute(temp)
except KeyError:
logging.error("Your submission template is invalid ")
sys.exit(1)
def buildCommand(self, cmdSetup):
"""
substitutes a template with a Command
"""
return self.template.substitute(cmdSetup.asDict())
def partition(n,m):
"""
Helper function: splits list n into m partitions.
"""
p = map(lambda x: list(), range(m))
p=list(p)
index = 0
for item in n:
p[index].append(item)
if index < m-1:
index += 1
else:
index = 0
return filter(lambda x: len(x)>0, p)
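# Hedged usage sketch tying Command, CommandRunner and partition() together.
# The template is passed explicitly here (it matches the documented local
# default); the command strings and file names are assumptions.
def _command_runner_example():
    runner = CommandRunner(template="${CMD} > ${STDOUT} 2> ${STDERR}")
    jobs = [Command("echo %d" % i, "job%d" % i, "job%d.out" % i, "job%d.err" % i)
            for i in range(3)]
    results = runner(jobs)             # list of (retCode, stdout, stderr) tuples
    chunks = list(partition(jobs, 2))  # the same jobs split into two partitions
    return results, chunks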
|
|
"""
CodeHilite Extension for Python-Markdown
========================================
Adds code/syntax highlighting to standard Python-Markdown code blocks.
See <https://Python-Markdown.github.io/extensions/code_hilite>
for documentation.
Original code Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..treeprocessors import Treeprocessor
from ..util import parseBoolValue
try: # pragma: no cover
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer
from pygments.formatters import get_formatter_by_name
pygments = True
except ImportError: # pragma: no cover
pygments = False
def parse_hl_lines(expr):
"""Support our syntax for emphasizing certain lines of code.
expr should be like '1 2' to emphasize lines 1 and 2 of a code block.
Returns a list of ints, the line numbers to emphasize.
"""
if not expr:
return []
try:
return list(map(int, expr.split()))
except ValueError: # pragma: no cover
return []
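# Tiny sketch of the hl_lines syntax handled above; the sample expressions are
# assumptions used only for illustration.
def _parse_hl_lines_example():
    assert parse_hl_lines('1 3') == [1, 3]  # emphasize lines 1 and 3
    assert parse_hl_lines('') == []         # no emphasis requested
    return True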
# ------------------ The Main CodeHilite Class ----------------------
class CodeHilite:
"""
Determine language of source code, and pass it on to the Pygments highlighter.
Usage:
code = CodeHilite(src=some_code, lang='python')
html = code.hilite()
Arguments:
* src: Source string or any object with a .readline attribute.
* lang: String name of Pygments lexer to use for highlighting. Default: `None`.
* guess_lang: Auto-detect which lexer to use. Ignored if `lang` is set to a valid
value. Default: `True`.
* use_pygments: Pass code to pygments for code highlighting. If `False`, the code is
instead wrapped for highlighting by a JavaScript library. Default: `True`.
* linenums: An alias to Pygments `linenos` formatter option. Default: `None`.
* css_class: An alias to Pygments `cssclass` formatter option. Default: 'codehilite'.
* lang_prefix: Prefix prepended to the language when `use_pygments` is `False`.
Default: "language-".
Other Options:
Any other options are accepted and passed on to the lexer and formatter. Therefore,
valid options include any options which are accepted by the `html` formatter or
whichever lexer the code's language uses. Note that most lexers do not have any
options. However, a few have very useful options, such as PHP's `startinline` option.
Any invalid options are ignored without error.
Formatter options: https://pygments.org/docs/formatters/#HtmlFormatter
Lexer Options: https://pygments.org/docs/lexers/
Advanced Usage:
code = CodeHilite(
src = some_code,
lang = 'php',
startinline = True, # Lexer option. Snippet does not start with `<?php`.
linenostart = 42, # Formatter option. Snippet starts on line 42.
hl_lines = [45, 49, 50], # Formatter option. Highlight lines 45, 49, and 50.
linenos = 'inline' # Formatter option. Avoid alignment problems.
)
html = code.hilite()
"""
def __init__(self, src, **options):
self.src = src
self.lang = options.pop('lang', None)
self.guess_lang = options.pop('guess_lang', True)
self.use_pygments = options.pop('use_pygments', True)
self.lang_prefix = options.pop('lang_prefix', 'language-')
if 'linenos' not in options:
options['linenos'] = options.pop('linenums', None)
if 'cssclass' not in options:
options['cssclass'] = options.pop('css_class', 'codehilite')
if 'wrapcode' not in options:
# Override pygments default
options['wrapcode'] = True
# Disallow use of `full` option
options['full'] = False
self.options = options
def hilite(self):
"""
Pass code to the [Pygments](http://pygments.pocoo.org/) highlighter with
optional line numbers. The output should then be styled with css to
your liking. No styles are applied by default - only styling hooks
(i.e.: <span class="k">).
returns : A string of html.
"""
self.src = self.src.strip('\n')
if self.lang is None:
self._parseHeader()
if pygments and self.use_pygments:
try:
lexer = get_lexer_by_name(self.lang, **self.options)
except ValueError:
try:
if self.guess_lang:
lexer = guess_lexer(self.src, **self.options)
else:
lexer = get_lexer_by_name('text', **self.options)
except ValueError: # pragma: no cover
lexer = get_lexer_by_name('text', **self.options)
formatter = get_formatter_by_name('html', **self.options)
return highlight(self.src, lexer, formatter)
else:
# just escape and build markup usable by JS highlighting libs
txt = self.src.replace('&', '&')
txt = txt.replace('<', '<')
txt = txt.replace('>', '>')
txt = txt.replace('"', '"')
classes = []
if self.lang:
classes.append('{}{}'.format(self.lang_prefix, self.lang))
if self.options['linenos']:
classes.append('linenums')
class_str = ''
if classes:
class_str = ' class="{}"'.format(' '.join(classes))
return '<pre class="{}"><code{}>{}\n</code></pre>\n'.format(
self.options['cssclass'],
class_str,
txt
)
def _parseHeader(self):
"""
Determines language of a code block from shebang line and whether the
said line should be removed or left in place. If the shebang line
contains a path (even a single /) then it is assumed to be a real
shebang line and left alone. However, if no path is given
(i.e.: #!python or :::python) then it is assumed to be a mock shebang
for language identification of a code fragment and removed from the
code block prior to processing for code highlighting. When a mock
shebang (i.e.: #!python) is found, line numbering is turned on. When
colons are found in place of a shebang (i.e.: :::python), line
numbering is left in the current state - off by default.
Also parses optional list of highlight lines, like:
:::python hl_lines="1 3"
"""
import re
# split text into lines
lines = self.src.split("\n")
# pull first line to examine
fl = lines.pop(0)
c = re.compile(r'''
(?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons
(?P<path>(?:/\w+)*[/ ])? # Zero or 1 path
(?P<lang>[\w#.+-]*) # The language
\s* # Arbitrary whitespace
# Optional highlight lines, single- or double-quote-delimited
(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?
''', re.VERBOSE)
# search first line for shebang
m = c.search(fl)
if m:
# we have a match
try:
self.lang = m.group('lang').lower()
except IndexError: # pragma: no cover
self.lang = None
if m.group('path'):
# path exists - restore first line
lines.insert(0, fl)
if self.options['linenos'] is None and m.group('shebang'):
# Overridable and Shebang exists - use line numbers
self.options['linenos'] = True
self.options['hl_lines'] = parse_hl_lines(m.group('hl_lines'))
else:
# No match
lines.insert(0, fl)
self.src = "\n".join(lines).strip("\n")
# ------------------ The Markdown Extension -------------------------------
class HiliteTreeprocessor(Treeprocessor):
""" Hilight source code in code blocks. """
def code_unescape(self, text):
"""Unescape code."""
text = text.replace("<", "<")
text = text.replace(">", ">")
# Escaped '&' should be replaced at the end to avoid
# conflicting with < and >.
text = text.replace("&", "&")
return text
def run(self, root):
""" Find code blocks and store in htmlStash. """
blocks = root.iter('pre')
for block in blocks:
if len(block) == 1 and block[0].tag == 'code':
code = CodeHilite(
self.code_unescape(block[0].text),
tab_length=self.md.tab_length,
style=self.config.pop('pygments_style', 'default'),
**self.config
)
placeholder = self.md.htmlStash.store(code.hilite())
# Clear codeblock in etree instance
block.clear()
# Change to p element which will later
# be removed when inserting raw html
block.tag = 'p'
block.text = placeholder
class CodeHiliteExtension(Extension):
""" Add source code hilighting to markdown codeblocks. """
def __init__(self, **kwargs):
# define default configs
self.config = {
'linenums': [None,
"Use lines numbers. True|table|inline=yes, False=no, None=auto"],
'guess_lang': [True,
"Automatic language detection - Default: True"],
'css_class': ["codehilite",
"Set class name for wrapper <div> - "
"Default: codehilite"],
'pygments_style': ['default',
'Pygments HTML Formatter Style '
'(Colorscheme) - Default: default'],
'noclasses': [False,
'Use inline styles instead of CSS classes - '
'Default false'],
'use_pygments': [True,
'Use Pygments to Highlight code blocks. '
'Disable if using a JavaScript library. '
'Default: True'],
'lang_prefix': [
'language-',
'Prefix prepended to the language when use_pygments is false. Default: "language-"'
]
}
for key, value in kwargs.items():
if key in self.config:
self.setConfig(key, value)
else:
# manually set unknown keywords.
if isinstance(value, str):
try:
# Attempt to parse str as a bool value
value = parseBoolValue(value, preserve_none=True)
except ValueError:
pass # Assume it's not a bool value. Use as-is.
self.config[key] = [value, '']
def extendMarkdown(self, md):
""" Add HilitePostprocessor to Markdown instance. """
hiliter = HiliteTreeprocessor(md)
hiliter.config = self.getConfigs()
md.treeprocessors.register(hiliter, 'hilite', 30)
md.registerExtension(self)
def makeExtension(**kwargs): # pragma: no cover
return CodeHiliteExtension(**kwargs)
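# Hedged usage sketch: enabling the extension on a Markdown instance. The
# indented code block in the input text is an assumption; with Pygments
# installed the output carries <span> styling hooks, otherwise the block is
# wrapped for a JavaScript highlighter instead.
def _codehilite_extension_example():
    import markdown  # the parent package; assumed to be importable as usual
    return markdown.markdown(
        "    #!python\n    print('hello')",
        extensions=[CodeHiliteExtension(linenums=False)],
    )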
|
|
"""
Tests for sigfoxapi.
The following environment variables have to be set:
SIGFOX_LOGIN_ID
SIGFOX_PASSWORD
SIGFOX_DEVICETYPE_ID
SIGFOX_DEVICE_ID
SIGFOX_USER_ID
SIGFOX_GROUP_ID
WARNING: These tests operate against the live Sigfox backend. Some tests
are specific to my personal setup.
"""
from __future__ import print_function
import os
import time
from nose.tools import raises
import sigfoxapi
sigfoxapi.DEBUG = True
SIGFOX_LOGIN_ID = os.environ['SIGFOX_LOGIN_ID']
SIGFOX_PASSWORD = os.environ['SIGFOX_PASSWORD']
SIGFOX_DEVICETYPE_ID = os.environ['SIGFOX_DEVICETYPE_ID']
SIGFOX_DEVICE_ID = os.environ['SIGFOX_DEVICE_ID']
SIGFOX_USER_ID = os.environ['SIGFOX_USER_ID']
SIGFOX_GROUP_ID = os.environ['SIGFOX_GROUP_ID']
TIMESTAMP = time.strftime('%Y-%m-%d %H:%M:%S')
SINCE = 1496239200
BEFORE = SINCE #int(time.time())
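# Minimal sketch (assumptions: a POSIX shell, nose installed, and this module
# saved as test_sigfoxapi.py) of how these live-backend tests are prepared:
#
#   export SIGFOX_LOGIN_ID=... SIGFOX_PASSWORD=...
#   export SIGFOX_DEVICETYPE_ID=... SIGFOX_DEVICE_ID=...
#   export SIGFOX_USER_ID=... SIGFOX_GROUP_ID=...
#   nosetests -v test_sigfoxapi.py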
@raises(sigfoxapi.SigfoxApiBadRequest)
def test_sigfoxapi_badrequest():
s = sigfoxapi.Sigfox('wrong_login_id', 'wrong_password')
s.group_info(SIGFOX_GROUP_ID)
@raises(sigfoxapi.SigfoxApiAuthError)
def test_sigfoxapi_autherror():
s = sigfoxapi.Sigfox('012345678901234567891234', '12345678901234567890123456789012')
s.group_info(SIGFOX_GROUP_ID)
# Doesn't work with @raises
def test_sigfoxapi_notfound():
s = sigfoxapi.Sigfox(SIGFOX_LOGIN_ID, SIGFOX_PASSWORD)
try:
s.group_info('123456789012345678901234')
except sigfoxapi.SigfoxApiNotFound:
pass
@raises(sigfoxapi.SigfoxApiError)
def test_sigfoxapi_apierror():
s = sigfoxapi.Sigfox(SIGFOX_LOGIN_ID, SIGFOX_PASSWORD)
s.group_info('does_not_exist')
# TODO: Add more exception testing
class _TestSigfoxBase(object):
def setup(self):
self.s = sigfoxapi.Sigfox(SIGFOX_LOGIN_ID, SIGFOX_PASSWORD)
sigfoxapi.RETURN_OBJECTS = False
class _TestSigfoxBaseObject(object):
def setup(self):
self.s = sigfoxapi.Sigfox(SIGFOX_LOGIN_ID, SIGFOX_PASSWORD)
sigfoxapi.RETURN_OBJECTS = True
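# Note: the two base classes above differ only in sigfoxapi.RETURN_OBJECTS.
# With False the API wrappers return plain dicts/lists (tests index results
# with ['key']); with True they return attribute-style sigfoxapi.Object
# instances (the *Object test classes below use dotted access), so each suite
# exists in both flavours.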
class TestSigfoxUsers(_TestSigfoxBase):
def test_user_list(self):
users = self.s.user_list(SIGFOX_GROUP_ID)
assert isinstance(users, list)
assert len(users) == 1
assert users[0]['timezone'] == 'Australia/Melbourne'
def test_user_list_limit(self):
users = self.s.user_list(SIGFOX_GROUP_ID, offset=0, limit=1)
assert len(users) == 1
# @raises(sigfoxapi.SigfoxApiError)
# def test_user_list_invalid_groupid(self):
# self.s.user_list('invalid')
class TestSigfoxUsersObject(_TestSigfoxBaseObject):
def test_user_list(self):
users = self.s.user_list(SIGFOX_GROUP_ID)
assert isinstance(users, sigfoxapi.Object)
assert len(users) == 1
assert users[0].timezone == 'Australia/Melbourne'
class TestSigfoxGroups(_TestSigfoxBase):
def test_group_info(self):
group = self.s.group_info(SIGFOX_GROUP_ID)
assert isinstance(group, dict)
assert group['id'] == SIGFOX_GROUP_ID
def test_group_list(self):
groups = self.s.group_list()
assert isinstance(groups, list)
assert len(groups) == 0
def test_group_list_limit(self):
groups = self.s.group_list(limit=1)
assert len(groups) <= 1
def test_group_list_offset(self):
groups = self.s.group_list(offset=10)
class TestSigfoxGroupsObject(_TestSigfoxBaseObject):
def test_group_info(self):
group = self.s.group_info(SIGFOX_GROUP_ID)
assert isinstance(group, sigfoxapi.Object)
assert group.id == SIGFOX_GROUP_ID
def test_group_list(self):
groups = self.s.group_list()
assert isinstance(groups, sigfoxapi.Object)
assert len(groups) == 0
class TestSigfoxDevicetypes(_TestSigfoxBase):
def test_devicetype_info_and_edit(self):
devicetype = self.s.devicetype_info(SIGFOX_DEVICETYPE_ID)
assert isinstance(devicetype, dict)
assert devicetype['id'] == SIGFOX_DEVICETYPE_ID
assert devicetype['description'] != TIMESTAMP
params = {'id': SIGFOX_DEVICETYPE_ID,
'description': TIMESTAMP}
self.s.devicetype_edit(SIGFOX_DEVICETYPE_ID, params)
devicetype = self.s.devicetype_info(SIGFOX_DEVICETYPE_ID)
assert devicetype['description'] == TIMESTAMP
def test_devicetype_list(self):
devicetypes = self.s.devicetype_list()
assert isinstance(devicetypes, list)
found = [devicetype for devicetype in devicetypes
if devicetype['id'] == SIGFOX_DEVICETYPE_ID]
assert len(found) == 1
def test_devicetype_errors(self):
errors = self.s.devicetype_errors(SIGFOX_DEVICETYPE_ID)
assert isinstance(errors, list)
if len(errors) > 0:
pass # TODO
def test_devicetype_errors_limit(self):
self.s.devicetype_errors(SIGFOX_DEVICETYPE_ID, limit=1)
def test_devicetype_errors_offset(self):
self.s.devicetype_errors(SIGFOX_DEVICETYPE_ID, offset=1)
def test_devicetype_errors_before(self):
errors = self.s.devicetype_errors(SIGFOX_DEVICETYPE_ID, before=BEFORE)
def test_devicetype_errors_since(self):
self.s.devicetype_errors(SIGFOX_DEVICETYPE_ID, since=SINCE)
def test_devicetype_warnings(self):
warnings = self.s.devicetype_warnings(SIGFOX_DEVICETYPE_ID)
assert isinstance(warnings, list)
if len(warnings) > 0:
pass # TODO
def test_devicetype_warnings_limit(self):
self.s.devicetype_warnings(SIGFOX_DEVICETYPE_ID, limit=1)
def test_devicetype_warnings_offset(self):
self.s.devicetype_warnings(SIGFOX_DEVICETYPE_ID, offset=1)
def test_devicetype_warnings_before(self):
self.s.devicetype_warnings(SIGFOX_DEVICETYPE_ID, before=BEFORE)
def test_devicetype_warnings_since(self):
self.s.devicetype_warnings(SIGFOX_DEVICETYPE_ID, since=SINCE)
def test_devicetype_messages(self):
messages = self.s.devicetype_messages(SIGFOX_DEVICETYPE_ID)
assert isinstance(messages, list)
if len(messages) > 0:
for message in messages:
assert isinstance(message['data'], str)
assert isinstance(message['device'], str)
assert isinstance(message['linkQuality'], str)
assert isinstance(message['snr'], str)
assert isinstance(message['time'], int)
def test_devicetype_messages_limit(self):
self.s.devicetype_messages(SIGFOX_DEVICETYPE_ID, limit=1)
def test_devicetype_messages_offset(self):
self.s.devicetype_messages(SIGFOX_DEVICETYPE_ID, offset=1)
def test_devicetype_messages_before(self):
self.s.devicetype_messages(SIGFOX_DEVICETYPE_ID, before=BEFORE)
def test_devicetype_messages_since(self):
self.s.devicetype_messages(SIGFOX_DEVICETYPE_ID, since=SINCE)
class TestSigfoxDevicetypesObject(_TestSigfoxBaseObject):
def test_devicetype_info(self):
devicetype = self.s.devicetype_info(SIGFOX_DEVICETYPE_ID)
assert isinstance(devicetype, sigfoxapi.Object)
assert devicetype.id == SIGFOX_DEVICETYPE_ID
def test_devicetype_list(self):
devicetypes = self.s.devicetype_list()
assert isinstance(devicetypes, sigfoxapi.Object)
found = [devicetype for devicetype in devicetypes
if devicetype.id == SIGFOX_DEVICETYPE_ID]
assert len(found) == 1
def test_devicetype_errors(self):
errors = self.s.devicetype_errors(SIGFOX_DEVICETYPE_ID)
assert isinstance(errors, sigfoxapi.Object)
if len(errors) > 0:
pass # TODO
def test_devicetype_warnings(self):
warnings = self.s.devicetype_warnings(SIGFOX_DEVICETYPE_ID)
assert isinstance(warnings, sigfoxapi.Object)
if len(warnings) > 0:
pass # TODO
def test_devicetype_messages(self):
messages = self.s.devicetype_messages(SIGFOX_DEVICETYPE_ID)
assert isinstance(messages, sigfoxapi.Object)
if len(messages) > 0:
for message in messages:
assert isinstance(message.data, str)
assert isinstance(message.device, str)
assert isinstance(message.linkQuality, str)
assert isinstance(message.snr, str)
CALLBACKID = None
class TestSigfoxCallbacks(_TestSigfoxBase):
def test_callback_0_list_and_edit(self):
callbacks = self.s.callback_list(SIGFOX_DEVICETYPE_ID)
found = [callback for callback in callbacks
if callback['channel'] == 'EMAIL' and
callback['message'] == TIMESTAMP]
assert found == []
new_callbacks = [
{
'channel': 'EMAIL',
'subject': 'SIGFOXAPI TEST',
'recipient': '[email protected]',
'message': TIMESTAMP,
'callbackType': 0,
'callbackSubtype': 2,
'enabled': False,
'sendDuplicate': False,
'payloadConfig': ''
}
]
self.s.callback_new(SIGFOX_DEVICETYPE_ID, new_callbacks)
callbacks = self.s.callback_list(SIGFOX_DEVICETYPE_ID)
found = [callback for callback in callbacks
if callback['channel'] == 'EMAIL' and
callback['message'] == TIMESTAMP]
assert len(found) == 1
global CALLBACKID
CALLBACKID = found[0]['id']
def test_callback_1_enable(self):
self.s.callback_enable(SIGFOX_DEVICETYPE_ID, CALLBACKID)
callbacks = self.s.callback_list(SIGFOX_DEVICETYPE_ID)
found = [callback for callback in callbacks
if callback['channel'] == 'EMAIL' and
callback['message'] == TIMESTAMP]
assert found[0]['enabled'] is True
def test_callback_2_disable(self):
self.s.callback_disable(SIGFOX_DEVICETYPE_ID, CALLBACKID)
callbacks = self.s.callback_list(SIGFOX_DEVICETYPE_ID)
found = [callback for callback in callbacks
if callback['channel'] == 'EMAIL' and
callback['message'] == TIMESTAMP]
assert found[0]['enabled'] is False
def test_callback_3_delete(self):
callbacks = self.s.callback_list(SIGFOX_DEVICETYPE_ID)
for callback in callbacks:
if callback['channel'] == 'EMAIL' and callback['subject'] == 'SIGFOXAPI TEST':
self.s.callback_delete(SIGFOX_DEVICETYPE_ID, callback['id'])
callbacks = self.s.callback_list(SIGFOX_DEVICETYPE_ID)
found = [callback for callback in callbacks
if callback['channel'] == 'EMAIL' and
callback['subject'] == 'SIGFOXAPI TEST']
assert len(found) == 0
def test_callback_errors(self):
errors = self.s.callback_errors()
for error in errors:
assert error['device']
assert error['deviceType']
assert error['data']
assert error['time']
assert error['snr']
def test_callback_errors_devicetypeid(self):
errors = self.s.callback_errors(deviceTypeId=SIGFOX_DEVICETYPE_ID)
for error in errors:
assert error['deviceType'] == SIGFOX_DEVICETYPE_ID
def test_callback_errors_deviceid(self):
errors = self.s.callback_errors(hexId=SIGFOX_DEVICE_ID)
for error in errors:
assert error['device'] == SIGFOX_DEVICE_ID
def test_callback_errors_groupid(self):
errors = self.s.callback_errors(groupId=SIGFOX_GROUP_ID)
def test_callback_errors_limit(self):
self.s.callback_errors(limit=1)
def test_callback_errors_offset(self):
self.s.callback_errors(offset=10)
def test_callback_errors_since(self):
self.s.callback_errors(since=SINCE)
@raises(sigfoxapi.SigfoxApiError)
def test_callback_errors_before(self):
self.s.callback_errors(before=BEFORE)
class TestSigfoxCallbacksObject(_TestSigfoxBaseObject):
def test_callback_list(self):
callbacks = self.s.callback_list(SIGFOX_DEVICETYPE_ID)
found = [callback for callback in callbacks
if callback.channel == 'EMAIL' and
callback.message == TIMESTAMP]
assert found == []
def test_callback_errors(self):
errors = self.s.callback_errors()
for error in errors:
assert error.device
assert error.deviceType
assert error.data
assert error.time
assert error.snr
class TestSigfoxDevices(_TestSigfoxBase):
def test_device_list(self):
devices = self.s.device_list(SIGFOX_DEVICETYPE_ID)
assert isinstance(devices, list)
found = [device for device in devices if device['id'] == SIGFOX_DEVICE_ID]
assert len(found) == 1
assert isinstance(found[0], dict)
def test_device_list_snr(self):
self.s.device_list(SIGFOX_DEVICETYPE_ID, snr=1)
def test_device_list_limit(self):
self.s.device_list(SIGFOX_DEVICETYPE_ID, limit=1)
def test_device_list_offset(self):
self.s.device_list(SIGFOX_DEVICETYPE_ID, offset=10)
def test_device_info(self):
device = self.s.device_info(SIGFOX_DEVICE_ID)
assert device['id'] == SIGFOX_DEVICE_ID
def test_device_tokenstate(self):
tokenstate = self.s.device_tokenstate(SIGFOX_DEVICE_ID)
assert tokenstate['code'] in [0,1,2]
def test_device_messages(self):
messages = self.s.device_messages(SIGFOX_DEVICE_ID)
assert isinstance(messages, list)
if len(messages) > 0:
for message in messages:
assert isinstance(message['data'], str)
assert isinstance(message['device'], str)
assert isinstance(message['linkQuality'], str)
assert isinstance(message['snr'], str)
assert isinstance(message['time'], int)
def test_device_messages_limit_offset(self):
messages1 = self.s.device_messages(SIGFOX_DEVICE_ID, limit=10)
assert len(messages1) == 10
messages2 = self.s.device_messages(SIGFOX_DEVICE_ID, limit=1, offset=9)
assert messages1[9]['time'] == messages2[0]['time']
def test_device_messages_before(self):
messages = self.s.device_messages(SIGFOX_DEVICE_ID, before=1497905981)
assert len(messages) == 1
@raises(sigfoxapi.SigfoxApiBadRequest)
def test_device_messages_before_invalid(self):
messages = self.s.device_messages(SIGFOX_DEVICE_ID, before=BEFORE)
assert len(messages) == 0
def test_device_messages_next_add(self):
messages = self.s.device_messages(SIGFOX_DEVICE_ID)
if len(messages) == 100:
assert self.s.next is not None
while self.s.next:
messages = messages + self.s.next()
assert len(messages) > 100
assert self.s.next is None
def test_device_messages_next_iadd(self):
messages = self.s.device_messages(SIGFOX_DEVICE_ID)
if len(messages) == 100:
assert self.s.next is not None
while self.s.next:
messages += self.s.next()
assert len(messages) > 100
assert self.s.next is None
def test_device_locations(self):
locations = self.s.device_locations(SIGFOX_DEVICE_ID)
assert isinstance(locations, list)
if len(locations) > 0:
for location in locations:
assert isinstance(location['valid'], bool)
def test_device_locations_limit(self):
self.s.device_locations(SIGFOX_DEVICE_ID, limit=1)
def test_device_locations_offset(self):
self.s.device_locations(SIGFOX_DEVICE_ID, offset=10)
def test_device_locations_since(self):
self.s.device_locations(SIGFOX_DEVICE_ID, since=SINCE)
@raises(sigfoxapi.SigfoxApiError)
def test_device_locations_before(self):
self.s.device_locations(SIGFOX_DEVICE_ID, before=BEFORE)
def test_device_warnings(self):
warnings = self.s.device_warnings(SIGFOX_DEVICE_ID)
assert isinstance(warnings, list)
def test_device_warnings_limit(self):
self.s.device_warnings(SIGFOX_DEVICE_ID, limit=1)
def test_device_warnings_offset(self):
self.s.device_warnings(SIGFOX_DEVICE_ID, offset=10)
def test_device_warnings_since(self):
self.s.device_warnings(SIGFOX_DEVICE_ID, since=SINCE)
@raises(sigfoxapi.SigfoxApiError)
def test_device_warnings_before(self):
self.s.device_warnings(SIGFOX_DEVICE_ID, before=BEFORE)
def test_device_errors(self):
errors = self.s.device_errors(SIGFOX_DEVICE_ID)
assert isinstance(errors, list)
def test_device_errors_limit(self):
errors = self.s.device_errors(SIGFOX_DEVICE_ID, limit=1)
def test_device_errors_offset(self):
errors = self.s.device_errors(SIGFOX_DEVICE_ID, offset=10)
def test_device_errors_since(self):
errors = self.s.device_errors(SIGFOX_DEVICE_ID, since=SINCE)
@raises(sigfoxapi.SigfoxApiError)
def test_device_errors_before(self):
errors = self.s.device_errors(SIGFOX_DEVICE_ID, before=BEFORE)
def test_device_networkstate(self):
networkstate = self.s.device_networkstate(SIGFOX_DEVICE_ID)
assert isinstance(networkstate, dict)
assert networkstate['networkStatus'] in ['OK', 'NOK']
def test_device_messagemetrics(self):
metrics = self.s.device_messagemetrics(SIGFOX_DEVICE_ID)
assert isinstance(metrics, dict)
assert isinstance(metrics['lastDay'], int)
assert isinstance(metrics['lastMonth'], int)
assert isinstance(metrics['lastWeek'], int)
def test_device_consumptions(self):
consumptions = self.s.device_messagemetrics(SIGFOX_DEVICE_ID)
assert isinstance(consumptions, dict)
class TestSigfoxDevicesObject(_TestSigfoxBaseObject):
def test_device_list(self):
devices = self.s.device_list(SIGFOX_DEVICETYPE_ID)
assert isinstance(devices, sigfoxapi.Object)
found = [device for device in devices if device.id == SIGFOX_DEVICE_ID]
assert len(found) == 1
assert isinstance(found[0], sigfoxapi.Object)
def test_device_info(self):
device = self.s.device_info(SIGFOX_DEVICE_ID)
assert device.id == SIGFOX_DEVICE_ID
def test_device_tokenstate(self):
tokenstate = self.s.device_tokenstate(SIGFOX_DEVICE_ID)
assert tokenstate.code in [0,1,2]
def test_device_messages(self):
messages = self.s.device_messages(SIGFOX_DEVICE_ID)
assert isinstance(messages, sigfoxapi.Object)
if len(messages) > 0:
for message in messages:
assert isinstance(message.data, str)
assert isinstance(message.device, str)
assert isinstance(message.linkQuality, str)
assert isinstance(message.snr, str)
assert isinstance(message.time, int)
def test_device_locations(self):
locations = self.s.device_locations(SIGFOX_DEVICE_ID)
assert isinstance(locations, sigfoxapi.Object)
if len(locations) > 0:
for location in locations:
assert isinstance(location.valid, bool)
def test_device_warnings(self):
warnings = self.s.device_warnings(SIGFOX_DEVICE_ID)
assert isinstance(warnings, sigfoxapi.Object)
def test_device_errors(self):
errors = self.s.device_errors(SIGFOX_DEVICE_ID)
assert isinstance(errors, sigfoxapi.Object)
def test_device_networkstate(self):
networkstate = self.s.device_networkstate(SIGFOX_DEVICE_ID)
assert isinstance(networkstate, sigfoxapi.Object)
assert networkstate.networkStatus in ['OK', 'NOK']
def test_device_messagemetrics(self):
metrics = self.s.device_messagemetrics(SIGFOX_DEVICE_ID)
assert isinstance(metrics, sigfoxapi.Object)
assert isinstance(metrics.lastDay, int)
assert isinstance(metrics.lastMonth, int)
assert isinstance(metrics.lastWeek, int)
def test_device_consumptions(self):
consumptions = self.s.device_messagemetrics(SIGFOX_DEVICE_ID)
class TestSigfoxCoverage(_TestSigfoxBase):
def test_coverage_redundancy(self):
redundancy = self.s.coverage_redundancy(43.415, 1.9693, mode='OUTDOOR')
assert redundancy['redundancy'] > 1
def test_coverage_predictions(self):
predictions = self.s.coverage_predictions(43.415, 1.9693)
assert len(predictions['margins']) == 3
class TestSigfoxCoverageObject(_TestSigfoxBaseObject):
def test_coverage_redundancy(self):
redundancy = self.s.coverage_redundancy(43.415, 1.9693, mode='OUTDOOR')
assert redundancy.redundancy > 1
def test_coverage_predictions(self):
predictions = self.s.coverage_predictions(43.415, 1.9693)
assert len(predictions.margins) == 3
|
|
import time
def timer_counter(func):
def wrapper(*args, **kwargs):
start_time = int(round(time.time() * 1000))
result = func(*args, **kwargs)
end_time = int(round(time.time() * 1000))
args[0].api_counter += 1
args[0].api_timer += (end_time - start_time)
args[0].api_last_resp_time = (end_time - start_time)
return result
return wrapper
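# Hedged sketch: timer_counter assumes it decorates bound methods of an object
# exposing api_counter, api_timer and api_last_resp_time (args[0] is `self`).
# The tiny class below is hypothetical and only demonstrates that contract.
class _TimedExample(object):
    def __init__(self):
        self.api_counter = 0
        self.api_timer = 0
        self.api_last_resp_time = 0

    @timer_counter
    def ping(self):
        return "pong"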
class VmaxApi(object):
def __init__(self, Restful, base_url):
self.rest = Restful
url = "%s/univmax/restapi" % (base_url)
self.rest.set_url(url)
self.version = 'v80'
self.api_counter = 0
self.api_timer = 0
self.api_last_resp_time = 0
def api_average_time(self):
return self.api_timer / self.api_counter if self.api_counter else 0  # avoid ZeroDivisionError before any API call
######################################
## ADMINISTRATION Resource group
######################################
@timer_counter
def get_app_list(self):
target_uri = "%s/common/Application/list" % (self.rest.url)
return self.rest.get(target_uri)
@timer_counter
def get_sharding_info(self):
target_uri = "%s/common/Sharding/info" % (self.rest.url)
return self.rest.get(target_uri)
######################################
## COMMON Resource group
######################################
@timer_counter
def get_iterator(self, iterator_id):
target_uri = "%s/common/Iterator/%s" % (self.rest.url, iterator_id)
return self.rest.get(target_uri)
@timer_counter
def get_iterator_page(self, iterator_id, params_dict=None):
target_uri = "%s/common/Iterator/%s/page" % (self.rest.url, iterator_id)
return self.rest.get(target_uri, params_dict)
######################################
## MANAGEMENT Resource group
######################################
@timer_counter
def get_usage_stats(self):
target_uri = "%s/management/RuntimeUsage/read" % (self.rest.url)
return self.rest.get(target_uri)
######################################
## PERFORMANCE Resource group
######################################
@timer_counter
def get_perf_array_alerts(self, params_dict):
target_uri = "%s/performance/Array/alerts" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_array_keys(self, params_dict):
target_uri = "%s/performance/Array/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_array_metrics(self, params_dict):
target_uri = "%s/performance/Array/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_bedirector_keys(self, params_dict):
target_uri = "%s/performance/BEDirector/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_bedirector_metrics(self, params_dict):
target_uri = "%s/performance/BEDirector/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_beemulation_keys(self, params_dict):
target_uri = "%s/performance/BeEmulation/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_beemulation_metrics(self, params_dict):
target_uri = "%s/performance/BeEmulation/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_beport_keys(self, params_dict):
target_uri = "%s/performance/BEPort/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_beport_metrics(self, params_dict):
target_uri = "%s/performance/BEPort/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_board_keys(self, params_dict):
target_uri = "%s/performance/Board/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_board_metrics(self, params_dict):
target_uri = "%s/performance/Board/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_cachepartition_keys(self, params_dict):
target_uri = "%s/performance/CachePartition/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_cachepartition_metrics(self, params_dict):
target_uri = "%s/performance/CachePartition/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_core_keys(self, params_dict):
target_uri = "%s/performance/Core/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_core_metrics(self, params_dict):
target_uri = "%s/performance/Core/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_database_keys(self, params_dict):
target_uri = "%s/performance/Database/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_database_metrics(self, params_dict):
target_uri = "%s/performance/Database/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_databasebypool_keys(self, params_dict):
target_uri = "%s/performance/DatabaseByPool/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_databasebypool_metrics(self, params_dict):
target_uri = "%s/performance/DatabaseByPool/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_devicegroup_keys(self, params_dict):
target_uri = "%s/performance/DeviceGroup/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_devicegroup_metrics(self, params_dict):
target_uri = "%s/performance/DeviceGroup/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_disk_keys(self, params_dict):
target_uri = "%s/performance/Disk/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_disk_metrics(self, params_dict):
target_uri = "%s/performance/Disk/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_diskgroup_keys(self, params_dict):
target_uri = "%s/performance/DiskGroup/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_diskgroup_metrics(self, params_dict):
target_uri = "%s/performance/DiskGroup/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_disktechpool_keys(self, params_dict):
target_uri = "%s/performance/DiskTechPool/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_disktechpool_metrics(self, params_dict):
target_uri = "%s/performance/DiskTechPool/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_dsepool_keys(self, params_dict):
target_uri = "%s/performance/DSEPool/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_dsepool_metrics(self, params_dict):
target_uri = "%s/performance/DSEPool/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_edsdirector_keys(self, params_dict):
target_uri = "%s/performance/EDSDirector/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_edsdirector_metrics(self, params_dict):
target_uri = "%s/performance/EDSDirector/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_edsemulation_keys(self, params_dict):
target_uri = "%s/performance/EDSEmulation/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_edsemulation_metrics(self, params_dict):
target_uri = "%s/performance/EDSEmulation/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_externaldisk_keys(self, params_dict):
target_uri = "%s/performance/ExternalDisk/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_externaldisk_metrics(self, params_dict):
target_uri = "%s/performance/ExternalDisk/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_externaldiskgroup_keys(self, params_dict):
target_uri = "%s/performance/ExternalDiskGroup/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_externaldiskgroup_metrics(self, params_dict):
target_uri = "%s/performance/ExternalDiskGroup/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_fastpolicy_keys(self, params_dict):
target_uri = "%s/performance/FASTPolicy/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_fastpolicy_metrics(self, params_dict):
target_uri = "%s/performance/FASTPolicy/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_fedirector_keys(self, params_dict):
target_uri = "%s/performance/FEDirector/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_fedirector_metrics(self, params_dict):
target_uri = "%s/performance/FEDirector/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_fedirectorbyport_keys(self, params_dict):
target_uri = "%s/performance/FEDirectorByPort/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_fedirectorbyport_metrics(self, params_dict):
target_uri = "%s/performance/FEDirectorByPort/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_feemulation_keys(self, params_dict):
target_uri = "%s/performance/FeEmulation/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_feemulation_metrics(self, params_dict):
target_uri = "%s/performance/FeEmulation/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_feport_keys(self, params_dict):
target_uri = "%s/performance/FEPort/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_feport_metrics(self, params_dict):
target_uri = "%s/performance/FEPort/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_ficonemulation_keys(self, params_dict):
target_uri = "%s/performance/FiconEmulation/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_ficonemulation_metrics(self, params_dict):
target_uri = "%s/performance/FiconEmulation/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_imdirector_keys(self, params_dict):
target_uri = "%s/performance/IMDirector/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_imdirector_metrics(self, params_dict):
target_uri = "%s/performance/IMDirector/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_imemulation_keys(self, params_dict):
target_uri = "%s/performance/IMEmulation/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_imemulation_metrics(self, params_dict):
target_uri = "%s/performance/IMEmulation/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_portgroup_keys(self, params_dict):
target_uri = "%s/performance/PortGroup/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_portgroup_metrics(self, params_dict):
target_uri = "%s/performance/PortGroup/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_rdfa_keys(self, params_dict):
target_uri = "%s/performance/RDFA/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_rdfa_metrics(self, params_dict):
target_uri = "%s/performance/RDFA/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_rdfdirector_keys(self, params_dict):
target_uri = "%s/performance/RDFDirector/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_rdfdirector_metrics(self, params_dict):
target_uri = "%s/performance/RDFDirector/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_rdfemulation_keys(self, params_dict):
target_uri = "%s/performance/RDFEmulation/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_rdfemulation_metrics(self, params_dict):
target_uri = "%s/performance/RDFEmulation/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_rdfport_keys(self, params_dict):
target_uri = "%s/performance/RDFPort/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_rdfport_metrics(self, params_dict):
target_uri = "%s/performance/RDFPort/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_rdfs_keys(self, params_dict):
target_uri = "%s/performance/RDFS/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_rdfs_metrics(self, params_dict):
target_uri = "%s/performance/RDFS/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_settings_importfiles(self, params_dict):
target_uri = "%s/performance/Settings/importFiles" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_snappool_keys(self, params_dict):
target_uri = "%s/performance/SnapPool/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_snappool_metrics(self, params_dict):
target_uri = "%s/performance/SnapPool/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_srp_keys(self, params_dict):
target_uri = "%s/performance/SRP/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_srp_metrics(self, params_dict):
target_uri = "%s/performance/SRP/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_srpthinpool_keys(self, params_dict):
target_uri = "%s/performance/SRPThinPool/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_storagegroup_keys(self, params_dict):
target_uri = "%s/performance/StorageGroup/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_storagegroup_metrics(self, params_dict):
target_uri = "%s/performance/StorageGroup/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_storagegroupbypool_keys(self, params_dict):
target_uri = "%s/performance/StorageGroupByPool/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_storagegroupbypool_metrics(self, params_dict):
target_uri = "%s/performance/StorageGroupByPool/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_storagegroupbytier_keys(self, params_dict):
target_uri = "%s/performance/StorageGroupByTier/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_storagegroupbytier_metrics(self, params_dict):
target_uri = "%s/performance/StorageGroupByTier/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_storagegroupbytier_perfkeys(self, params_dict):
target_uri = "%s/performance/StorageGroupByTier/perf/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_storagetier_keys(self, params_dict):
target_uri = "%s/performance/StorageTier/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_storagetier_metrics(self, params_dict):
target_uri = "%s/performance/StorageTier/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_summary_keys(self, params_dict):
target_uri = "%s/performance/Summary/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_thinpool_keys(self, params_dict):
target_uri = "%s/performance/ThinPool/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_thinpool_metrics(self, params_dict):
target_uri = "%s/performance/ThinPool/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_thintier_keys(self, params_dict):
target_uri = "%s/performance/ThinTier/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_thintier_metrics(self, params_dict):
target_uri = "%s/performance/ThinTier/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_tierbystoragegroup_keys(self, params_dict):
target_uri = "%s/performance/TierByStorageGroup/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_tierbystoragegroup_metrics(self, params_dict):
target_uri = "%s/performance/TierByStorageGroup/metrics" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_perf_tierbystoragegroup_perfkeys(self, params_dict):
target_uri = "%s/performance/TierByStorageGroup/perf/keys" % (self.rest.url)
return self.rest.post(target_uri, params_dict)
######################################
## PROVISIONING Resource group
######################################
@timer_counter
def get_prov_arrays(self):
target_uri = "%s/provisioning/symmetrix" % (self.rest.url)
return self.rest.get(target_uri)
@timer_counter
def get_prov_array(self, array_id):
target_uri = "%s/provisioning/symmetrix/%s" % (self.rest.url, array_id)
return self.rest.get(target_uri)
@timer_counter
def get_prov_array_directors(self, array_id):
target_uri = "%s/provisioning/symmetrix/%s/director" % (self.rest.url, array_id)
return self.rest.get(target_uri)
@timer_counter
def get_prov_array_director(self, array_id, director_id):
target_uri = "%s/provisioning/symmetrix/%s/director/%s" % (self.rest.url, array_id, director_id)
return self.rest.get(target_uri)
@timer_counter
def get_prov_array_director_ports(self, array_id, director_id, params_dict=None):
target_uri = "%s/provisioning/symmetrix/%s/director/%s/port" % (self.rest.url, array_id, director_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def get_prov_array_director_port(self, array_id, director_id, port_id):
target_uri = "%s/provisioning/symmetrix/%s/director/%s/port/%s" % (self.rest.url, array_id, director_id, port_id)
return self.rest.get(target_uri)
@timer_counter
def get_prov_array_fastpolicies(self, array_id, params_dict=None):
target_uri = "%s/provisioning/symmetrix/%s/fastpolicy" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def create_fastpolicy(self, array_id, policy_name, params_dict):
target_uri = "%s/provisioning/symmetrix%s/fastpolicy/%s" % (self.rest.url, array_id, policy_name)
return self.rest.post(target_uri, params_dict)
@timer_counter
def edit_fastpolicy(self, array_id, policy_name, params_dict):
target_uri = "%s/provisioning/symmetrix%s/fastpolicy/%s" % (self.rest.url, array_id, policy_name)
return self.rest.put(target_uri, params_dict)
@timer_counter
def delete_fastpolicy(self, array_id, policy_id):
target_uri = "%s/provisioning/symmetrix%s/fastpolicy/%s" % (self.rest.url, array_id, policy_id)
return self.rest.delete(target_uri)
@timer_counter
def get_prov_array_fastpolicy(self, array_id, policy_id):
target_uri = "%s/provisioning/symmetrix%s/fastpolicy/%s" % (self.rest.url, array_id, policy_id)
return self.rest.get(target_uri)
@timer_counter
def get_prov_array_hosts(self, array_id):
target_uri = "%s/provisioning/symmetrix/%s/host" % (self.rest.url, array_id)
return self.rest.get(target_uri)
@timer_counter
def get_prov_array_host(self, array_id, host_id):
target_uri = "%s/provisioning/symmetrix%s/host/%s" % (self.rest.url, array_id, host_id)
return self.rest.get(target_uri)
@timer_counter
def create_prov_array_host(self, array_id, params_dict):
target_uri = "%s/provisioning/symmetrix%s/host" % (self.rest.url, array_id)
return self.rest.post(target_uri, params_dict)
@timer_counter
def edit_prov_array_host(self, array_id, host_id, params_dict):
target_uri = "%s/provisioning/symmetrix%s/host/%s" % (self.rest.url, array_id, host_id)
return self.rest.put(target_uri, params_dict)
@timer_counter
def delete_prov_array_host(self, array_id, host_id):
target_uri = "%s/provisioning/symmetrix%s/host/%s" % (self.rest.url, array_id, host_id)
return self.rest.delete(target_uri)
@timer_counter
def get_prov_array_hostgroups(self, array_id):
target_uri = "%s/provisioning/symmetrix/%s/hostgroup" % (self.rest.url, array_id)
return self.rest.get(target_uri)
@timer_counter
def create_prov_array_hostgroup(self, array_id, params_dict):
target_uri = "%s/provisioning/symmetrix/%s/hostgroup" % (self.rest.url, array_id)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_prov_array_hostgroup(self, array_id, hostgroup_id):
target_uri = "%s/provisioning/symmetrix/%s/hostgroup/%s" % (self.rest.url, array_id, hostgroup_id)
return self.rest.get(target_uri)
@timer_counter
def edit_prov_array_hostgroup(self, array_id, hostgroup_id, params_dict):
target_uri = "%s/provisioning/symmetrix/%s/hostgroup/%s" % (self.rest.url, array_id, hostgroup_id)
return self.rest.put(target_uri, params_dict)
@timer_counter
def delete_prov_array_hostgroup(self, array_id, hostgroup_id):
target_uri = "%s/provisioning/symmetrix/%s/hostgroup/%s" % (self.rest.url, array_id, hostgroup_id)
return self.rest.delete(target_uri)
@timer_counter
def get_prov_array_initiators(self, array_id, params_dict=None):
target_uri = "%s/provisioning/symmetrix/%s/initiator" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def get_prov_array_initiator(self, array_id, initiator_id):
target_uri = "%s/provisioning/symmetrix/%s/initiator/%s" % (self.rest.url, array_id, initiator_id)
return self.rest.get(target_uri)
@timer_counter
def edit_prov_array_initiator(self, array_id, initiator_id, params_dict):
target_uri = "%s/provisioning/symmetrix/%s/initiator/%s" % (self.rest.url, array_id, initiator_id)
return self.rest.put(target_uri, params_dict)
@timer_counter
def get_prov_array_maskingviews(self, array_id, params_dict=None):
target_uri = "%s/provisioning/symmetrix/%s/maskingview" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def create_prov_array_maskingviews(self, array_id, params_dict):
target_uri = "%s/provisioning/symmetrix/%s/maskingview" % (self.rest.url, array_id)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_prov_array_maskingview(self, array_id, maskingview_id):
target_uri = "%s/provisioning/symmetrix/%s/maskingview/%s" % (self.rest.url, array_id, maskingview_id)
return self.rest.get(target_uri)
@timer_counter
def edit_prov_array_maskingview(self, array_id, maskingview_id, params_dict):
target_uri = "%s/provisioning/symmetrix/%s/maskingview/%s" % (self.rest.url, array_id, maskingview_id)
return self.rest.put(target_uri, params_dict)
@timer_counter
def delete_prov_array_maskingview(self, array_id, maskingview_id):
target_uri = "%s/provisioning/symmetrix/%s/maskingview/%s" % (self.rest.url, array_id, maskingview_id)
return self.rest.delete(target_uri)
@timer_counter
def get_prov_array_maskingview_connections(self, array_id, maskingview_id, params_dict=None):
target_uri = "%s/provisioning/symmetrix/%s/maskingview/%s/connections" % (self.rest.url, array_id, maskingview_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def get_prov_array_ports(self, array_id, params_dict=None):
target_uri = "%s/provisioning/symmetrix/%s/port" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
    def get_prov_array_portgroups(self, array_id, params_dict=None):
target_uri = "%s/provisioning/symmetrix/%s/portgroup" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
    def create_prov_array_portgroups(self, array_id, params_dict):
target_uri = "%s/provisioning/symmetrix/%s/portgroup" % (self.rest.url, array_id)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_prov_array_portgroup(self, array_id, portgroup_id):
target_uri = "%s/provisioning/symmetrix/%s/portgroup/%s" % (self.rest.url, array_id, portgroup_id)
return self.rest.get(target_uri)
@timer_counter
def edit_prov_array_portgroup(self, array_id, portgroup_id, params_dict):
target_uri = "%s/provisioning/symmetrix/%s/portgroup/%s" % (self.rest.url, array_id, portgroup_id)
return self.rest.put(target_uri, params_dict)
@timer_counter
def delete_prov_array_portgroup(self, array_id, portgroup_id):
target_uri = "%s/provisioning/symmetrix/%s/portgroup/%s" % (self.rest.url, array_id, portgroup_id)
return self.rest.delete(target_uri)
@timer_counter
def get_prov_array_storagegroups(self, array_id, params_dict=None):
target_uri = "%s/provisioning/symmetrix/%s/storagegroup" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def create_prov_array_storagegroups(self, array_id, params_dict):
target_uri = "%s/provisioning/symmetrix/%s/storagegroup" % (self.rest.url, array_id)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_prov_array_storagegroup(self, array_id, storagegroup_id):
target_uri = "%s/provisioning/symmetrix/%s/storagegroup/%s" % (self.rest.url, array_id, storagegroup_id)
return self.rest.get(target_uri)
@timer_counter
def edit_prov_array_storagegroup(self, array_id, storagegroup_id, params_dict):
target_uri = "%s/provisioning/symmetrix/%s/storagegroup/%s" % (self.rest.url, array_id, storagegroup_id)
        return self.rest.put(target_uri, params_dict)
@timer_counter
def delete_prov_array_storagegroup(self, array_id, storagegroup_id):
target_uri = "%s/provisioning/symmetrix/%s/storagegroup/%s" % (self.rest.url, array_id, storagegroup_id)
return self.rest.delete(target_uri)
@timer_counter
def get_prov_array_thinpools(self, array_id, params_dict=None):
target_uri = "%s/provisioning/symmetrix/%s/thinpool" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def create_prov_array_thinpools(self, array_id, params_dict=None):
target_uri = "%s/provisioning/symmetrix/%s/thinpool" % (self.rest.url, array_id)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_prov_array_thinpool(self, array_id, thinpool_id):
target_uri = "%s/provisioning/symmetrix/%s/thinpool/%s" % (self.rest.url, array_id, thinpool_id)
return self.rest.get(target_uri)
@timer_counter
def edit_prov_array_thinpool(self, array_id, thinpool_id, params_dict):
target_uri = "%s/provisioning/symmetrix/%s/thinpool/%s" % (self.rest.url, array_id, thinpool_id)
return self.rest.put(target_uri, params_dict)
@timer_counter
def delete_prov_array_thinpool(self, array_id, thinpool_id):
target_uri = "%s/provisioning/symmetrix/%s/thinpool/%s" % (self.rest.url, array_id, thinpool_id)
return self.rest.delete(target_uri)
@timer_counter
def get_prov_array_tiers(self, array_id, params_dict=None):
target_uri = "%s/provisioning/symmetrix/%s/tier" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def create_prov_array_tier(self, array_id, params_dict):
target_uri = "%s/provisioning/symmetrix/%s/tier" % (self.rest.url, array_id)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_prov_array_tier(self, array_id, tier_id):
target_uri = "%s/provisioning/symmetrix/%s/tier/%s" % (self.rest.url, array_id, tier_id)
return self.rest.get(target_uri)
@timer_counter
def delete_prov_array_tier(self, array_id, tier_id):
target_uri = "%s/provisioning/symmetrix/%s/tier/%s" % (self.rest.url, array_id, tier_id)
return self.rest.delete(target_uri)
@timer_counter
def get_prov_array_volumes(self, array_id, params_dict=None):
target_uri = "%s/provisioning/symmetrix/%s/volume" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def get_prov_array_volume(self, array_id, volume_id):
target_uri = "%s/provisioning/symmetrix/%s/volume/%s" % (self.rest.url, array_id, volume_id)
return self.rest.get(target_uri)
@timer_counter
def delete_prov_array_volume(self, array_id, volume_id):
target_uri = "%s/provisioning/symmetrix/%s/volume/%s" % (self.rest.url, array_id, volume_id)
return self.rest.delete(target_uri)
######################################
## REPLICATION Resource group
######################################
# TODO
######################################
## SLO PROVISIONING Resource group
######################################
@timer_counter
def get_slo_arrays(self):
target_uri = "%s/sloprovisioning/symmetrix" % (self.rest.url)
return self.rest.get(target_uri)
@timer_counter
def get_slo_array(self, array_id):
target_uri = "%s/sloprovisioning/symmetrix/%s" % (self.rest.url, array_id)
return self.rest.get(target_uri)
@timer_counter
def get_slo_array_directors(self, array_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/director" % (self.rest.url, array_id)
return self.rest.get(target_uri)
@timer_counter
def get_slo_array_director(self, array_id, director_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/director/%s" % (self.rest.url, array_id, director_id)
return self.rest.get(target_uri)
@timer_counter
def get_slo_array_director_ports(self, array_id, director_id, params_dict=None):
target_uri = "%s/sloprovisioning/symmetrix/%s/director/%s/port" % (self.rest.url, array_id, director_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def get_slo_array_port(self, array_id, director_id, port_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/director/%s/port/%s" % (self.rest.url, array_id, director_id, port_id)
return self.rest.get(target_uri)
@timer_counter
def get_slo_array_hosts(self, array_id, params_dict=None):
target_uri = "%s/sloprovisioning/symmetrix/%s/host" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def create_slo_array_host(self, array_id, params_dict):
target_uri = "%s/sloprovisioning/symmetrix/%s/host" % (self.rest.url, array_id)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_slo_array_host(self, array_id, host_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/host/%s" % (self.rest.url, array_id, host_id)
return self.rest.get(target_uri)
@timer_counter
def edit_slo_array_host(self, array_id, host_id, params_dict):
target_uri = "%s/sloprovisioning/symmetrix/%s/host/%s" % (self.rest.url, array_id, host_id)
return self.rest.put(target_uri, params_dict)
    @timer_counter
    def delete_slo_array_host(self, array_id, host_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/host/%s" % (self.rest.url, array_id, host_id)
return self.rest.delete(target_uri)
@timer_counter
def get_slo_array_hostgroups(self, array_id, params_dict=None):
target_uri = "%s/sloprovisioning/symmetrix/%s/hostgroup" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def create_slo_array_hostgroup(self, array_id, params_dict):
target_uri = "%s/sloprovisioning/symmetrix/%s/hostgroup" % (self.rest.url, array_id)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_slo_array_hostgroup(self, array_id, group_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/hostgroup/%s" % (self.rest.url, array_id, group_id)
return self.rest.get(target_uri)
@timer_counter
def edit_slo_array_hostgroup(self, array_id, group_id, params_dict):
target_uri = "%s/sloprovisioning/symmetrix/%s/hostgroup/%s" % (self.rest.url, array_id, group_id)
return self.rest.put(target_uri, params_dict)
@timer_counter
def delete_slo_array_hostgroup(self, array_id, group_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/hostgroup/%s" % (self.rest.url, array_id, group_id)
return self.rest.delete(target_uri)
@timer_counter
def get_slo_array_initiators(self, array_id, params_dict=None):
target_uri = "%s/sloprovisioning/symmetrix/%s/initiator" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def get_slo_array_initiator(self, array_id, initiator_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/initiator/%s" % (self.rest.url, array_id, initatorId)
return self.rest.get(target_uri)
@timer_counter
def edit_slo_array_initiator(self, array_id, initiator_id, params_dict):
target_uri = "%s/sloprovisioning/symmetrix/%s/initiator/%s" % (self.rest.url, array_id, initatorId)
return self.rest.put(target_uri, params_dict)
@timer_counter
def get_slo_array_maskingviews(self, array_id, params_dict=None):
target_uri = "%s/sloprovisioning/symmetrix/%s/maskingview" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def create_slo_array_maskingviews(self, array_id, params_dict):
target_uri = "%s/sloprovisioning/symmetrix/%s/maskingview" % (self.rest.url, array_id)
return self.rest.post(target_uri, params_dict)
@timer_counter
    def get_slo_array_maskingview(self, array_id, mv_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/maskingview/%s" % (self.rest.url, array_id, mv_id)
return self.rest.get(target_uri)
@timer_counter
    def edit_slo_array_maskingview(self, array_id, mv_id, params_dict):
target_uri = "%s/sloprovisioning/symmetrix/%s/maskingview/%s" % (self.rest.url, array_id, mv_id)
return self.rest.put(target_uri, params_dict)
@timer_counter
    def delete_slo_array_maskingview(self, array_id, mv_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/maskingview/%s" % (self.rest.url, array_id, mv_id)
return self.rest.delete(target_uri)
@timer_counter
def get_slo_array_maskingview_connections(self, array_id, mv_id, params_dict=None):
target_uri = "%s/sloprovisioning/symmetrix/%s/maskingview/%s/connections" % (self.rest.url, array_id, mv_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def get_slo_array_ports(self, array_id, params_dict=None):
target_uri = "%s/sloprovisioning/symmetrix/%s/port" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def get_slo_array_portgroups(self, array_id, params_dict=None):
target_uri = "%s/sloprovisioning/symmetrix/%s/portgroup" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def create_slo_array_portgroup(self, array_id, params_dict):
target_uri = "%s/sloprovisioning/symmetrix/%s/portgroup" % (self.rest.url, array_id)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_slo_array_portgroup(self, array_id, portgroup_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/portgroup/%s" % (self.rest.url, array_id, portgroup_id)
return self.rest.get(target_uri)
@timer_counter
def edit_slo_array_portgroup(self, array_id, portgroup_id, params_dict):
target_uri = "%s/sloprovisioning/symmetrix/%s/portgroup/%s" % (self.rest.url, array_id, portgroup_id)
        return self.rest.put(target_uri, params_dict)
@timer_counter
def delete_slo_array_portgroup(self, array_id, portgroup_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/portgroup/%s" % (self.rest.url, array_id, portgroup_id)
return self.rest.delete(target_uri)
@timer_counter
def get_slo_array_slos(self, array_id, params_dict=None):
target_uri = "%s/sloprovisioning/symmetrix/%s/slo" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def get_slo_array_slo(self, array_id, slo_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/slo/%s" % (self.rest.url, array_id, slo_id)
return self.rest.get(target_uri)
@timer_counter
def edit_slo_array_slo(self, array_id, slo_id, params_dict):
target_uri = "%s/sloprovisioning/symmetrix/%s/slo/%s" % (self.rest.url, array_id, slo_id)
return self.rest.put(target_uri, params_dict)
@timer_counter
def get_slo_array_srps(self, array_id, params_dict=None):
target_uri = "%s/sloprovisioning/symmetrix/%s/srp" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def get_slo_array_srp(self, array_id, srp_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/srp/%s" % (self.rest.url, array_id, srp_id)
return self.rest.get(target_uri)
@timer_counter
def get_slo_array_storagegroups(self, array_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/storagegroup" % (self.rest.url, array_id)
return self.rest.get(target_uri)
@timer_counter
def create_slo_array_storagegroup(self, array_id, params_dict):
target_uri = "%s/sloprovisioning/symmetrix/%s/storagegroup" % (self.rest.url, array_id)
return self.rest.post(target_uri, params_dict)
@timer_counter
def get_slo_array_storagegroup(self, array_id, sg_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/storagegroup/%s" % (self.rest.url, array_id, sg_id)
return self.rest.get(target_uri)
@timer_counter
def edit_slo_array_storagegroup(self, array_id, sg_id, params_dict):
target_uri = "%s/sloprovisioning/symmetrix/%s/storagegroup/%s" % (self.rest.url, array_id, sg_id)
return self.rest.put(target_uri, params_dict)
@timer_counter
def delete_slo_array_storagegroup(self, array_id, sg_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/storagegroup/%s" % (self.rest.url, array_id, sg_id)
return self.rest.delete(target_uri)
@timer_counter
def get_slo_array_volumes(self, array_id, params_dict=None):
target_uri = "%s/sloprovisioning/symmetrix/%s/volume" % (self.rest.url, array_id)
return self.rest.get(target_uri, params_dict)
@timer_counter
def get_slo_array_volume(self, array_id, volume_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/volume/%s" % (self.rest.url, array_id, volume_id)
return self.rest.get(target_uri)
@timer_counter
def delete_slo_array_volume(self, array_id, volume_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/volume/%s" % (self.rest.url, array_id, volume_id)
return self.rest.delete(target_uri)
@timer_counter
def get_slo_array_workloads(self, array_id):
target_uri = "%s/sloprovisioning/symmetrix/%s/workloadtype" % (self.rest.url, array_id)
return self.rest.get(target_uri)
######################################
## SYSTEM Resource group
######################################
@timer_counter
def get_alerts(self):
target_uri = "%s/system/alert" % (self.rest.url)
return self.rest.get(target_uri)
@timer_counter
def get_alert(self, array_id):
target_uri = "%s/system/alert/%s" % (self.rest.url, array_id)
return self.rest.get(target_uri)
@timer_counter
def get_jobs(self):
target_uri = "%s/system/job" % (self.rest.url)
return self.rest.get(target_uri)
@timer_counter
def get_job(self, array_id):
target_uri = "%s/system/job/%s" % (self.rest.url, array_id)
return self.rest.get(target_uri)
@timer_counter
def get_arrays(self):
target_uri = "%s/system/symmetrix" % (self.rest.url)
return self.rest.get(target_uri)
@timer_counter
def get_array(self, array_id):
target_uri = "%s/system/symmetrix/%s" % (self.rest.url, array_id)
return self.rest.get(target_uri)
@timer_counter
def get_array_alerts(self, array_id):
target_uri = "%s/system/symmetrix/%s/alert" % (self.rest.url, array_id)
return self.rest.get(target_uri)
@timer_counter
def get_array_alert(self, array_id, alert_id):
target_uri = "%s/system/symmetrix/%s/alert/%s" % (self.rest.url, array_id, alert_id)
return self.rest.get(target_uri)
@timer_counter
def get_array_jobs(self, array_id):
target_uri = "%s/system/symmetrix/%s/job" % (self.rest.url, array_id)
return self.rest.get(target_uri)
@timer_counter
def get_array_job(self, array_id, job_id):
target_uri = "%s/system/symmetrix/%s/job/%s" % (self.rest.url, array_id, job_id)
return self.rest.get(target_uri)
@timer_counter
def get_version(self):
target_uri = "%s/system/version" % (self.rest.url)
return self.rest.get(target_uri)
######################################
## WORKLOAD Resource group
######################################
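# Illustrative usage sketch, not part of the wrapper class above: how a caller
# might combine the SYSTEM and PERFORMANCE helpers. The client object, the
# array serial number and the params_dict keys below are hypothetical example
# values, not confirmed by this module; consult the Unisphere for VMAX REST
# documentation for the exact payloads expected by the performance endpoints.
def _example_usage(client):
    """Query basic system information and one performance endpoint."""
    arrays = client.get_arrays()            # list arrays known to Unisphere
    array_id = "000123456789"               # placeholder serial number
    details = client.get_array(array_id)
    # Example payload for a storage-group metrics query; key names and the
    # epoch-millisecond timestamp format are assumptions for illustration only.
    params = {
        "symmetrixId": array_id,
        "startDate": 1500000000000,
        "endDate": 1500003600000,
        "metrics": ["IO_RATE"],
    }
    metrics = client.get_perf_storagegroup_metrics(params)
    return arrays, details, metrics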
|
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import argparse
import os
import re
import pkg_resources
import sys
from datetime import datetime
import warnings
from dateutil import tz
from botocore.compat import six
from cement.utils.misc import minimal_logger
from subprocess import Popen, PIPE, STDOUT
urllib = six.moves.urllib
from ..objects.exceptions import CommandError, InvalidOptionsError
from ..core import io, fileoperations
LOG = minimal_logger(__name__)
def prompt_for_item_in_list(lst, default=1):
ind = prompt_for_index_in_list(lst, default)
return lst[ind]
def prompt_for_index_in_list(lst, default=1):
for x in range(0, len(lst)):
io.echo(str(x + 1) + ')', lst[x])
while True:
try:
choice = int(io.prompt('default is ' + str(default),
default=default))
if not (0 < choice <= len(lst)):
raise ValueError # Also thrown by non int numbers
else:
break
except ValueError:
io.echo('Sorry, that is not a valid choice. '
'Please choose a number between 1 and ' +
str(len(lst)) + '.')
return choice - 1
def get_unique_name(name, current_uniques):
# with warnings.catch_warnings():
# warnings.simplefilter('ignore')
# if sys.version_info[0] >= 3:
# base_name = name
# else:
# base_name = name.decode('utf8')
base_name = name
number = 2
while base_name in current_uniques:
base_name = name + str(number)
number += 1
return base_name
def mask_vars(key, value):
if (re.match('.*_CONNECTION_STRING', key) or
key == 'AWS_ACCESS_KEY_ID' or
key == 'AWS_SECRET_KEY') \
and value is not None:
value = "*****"
return key, value
def print_list_in_columns(lst):
"""
    This function is currently only intended for environment names,
    which are guaranteed to be 23 characters or fewer.
:param lst: List of env names
"""
if sys.stdout.isatty():
lst = list_to_columns(lst)
index = 0
for x in range(0, len(lst[0])):
line = []
for i in range(0, len(lst)):
try:
line.append(lst[i][x])
except IndexError:
pass
io.echo_and_justify(42, *line)
else:
        # Don't print in columns when output is being piped
for i in lst:
io.echo(i)
def list_to_columns(lst):
COLUMN_NUM = 3
assert len(lst) > COLUMN_NUM, "List size must be greater than {0}".\
format(COLUMN_NUM)
remainder = len(lst) % COLUMN_NUM
column_size = len(lst) // COLUMN_NUM
if remainder != 0:
column_size += 1
    columns = [[] for i in range(0, COLUMN_NUM)]
index = 0
stop = column_size
for x in range(0, COLUMN_NUM):
        columns[x] += lst[index:stop]
index = stop
stop += column_size
    return columns
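# Illustrative sketch: list_to_columns splits a flat list into COLUMN_NUM
# roughly equal columns, which print_list_in_columns then prints row by row.
# The sample environment names below are made up purely for demonstration.
def _example_list_to_columns():
    names = ["env-a", "env-b", "env-c", "env-d", "env-e", "env-f", "env-g"]
    columns = list_to_columns(names)
    # With 7 items and 3 columns the column size is 3, so the result is
    # [['env-a', 'env-b', 'env-c'], ['env-d', 'env-e', 'env-f'], ['env-g']]
    return columns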
def url_encode(data):
return urllib.parse.quote(data)
def get_delta_from_now_and_datetime(date):
return datetime.now(tz.tzlocal()) - get_local_time(date)
def get_local_time(utctime):
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
utctime = utctime.replace(tzinfo=from_zone)
return utctime.astimezone(to_zone)
def get_local_time_as_string(utctime):
localtime = get_local_time(utctime)
return localtime.strftime("%Y-%m-%d %H:%M:%S")
def is_ssh():
return "SSH_CLIENT" in os.environ or "SSH_TTY" in os.environ
def static_var(varname, value):
def decorate(func):
setattr(func, varname, value)
return func
return decorate
def exec_cmd(args, live_output=True):
"""
Execute a child program (args) in a new process. Displays
live output by default.
:param args: list: describes the command to be run
:param live_output: bool: whether to print live output
:return str: child program output
"""
LOG.debug(' '.join(args))
process = Popen(args, stdout=PIPE, stderr=STDOUT)
output = []
for line in iter(process.stdout.readline, b''):
line = line.decode('utf-8')
if line != os.linesep:
if live_output:
sys.stdout.write(line)
sys.stdout.flush()
else:
LOG.debug(line)
output.append(line)
process.stdout.close()
process.wait()
returncode = process.returncode
error_msg = 'Exited with return code {}'.format(returncode)
output_str = ''.join(output)
if returncode:
raise CommandError(error_msg, output_str, returncode)
return output_str
exec_cmd_live_output = exec_cmd
def exec_cmd_quiet(args):
return exec_cmd(args, False)
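# Illustrative sketch of exec_cmd/exec_cmd_quiet: run a child process, capture
# its combined stdout/stderr, and let CommandError propagate on a non-zero
# exit code. The command is an arbitrary, platform-dependent example.
def _example_exec_cmd():
    # Quiet variant: output is logged and returned but not echoed live.
    output = exec_cmd_quiet(['echo', 'hello'])
    return output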
def flatten(lists):
"""
Return a new (shallow) flattened list.
:param lists: list: a list of lists
:return list
"""
return [item for sublist in lists for item in sublist]
def anykey(d):
"""
Return any key in dictionary.
:param d: dict: dictionary
:return object
"""
return next(six.iterkeys(d))
def last_modified_file(filepaths):
"""
Return the most recently modified file.
:param filepaths: list: paths to files
:return str
"""
return max(filepaths, key=os.path.getmtime)
def get_data_from_url(url, timeout=20):
return urllib.request.urlopen(url, timeout=timeout).read()
def print_from_url(url):
result = get_data_from_url(url)
io.echo(result)
def parse_version(version_string):
"""
    Parse string as a version object for comparison
    Example: parse_version('1.9.2') > parse_version('1.9.alpha')
    See docs for pkg_resources.parse_version as this is just a wrapper
"""
return pkg_resources.parse_version(version_string)
def save_file_from_url(url, location, filename):
result = get_data_from_url(url)
return fileoperations.save_to_file(result, location, filename)
# http://stackoverflow.com/a/5164027
def prettydate(d):
"""
Return a human readable str of how long d was compared to now.
:param d: datetime/float: datetime or unix timestamp
:return str
"""
if isinstance(d, float): # epoch timestamp
d = datetime.utcfromtimestamp(d)
diff = datetime.utcnow() - d
s = diff.seconds
if diff.days > 7 or diff.days < 0:
return d.strftime('%d %b %y')
elif diff.days == 1:
return '1 day ago'
elif diff.days > 1:
return '{0} days ago'.format(diff.days)
elif s <= 1:
return 'just now'
elif s < 60:
return '{0} seconds ago'.format(s)
elif s < 120:
return '1 minute ago'
elif s < 3600:
return '{0} minutes ago'.format(s // 60)
elif s < 7200:
return '1 hour ago'
else:
return '{0} hours ago'.format(s // 3600)
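# Illustrative sketch of prettydate with both accepted input types; the
# offsets below are arbitrary example values.
def _example_prettydate():
    import time
    from datetime import timedelta
    recent = datetime.utcnow() - timedelta(minutes=5)   # -> '5 minutes ago'
    as_epoch = time.time()                               # float timestamps also work
    return prettydate(recent), prettydate(as_epoch)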
def merge_dicts(low_priority, high_priority):
"""
Return a new dict that is a merge of low_priority and high_priority dicts.
When keys collide, takes the value of higher_priority dict.
:param low_priority: dict: shallow dictionary
:param high_priority: dict: shallow dictionary
:return dict
"""
result_dict = low_priority.copy()
result_dict.update(high_priority)
return result_dict
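# Illustrative sketch: merge_dicts keeps keys from both dicts and lets the
# high-priority dict win on collisions. The keys and values are arbitrary
# example settings.
def _example_merge_dicts():
    defaults = {'region': 'us-east-1', 'timeout': 30}
    overrides = {'timeout': 60}
    merged = merge_dicts(defaults, overrides)
    # merged == {'region': 'us-east-1', 'timeout': 60}
    return merged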
def retract_string(string):
try:
string_len = len(string)
        keep_characters = list(range(0, 4))
keep_characters.extend(range(string_len - 4, string_len))
retracted_string = []
for i, c in enumerate(string):
if i in keep_characters:
retracted_string.append(c)
else:
retracted_string.append('*')
return ''.join(retracted_string)
except:
return ''
def check_source(value):
match = re.match(r"([^/]+/[^/]+/[^/]+)", value)
if match is None or len(value.split("/")) > 3:
raise argparse.ArgumentTypeError(
"%s is a invalid source. Example source would be something like: codecommit/repo/branch" % value)
return value
def parse_source(source):
# Source is already validated by the check_source method.
if source is None:
return
split_source = source.split('/')
# Validate that we support the source location
source_location = split_source[0].lower()
validate_source_location(source_location)
repository = split_source[1]
branch = split_source[2]
return source_location, repository, branch
def validate_source_location(source_location):
valid_source_locations = ['codecommit']
if source_location in valid_source_locations:
return
else:
raise InvalidOptionsError("Source location '{0}' is not in the list of valid locations: {1}".format(source_location, valid_source_locations))
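# Illustrative sketch of the source-string helpers: check_source validates the
# "location/repository/branch" shape and parse_source splits it into its parts.
# The repository and branch names are placeholders.
def _example_parse_source():
    value = check_source('codecommit/my-repo/mainline')
    location, repository, branch = parse_source(value)
    # location == 'codecommit', repository == 'my-repo', branch == 'mainline'
    return location, repository, branch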
def encode_to_ascii(unicode_value):
empty_string = ""
if unicode_value is None:
return empty_string
return unicode_value.encode('ascii', 'ignore')
|
|
import functools
import logging
from typing import Callable, Optional, Any, Tuple, List
import hw_intf
from app_config import AppConfig
from common import CancelException, HwNotInitialized
from hw_common import HWDevice, HWType
from wnd_utils import WndUtils
log = logging.getLogger('dmt.wallet_tools_dlg')
class ActionPageBase:
def __init__(self, parent_dialog, app_config: AppConfig, hw_devices: hw_intf.HWDevices, action_title: str):
self.parent_dialog = parent_dialog
self.app_config: AppConfig = app_config
self.finishing = False
self.hw_devices = hw_devices
self.hw_devices.sig_connected_hw_device_changed.connect(self._on_connected_hw_device_changed)
self.action_title = action_title
self.fn_exit_page: Optional[Callable[[], None]] = None
self.fn_set_action_title: Optional[Callable[[str], None]] = None
self.fn_set_btn_close_visible: Optional[Callable[[bool], None]] = None
self.fn_set_btn_close_enabled: Optional[Callable[[bool], None]] = None
self.fn_set_btn_cancel_visible: Optional[Callable[[bool], None]] = None
self.fn_set_btn_cancel_enabled: Optional[Callable[[bool], None]] = None
self.fn_set_btn_cancel_text: Optional[Callable[[str, str], None]] = None
self.fn_set_btn_back_visible: Optional[Callable[[bool], None]] = None
self.fn_set_btn_back_enabled: Optional[Callable[[bool], None]] = None
self.fn_set_btn_back_text: Optional[Callable[[str, str], None]] = None
self.fn_set_btn_continue_visible: Optional[Callable[[bool], None]] = None
self.fn_set_btn_continue_enabled: Optional[Callable[[bool], None]] = None
self.fn_set_btn_continue_text: Optional[Callable[[str, str], None]] = None
self.fn_set_hw_change_enabled: Optional[Callable[[bool], None]] = None
self.fn_show_message_page: Optional[Callable[[Optional[str]], None]] = None
self.fn_show_action_page: Optional[Callable[[None], None]] = None
def set_control_functions(
self,
fn_exit_page: Callable[[], None],
fn_set_action_title: Callable[[str], None],
fn_set_btn_close_visible: Callable[[bool], None],
fn_set_btn_close_enabled: Callable[[bool], None],
fn_set_btn_cancel_visible: Callable[[bool], None],
fn_set_btn_cancel_enabled: Callable[[bool], None],
fn_set_btn_cancel_text: Callable[[str, str], None],
fn_set_btn_back_visible: Callable[[bool], None],
fn_set_btn_back_enabled: Callable[[bool], None],
fn_set_btn_back_text: Callable[[str, str], None],
fn_set_btn_continue_visible: Callable[[bool], None],
fn_set_btn_continue_enabled: Callable[[bool], None],
fn_set_btn_continue_text: Callable[[str, str], None],
fn_set_hw_panel_visible: Callable[[bool], None],
fn_set_hw_change_enabled: Callable[[bool], None],
fn_show_message_page: Optional[Callable[[Optional[str]], None]],
fn_show_action_page: Optional[Callable[[], None]]):
self.fn_exit_page = fn_exit_page
self.fn_set_action_title = fn_set_action_title
self.fn_set_btn_close_visible = fn_set_btn_close_visible
self.fn_set_btn_close_enabled = fn_set_btn_close_enabled
self.fn_set_btn_cancel_visible = fn_set_btn_cancel_visible
self.fn_set_btn_cancel_enabled = fn_set_btn_cancel_enabled
self.fn_set_btn_cancel_text = fn_set_btn_cancel_text
self.fn_set_btn_back_visible = fn_set_btn_back_visible
self.fn_set_btn_back_enabled = fn_set_btn_back_enabled
self.fn_set_btn_back_text = fn_set_btn_back_text
self.fn_set_btn_continue_visible = fn_set_btn_continue_visible
self.fn_set_btn_continue_enabled = fn_set_btn_continue_enabled
self.fn_set_btn_continue_text = fn_set_btn_continue_text
self.fn_set_hw_panel_visible = fn_set_hw_panel_visible
self.fn_set_hw_change_enabled = fn_set_hw_change_enabled
self.fn_show_message_page = fn_show_message_page
self.fn_show_action_page = fn_show_action_page
def initialize(self):
self.update_action_subtitle('')
self.set_btn_close_visible(False)
def on_close(self):
pass
def _on_connected_hw_device_changed(self, hw_device: HWDevice):
if not self.finishing:
self.on_connected_hw_device_changed(hw_device)
def on_validate_hw_device(self, hw_device: HWDevice) -> bool:
"""
Its purpose is to validate in derived classes whether the hardware wallet device passed in the 'hw_device'
argument is approved or not. This way, a derived class may not allow a certain type or model of hardware
wallet for the tasks associated with that class.
:return: True, if hw device is accepted, False otherwise
"""
return False
def on_connected_hw_device_changed(self, cur_hw_device: HWDevice):
pass
def exit_page(self):
if self.fn_exit_page:
self.fn_exit_page()
def set_action_title(self, title: str):
if self.fn_set_action_title:
self.fn_set_action_title(title)
def set_btn_close_visible(self, visible: bool):
if self.fn_set_btn_close_visible:
self.fn_set_btn_close_visible(visible)
def set_btn_close_enabled(self, enabled: bool):
if self.fn_set_btn_close_enabled:
self.fn_set_btn_close_enabled(enabled)
def set_btn_cancel_visible(self, visible: bool):
if self.fn_set_btn_cancel_visible:
self.fn_set_btn_cancel_visible(visible)
def set_btn_cancel_enabled(self, enabled: bool):
if self.fn_set_btn_cancel_enabled:
self.fn_set_btn_cancel_enabled(enabled)
def set_btn_cancel_text(self, label: str, tool_tip: Optional[str] = None):
if self.fn_set_btn_cancel_text:
self.fn_set_btn_cancel_text(label, tool_tip)
def set_btn_back_visible(self, visible: bool):
if self.fn_set_btn_back_visible:
self.fn_set_btn_back_visible(visible)
def set_btn_back_enabled(self, enabled: bool):
if self.fn_set_btn_back_enabled:
self.fn_set_btn_back_enabled(enabled)
def set_btn_back_text(self, label: str, tool_tip: Optional[str] = None):
if self.fn_set_btn_back_text:
self.fn_set_btn_back_text(label, tool_tip)
def set_btn_continue_visible(self, visible: bool):
if self.fn_set_btn_continue_visible:
self.fn_set_btn_continue_visible(visible)
def set_btn_continue_enabled(self, enabled: bool):
if self.fn_set_btn_continue_enabled:
self.fn_set_btn_continue_enabled(enabled)
def set_btn_continue_text(self, label: str, tool_tip: Optional[str] = None):
if self.fn_set_btn_continue_text:
self.fn_set_btn_continue_text(label, tool_tip)
def set_hw_panel_visible(self, visible: bool):
if self.fn_set_hw_panel_visible:
self.fn_set_hw_panel_visible(visible)
def set_hw_change_enabled(self, enabled: bool):
if self.fn_set_hw_change_enabled:
self.fn_set_hw_change_enabled(enabled)
def show_message_page(self, message: Optional[str] = None):
if self.fn_show_message_page:
self.fn_show_message_page(message)
def show_action_page(self):
if self.fn_show_action_page:
self.fn_show_action_page()
def go_to_next_step(self):
pass
def go_to_prev_step(self):
self.exit_page()
def on_btn_continue_clicked(self):
self.go_to_next_step()
def on_btn_back_clicked(self):
self.go_to_prev_step()
def on_before_cancel(self) -> bool:
"""
        Called by the wallet tools dialog before closing the dialog (after the <Cancel> button has been clicked).
:return: True if the action widget allows for closure or False otherwise.
"""
return True
def on_before_close(self) -> bool:
"""
        Called by the wallet tools dialog before closing the dialog (after the <Close> button has been clicked).
:return: True if the action widget allows for closure or False otherwise.
"""
return True
def update_action_subtitle(self, subtitle: Optional[str] = None):
title = self.action_title
if subtitle:
title += ' - ' + subtitle
self.set_action_title(f'<b>{title}</b>')
def handle_hw_exceptions(func):
"""
    The purpose of this wrapper is to intercept known exceptions related to hardware wallets, such as the user
    cancelling an operation or the device not being initialized, and to display an appropriate message.
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
ret = None
try:
ret = func(self, *args, **kwargs)
except CancelException:
pass
except HwNotInitialized:
WndUtils.error_msg('Your hardware wallet device is not initialized. To initialize your device, you can '
'use the (a) "initialization" or (b) "recovery from seed" features available in this '
'application.')
except Exception as e:
WndUtils.error_msg(str(e), True)
return ret
return wrapper
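# Illustrative sketch (hypothetical subclass, not part of this module): a page
# method that talks to the hardware wallet can be wrapped with
# handle_hw_exceptions so that a CancelException is silently swallowed and a
# HwNotInitialized error is reported through WndUtils instead of propagating.
# The accessor names used on hw_devices/device are assumptions for illustration.
class _ExampleActionPage(ActionPageBase):
    @handle_hw_exceptions
    def read_device_label(self):
        device = self.hw_devices.get_selected_device()  # hypothetical accessor
        return device.get_description() if device else None  # hypothetical accessor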
|
|
import math, random, time, copy
import pygame
import pygame.gfxdraw
from ttws_types import *
from puzzle import Puzzle
from loader import decode_pb
# Taken from http://pygame.org/project-AAfilledRoundedRect-2349-.html
def aafilled_rounded_rect(surface, rect, colour, radius=0.4, angle=0):
"""
Radius is corner radius, 0 for a square corner, 1 for a semi-circle end
Angle is the rotation of the final object, rotated about the center point
    When the angle is not a multiple of 90 degrees, smoothing is attempted by
    scaling up the surface, rotating and then downscaling. This looks better
    for some rotations than others.
"""
rect = pygame.Rect(rect)
colour = pygame.Color(*colour)
alpha = colour.a
colour.a = 0
center = rect.center
rect.topleft = 0, 0
rect_surf = pygame.Surface(rect.size, pygame.SRCALPHA)
circle = pygame.Surface([min(rect.size) * 3] * 2, pygame.SRCALPHA)
pygame.draw.ellipse(circle, (0, 0, 0),circle.get_rect(), 0)
circle = pygame.transform.smoothscale(circle, [int(min(rect.size) * radius)] * 2)
radius = rect_surf.blit(circle, (0, 0))
radius.bottomright = rect.bottomright
rect_surf.blit(circle,radius)
radius.topright = rect.topright
rect_surf.blit(circle,radius)
radius.bottomleft = rect.bottomleft
rect_surf.blit(circle,radius)
rect_surf.fill((0, 0, 0), rect.inflate(-radius.w, 0))
rect_surf.fill((0, 0, 0), rect.inflate(0, -radius.h))
rect_surf.fill(colour, special_flags=pygame.BLEND_RGBA_MAX)
rect_surf.fill((255, 255, 255, alpha), special_flags=pygame.BLEND_RGBA_MIN)
# Rotate about the center point, with smooth scaling to provide
# anti-aliasing if the angle is not likely to produce a good result
smooth = angle % 90 != 0
if smooth:
rect_surf = pygame.transform.smoothscale(rect_surf, (rect_surf.get_width() * 2, rect_surf.get_height() * 2))
rect_surf = pygame.transform.rotate(rect_surf, angle)
if smooth:
rect_surf = pygame.transform.smoothscale(rect_surf, (rect_surf.get_width() / 2, rect_surf.get_height() / 2))
rotated_rect = rect_surf.get_rect()
rotated_rect.center = center
return surface.blit(rect_surf, rotated_rect)
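# Illustrative sketch: drawing a rotated, rounded rectangle onto a plain
# SRCALPHA surface with aafilled_rounded_rect. The sizes, colour and angle are
# arbitrary example values; no display needs to be open for this to work.
def _example_rounded_rect():
    canvas = pygame.Surface((200, 120), pygame.SRCALPHA)
    # Semi-transparent blue rectangle with fairly round corners, tilted 30 degrees
    aafilled_rounded_rect(canvas, (20, 20, 160, 80), (0, 120, 255, 200), radius=0.6, angle=30)
    return canvas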
def shade_rgb(colour, percent):
"""
Lighten or darken an (R, G, B) colour
percent of -1 is maximum darkening (black)
percent of 1 is maximum lightening (white)
Adapted from http://stackoverflow.com/questions/5560248/programmatically-lighten-or-darken-a-hex-color-or-rgb-and-blend-colors
"""
t = 0.0 if percent < 0.0 else 255.0
p = percent * -1.0 if percent < 0.0 else percent
r, g, b = colour
r = ((t - r) * p) + r
g = ((t - g) * p) + g
b = ((t - b) * p) + b
return (int(r), int(g), int(b))
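# Illustrative sketch: lightening and darkening an (R, G, B) triple with
# shade_rgb. The base colour is an arbitrary example value.
def _example_shade_rgb():
    base = (100, 150, 200)
    lighter = shade_rgb(base, 0.5)    # halfway towards white -> (177, 202, 227)
    darker = shade_rgb(base, -0.5)    # halfway towards black -> (50, 75, 100)
    return lighter, darker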
class UI(object):
    def __init__(self, puzzles=None):
        self.current_puzzle = 0
        self.puzzle_codes = puzzles or []
pygame.init()
# Create a resizable screen area
self.screen = pygame.display.set_mode((600, 600), pygame.RESIZABLE)
pygame.scrap.init()
clock = pygame.time.Clock()
if self.puzzle_codes:
self.puzzle = decode_pb(self.puzzle_codes[0])
else:
# Randomise first puzzle
self.puzzle = Puzzle(random.randint(1, 6), random.randint(1, 5))
self.puzzle.randomise()
pygame.event.post(pygame.event.Event(pygame.KEYDOWN, {"key": pygame.K_r}))
self.initialise()
# As soon as the event loop start, begin solving the first puzzle
pygame.event.post(pygame.event.Event(pygame.KEYDOWN, {"key": pygame.K_s}))
# Start main loop
self.quit = False
while not self.quit:
# Limit frames per second
clock.tick(30)
self.process_events()
pygame.quit()
def process_events(self):
# Fetch any waiting events
events = pygame.event.get([pygame.VIDEORESIZE, pygame.QUIT, pygame.KEYDOWN, pygame.USEREVENT])
for event in events:
if event.type == pygame.VIDEORESIZE:
self.screen = pygame.display.set_mode((event.w, event.h), pygame.RESIZABLE)
self.calculate_sizes()
elif event.type == pygame.QUIT:
self.puzzle.keep_solving = False
self.quit = True
elif event.type == pygame.KEYDOWN:
if pygame.key.name(event.key) == "q":
self.puzzle.keep_solving = False
self.quit = True
elif pygame.key.name(event.key) == "n":
self.puzzle.keep_solving = False
self.puzzle = Puzzle(random.randint(1, 6), random.randint(1, 5))
self.puzzle.randomise()
self.initialise()
self.puzzle.solve()
elif pygame.key.name(event.key) == "s":
self.puzzle.solve()
elif pygame.key.name(event.key) == "r":
self.puzzle.keep_solving = False
self.puzzle.solve(randomise=True)
elif pygame.key.name(event.key) == "right":
# Load next puzzle
if self.current_puzzle < len(self.puzzle_codes) - 1:
self.puzzle.keep_solving = False
self.current_puzzle += 1
print "loading puzzle %s" % (self.current_puzzle + 1)
self.puzzle = decode_pb(self.puzzle_codes[self.current_puzzle])
self.initialise()
self.puzzle.solve()
elif pygame.key.name(event.key) == "left":
# Load previous puzzle
if self.current_puzzle > 0:
self.puzzle.keep_solving = False
self.current_puzzle -= 1
print "loading puzzle %s" % (self.current_puzzle + 1)
self.puzzle = decode_pb(self.puzzle_codes[self.current_puzzle])
self.initialise()
self.puzzle.solve()
elif pygame.key.name(event.key) == "p":
# Grab clipboard text
text = pygame.scrap.get(pygame.SCRAP_TEXT)
if not text:
break
try:
# TODO: make the save/load mechanism better!
self.puzzle = decode_pb(text)
except:
print "Cannot load puzzle from text '%s'" % text
break
# Puzzle is loadable - store it in a file
f = open("pasted_puzzles", "a")
f.write(text + "\n")
f.close()
self.puzzle_codes.insert(self.current_puzzle+1, text)
self.initialise()
self.puzzle.solve()
# Clear events we're not interested in (e.g. mouse movements)
pygame.event.clear()
if events:
# Don't redraw screen if no events have been processed
self.draw_frame()
def force_update(self):
pygame.event.post(pygame.event.Event(pygame.USEREVENT))
self.process_events()
def initialise(self):
self.calculate_sizes()
self.puzzle.register_observer(self.force_update)
def calculate_sizes(self):
"""Define some variables for scaling the puzzle"""
width = self.screen.get_width()
height = self.screen.get_height()
# line_width determines all other sizes
#
# Resize whole puzzle to fit on screen, but not smaller than a line width of 5
# "4 + " is for the left/top margin (2 line widths) and bottom/right margin (2 line widths)
# "5 *" is for each cell
# height - 100 means there will always be 100 pixels at the bottom of
# the screen for status info
self.line_width = max(5, min(width / (4 + (5 * self.puzzle.width)),
(height - 100) / (4 + (5 * self.puzzle.height))))
# line_width needs to be odd otherwise nodes can extend beyond edges
if self.line_width % 2 == 0:
self.line_width += 1
# Left and top margin - 2 line widths
self.margin = self.line_width * 2
# Each cell is 5 line widths, assuming the line_width was even!
self.cell_size = (self.line_width - 1) * 5
# For edges with a gap, this is the width of that gap
self.gap_size = self.cell_size / 5
# Each node is a circle (e.g. draws the corners of the puzzle) the
# diameter of which must be the line width
self.node_radius = self.line_width / 2
self.ang = 0
def find_v_edge_coords(self, x, y):
"""
Given an (x, y) position in the puzzle, find the x_start, y_start, x_end,
y_end coordinates of this vertical edge.
"""
x_start = self.margin + (x * self.cell_size)
x_end = x_start + self.cell_size
y_start = self.margin + (y * self.cell_size)
y_end = y_start
return x_start, y_start, x_end, y_end
def find_h_edge_coords(self, x, y):
"""
Given an (x, y) position in the puzzle, find the x_start, y_start, x_end,
y_end coordinates of this horizontal edge.
"""
x_start = self.margin + (x * self.cell_size)
x_end = x_start
y_start = self.margin + (y * self.cell_size)
y_end = y_start + self.cell_size
return x_start, y_start, x_end, y_end
def find_node_coords(self, x, y):
"""
Given an (x, y) position in the puzzle, find the x_start, y_start, x_end,
y_end coordinates of this node.
"""
x_start = x_end = self.margin + (x * self.cell_size)
y_start = y_end = self.margin + (y * self.cell_size)
# End nodes are drawn differently around the edges so require a start
# and end point
if x == 0:
# Node is on the left
x_end -= self.line_width
elif x == self.puzzle.width:
# Node is on the right
x_end += self.line_width
if y == 0:
# Node is at the top
y_end -= self.line_width
elif y == self.puzzle.height:
# Node is at the bottom
y_end += self.line_width
return x_start, y_start, x_end, y_end
def draw_path(self, path, colour):
"""Draw a path in the given colour."""
for n in range(len(path)):
x, y = path[n]
# Draw edge from this path node to the next one
if n < len(path) - 1:
# Make sure start is top-left, end is bottom-right
x_start = min(path[n][0], path[n + 1][0])
x_end = max(path[n][0], path[n + 1][0])
y_start = min(path[n][1], path[n + 1][1])
y_end = max(path[n][1], path[n + 1][1])
if x_start != x_end:
# If there is a hexagon, lighten the colour so it looks like it's under
# the path
edge = copy.deepcopy(self.puzzle.v_edges[y_start][x_start])
edge.colour = colour
if edge.is_hexagon():
edge.hexagon.colour = shade_rgb(edge.hexagon.colour, 0.75)
x_start, y_start, x_end, y_end = self.find_v_edge_coords(x_start, y_start)
self.draw_v_edge(edge, x_start, y_start, x_end, y_end)
elif y_start != y_end:
# If there is a hexagon, lighten the colour so it looks like it's under
# the path
edge = copy.deepcopy(self.puzzle.h_edges[y_start][x_start])
edge.colour = colour
if edge.is_hexagon():
edge.hexagon.colour = shade_rgb(edge.hexagon.colour, 0.75)
x_start, y_start, x_end, y_end = self.find_h_edge_coords(x_start, y_start)
self.draw_h_edge(edge, x_start, y_start, x_end, y_end)
# Draw node
x_start, y_start, x_end, y_end = self.find_node_coords(x, y)
# If there is a hexagon, lighten the colour so it looks like it's under
# the path
node = copy.deepcopy(self.puzzle.nodes[y][x])
if node.is_hexagon():
node.hexagon.colour = shade_rgb(node.hexagon.colour, 0.75)
# If this is not the end of the path, remove end bit
if n < len(path) - 1:
node.remove_type(NodeType.END)
else:
# This is the end of the path - draw an extra circle to fill in the
# corners (otherwise there's a gap at the corner nodes)
end_node = Node(colour=colour)
self.draw_node(end_node, x_start, y_start, x_end, y_end)
node.colour = colour
self.draw_node(node, x_start, y_start, x_end, y_end)
def draw_frame(self):
"""Draw a single frame."""
self.screen.fill(Colour.BACKGROUND)
# Draw vertical edges
for y in range(self.puzzle.height + 1):
for x in range(self.puzzle.width):
# Mark hexagons as in error or not
edge = self.puzzle.v_edges[y][x]
if edge.is_hexagon():
edge.hexagon.has_error = False
if self.puzzle.solution_found and (x, y) in self.puzzle.removed_v_edges:
edge.hexagon.has_error = True
x_start, y_start, x_end, y_end = self.find_v_edge_coords(x, y)
self.draw_v_edge(edge, x_start, y_start, x_end, y_end)
# Draw horizontal edges
for y in range(self.puzzle.height):
for x in range(self.puzzle.width + 1):
# Mark hexagons as in error or not
edge = self.puzzle.h_edges[y][x]
if edge.is_hexagon():
edge.hexagon.has_error = False
if self.puzzle.solution_found and (x, y) in self.puzzle.removed_h_edges:
edge.hexagon.has_error = True
x_start, y_start, x_end, y_end = self.find_h_edge_coords(x, y)
self.draw_h_edge(edge, x_start, y_start, x_end, y_end)
# Draw nodes
for y in range(self.puzzle.height + 1):
for x in range(self.puzzle.width + 1):
# Mark hexagons as in error or not
node = self.puzzle.nodes[y][x]
if node.is_hexagon():
node.hexagon.has_error = False
if self.puzzle.solution_found and (x, y) in self.puzzle.removed_nodes:
node.hexagon.has_error = True
x_start, y_start, x_end, y_end = self.find_node_coords(x, y)
self.draw_node(node, x_start, y_start, x_end, y_end)
# Draw cells
for y in range(self.puzzle.height):
for x in range(self.puzzle.width):
# Find middle of the cell
x_centre = self.margin + (x * self.cell_size) + (self.cell_size / 2)
y_centre = self.margin + (y * self.cell_size) + (self.cell_size / 2)
# TODO: remove this - for debugging only
if 0:
# If this cell is part of an area, colour it in so we can see which areas have been defined
bg_colour = Colour.BACKGROUND
bg_colour_map = {0: Colour.BACKGROUND,
1: (245,255,212),
2: (212,255,232),
3: (218,250,255),
4: (232,239,255),
5: (250,223,255)}
for n, area in enumerate(self.puzzle.areas):
if (x, y) in area:
if n in bg_colour_map:
bg_colour = bg_colour_map[n]
else:
bg_colour = Colour.BACKGROUND
bg_colour = Colour.BACKGROUND
if self.puzzle.solution_found and (x, y) in self.puzzle.removed_pieces:
bg_colour = Colour.ERROR
self.draw_cell(self.puzzle.cells[y][x], x_centre, y_centre, bg_colour)
# Draw path
if self.puzzle.solution_found:
colour = Colour.PATH
else:
            # An intermediate path - draw it dimmer and slightly transparent
colour = shade_rgb(Colour.LINE, 0.5)
self.draw_path(self.puzzle.path, colour)
self.draw_path(self.puzzle.symmetry_path(self.puzzle.path), colour)
# Draw status bar
status_top = self.screen.get_height() - 100
pygame.draw.rect(self.screen, Colour.DARK_GREY, (0, status_top, self.screen.get_width(), 100))
font = pygame.font.SysFont("Arial", 20, bold=True)
text_surf = font.render("%s" % self.puzzle.message, True, (0,0,0))
self.screen.blit(text_surf, (20, status_top + 5))
text_surf = font.render("Time taken: %0.2fs" % (self.puzzle.time_taken), True, (0,0,0))
self.screen.blit(text_surf, (20, status_top + 30))
text_surf = font.render("Paths attempted: {:,}".format(self.puzzle.path_attempts), True, (0,0,0))
self.screen.blit(text_surf, (20, status_top + 55))
pygame.display.flip()
# helpful little debug circle
#pygame.draw.circle(self.screen, Colour.BLUE, (100, 100), 2)
def draw_hexagon(self, x, y, colour):
"""Draw a hexagon, centered around (x, y)."""
# Takes up 70% of a line, i.e. radius is 35%
r = self.line_width * 0.35
points = []
# (0, 360, 60) would make the point be at the top, we want it flat on top
for ang in range(-30, 330, 60):
x_offset = r * math.sin(math.radians(ang))
y_offset = r * math.cos(math.radians(ang))
points.append((x + x_offset, y + y_offset))
pygame.gfxdraw.aapolygon(self.screen, points, colour)
pygame.gfxdraw.filled_polygon(self.screen, points, colour)
def draw_triangle(self, x, y, number):
"""Draw one, two or three triangles, centered around (x, y)."""
# Takes up 70% of a line, i.e. radius is 35%
r = self.line_width * 0.35
x = x - (self.line_width / 2) * (number - 1)
for _ in range(number):
points = []
for ang in range(-60, 300, 120):
x_offset = r * math.sin(math.radians(ang))
y_offset = r * math.cos(math.radians(ang))
points.append((x + x_offset, y + y_offset))
pygame.gfxdraw.aapolygon(self.screen, points, Colour.ORANGE)
pygame.gfxdraw.filled_polygon(self.screen, points, Colour.ORANGE)
x += self.line_width
def draw_tetris(self, x, y, tetris, blue=False):
"""
        Draw a tetris shape (yellow or blue) onto a new surface, then place the
        surface onto the screen, centered at (x, y), with appropriate rotation.
"""
surface = pygame.Surface((self.cell_size, self.cell_size), pygame.SRCALPHA)
margin = self.line_width
# Remaining space for tetris squares, into which we have to fit 5 squares
# and 4 gaps
piece_size = (self.cell_size - (margin * 2.0)) / 6.0
gap = piece_size / 4.0
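        # Derivation: each gap is a quarter of a square, so 5 squares plus
        # 4 gaps occupy 5p + 4 * (p / 4) = 6p, hence the division by 6 above.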
# Draw the first shape in the list (the given, non-rotated one)
shape = tetris.shapes[0]
# TODO: center tetris pieces better
# Calculate offsets required to center the shape
avg = lambda vals: sum(vals, 0.0) / len(vals)
tx_offset = avg(set([tx for tx, ty in shape]))
ty_offset = avg(set([ty for tx, ty in shape]))
for tx, ty in shape:
tx = (self.cell_size / 2.0) - (piece_size / 2.0) + \
((tx - tx_offset) * (piece_size + gap))
ty = (self.cell_size / 2.0) - (piece_size / 2.0) + \
((ty - ty_offset) * (piece_size + gap))
if blue:
thickness = self.line_width / 8
# Draw several 1-width rectangles inside each other to get
# rectangles with thickness (overcomes several problems with draw.rect thickness)
for t in range(0, thickness):
pygame.draw.rect(surface, Colour.BLUE, (tx+t, ty+t, piece_size-2*t, piece_size-2*t), 1)
else:
pygame.draw.rect(surface, Colour.YELLOW, (tx, ty, piece_size, piece_size))
# Put the surface on the screen
top = y - (self.cell_size / 2)
left = x - (self.cell_size / 2)
if tetris.rotated:
# Attempt to rotate a little more smoothly by scaling up first
surface = pygame.transform.smoothscale(surface, (surface.get_width() * 2, surface.get_height() * 2))
surface = pygame.transform.rotate(surface, 15)
surface = pygame.transform.smoothscale(surface, (surface.get_width() / 2, surface.get_height() / 2))
x_offset = (surface.get_width() / 2) - (self.cell_size / 2)
y_offset = (surface.get_height() / 2) - (self.cell_size / 2)
self.screen.blit(surface, (left - x_offset, top - y_offset))
else:
self.screen.blit(surface, (left, top))
def draw_y(self, x, y):
"""
Draw an elimination mark (an upside-down Y shape), as three overlapping
rectangles.
"""
width = self.line_width / 2.0
length = width * 2
# For each rectangle
r = width / 2.0
for ang in (60, 180, 300):
# Find centre point
x_offset = r * math.sin(math.radians(ang))
y_offset = r * math.cos(math.radians(ang))
x_centre = x + x_offset
y_centre = y + y_offset
# Use the centre point to find the top-left of the rectangle to draw
left = x_centre - (length / 2.0)
top = y_centre - (width / 2.0)
# Draw rectangle, appropriately rotated around the centre point
aafilled_rounded_rect(self.screen, (left, top, length, width), Colour.WHITE, radius=0, angle=ang-90)
def draw_v_edge(self, edge, x_start, y_start, x_end, y_end):
"""Draw a vertical edge, which may have a gap or contain a hexagon."""
if not edge.is_missing():
pygame.draw.line(self.screen, edge.colour, (x_start, y_start), (x_end, y_end), self.line_width)
elif edge.type == EdgeType.MISSING:
x_gap = self.cell_size / 2 - self.gap_size / 2
pygame.draw.line(self.screen, edge.colour, (x_start, y_start), (x_start + x_gap, y_end), self.line_width)
pygame.draw.line(self.screen, edge.colour, (x_end, y_start), (x_end - x_gap, y_end), self.line_width)
if edge.is_hexagon():
if edge.hexagon.has_error:
colour = Colour.ERROR
else:
colour = edge.hexagon.colour
self.draw_hexagon(x_start + self.cell_size / 2, y_start, colour)
def draw_h_edge(self, edge, x_start, y_start, x_end, y_end):
"""Draw a horizontal edge, which may have a gap or contain a hexagon."""
if not edge.is_missing():
pygame.draw.line(self.screen, edge.colour, (x_start, y_start), (x_end, y_end), self.line_width)
elif edge.type == EdgeType.MISSING:
y_gap = self.cell_size / 2 - self.gap_size / 2
pygame.draw.line(self.screen, edge.colour, (x_start, y_start), (x_end, y_start + y_gap), self.line_width)
pygame.draw.line(self.screen, edge.colour, (x_start, y_end), (x_end, y_end - y_gap), self.line_width)
if edge.is_hexagon():
if edge.hexagon.has_error:
colour = Colour.ERROR
else:
colour = edge.hexagon.colour
self.draw_hexagon(x_start, y_start + self.cell_size / 2, colour)
def draw_node(self, node, x_start, y_start, x_end, y_end):
"""
Draw a node, which may be a start or end node and/or contain a hexagon.
"""
if not (node.is_start() or node.is_end()):
# Normal nodes are just circles
left = x_start - (self.node_radius)
top = y_start - (self.node_radius)
diameter = self.node_radius * 2
aafilled_rounded_rect(self.screen, (left, top, diameter, diameter), node.colour, radius=1)
elif node.is_start():
# A start node is a larger circle
left = x_start - (self.node_radius * 2)
top = y_start - (self.node_radius * 2)
diameter = self.node_radius * 4
aafilled_rounded_rect(self.screen, (left, top, diameter, diameter), node.colour, radius=1)
elif node.is_end():
# An end node is a line with a round-end which extends outwards from the edge
width = self.line_width
length = width * 2
# Find the centre of the object in order to rotate about that point
x_centre = x_start + ((x_end - x_start) / 2.0)
y_centre = y_start + ((y_end - y_start) / 2.0)
# Use the centre points to find the top-left of the rectangle to draw
left = x_centre - (length / 2.0)
top = y_centre - (width / 2.0)
# The rectangle is drawn along the x-axis and needs to be rotated about
# its centre point depending on which sort of end node this is
if x_start == x_end:
# Top and bottom end points need to be rotated 90 degrees
angle = 90
elif y_start == y_end:
# Left and right end points are already orientated correctly
# Nasty hack to make end point line up
top += 1
angle = 0
elif ((x_start - x_end) * (y_start - y_end)) > 1:
# Top-left and bottom-right end points need to angle upwards
angle = 135
else:
# Top-right and bottom-left end points need to angle downwards
angle = 45
aafilled_rounded_rect(self.screen, (left, top, length, width), node.colour, radius=1, angle=angle)
if node.is_hexagon():
if node.hexagon.has_error:
colour = Colour.ERROR
else:
colour = node.hexagon.colour
self.draw_hexagon(x_start, y_start, colour)
def draw_cell(self, cell, x, y, bg_colour=Colour.BACKGROUND):
"""Draw a cell, which contains a particular shape."""
        # The cell background square is 3 line_widths in size
scale = 3
x_start = x - (self.line_width * (scale / 2.0))
y_start = y - (self.line_width * (scale / 2.0))
width = height = self.line_width * scale
aafilled_rounded_rect(self.screen, (x_start, y_start, width, height), bg_colour, radius=0)
# x, y is the centre of the cell
if cell.is_square():
# Each square is 2 line_widths in size
scale = 2
x_start = x - (self.line_width * (scale / 2.0))
y_start = y - (self.line_width * (scale / 2.0))
width = height = self.line_width * scale
aafilled_rounded_rect(self.screen, (x_start, y_start, width, height), cell.square.colour, radius=0.75)
elif cell.is_triangle():
self.draw_triangle(x, y, cell.triangle.number)
elif cell.is_star():
# Each star is 1.5 line_widths in size
scale = 1.5
x_start = x - (self.line_width * (scale / 2.0))
y_start = y - (self.line_width * (scale / 2.0))
width = height = self.line_width * scale
# Draw a square with a rotated square on top of it
aafilled_rounded_rect(self.screen, (x_start, y_start, width, height), cell.star.colour, radius=0)
aafilled_rounded_rect(self.screen, (x_start, y_start, width, height), cell.star.colour, radius=0, angle=45)
elif cell.is_tetris():
self.draw_tetris(x, y, cell.tetris, blue=cell.tetris.negative)
elif cell.is_y():
self.draw_y(x, y)
|
|
"""
Tests for the following offsets:
- BMonthBegin
- BMonthEnd
"""
from __future__ import annotations
from datetime import datetime
import pytest
from pandas._libs.tslibs.offsets import MonthOffset
import pandas as pd
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tseries.offsets import (
BMonthBegin,
BMonthEnd,
)
@pytest.mark.parametrize("n", [-2, 1])
@pytest.mark.parametrize(
"cls",
[
BMonthBegin,
BMonthEnd,
],
)
def test_apply_index(cls, n):
offset = cls(n=n)
rng = pd.date_range(start="1/1/2000", periods=100000, freq="T")
ser = pd.Series(rng)
res = rng + offset
assert res.freq is None # not retained
assert res[0] == rng[0] + offset
assert res[-1] == rng[-1] + offset
res2 = ser + offset
# apply_index is only for indexes, not series, so no res2_v2
assert res2.iloc[0] == ser.iloc[0] + offset
assert res2.iloc[-1] == ser.iloc[-1] + offset
class TestBMonthBegin(Base):
_offset: type[MonthOffset] = BMonthBegin
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BMonthBegin()
offset2 = BMonthBegin()
assert not offset1 != offset2
offset_cases = []
offset_cases.append(
(
BMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 9, 1): datetime(2006, 10, 2),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2006, 12, 1): datetime(2007, 1, 1),
},
)
)
offset_cases.append(
(
BMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2006, 10, 2): datetime(2006, 10, 2),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 9, 15): datetime(2006, 10, 2),
},
)
)
offset_cases.append(
(
BMonthBegin(2),
{
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 1, 15): datetime(2008, 3, 3),
datetime(2006, 12, 29): datetime(2007, 2, 1),
datetime(2006, 12, 31): datetime(2007, 2, 1),
datetime(2007, 1, 1): datetime(2007, 3, 1),
datetime(2006, 11, 1): datetime(2007, 1, 1),
},
)
)
offset_cases.append(
(
BMonthBegin(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 6, 30): datetime(2008, 6, 2),
datetime(2008, 6, 1): datetime(2008, 5, 1),
datetime(2008, 3, 10): datetime(2008, 3, 3),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 12, 30): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 1),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(BMonthBegin(), datetime(2007, 12, 31), False),
(BMonthBegin(), datetime(2008, 1, 1), True),
(BMonthBegin(), datetime(2001, 4, 2), True),
(BMonthBegin(), datetime(2008, 3, 3), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
class TestBMonthEnd(Base):
_offset: type[MonthOffset] = BMonthEnd
def test_normalize(self):
dt = datetime(2007, 1, 1, 3)
result = dt + BMonthEnd(normalize=True)
expected = dt.replace(hour=0) + BMonthEnd()
assert result == expected
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BMonthEnd()
offset2 = BMonthEnd()
assert not offset1 != offset2
offset_cases = []
offset_cases.append(
(
BMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 29),
},
)
)
offset_cases.append(
(
BMonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 29),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
},
)
)
offset_cases.append(
(
BMonthEnd(2),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 2, 28),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 29),
},
)
)
offset_cases.append(
(
BMonthEnd(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2008, 5, 30),
datetime(2008, 12, 31): datetime(2008, 11, 28),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(BMonthEnd(), datetime(2007, 12, 31), True),
(BMonthEnd(), datetime(2008, 1, 1), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
|
|
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
from zope.interface import implements
from twisted.internet import defer
from twisted.trial import unittest
from twisted.words.protocols.jabber import sasl, sasl_mechanisms, xmlstream, jid
from twisted.words.xish import domish
NS_XMPP_SASL = 'urn:ietf:params:xml:ns:xmpp-sasl'
class DummySASLMechanism(object):
"""
Dummy SASL mechanism.
This just returns the initialResponse passed on creation, stores any
challenges and replies with an empty response.
@ivar challenge: Last received challenge.
@type challenge: C{unicode}.
@ivar initialResponse: Initial response to be returned when requested
via C{getInitialResponse} or C{None}.
@type initialResponse: C{unicode}
"""
implements(sasl_mechanisms.ISASLMechanism)
challenge = None
name = "DUMMY"
def __init__(self, initialResponse):
self.initialResponse = initialResponse
def getInitialResponse(self):
return self.initialResponse
def getResponse(self, challenge):
self.challenge = challenge
return ""
class DummySASLInitiatingInitializer(sasl.SASLInitiatingInitializer):
"""
Dummy SASL Initializer for initiating entities.
    This hardwires the SASL mechanism to L{DummySASLMechanism}, which is
instantiated with the value of C{initialResponse}.
@ivar initialResponse: The initial response to be returned by the
dummy SASL mechanism or C{None}.
@type initialResponse: C{unicode}.
"""
initialResponse = None
def setMechanism(self):
self.mechanism = DummySASLMechanism(self.initialResponse)
class SASLInitiatingInitializerTest(unittest.TestCase):
"""
Tests for L{sasl.SASLInitiatingInitializer}
"""
def setUp(self):
self.output = []
self.authenticator = xmlstream.Authenticator()
self.xmlstream = xmlstream.XmlStream(self.authenticator)
self.xmlstream.send = self.output.append
self.xmlstream.connectionMade()
self.xmlstream.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345' version='1.0'>")
self.init = DummySASLInitiatingInitializer(self.xmlstream)
def test_onFailure(self):
"""
Test that the SASL error condition is correctly extracted.
"""
failure = domish.Element(('urn:ietf:params:xml:ns:xmpp-sasl',
'failure'))
failure.addElement('not-authorized')
self.init._deferred = defer.Deferred()
self.init.onFailure(failure)
self.assertFailure(self.init._deferred, sasl.SASLAuthError)
self.init._deferred.addCallback(lambda e:
self.assertEquals('not-authorized',
e.condition))
return self.init._deferred
def test_sendAuthInitialResponse(self):
"""
Test starting authentication with an initial response.
"""
self.init.initialResponse = "dummy"
self.init.start()
auth = self.output[0]
self.assertEquals(NS_XMPP_SASL, auth.uri)
self.assertEquals('auth', auth.name)
self.assertEquals('DUMMY', auth['mechanism'])
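        # 'ZHVtbXk=' is the base64 encoding of 'dummy', the initial response
        # set above.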
self.assertEquals('ZHVtbXk=', str(auth))
def test_sendAuthNoInitialResponse(self):
"""
Test starting authentication without an initial response.
"""
self.init.initialResponse = None
self.init.start()
auth = self.output[0]
self.assertEquals('', str(auth))
def test_sendAuthEmptyInitialResponse(self):
"""
Test starting authentication where the initial response is empty.
"""
self.init.initialResponse = ""
self.init.start()
auth = self.output[0]
self.assertEquals('=', str(auth))
def test_onChallenge(self):
"""
Test receiving a challenge message.
"""
d = self.init.start()
challenge = domish.Element((NS_XMPP_SASL, 'challenge'))
challenge.addContent('bXkgY2hhbGxlbmdl')
self.init.onChallenge(challenge)
self.assertEqual('my challenge', self.init.mechanism.challenge)
self.init.onSuccess(None)
return d
def test_onChallengeEmpty(self):
"""
Test receiving an empty challenge message.
"""
d = self.init.start()
challenge = domish.Element((NS_XMPP_SASL, 'challenge'))
self.init.onChallenge(challenge)
self.assertEqual('', self.init.mechanism.challenge)
self.init.onSuccess(None)
return d
def test_onChallengeIllegalPadding(self):
"""
Test receiving a challenge message with illegal padding.
"""
d = self.init.start()
challenge = domish.Element((NS_XMPP_SASL, 'challenge'))
challenge.addContent('bXkg=Y2hhbGxlbmdl')
self.init.onChallenge(challenge)
self.assertFailure(d, sasl.SASLIncorrectEncodingError)
return d
def test_onChallengeIllegalCharacters(self):
"""
Test receiving a challenge message with illegal characters.
"""
d = self.init.start()
challenge = domish.Element((NS_XMPP_SASL, 'challenge'))
challenge.addContent('bXkg*Y2hhbGxlbmdl')
self.init.onChallenge(challenge)
self.assertFailure(d, sasl.SASLIncorrectEncodingError)
return d
def test_onChallengeMalformed(self):
"""
Test receiving a malformed challenge message.
"""
d = self.init.start()
challenge = domish.Element((NS_XMPP_SASL, 'challenge'))
challenge.addContent('a')
self.init.onChallenge(challenge)
self.assertFailure(d, sasl.SASLIncorrectEncodingError)
return d
class SASLInitiatingInitializerSetMechanismTest(unittest.TestCase):
"""
Test for L{sasl.SASLInitiatingInitializer.setMechanism}.
"""
def setUp(self):
self.output = []
self.authenticator = xmlstream.Authenticator()
self.xmlstream = xmlstream.XmlStream(self.authenticator)
self.xmlstream.send = self.output.append
self.xmlstream.connectionMade()
self.xmlstream.dataReceived("<stream:stream xmlns='jabber:client' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345' version='1.0'>")
self.init = sasl.SASLInitiatingInitializer(self.xmlstream)
def _setMechanism(self, name):
"""
Set up the XML Stream to have a SASL feature with the given mechanism.
"""
feature = domish.Element((NS_XMPP_SASL, 'mechanisms'))
feature.addElement('mechanism', content=name)
self.xmlstream.features[(feature.uri, feature.name)] = feature
self.init.setMechanism()
return self.init.mechanism.name
def test_anonymous(self):
"""
Test setting ANONYMOUS as the authentication mechanism.
"""
self.authenticator.jid = jid.JID('example.com')
self.authenticator.password = None
name = "ANONYMOUS"
self.assertEqual(name, self._setMechanism(name))
def test_plain(self):
"""
Test setting PLAIN as the authentication mechanism.
"""
self.authenticator.jid = jid.JID('[email protected]')
self.authenticator.password = 'secret'
name = "PLAIN"
self.assertEqual(name, self._setMechanism(name))
def test_digest(self):
"""
Test setting DIGEST-MD5 as the authentication mechanism.
"""
self.authenticator.jid = jid.JID('[email protected]')
self.authenticator.password = 'secret'
name = "DIGEST-MD5"
self.assertEqual(name, self._setMechanism(name))
def test_notAcceptable(self):
"""
Test using an unacceptable SASL authentication mechanism.
"""
self.authenticator.jid = jid.JID('[email protected]')
self.authenticator.password = 'secret'
self.assertRaises(sasl.SASLNoAcceptableMechanism,
self._setMechanism, 'SOMETHING_UNACCEPTABLE')
def test_notAcceptableWithoutUser(self):
"""
Test using an unacceptable SASL authentication mechanism with no JID.
"""
self.authenticator.jid = jid.JID('example.com')
self.authenticator.password = 'secret'
self.assertRaises(sasl.SASLNoAcceptableMechanism,
self._setMechanism, 'SOMETHING_UNACCEPTABLE')
|
|
import codecs
import json
import os
import shutil
import socket
import subprocess
import tempfile
from contextlib import contextmanager
from cStringIO import StringIO
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
import mock
from nose.tools import eq_, ok_
from PIL import Image
from requests import RequestException
import mkt
import mkt.site.tests
from mkt.users.models import UserProfile
from mkt.developers import tasks
from mkt.files.models import FileUpload
from mkt.site.fixtures import fixture
from mkt.site.tests.test_utils_ import get_image_path
from mkt.site.utils import app_factory, ImageCheck
from mkt.submit.tests.test_views import BaseWebAppTest
from mkt.webapps.models import AddonExcludedRegion as AER
from mkt.webapps.models import Preview, Webapp
def test_resize_icon_shrink():
""" Image should be shrunk so that the longest side is 32px. """
resize_size = [32]
final_size = [(32, 12)]
_uploader(resize_size, final_size)
def test_resize_icon_enlarge():
""" Image stays the same, since the new size is bigger than both sides. """
resize_size = [1000]
final_size = [(339, 128)]
_uploader(resize_size, final_size)
def test_resize_icon_same():
""" Image stays the same, since the new size is the same. """
resize_size = [339]
final_size = [(339, 128)]
_uploader(resize_size, final_size)
def test_resize_icon_list():
""" Resize multiple images at once. """
resize_size = [32, 82, 100]
final_size = [(32, 12), (82, 30), (100, 37)]
_uploader(resize_size, final_size)
def _uploader(resize_size, final_size):
img = get_image_path('mozilla.png')
original_size = (339, 128)
for rsize, fsize in zip(resize_size, final_size):
dest_name = os.path.join(settings.ADDON_ICONS_PATH, '1234')
src = tempfile.NamedTemporaryFile(mode='r+w+b', suffix='.png',
delete=False)
# resize_icon removes the original, copy it to a tempfile and use that.
with storage.open(src.name, 'w') as fp:
shutil.copyfileobj(open(img), fp)
# Sanity check.
with storage.open(src.name) as fp:
src_image = Image.open(fp)
src_image.load()
eq_(src_image.size, original_size)
val = tasks.resize_icon(src.name, dest_name, resize_size,
locally=False)
eq_(val, {'icon_hash': 'bb362450'})
dest_image_filename = '%s-%s.png' % (dest_name, rsize)
with storage.open(dest_image_filename) as fp:
dest_image = Image.open(fp)
dest_image.load()
# Assert that the width is always identical.
eq_(dest_image.size[0], fsize[0])
# Assert that the height can be a wee bit fuzzy.
assert -1 <= dest_image.size[1] - fsize[1] <= 1, (
                'Got height %d, expected %d' % (
                    dest_image.size[1], fsize[1]))
if storage.exists(dest_image_filename):
storage.delete(dest_image_filename)
assert not storage.exists(dest_image_filename)
assert not storage.exists(src.name)
def test_resize_promo_img():
"""Resize promo image."""
resize_size = [1050]
final_size = [(1050, 591), (640, 360), (320, 180)]
_promo_img_uploader(resize_size, final_size)
def _promo_img_uploader(resize_size, final_size):
img = get_image_path('game_1050.jpg')
original_size = (1050, 591)
for rsize, fsize in zip(resize_size, final_size):
dest_name = os.path.join(settings.WEBAPP_PROMO_IMG_PATH, '1234')
src = tempfile.NamedTemporaryFile(mode='r+w+b', suffix='.jpg',
delete=False)
# resize_icon removes the original, copy it to a tempfile and use that.
shutil.copyfile(img, src.name)
# Sanity check.
with storage.open(src.name) as fp:
src_image = Image.open(fp)
src_image.load()
eq_(src_image.size, original_size)
val = tasks.resize_promo_imgs(src.name, dest_name, resize_size,
locally=True)
eq_(val, {'promo_img_hash': '215dd2a2'})
with storage.open('%s-%s.png' % (dest_name, rsize)) as fp:
dest_image = Image.open(fp)
dest_image.load()
# Assert that the width is always identical.
eq_(dest_image.size[0], fsize[0])
# Assert that the height can be a wee bit fuzzy.
assert -1 <= dest_image.size[1] - fsize[1] <= 1, (
            'Got height %d, expected %d' % (
                dest_image.size[1], fsize[1]))
if os.path.exists(dest_image.filename):
os.remove(dest_image.filename)
assert not os.path.exists(dest_image.filename)
assert not os.path.exists(src.name)
class TestPngcrushImage(mkt.site.tests.TestCase):
def setUp(self):
img = get_image_path('mozilla.png')
self.src = tempfile.NamedTemporaryFile(mode='r+w+b', suffix=".png",
delete=False)
shutil.copyfile(img, self.src.name)
patcher = mock.patch('subprocess.Popen')
self.mock_popen = patcher.start()
attrs = {
'returncode': 0,
            'communicate.return_value': ('output', 'error')
}
self.mock_popen.return_value.configure_mock(**attrs)
self.addCleanup(patcher.stop)
def tearDown(self):
os.remove(self.src.name)
@mock.patch('shutil.move')
def test_pngcrush_image_is_called(self, mock_move):
name = self.src.name
expected_suffix = '.opti.png'
expected_cmd = ['pngcrush', '-q', '-rem', 'alla', '-brute', '-reduce',
'-e', expected_suffix, name]
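        # pngcrush's '-e' option sets the output suffix, so the optimised file
        # is written to '<name>.opti.png'; the task should then move it back
        # over the original (asserted on mock_move below).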
rval = tasks.pngcrush_image(name)
self.mock_popen.assert_called_once_with(
expected_cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
mock_move.assert_called_once_with(
'%s%s' % (os.path.splitext(name)[0], expected_suffix), name)
eq_(rval, {'image_hash': 'bb362450'})
@mock.patch('mkt.webapps.models.Webapp.update')
@mock.patch('shutil.move')
def test_set_modified(self, mock_move, update_mock):
"""Test passed instance is updated with the hash."""
name = self.src.name
obj = app_factory()
ret = tasks.pngcrush_image(name, 'some_hash', set_modified_on=[obj])
ok_('some_hash' in ret)
eq_(update_mock.call_args_list[-1][1]['some_hash'], ret['some_hash'])
ok_('modified' in update_mock.call_args_list[-1][1])
class TestValidator(mkt.site.tests.TestCase):
def setUp(self):
self.upload = FileUpload.objects.create()
self.upload.add_file(['test data'], 'example.txt', 9)
assert not self.upload.valid
def get_upload(self):
return FileUpload.objects.get(pk=self.upload.pk)
@mock.patch('mkt.developers.tasks.run_validator')
def test_pass_validation(self, _mock):
_mock.return_value = '{"errors": 0}'
tasks.validator(self.upload.pk)
assert self.get_upload().valid
@mock.patch('mkt.developers.tasks.run_validator')
def test_fail_validation(self, _mock):
_mock.return_value = '{"errors": 2}'
tasks.validator(self.upload.pk)
assert not self.get_upload().valid
@mock.patch('mkt.developers.tasks.run_validator')
def test_validation_error(self, _mock):
_mock.side_effect = Exception
eq_(self.upload.task_error, None)
tasks.validator(self.upload.pk)
error = self.get_upload().task_error
assert error is not None
assert error.startswith('Traceback (most recent call last)'), error
@mock.patch('mkt.developers.tasks.validate_app')
@mock.patch('mkt.developers.tasks.storage.open')
def test_validate_manifest(self, _open, _mock):
_open.return_value = tempfile.TemporaryFile()
_mock.return_value = '{"errors": 0}'
tasks.validator(self.upload.pk)
assert _mock.called
@mock.patch('mkt.developers.tasks.validate_packaged_app')
@mock.patch('zipfile.is_zipfile')
def test_validate_packaged_app(self, _zipfile, _mock):
_zipfile.return_value = True
_mock.return_value = '{"errors": 0}'
tasks.validator(self.upload.pk)
assert _mock.called
storage_open = storage.open
def _mock_hide_64px_icon(path, *args, **kwargs):
"""
A function that mocks `storage.open` and throws an IOError if you try to
open a 128x128px icon.
"""
if '128' in path:
raise IOError('No 128px icon for you!')
return storage_open(path, *args, **kwargs)
@override_settings(
PREVIEW_FULL_PATH='/tmp/uploads-tests/previews/full/%s/%d.%s',
PREVIEW_THUMBNAIL_PATH='/tmp/uploads-tests/previews/thumbs/%s/%d.png')
class TestResizePreview(mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
# Make sure there are no leftover files in the test directory before
# launching tests that depend on the files presence/absence.
shutil.rmtree('/tmp/uploads-tests/previews/', ignore_errors=True)
def get_image(self, filename):
"""Copy image to tmp and return tmp path.
We do this because the task `resize_preview` removes the src file when
finished.
"""
src = get_image_path(filename)
dst = os.path.join(settings.TMP_PATH, 'preview', filename)
with open(src) as local_f:
with storage.open(dst, 'w') as remote_f:
shutil.copyfileobj(local_f, remote_f)
return dst
def test_preview(self):
addon = Webapp.objects.get(pk=337141)
preview = Preview.objects.create(addon=addon)
src = self.get_image('preview.jpg')
tasks.resize_preview(src, preview.pk)
preview = preview.reload()
eq_(preview.image_size, [400, 533])
eq_(preview.thumbnail_size, [100, 133])
eq_(preview.is_landscape, False)
with storage.open(preview.thumbnail_path) as fp:
im = Image.open(fp)
eq_(list(im.size), [100, 133])
with storage.open(preview.image_path) as fp:
im = Image.open(fp)
eq_(list(im.size), [400, 533])
def test_preview_rotated(self):
addon = Webapp.objects.get(pk=337141)
preview = Preview.objects.create(addon=addon)
src = self.get_image('preview_landscape.jpg')
tasks.resize_preview(src, preview.pk)
preview = preview.reload()
eq_(preview.image_size, [533, 400])
eq_(preview.thumbnail_size, [133, 100])
eq_(preview.is_landscape, True)
with storage.open(preview.thumbnail_path) as fp:
im = Image.open(fp)
eq_(list(im.size), [133, 100])
with storage.open(preview.image_path) as fp:
im = Image.open(fp)
eq_(list(im.size), [533, 400])
def test_preview_dont_generate_image(self):
addon = Webapp.objects.get(pk=337141)
preview = Preview.objects.create(addon=addon)
src = self.get_image('preview.jpg')
tasks.resize_preview(src, preview.pk, generate_image=False)
preview = preview.reload()
eq_(preview.image_size, [])
eq_(preview.thumbnail_size, [100, 133])
eq_(preview.sizes, {u'thumbnail': [100, 133]})
with storage.open(preview.thumbnail_path) as fp:
im = Image.open(fp)
eq_(list(im.size), [100, 133])
assert not os.path.exists(preview.image_path), preview.image_path
class TestFetchManifest(mkt.site.tests.TestCase):
def setUp(self):
self.upload = FileUpload.objects.create()
self.content_type = 'application/x-web-app-manifest+json'
patcher = mock.patch('mkt.developers.tasks.requests.get')
self.requests_mock = patcher.start()
self.addCleanup(patcher.stop)
def get_upload(self):
return FileUpload.objects.get(pk=self.upload.pk)
def file(self, name):
return os.path.join(os.path.dirname(__file__), 'addons', name)
@contextmanager
def patch_requests(self):
response_mock = mock.Mock(status_code=200)
response_mock.iter_content.return_value = mock.Mock(
next=lambda: '<default>')
response_mock.headers = {'content-type': self.content_type}
yield response_mock
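        # Note: requests.get is only wired to return this response once the
        # with-block exits; tests tweak the response inside the block and call
        # the task under test afterwards.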
self.requests_mock.return_value = response_mock
@mock.patch('mkt.developers.tasks.validator')
def test_success_add_file(self, validator_mock):
with self.patch_requests() as ur:
ur.iter_content.return_value = mock.Mock(next=lambda: 'woo')
tasks.fetch_manifest('http://xx.com/manifest.json', self.upload.pk)
upload = FileUpload.objects.get(pk=self.upload.pk)
eq_(upload.name, 'http://xx.com/manifest.json')
eq_(storage.open(upload.path).read(), 'woo')
@mock.patch('mkt.developers.tasks.validator')
def test_success_call_validator(self, validator_mock):
with self.patch_requests() as ur:
ct = self.content_type + '; charset=utf-8'
ur.headers = {'content-type': ct}
tasks.fetch_manifest('http://xx.com/manifest.json', self.upload.pk)
assert validator_mock.called
assert self.requests_mock.called
eq_(self.requests_mock.call_args[1]['headers'], tasks.REQUESTS_HEADERS)
def check_validation(self, msg=''):
upload = self.get_upload()
if msg:
validation = json.loads(upload.validation)
eq_([m['message'] for m in validation['messages']], [msg])
eq_(validation['errors'], 1)
eq_(validation['success'], False)
eq_(len(validation['messages']), 1)
else:
validation_output = upload.validation
if not validation_output:
return
validation = json.loads(validation_output)
assert not validation['messages']
eq_(validation['errors'], 0)
eq_(validation['success'], True)
def test_connection_error(self):
reason = socket.gaierror(8, 'nodename nor servname provided')
self.requests_mock.side_effect = RequestException(reason)
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'No manifest was found at that URL. Check the address and try '
'again.')
def test_url_timeout(self):
reason = socket.timeout('too slow')
self.requests_mock.side_effect = RequestException(reason)
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'No manifest was found at that URL. Check the address and try '
'again.')
def test_other_url_error(self):
reason = Exception('Some other failure.')
self.requests_mock.side_effect = RequestException(reason)
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'No manifest was found at that URL. Check the address and try '
'again.')
@mock.patch('mkt.developers.tasks.validator', lambda uid, **kw: None)
def test_no_content_type(self):
with self.patch_requests() as ur:
ur.headers = {}
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'No manifest was found at that URL. Check the address and try '
'again.')
@mock.patch('mkt.developers.tasks.validator', lambda uid, **kw: None)
def test_bad_content_type(self):
with self.patch_requests() as ur:
ur.headers = {'Content-Type': 'x'}
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'Manifests must be served with the HTTP header "Content-Type: '
'application/x-web-app-manifest+json". See %s for more '
'information.' % tasks.CT_URL)
@mock.patch('mkt.developers.tasks.validator', lambda uid, **kw: None)
def test_good_charset(self):
with self.patch_requests() as ur:
ur.headers = {
'content-type': 'application/x-web-app-manifest+json;'
'charset=utf-8'
}
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation()
@mock.patch('mkt.developers.tasks.validator', lambda uid, **kw: None)
def test_bad_charset(self):
with self.patch_requests() as ur:
ur.headers = {
'content-type': 'application/x-web-app-manifest+json;'
'charset=ISO-1234567890-LOL'
}
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation("The manifest's encoding does not match the "
'charset provided in the HTTP Content-Type.')
def test_response_too_large(self):
with self.patch_requests() as ur:
content = 'x' * (settings.MAX_WEBAPP_UPLOAD_SIZE + 1)
ur.iter_content.return_value = mock.Mock(next=lambda: content)
tasks.fetch_manifest('url', self.upload.pk)
max_webapp_size = settings.MAX_WEBAPP_UPLOAD_SIZE
self.check_validation('Your manifest must be less than %s bytes.' %
max_webapp_size)
@mock.patch('mkt.developers.tasks.validator', lambda uid, **kw: None)
def test_http_error(self):
with self.patch_requests() as ur:
ur.status_code = 404
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'No manifest was found at that URL. Check the address and try '
'again.')
def test_strip_utf8_bom(self):
with self.patch_requests() as ur:
with open(self.file('utf8bom.webapp')) as fp:
content = fp.read()
ur.iter_content.return_value = mock.Mock(next=lambda: content)
tasks.fetch_manifest('url', self.upload.pk)
# Should not be called with anything else (e.g., `decode_unicode`).
ur.iter_content.assert_called_with(
chunk_size=settings.MAX_WEBAPP_UPLOAD_SIZE + 1)
upload = self.get_upload()
with storage.open(upload.path, 'rb') as fp:
manifest = fp.read()
json.loads(manifest) # No parse error.
assert not manifest.startswith(codecs.BOM_UTF8)
def test_non_utf8_encoding(self):
with self.patch_requests() as ur:
with open(self.file('utf8bom.webapp')) as fp:
# Set encoding to utf16 which will be invalid.
content = fp.read().decode('utf8').encode('utf16')
ur.iter_content.return_value = mock.Mock(next=lambda: content)
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'Your manifest file was not encoded as valid UTF-8.')
class TestFetchIcon(BaseWebAppTest):
def setUp(self):
super(TestFetchIcon, self).setUp()
self.content_type = 'image/png'
self.apps_path = os.path.join(settings.ROOT, 'mkt', 'developers',
'tests', 'addons')
patcher = mock.patch('mkt.developers.tasks.requests.get')
self.requests_mock = patcher.start()
self.requests_mock.return_value = StringIO('mozballin')
self.addCleanup(patcher.stop)
def webapp_from_path(self, path):
self.upload = self.get_upload(abspath=path,
user=UserProfile.objects.get(pk=999))
self.url = reverse('submit.app')
self.login('[email protected]')
return self.post_addon()
def test_no_version(self):
app = app_factory()
eq_(tasks.fetch_icon(app.pk), None)
def test_no_icons(self):
path = os.path.join(self.apps_path, 'noicon.webapp')
iconless_app = self.webapp_from_path(path)
tasks.fetch_icon(iconless_app.pk,
iconless_app.latest_version.all_files[0].pk)
assert not self.requests_mock.called
def test_bad_icons(self):
path = os.path.join(self.apps_path, 'badicon.webapp')
iconless_app = self.webapp_from_path(path)
tasks.fetch_icon(iconless_app.pk,
iconless_app.latest_version.all_files[0].pk)
assert not self.requests_mock.called
def check_icons(self, webapp, file_obj=None):
manifest = webapp.get_manifest_json(file_obj)
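        # The manifest's 'icons' entry maps size strings to icon paths, so its
        # keys give the available sizes.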
biggest = max([int(size) for size in manifest['icons']])
icon_dir = webapp.get_icon_dir()
for size in mkt.CONTENT_ICON_SIZES:
if not size <= biggest:
continue
icon_path = os.path.join(icon_dir, '%s-%s.png'
% (str(webapp.id), size))
with storage.open(icon_path, 'r') as img:
checker = ImageCheck(img)
assert checker.is_image()
eq_(checker.img.size, (size, size))
def test_data_uri(self):
app_path = os.path.join(self.apps_path, 'dataicon.webapp')
webapp = self.webapp_from_path(app_path)
file_obj = webapp.latest_version.all_files[0]
tasks.fetch_icon(webapp.pk, file_obj.pk)
eq_(webapp.icon_type, self.content_type)
self.check_icons(webapp, file_obj)
def test_hosted_icon(self):
app_path = os.path.join(self.apps_path, 'mozball.webapp')
webapp = self.webapp_from_path(app_path)
file_obj = webapp.latest_version.all_files[0]
img_path = os.path.join(self.apps_path, 'mozball-128.png')
with open(img_path, 'r') as content:
tasks.save_icon(webapp, content.read())
eq_(webapp.icon_type, self.content_type)
self.check_icons(webapp, file_obj)
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
@mock.patch('mkt.developers.tasks._fetch_content')
@mock.patch('mkt.developers.tasks.save_icon')
def test_cdn_icon(self, save, fetch, json):
response = mock.Mock()
response.read.return_value = ''
webapp = app_factory()
url = 'http://foo.com/bar'
json.return_value = {'icons': {'128': url}}
tasks.fetch_icon(webapp.pk, webapp.latest_version.all_files[0].pk)
assert url in fetch.call_args[0][0]
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
@mock.patch('mkt.developers.tasks.SafeUnzip')
@mock.patch('mkt.developers.tasks.save_icon')
def test_packaged_icon(self, save, zip, json):
response = mock.Mock()
response.read.return_value = ''
zf = mock.Mock()
zip.return_value = zf
webapp = app_factory(is_packaged=True)
file_obj = webapp.latest_version.all_files[0]
url = '/path/to/icon.png'
json.return_value = {'icons': {'128': url}}
with storage.open(file_obj.file_path, 'w') as f:
f.write("fake zip file")
tasks.fetch_icon(webapp.pk, file_obj.pk)
assert url[1:] in zf.extract_path.call_args[0][0]
class TestRegionEmail(mkt.site.tests.WebappTestCase):
@mock.patch.object(settings, 'SITE_URL', 'http://omg.org/')
def test_email_for_one_new_region(self):
tasks.region_email([self.app.id], [mkt.regions.BRA.id])
msg = mail.outbox[0]
eq_(msg.subject, '%s: Brazil region added to the Firefox Marketplace'
% self.app.name)
eq_(msg.to, ['[email protected]'])
dev_url = ('http://omg.org/developers/app/something-something/'
'edit#details')
assert unicode(self.app.name) in msg.body
assert dev_url in msg.body
assert ' added a new ' in msg.body
assert ' for Brazil.' in msg.body
# TODO: Re-enable this when we bring back Unsubscribe (bug 802379).
# assert 'Unsubscribe' in msg.body
@mock.patch.object(settings, 'SITE_URL', 'http://omg.org/')
def test_email_for_two_new_regions(self):
tasks.region_email([self.app.id],
[mkt.regions.GBR.id, mkt.regions.BRA.id])
msg = mail.outbox[0]
eq_(msg.subject, '%s: New regions added to the Firefox Marketplace'
% self.app.name)
eq_(msg.to, ['[email protected]'])
dev_url = ('http://omg.org/developers/app/something-something/'
'edit#details')
assert unicode(self.app.name) in msg.body
assert dev_url in msg.body
assert ' added two new ' in msg.body
assert ': Brazil and United Kingdom.' in msg.body
# TODO: Re-enable this when we bring back Unsubscribe (bug 802379).
# assert 'Unsubscribe' in msg.body
@mock.patch.object(settings, 'SITE_URL', 'http://omg.org/')
def test_email_for_several_new_regions(self):
tasks.region_email([self.app.id],
[mkt.regions.GBR.id, mkt.regions.USA.id,
mkt.regions.BRA.id])
msg = mail.outbox[0]
eq_(msg.subject,
'%s: New regions added to the Firefox Marketplace' % self.app.name)
assert ' added a few new ' in msg.body
assert ': Brazil, United Kingdom, and United States.' in msg.body
class TestRegionExclude(mkt.site.tests.WebappTestCase):
def test_exclude_no_apps(self):
tasks.region_exclude([], [])
eq_(AER.objects.count(), 0)
tasks.region_exclude([], [mkt.regions.GBR.id])
eq_(AER.objects.count(), 0)
def test_exclude_no_regions(self):
tasks.region_exclude([self.app.id], [])
eq_(AER.objects.count(), 0)
def test_exclude_one_new_region(self):
tasks.region_exclude([self.app.id], [mkt.regions.GBR.id])
excluded = list(AER.objects.filter(addon=self.app)
.values_list('region', flat=True))
eq_(excluded, [mkt.regions.GBR.id])
def test_exclude_several_new_regions(self):
tasks.region_exclude([self.app.id], [mkt.regions.USA.id,
mkt.regions.GBR.id])
excluded = sorted(AER.objects.filter(addon=self.app)
.values_list('region', flat=True))
eq_(excluded, sorted([mkt.regions.USA.id, mkt.regions.GBR.id]))
|
|
from __future__ import division, absolute_import, print_function
import sys
import time
from datetime import date
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_allclose, assert_raises,
)
from numpy.lib._iotools import (
LineSplitter, NameValidator, StringConverter,
has_nested_fields, easy_dtype, flatten_dtype
)
from numpy.compat import unicode
class TestLineSplitter(object):
"Tests the LineSplitter class."
def test_no_delimiter(self):
"Test LineSplitter w/o delimiter"
strg = " 1 2 3 4 5 # test"
test = LineSplitter()(strg)
assert_equal(test, ['1', '2', '3', '4', '5'])
test = LineSplitter('')(strg)
assert_equal(test, ['1', '2', '3', '4', '5'])
def test_space_delimiter(self):
"Test space delimiter"
strg = " 1 2 3 4 5 # test"
test = LineSplitter(' ')(strg)
assert_equal(test, ['1', '2', '3', '4', '', '5'])
test = LineSplitter(' ')(strg)
assert_equal(test, ['1 2 3 4', '5'])
def test_tab_delimiter(self):
"Test tab delimiter"
strg = " 1\t 2\t 3\t 4\t 5 6"
test = LineSplitter('\t')(strg)
assert_equal(test, ['1', '2', '3', '4', '5 6'])
strg = " 1 2\t 3 4\t 5 6"
test = LineSplitter('\t')(strg)
assert_equal(test, ['1 2', '3 4', '5 6'])
def test_other_delimiter(self):
"Test LineSplitter on delimiter"
strg = "1,2,3,4,,5"
test = LineSplitter(',')(strg)
assert_equal(test, ['1', '2', '3', '4', '', '5'])
#
strg = " 1,2,3,4,,5 # test"
test = LineSplitter(',')(strg)
assert_equal(test, ['1', '2', '3', '4', '', '5'])
# gh-11028 bytes comment/delimiters should get encoded
strg = b" 1,2,3,4,,5 % test"
test = LineSplitter(delimiter=b',', comments=b'%')(strg)
assert_equal(test, ['1', '2', '3', '4', '', '5'])
def test_constant_fixed_width(self):
"Test LineSplitter w/ fixed-width fields"
strg = " 1 2 3 4 5 # test"
test = LineSplitter(3)(strg)
assert_equal(test, ['1', '2', '3', '4', '', '5', ''])
#
strg = " 1 3 4 5 6# test"
test = LineSplitter(20)(strg)
assert_equal(test, ['1 3 4 5 6'])
#
strg = " 1 3 4 5 6# test"
test = LineSplitter(30)(strg)
assert_equal(test, ['1 3 4 5 6'])
def test_variable_fixed_width(self):
strg = " 1 3 4 5 6# test"
test = LineSplitter((3, 6, 6, 3))(strg)
assert_equal(test, ['1', '3', '4 5', '6'])
#
strg = " 1 3 4 5 6# test"
test = LineSplitter((6, 6, 9))(strg)
assert_equal(test, ['1', '3 4', '5 6'])
# -----------------------------------------------------------------------------
class TestNameValidator(object):
def test_case_sensitivity(self):
"Test case sensitivity"
names = ['A', 'a', 'b', 'c']
test = NameValidator().validate(names)
assert_equal(test, ['A', 'a', 'b', 'c'])
test = NameValidator(case_sensitive=False).validate(names)
assert_equal(test, ['A', 'A_1', 'B', 'C'])
test = NameValidator(case_sensitive='upper').validate(names)
assert_equal(test, ['A', 'A_1', 'B', 'C'])
test = NameValidator(case_sensitive='lower').validate(names)
assert_equal(test, ['a', 'a_1', 'b', 'c'])
# check exceptions
assert_raises(ValueError, NameValidator, case_sensitive='foobar')
def test_excludelist(self):
"Test excludelist"
names = ['dates', 'data', 'Other Data', 'mask']
validator = NameValidator(excludelist=['dates', 'data', 'mask'])
test = validator.validate(names)
assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_'])
def test_missing_names(self):
"Test validate missing names"
namelist = ('a', 'b', 'c')
validator = NameValidator()
assert_equal(validator(namelist), ['a', 'b', 'c'])
namelist = ('', 'b', 'c')
assert_equal(validator(namelist), ['f0', 'b', 'c'])
namelist = ('a', 'b', '')
assert_equal(validator(namelist), ['a', 'b', 'f0'])
namelist = ('', 'f0', '')
assert_equal(validator(namelist), ['f1', 'f0', 'f2'])
def test_validate_nb_names(self):
"Test validate nb names"
namelist = ('a', 'b', 'c')
validator = NameValidator()
assert_equal(validator(namelist, nbfields=1), ('a',))
assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"),
['a', 'b', 'c', 'g0', 'g1'])
def test_validate_wo_names(self):
"Test validate no names"
namelist = None
validator = NameValidator()
assert_(validator(namelist) is None)
assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2'])
# -----------------------------------------------------------------------------
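# Parse a "YYYY-MM-DD" string into a datetime.date using the first three
# fields (year, month, day) of the struct_time returned by strptime.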
def _bytes_to_date(s):
return date(*time.strptime(s, "%Y-%m-%d")[:3])
class TestStringConverter(object):
"Test StringConverter"
def test_creation(self):
"Test creation of a StringConverter"
converter = StringConverter(int, -99999)
assert_equal(converter._status, 1)
assert_equal(converter.default, -99999)
def test_upgrade(self):
"Tests the upgrade method."
converter = StringConverter()
assert_equal(converter._status, 0)
# test int
assert_equal(converter.upgrade('0'), 0)
assert_equal(converter._status, 1)
# On systems where long defaults to 32-bit, the statuses will be
# offset by one, so we check for this here.
import numpy.core.numeric as nx
status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize)
# test int > 2**32
assert_equal(converter.upgrade('17179869184'), 17179869184)
assert_equal(converter._status, 1 + status_offset)
# test float
assert_allclose(converter.upgrade('0.'), 0.0)
assert_equal(converter._status, 2 + status_offset)
# test complex
assert_equal(converter.upgrade('0j'), complex('0j'))
assert_equal(converter._status, 3 + status_offset)
# test str
# note that the longdouble type has been skipped, so the
# _status increases by 2. Everything should succeed with
# unicode conversion (5).
for s in ['a', u'a', b'a']:
res = converter.upgrade(s)
assert_(type(res) is unicode)
assert_equal(res, u'a')
assert_equal(converter._status, 5 + status_offset)
def test_missing(self):
"Tests the use of missing values."
converter = StringConverter(missing_values=('missing',
'missed'))
converter.upgrade('0')
assert_equal(converter('0'), 0)
assert_equal(converter(''), converter.default)
assert_equal(converter('missing'), converter.default)
assert_equal(converter('missed'), converter.default)
        assert_raises(ValueError, converter, 'miss')
def test_upgrademapper(self):
"Tests updatemapper"
dateparser = _bytes_to_date
StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
convert = StringConverter(dateparser, date(2000, 1, 1))
test = convert('2001-01-01')
assert_equal(test, date(2001, 1, 1))
test = convert('2009-01-01')
assert_equal(test, date(2009, 1, 1))
test = convert('')
assert_equal(test, date(2000, 1, 1))
def test_string_to_object(self):
"Make sure that string-to-object functions are properly recognized"
old_mapper = StringConverter._mapper[:] # copy of list
conv = StringConverter(_bytes_to_date)
assert_equal(conv._mapper, old_mapper)
assert_(hasattr(conv, 'default'))
def test_keep_default(self):
"Make sure we don't lose an explicit default"
converter = StringConverter(None, missing_values='',
default=-999)
converter.upgrade('3.14159265')
assert_equal(converter.default, -999)
assert_equal(converter.type, np.dtype(float))
#
converter = StringConverter(
None, missing_values='', default=0)
converter.upgrade('3.14159265')
assert_equal(converter.default, 0)
assert_equal(converter.type, np.dtype(float))
def test_keep_default_zero(self):
"Check that we don't lose a default of 0"
converter = StringConverter(int, default=0,
missing_values="N/A")
assert_equal(converter.default, 0)
def test_keep_missing_values(self):
"Check that we're not losing missing values"
converter = StringConverter(int, default=0,
missing_values="N/A")
assert_equal(
converter.missing_values, set(['', 'N/A']))
def test_int64_dtype(self):
"Check that int64 integer types can be specified"
converter = StringConverter(np.int64, default=0)
val = "-9223372036854775807"
assert_(converter(val) == -9223372036854775807)
val = "9223372036854775807"
assert_(converter(val) == 9223372036854775807)
def test_uint64_dtype(self):
"Check that uint64 integer types can be specified"
converter = StringConverter(np.uint64, default=0)
val = "9223372043271415339"
assert_(converter(val) == 9223372043271415339)
class TestMiscFunctions(object):
def test_has_nested_dtype(self):
"Test has_nested_dtype"
ndtype = np.dtype(float)
assert_equal(has_nested_fields(ndtype), False)
ndtype = np.dtype([('A', '|S3'), ('B', float)])
assert_equal(has_nested_fields(ndtype), False)
ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
assert_equal(has_nested_fields(ndtype), True)
def test_easy_dtype(self):
"Test ndtype on dtypes"
# Simple case
ndtype = float
assert_equal(easy_dtype(ndtype), np.dtype(float))
# As string w/o names
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype),
np.dtype([('f0', "i4"), ('f1', "f8")]))
# As string w/o names but different default format
assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"),
np.dtype([('field_000', "i4"), ('field_001', "f8")]))
# As string w/ names
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype, names="a, b"),
np.dtype([('a', "i4"), ('b', "f8")]))
# As string w/ names (too many)
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype, names="a, b, c"),
np.dtype([('a', "i4"), ('b', "f8")]))
# As string w/ names (not enough)
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype, names=", b"),
np.dtype([('f0', "i4"), ('b', "f8")]))
# ... (with different default format)
assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"),
np.dtype([('a', "i4"), ('f00', "f8")]))
# As list of tuples w/o names
ndtype = [('A', int), ('B', float)]
assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)]))
# As list of tuples w/ names
assert_equal(easy_dtype(ndtype, names="a,b"),
np.dtype([('a', int), ('b', float)]))
# As list of tuples w/ not enough names
assert_equal(easy_dtype(ndtype, names="a"),
np.dtype([('a', int), ('f0', float)]))
# As list of tuples w/ too many names
assert_equal(easy_dtype(ndtype, names="a,b,c"),
np.dtype([('a', int), ('b', float)]))
# As list of types w/o names
ndtype = (int, float, float)
assert_equal(easy_dtype(ndtype),
np.dtype([('f0', int), ('f1', float), ('f2', float)]))
        # As list of types w/ names
ndtype = (int, float, float)
assert_equal(easy_dtype(ndtype, names="a, b, c"),
np.dtype([('a', int), ('b', float), ('c', float)]))
# As simple dtype w/ names
ndtype = np.dtype(float)
assert_equal(easy_dtype(ndtype, names="a, b, c"),
np.dtype([(_, float) for _ in ('a', 'b', 'c')]))
# As simple dtype w/o names (but multiple fields)
ndtype = np.dtype(float)
assert_equal(
easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"),
np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')]))
def test_flatten_dtype(self):
"Testing flatten_dtype"
# Standard dtype
dt = np.dtype([("a", "f8"), ("b", "f8")])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [float, float])
# Recursive dtype
dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int])
# dtype with shaped fields
dt = np.dtype([("a", (float, 2)), ("b", (int, 3))])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [float, int])
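        # Passing True as the second argument expands shaped fields to one
        # entry per element instead of one entry per field.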
dt_flat = flatten_dtype(dt, True)
assert_equal(dt_flat, [float] * 2 + [int] * 3)
# dtype w/ titles
dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [float, float])
|
|
"""
setuptools must be installed first. If you do not have setuptools installed,
please download and install it from http://pypi.python.org/pypi/setuptools
"""
import os
import sys
import subprocess
import re
import setuptools
from numpy.distutils.core import setup
import numpy
curdir = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(curdir, "README.txt")).read()
CHANGES = open(os.path.join(curdir, "CHANGES.txt")).read()
DISTNAME = 'statsmodels'
DESCRIPTION = 'Statistical computations and models for use with SciPy'
LONG_DESCRIPTION = README + '\n\n' + CHANGES
MAINTAINER = 'Skipper Seabold, Josef Perktold'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://statsmodels.sourceforge.net/'
LICENSE = 'BSD License'
DOWNLOAD_URL = ''
def check_dependency_versions(min_versions):
"""
Don't let setuptools do this. It's rude.
Just makes sure it can import the packages and if not, stops the build
process.
"""
from distutils.version import StrictVersion
try:
from numpy.version import short_version as npversion
except ImportError:
raise ImportError("statsmodels requires numpy")
try:
from scipy.version import short_version as spversion
except ImportError:
raise ImportError("statsmodels requires scipy")
try:
from pandas.version import version as pversion
    except ImportError:
raise ImportError("statsmodels requires pandas")
try:
assert StrictVersion(npversion) >= min_versions['numpy']
except AssertionError:
raise ImportError("Numpy version is %s. Requires >= %s" %
(npversion, min_versions['numpy']))
try:
assert StrictVersion(spversion) >= min_versions['scipy']
except AssertionError:
raise ImportError("Scipy version is %s. Requires >= %s" %
(spversion, min_versions['scipy']))
try:
#NOTE: not sure how robust this regex is but it at least allows
# double digit version numbering
        pversion = re.match(r"\d*\.\d*\.\d*", pversion).group()
assert StrictVersion(pversion) >= min_versions['pandas']
except AssertionError:
raise ImportError("Pandas version is %s. Requires >= %s" %
(pversion, min_versions['pandas']))
MAJ = 0
MIN = 5
REV = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJ, MIN, REV)
classifiers = [ 'Development Status :: 4 - Beta',
'Environment :: Console',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Topic :: Scientific/Engineering']
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(" ".join(cmd), stdout = subprocess.PIPE, env=env,
shell=True).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def write_version_py(filename='statsmodels/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
dowrite = True
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists(filename):
# must be a source distribution, use existing version file
try:
from statsmodels.version import git_revision as GIT_REVISION
#print "debug import success GIT_REVISION", GIT_REVISION
except ImportError:
dowrite = False
#changed: if we are not in a git repository then don't update version.py
## raise ImportError("Unable to import git_revision. Try removing " \
## "statsmodels/version.py and the build directory " \
## "before building.")
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev-' + GIT_REVISION[:7]
if dowrite:
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version' : FULLVERSION,
'git_revision' : GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
try:
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
# 2.x
from distutils.command.build_py import build_py
def configuration(parent_package='', top_path=None, package_name=DISTNAME):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path,
namespace_packages = ['scikits'])
config.add_subpackage('scikits')
config.add_subpackage(DISTNAME)
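    # 'scikits.statsmodels' is kept alongside 'statsmodels', presumably so the
    # legacy scikits-namespace import path keeps working.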
config.add_subpackage('scikits.statsmodels')
config.add_data_files('scikits/__init__.py')
config.add_data_files('docs/build/htmlhelp/statsmodelsdoc.chm',
'statsmodels/statsmodelsdoc.chm')
config.set_options(
ignore_setup_xxx_py = True,
assume_default_configuration = True,
delegate_options_to_subpackages = True,
quiet = False,
)
return config
if __name__ == "__main__":
min_versions = {
'numpy' : '1.4.0',
'scipy' : '0.7.0',
'pandas' : '0.7.1',
}
check_dependency_versions(min_versions)
write_version_py()
setup(
name = DISTNAME,
version = VERSION,
maintainer = MAINTAINER,
maintainer_email = MAINTAINER_EMAIL,
description = DESCRIPTION,
license = LICENSE,
url = URL,
download_url = DOWNLOAD_URL,
long_description = LONG_DESCRIPTION,
configuration = configuration,
namespace_packages = ['scikits'],
packages = setuptools.find_packages(),
include_package_data = True,
test_suite="nose.collector",
        zip_safe = False, # the package cannot run from an .egg file because
                          # of the nose tests
classifiers = classifiers,
cmdclass={'build_py': build_py})
|