"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btm_individual_co_edge_query1332_all_of
except ImportError:
btm_individual_co_edge_query1332_all_of = sys.modules[
"onshape_client.oas.models.btm_individual_co_edge_query1332_all_of"
]
try:
from onshape_client.oas.models import btm_individual_query138
except ImportError:
btm_individual_query138 = sys.modules[
"onshape_client.oas.models.btm_individual_query138"
]
try:
from onshape_client.oas.models import btm_individual_query_base139
except ImportError:
btm_individual_query_base139 = sys.modules[
"onshape_client.oas.models.btm_individual_query_base139"
]
try:
from onshape_client.oas.models import btp_statement269
except ImportError:
btp_statement269 = sys.modules["onshape_client.oas.models.btp_statement269"]
class BTMIndividualCoEdgeQuery1332(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self; this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"edge_query": (
btm_individual_query138.BTMIndividualQuery138,
), # noqa: E501
"face_query": (
btm_individual_query138.BTMIndividualQuery138,
), # noqa: E501
"deterministic_id_list": (
btm_individual_query_base139.BTMIndividualQueryBase139,
), # noqa: E501
"deterministic_ids": ([str],), # noqa: E501
"import_microversion": (str,), # noqa: E501
"node_id": (str,), # noqa: E501
"query": (
btm_individual_query_base139.BTMIndividualQueryBase139,
), # noqa: E501
"query_string": (str,), # noqa: E501
"persistent_query": (btp_statement269.BTPStatement269,), # noqa: E501
"query_statement": (btp_statement269.BTPStatement269,), # noqa: E501
"variable_name": (
btm_individual_query138.BTMIndividualQuery138,
), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"edge_query": "edgeQuery", # noqa: E501
"face_query": "faceQuery", # noqa: E501
"deterministic_id_list": "deterministicIdList", # noqa: E501
"deterministic_ids": "deterministicIds", # noqa: E501
"import_microversion": "importMicroversion", # noqa: E501
"node_id": "nodeId", # noqa: E501
"query": "query", # noqa: E501
"query_string": "queryString", # noqa: E501
"persistent_query": "persistentQuery", # noqa: E501
"query_statement": "queryStatement", # noqa: E501
"variable_name": "variableName", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btm_individual_co_edge_query1332.BTMIndividualCoEdgeQuery1332 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
edge_query (btm_individual_query138.BTMIndividualQuery138): [optional] # noqa: E501
face_query (btm_individual_query138.BTMIndividualQuery138): [optional] # noqa: E501
deterministic_id_list (btm_individual_query_base139.BTMIndividualQueryBase139): [optional] # noqa: E501
deterministic_ids ([str]): [optional] # noqa: E501
import_microversion (str): [optional] # noqa: E501
node_id (str): [optional] # noqa: E501
query (btm_individual_query_base139.BTMIndividualQueryBase139): [optional] # noqa: E501
query_string (str): [optional] # noqa: E501
persistent_query (btp_statement269.BTPStatement269): [optional] # noqa: E501
query_statement (btp_statement269.BTPStatement269): [optional] # noqa: E501
variable_name (btm_individual_query138.BTMIndividualQuery138): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
btm_individual_co_edge_query1332_all_of.BTMIndividualCoEdgeQuery1332AllOf,
btm_individual_query138.BTMIndividualQuery138,
],
"oneOf": [],
}
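# Illustrative usage sketch (not part of the generated module). The keyword
# arguments mirror optional attributes declared in openapi_types above; the
# literal values are made-up placeholders:
#
#     query = BTMIndividualCoEdgeQuery1332(
#         bt_type="BTMIndividualCoEdgeQuery-1332",
#         deterministic_ids=["JCC", "JCD"],
#         node_id="example-node-id",
#     )
#     print(query.bt_type, query.deterministic_ids)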
| {
"content_hash": "d872a6585be9baa526d7e8e0e687b314",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 116,
"avg_line_length": 39.73279352226721,
"alnum_prop": 0.5977175463623395,
"repo_name": "onshape-public/onshape-clients",
"id": "dbadfd62975d5c160bfaf6bd702bef9ed4bcf1a1",
"size": "9831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/onshape_client/oas/models/btm_individual_co_edge_query1332.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4873"
},
{
"name": "Go",
"bytes": "59674"
},
{
"name": "HTML",
"bytes": "3851790"
},
{
"name": "JavaScript",
"bytes": "2217"
},
{
"name": "Makefile",
"bytes": "559"
},
{
"name": "Python",
"bytes": "7560009"
},
{
"name": "Shell",
"bytes": "3475"
},
{
"name": "TypeScript",
"bytes": "1412661"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import textwrap
import xml.etree.ElementTree as ET
from six.moves import range
from unidecode import unidecode
import escpos.barcode
import escpos.feature
from satcomum import br
from satcomum import constantes
from .config import padrao as config_padrao
class ExtratoCFe(object):
"""Classe base para os extratos do CF-e de venda e cancelamento, fornecendo
uma implementação comum para o cabeçalho dos CF-e de venda e cancelamento,
além da infraestrutura que simplifica a interface para impressoras ESC/POS.
As implementações que realmente emitem os extratos estão nas classes
:class:`~satextrato.venda.ExtratoCFeVenda` e
:class:`~satextrato.cancelamento.ExtratoCFeCancelamento`.
"""
def __init__(self, fp, impressora, config=None):
"""Inicia uma instância de :class:`ExtratoCFe`.
:param fp: Um *file object* para o documento XML que contém o CF-e.
:param impressora: Um instância (ou subclasse, especialização) de
:class:`escpos.impl.epson.GenericESCPOS` usado para efetivamente
imprimir o extrato.
:param config: Opcional.
Uma instância de :class:`satextrato.config.Configuracoes`.
Se não informado, serão usados valores padrão.
"""
super(ExtratoCFe, self).__init__()
self._config = config or config_padrao()
self._tree = ET.parse(fp)
self.root = self._tree.getroot()
self.impressora = impressora
self._flag_negrito = False
self._flag_italico = False
self._flag_expandido = False
self._flag_condensado = False
@property
def _colunas(self):
if self._flag_condensado:
num_colunas = self.impressora.feature.columns.condensed
elif self._flag_expandido:
num_colunas = self.impressora.feature.columns.expanded
else:
num_colunas = self.impressora.feature.columns.normal
return num_colunas
@property
def is_ambiente_testes(self):
"""Indica se o CF-e-SAT foi emitido em "ambiente de testes".
Considera como emitido em ambiente de testes quando:
* Elemento B10 ``tpAmb`` for ``2`` (ambiente de testes) ou
* Elemento B12 ``signAC`` possuir a assinatura de teste, indicada pela
constante :attr:`satcomum.constantes.ASSINATURA_AC_TESTE`.
.. note::
O CF-e de cancelamento não possui o elemento ``tpAmb``, conforme
descrito na ER SAT, item 4.2.3 **Layout do CF-e de cancelamento**.
:raises ValueError: Se o documento XML não for identificado como um
CF-e-SAT de venda ou cancelamento.
"""
signAC = self.root.findtext('./infCFe/ide/signAC')
if self.root.tag == constantes.ROOT_TAG_VENDA:
tpAmb = self.root.findtext('./infCFe/ide/tpAmb')
return (tpAmb == constantes.B10_TESTES
or signAC == constantes.ASSINATURA_AC_TESTE)
elif self.root.tag == constantes.ROOT_TAG_CANCELAMENTO:
# the cancellation CF-e-SAT does not have `tpAmb`
return signAC == constantes.ASSINATURA_AC_TESTE
raise ValueError(
(
'Documento nao parece ser um CF-e-SAT; root tag: {!r}'
).format(self.root.tag)
)
def imprimir(self):
self.cabecalho()
self.corpo()
self.rodape()
self.fim_documento()
def centro(self):
self.impressora.justify_center()
return self
def esquerda(self):
self.impressora.justify_left()
return self
def normal(self):
if self._flag_negrito:
self.negrito()
if self._flag_italico:
self.italico()
if self._flag_expandido:
self.expandido()
if self._flag_condensado:
self.condensado()
def negrito(self):
self._flag_negrito = not self._flag_negrito
self.impressora.set_emphasized(self._flag_negrito)
return self
def italico(self):
self._flag_italico = not self._flag_italico
return self
def expandido(self):
self._flag_expandido = not self._flag_expandido
self.impressora.set_expanded(self._flag_expandido)
return self
def condensado(self):
self._flag_condensado = not self._flag_condensado
self.impressora.set_condensed(self._flag_condensado)
return self
def bordas(
self,
texto_esquerda,
texto_direita,
colunas=None,
espacamento_minimo=4
):
largura = colunas or self._colunas
texto = _bordas(
texto_esquerda,
texto_direita,
largura=largura,
espacamento_minimo=espacamento_minimo
)
self.texto(texto)
return self
def quebrar(self, texto, colunas=None):
largura = colunas or self._colunas
# honor hard line breaks
linhas_fixas = texto.replace('\r', '').split('\n')
for linha_fixa in linhas_fixas:
linhas = textwrap.wrap(linha_fixa, largura)
for linha in linhas:
self.texto(linha)
return self
def avanco(self, linhas=1):
self.impressora.lf(lines=linhas)
return self
def texto(self, texto):
self.impressora.text(unidecode(texto))
return self
def separador(self, caracter='-', colunas=None):
largura = colunas or self._colunas
self.texto(caracter * largura)
return self
def indicacao_de_teste(self):
"""Imprime indicação de teste se o CF-e estiver em "condição de
teste". Caso contrário, não faz nada. A indicação de teste,
conforme o Manual de Orientação deverá ser impressa em itálico
(note a linha em branco acima e abaixo da inscrição "TESTE"):
.. sourcecode:: text
10 20 30 40 48 :
....:....|....:....|....:....|....:....|....:..! :
:
= T E S T E = :
:
<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< :
<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< :
<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< :
.. note::
The correct output uses lines of ``>`` signs, but that would make the
test framework interpret the block as a doctest. The ``<`` signs give
a good idea of the final result.
"""
if self.is_ambiente_testes:
self.italico()
self.avanco()
self.texto('= T E S T E =')
self.avanco()
for i in range(3):
self.texto('>' * self._colunas)
self.italico()
def numero_extrato(self):
"""
Returns the receipt number, element ``nCFe`` (B06). If the CF-e is in
test condition, returns ``000000``.
See :meth:`is_ambiente_testes` for further details.
"""
if self.is_ambiente_testes:
return '0' * 6
return self.root.findtext('./infCFe/ide/nCFe')
def chave_cfe_code128(self, chave):
"""Imprime o código de barras padrão Code128 para a chave do CF-e.
:param chave: Instância de :class:`satcomum.ersat.ChaveCFeSAT`.
"""
self.centro()
self.condensado()
self.texto(' '.join(chave.partes()))
self.condensado()
if self._config.code128.ignorar:
return
code128_params = dict(
barcode_height=self._config.code128.altura,
barcode_width=escpos.barcode.BARCODE_NORMAL_WIDTH,
barcode_hri=escpos.barcode.BARCODE_HRI_NONE
)
if self._config.code128.truncar:
tamanho = self._config.code128.truncar_tamanho
digitos = ''.join(chave.partes())[:tamanho]
self.centro()
self.impressora.code128(digitos, **code128_params)
elif self._config.code128.quebrar:
partes = _quebrar_chave(chave, self._config.code128.quebrar_partes)
for n_parte, parte in enumerate(partes, 1):
self.centro()
self.impressora.code128(parte, **code128_params)
if self._config.code128.pular_linha_entre_partes:
if n_parte < len(partes):
self.avanco()
else:
partes = chave.partes(1)
self.centro()
self.impressora.code128(partes[0], **code128_params)
def qrcode_mensagem(self):
mensagem = self._config.qrcode.mensagem.strip()
if not mensagem:
return
if self._config.qrcode.mensagem_modo_condensado:
self.condensado()
self.centro()
for linha in textwrap.wrap(mensagem, self._colunas):
self.texto(linha)
self.esquerda()
if self._config.qrcode.mensagem_modo_condensado:
self.condensado()
def fim_documento(self):
"""Encerra o documento, imprimindo o rodapé (se houver) e avançando ou
guilhotinando o documento, conforme as configurações.
"""
self.normal()
self.avanco()
self.separador()
conf_cupom = self._config.cupom
conf_rodape = self._config.rodape
if conf_rodape.esquerda or conf_rodape.direita:
self.condensado()
self.bordas(conf_rodape.esquerda, conf_rodape.direita)
self.condensado()
if self.impressora.feature.cutter and conf_cupom.cortar_documento:
if conf_cupom.avancar_linhas > 0:
self.avanco(conf_cupom.avancar_linhas)
self.impressora.cut(
partial=conf_cupom.cortar_parcialmente,
feed=conf_cupom.cortar_avanco
)
else:
# the printer has no cutter, or the document should not be cut
if conf_cupom.avancar_linhas > 0:
self.avanco(conf_cupom.avancar_linhas)
def cabecalho(self):
self.normal()
emit = self.root.find('./infCFe/emit')
enderEmit = emit.find('enderEmit')
nome_fantasia = emit.findtext('xFant')
razao_social = emit.findtext('xNome')
logradouro = enderEmit.findtext('xLgr')
numero = enderEmit.findtext('nro')
complemento = enderEmit.findtext('xCpl')
bairro = enderEmit.findtext('xBairro')
if numero: # the street number is not mandatory
logradouro = u'{}, {}'.format(logradouro, numero)
if complemento: # the address complement is not mandatory
# try to keep the complement on the same line as the street
# name/number if it fits; otherwise the complement will take a
# line of its own...
if len(logradouro) + len(complemento) < self._colunas:
# ok, keep the complement on the same line as the street name
logradouro = u'{}, {}'.format(logradouro, complemento)
complemento = '' # drop the line that would otherwise hold the xCpl
cidade = u'{}/{} CEP: {}'.format(
enderEmit.findtext('xMun'),
br.uf_pelo_codigo(int(self.root.findtext('./infCFe/ide/cUF'))),
br.as_cep(enderEmit.findtext('CEP')))
partes_endereco = [logradouro, complemento, bairro, cidade]
endereco = u'\r\n'.join([e for e in partes_endereco if e])
cnpj = 'CNPJ: {}'.format(br.as_cnpj(emit.findtext('CNPJ')))
im = 'IM: {}'.format(emit.findtext('IM') or '')
ie = 'IE: {}'.format(emit.findtext('IE'))
self.centro()
self.negrito()
if nome_fantasia:
self.quebrar(nome_fantasia)
self.quebrar(razao_social)
self.negrito()
self.quebrar(endereco)
self.avanco()
self.esquerda()
self.texto(cnpj)
self.texto(ie)
if im:
self.texto(im)
self.separador()
def rodape(self):
raise NotImplementedError()
def corpo(self):
raise NotImplementedError()
def _bordas(
esquerda,
direita,
largura=48,
espacamento_minimo=4,
favorecer_direita=True
):
"""Prepara duas strings para serem impressas alinhadas às bordas opostas da
mídia, os textos da esquerda (borda esquerda) e da direita (borda direita),
respeitando uma largura e espaçamento mínimo determinados.
.. sourcecode:: python
>>> _bordas('a', 'b')
'a b'
>>> esquerda = 'a' * 30
>>> direita = 'b' * 30
>>> _bordas(esquerda, direita)
'aaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbb'
>>> esquerda = 'Gazeta publica hoje breve nota de faxina na quermesse'
>>> direita = 'Um pequeno jabuti xereta viu dez cegonhas felizes'
>>> _bordas(esquerda, direita, espacamento_minimo=1)
'Gazeta publica hoje bre viu dez cegonhas felizes'
:param str esquerda: The text to be shown on the left. If the text does not
fit (with respect to the width and minimum spacing) only the leftmost
portion of that text will be shown, and the characters at the end of
the text will be cut off (not printed).
:param str direita: The text to be shown on the right. If the right-hand
text does not fit (with respect to the width and minimum spacing) only
the rightmost portion of that text will be shown, and the characters at
the beginning of the text will be cut off (not printed).
:param int largura: Width in characters to consider when computing the gap
between the left and right texts. The default is 48, since that is the
most common width among roll thermal printers when printing with the
normal font.
:param int espacamento_minimo: Optional. Sets the minimum number of spaces
to be left between the left and right texts. The default is four
spaces.
:param bool favorecer_direita: Optional. Determines whether the right-hand
text should be favored with extra room when there is a difference
(leftover space) between the left and right texts relative to the given
minimum spacing. The default is to favor the right-hand text, since it
is usually the relevant piece of data, such as a value or percentage.
:returns: A string containing the left and right texts fitted into the
given width, honoring a minimum spacing between them. If necessary the
texts will be truncated to respect the width (the left text is
truncated at its end and the right text at its beginning).
:rtype: str
"""
espacamento = largura - (len(esquerda) + len(direita))
if espacamento < espacamento_minimo:
espacamento = espacamento_minimo
cpmax = int((largura - espacamento) // 2)
cpmax_esq, cpmax_dir = cpmax, cpmax
diferenca = largura - (espacamento + cpmax * 2)
if diferenca > 0:
if favorecer_direita:
cpmax_dir += diferenca
else:
cpmax_esq += diferenca
esquerda = esquerda[:cpmax_esq]
direita = direita[-cpmax_dir:]
return '%s%s%s' % (esquerda, ' ' * espacamento, direita)
def _quebrar_chave(chave, quebrar_partes):
# chave: satcomum.ersat.ChaveCFeSAT
# quebrar_partes: list[int]
# The <quebrar_partes> list must contain only even numbers
digitos = ''.join(chave.partes())
partes = []
a = 0
for n in quebrar_partes:
partes.append(digitos[a:a+n])
a += n
return partes
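# Usage sketch for _quebrar_chave (hypothetical values): a real call receives a
# satcomum.ersat.ChaveCFeSAT instance whose partes() joins into the 44 access
# key digits. With quebrar_partes=[22, 22] the digits are sliced at offsets
# 0:22 and 22:44:
#
#     partes = _quebrar_chave(chave, [22, 22])
#     # len(partes) == 2 and len(partes[0]) == len(partes[1]) == 22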
| {
"content_hash": "e817addca8d62d867b6ca60a0ed4d36a",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 79,
"avg_line_length": 34.73809523809524,
"alnum_prop": 0.5846470185058259,
"repo_name": "base4sistemas/satextrato",
"id": "6830e447e76445bacd4eb41d92baba41afeefd81",
"size": "16809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "satextrato/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1126"
},
{
"name": "Python",
"bytes": "76099"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import logging
import scipy
import scipy.linalg
import scipy.sparse
import numpy as np
from theano import tensor
from theano.tensor.signal.downsample import max_pool_2d, DownsampleFactorMax
from blocks.extensions import SimpleExtension
from blocks.extensions.monitoring import (DataStreamMonitoring,
MonitoringExtension)
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.monitoring.evaluators import DatasetEvaluator
from blocks.roles import AuxiliaryRole
logger = logging.getLogger('main.nn')
class BnParamRole(AuxiliaryRole):
pass
# Batch normalization parameters that have to be replaced when testing
BNPARAM = BnParamRole()
class ZCA(object):
def __init__(self, n_components=None, data=None, filter_bias=0.1):
self.filter_bias = np.float32(filter_bias)
self.P = None
self.P_inv = None
self.n_components = 0
self.is_fit = False
if n_components and data is not None:
self.fit(n_components, data)
def fit(self, n_components, data):
if len(data.shape) == 2:
self.reshape = None
else:
assert n_components == np.product(data.shape[1:]), \
'ZCA whitening components should be %d for convolutional data'\
% np.product(data.shape[1:])
self.reshape = data.shape[1:]
data = self._flatten_data(data)
assert len(data.shape) == 2
n, m = data.shape
self.mean = np.mean(data, axis=0)
bias = self.filter_bias * scipy.sparse.identity(m, 'float32')
cov = np.cov(data, rowvar=0, bias=1) + bias
eigs, eigv = scipy.linalg.eigh(cov)
assert not np.isnan(eigs).any()
assert not np.isnan(eigv).any()
assert eigs.min() > 0
if self.n_components:
eigs = eigs[-self.n_components:]
eigv = eigv[:, -self.n_components:]
sqrt_eigs = np.sqrt(eigs)
self.P = np.dot(eigv * (1.0 / sqrt_eigs), eigv.T)
assert not np.isnan(self.P).any()
self.P_inv = np.dot(eigv * sqrt_eigs, eigv.T)
self.P = np.float32(self.P)
self.P_inv = np.float32(self.P_inv)
self.is_fit = True
def apply(self, data, remove_mean=True):
data = self._flatten_data(data)
d = data - self.mean if remove_mean else data
return self._reshape_data(np.dot(d, self.P))
def inv(self, data, add_mean=True):
d = np.dot(self._flatten_data(data), self.P_inv)
d += self.mean if add_mean else 0.
return self._reshape_data(d)
def _flatten_data(self, data):
if self.reshape is None:
return data
assert data.shape[1:] == self.reshape
return data.reshape(data.shape[0], np.product(data.shape[1:]))
def _reshape_data(self, data):
assert len(data.shape) == 2
if self.reshape is None:
return data
return np.reshape(data, (data.shape[0],) + self.reshape)
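# Usage sketch for the ZCA class above (random data with assumed shapes; not
# part of the original module):
#
#     rng = np.random.RandomState(0)
#     x = rng.rand(100, 16).astype('float32')
#     zca = ZCA()
#     zca.fit(16, x)             # estimates the mean and whitening matrix P
#     x_white = zca.apply(x)     # whitened data, i.e. dot(x - mean, P)
#     x_back = zca.inv(x_white)  # approximately recovers x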
class ContrastNorm(object):
def __init__(self, scale=55, epsilon=1e-8):
# contrast normalization
self.scale = np.float32(scale)
self.epsilon = np.float32(epsilon)
def apply(self, data, copy=False):
# keep a copy of the data if requested
if copy:
data = np.copy(data)
data_shape = data.shape
print(data_shape)
if len(data.shape) > 2:
data = data.reshape(data.shape[0], np.product(data.shape[1:]))
assert len(data.shape) == 2, 'Contrast norm on flattened data'
data -= data.mean(axis=1)[:, np.newaxis]
norms = np.sqrt(np.sum(data ** 2, axis=1)) / self.scale
norms[norms < self.epsilon] = np.float32(1.)
data /= norms[:, np.newaxis]
if data_shape != data.shape:
data = data.reshape(data_shape)
return data
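# Usage sketch for ContrastNorm (assumed shapes): each flattened row is
# mean-centered and rescaled so that its L2 norm is roughly `scale`:
#
#     cn = ContrastNorm(scale=55)
#     batch = np.random.rand(8, 3, 8, 8).astype('float32')
#     normed = cn.apply(batch, copy=True)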
class TestMonitoring(object):
def _get_bn_params(self, output_vars):
# Pick out the nodes with batch normalization vars
cg = ComputationGraph(output_vars)
var_filter = VariableFilter(roles=[BNPARAM])
bn_ps = var_filter(cg.variables)
if len(bn_ps) == 0:
logger.warn('No batch normalization parameters found - is' +
' batch normalization turned off?')
self._bn = False
self._counter = None
self._counter_max = None
bn_share = []
output_vars_replaced = output_vars
else:
self._bn = True
assert len(set([p.name for p in bn_ps])) == len(bn_ps), \
'Some batch norm params have the same name'
logger.info('Batch norm parameters: %s' % ', '.join([p.name for p in bn_ps]))
# Filter out the shared variables from the model updates
def filter_share(par):
lst = [up for up in cg.updates if up.name == 'shared_%s' % par.name]
assert len(lst) == 1
return lst[0]
bn_share = map(filter_share, bn_ps)
# Replace the BN coefficients in the test data model - Replace the
# theano variables in the test graph with the shareds
output_vars_replaced = cg.replace(zip(bn_ps, bn_share)).outputs
# Pick out the counter
self._counter = self._param_from_updates(cg.updates, 'counter')
self._counter_max = self._param_from_updates(cg.updates, 'counter_max')
return bn_ps, bn_share, output_vars_replaced
def _param_from_updates(self, updates, p_name):
var_filter = VariableFilter(roles=[BNPARAM])
bn_ps = var_filter(updates.keys())
p = [p for p in bn_ps if p.name == p_name]
assert len(p) == 1, 'No %s of more than one %s' % (p_name, p_name)
return p[0]
def reset_counter(self):
if self._bn:
self._counter.set_value(np.float32(1))
def replicate_vars(self, output_vars):
# Problem in Blocks with multiple monitors monitoring the
# same value in a graph. Therefore, they are all "replicated" to a new
# Theano variable
if isinstance(output_vars, (list, tuple)):
return map(self.replicate_vars, output_vars)
assert not hasattr(output_vars.tag, 'aggregation_scheme'), \
'The variable %s already has an aggregator ' % output_vars.name + \
'assigned to it - are you using a datasetmonitor with the same' + \
' variable as output? This might cause trouble in Blocks'
new_var = 1 * output_vars
new_var.name = output_vars.name
return new_var
class ApproxTestMonitoring(DataStreamMonitoring, TestMonitoring):
def __init__(self, output_vars, *args, **kwargs):
output_vars = self.replicate_vars(output_vars)
_, _, replaced_vars = self._get_bn_params(output_vars)
super(ApproxTestMonitoring, self).__init__(replaced_vars, *args,
**kwargs)
def do(self, which_callback, *args, **kwargs):
assert not which_callback == "after_batch", "Do not monitor each mb"
self.reset_counter()
super(ApproxTestMonitoring, self).do(which_callback, *args, **kwargs)
class FinalTestMonitoring(SimpleExtension, MonitoringExtension, TestMonitoring):
"""Monitors validation and test set data with batch norm
Calculates the training set statistics for batch normalization and adds
them to the model before calculating the validation and test set values.
This is done in two steps: First the training set is iterated and the
statistics are saved in shared variables, then the model iterates through
the test/validation set using the saved shared variables.
When the training set is iterated, it is done for the full set, layer by
layer so that the statistics are correct. This is expensive for very deep
models, in which case some approximation could be in order
"""
def __init__(self, output_vars, train_data_stream, test_data_stream,
**kwargs):
output_vars = self.replicate_vars(output_vars)
super(FinalTestMonitoring, self).__init__(**kwargs)
self.trn_stream = train_data_stream
self.tst_stream = test_data_stream
bn_ps, bn_share, output_vars_replaced = self._get_bn_params(output_vars)
if self._bn:
updates = self._get_updates(bn_ps, bn_share)
trn_evaluator = DatasetEvaluator(bn_ps, updates=updates)
else:
trn_evaluator = None
self._trn_evaluator = trn_evaluator
self._tst_evaluator = DatasetEvaluator(output_vars_replaced)
def _get_updates(self, bn_ps, bn_share):
cg = ComputationGraph(bn_ps)
# Only store updates that relate to params or the counter
updates = OrderedDict([(up, cg.updates[up]) for up in
cg.updates if up.name == 'counter' or
up in bn_share])
assert self._counter == self._param_from_updates(cg.updates, 'counter')
assert self._counter_max == self._param_from_updates(cg.updates,
'counter_max')
assert len(updates) == len(bn_ps) + 1, \
'Counter or var missing from update'
return updates
def do(self, which_callback, *args):
"""Write the values of monitored variables to the log."""
assert not which_callback == "after_batch", "Do not monitor each mb"
# Run on train data and get the statistics
if self._bn:
self._counter_max.set_value(np.float32(np.inf))
self.reset_counter()
self._trn_evaluator.evaluate(self.trn_stream)
self.reset_counter()
value_dict = self._tst_evaluator.evaluate(self.tst_stream)
self.add_records(self.main_loop.log, value_dict.items())
class LRDecay(SimpleExtension):
def __init__(self, lr, decay_first, decay_last, **kwargs):
super(LRDecay, self).__init__(**kwargs)
self.iter = 0
self.decay_first = decay_first
self.decay_last = decay_last
self.lr = lr
self.lr_init = lr.get_value()
def do(self, which_callback, *args):
self.iter += 1
if self.iter > self.decay_first:
ratio = 1.0 * (self.decay_last - self.iter)
ratio = np.maximum(0, ratio / (self.decay_last - self.decay_first))
self.lr.set_value(np.float32(ratio * self.lr_init))
logger.info("Iter %d, lr %f" % (self.iter, self.lr.get_value()))
def global_meanpool_2d(x, num_filters):
mean = tensor.mean(x.flatten(3), axis=2)
mean = mean.dimshuffle(0, 1, 'x', 'x')
return mean, (num_filters, 1, 1)
def pool_2d(x, mode="average", ws=(2, 2), stride=(2, 2)):
import theano.sandbox.cuda as cuda
assert cuda.dnn.dnn_available()
return cuda.dnn.dnn_pool(x, ws=ws, stride=stride, mode=mode)
def maxpool_2d(z, in_dim, poolsize, poolstride):
z = max_pool_2d(z, ds=poolsize, st=poolstride)
output_size = tuple(DownsampleFactorMax.out_shape(in_dim, poolsize,
st=poolstride))
return z, output_size
| {
"content_hash": "622a753db75a5574f95ea6675fa4e6bd",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 89,
"avg_line_length": 38.166666666666664,
"alnum_prop": 0.6016397825505748,
"repo_name": "ryukinkou/ladder_customized",
"id": "b1430c418985c3290e9f8658a95e3f8b832968d2",
"size": "11268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ladder_theano_customized/nn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "241895"
}
],
"symlink_target": ""
} |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class JobsOperations(object):
"""JobsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databox.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
skip_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.JobResourceList"]
"""Lists all the jobs available under the subscription.
:param skip_token: $skipToken is supported on Get list of jobs, which provides the next page in
the list of jobs.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either JobResourceList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databox.models.JobResourceList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResourceList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('JobResourceList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ApiError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataBox/jobs'} # type: ignore
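# Usage sketch (not generated code): this operation group is normally reached
# through the service client. The client class and credential names below are
# assumptions based on the azure-mgmt-databox and azure-identity packages:
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.databox import DataBoxManagementClient
#
#     client = DataBoxManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     for job in client.jobs.list():
#         print(job.name)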
def list_by_resource_group(
self,
resource_group_name, # type: str
skip_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.JobResourceList"]
"""Lists all the jobs available under the given resource group.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param skip_token: $skipToken is supported on Get list of jobs, which provides the next page in
the list of jobs.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either JobResourceList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databox.models.JobResourceList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResourceList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('JobResourceList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ApiError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBox/jobs'} # type: ignore
def get(
self,
resource_group_name, # type: str
job_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.JobResource"
"""Gets information about the specified job.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param job_name: The name of the job Resource within the specified resource group. job names
must be between 3 and 24 characters in length and use any alphanumeric and underscore only.
:type job_name: str
:param expand: $expand is supported on details parameter for job, which provides details on the
job stages.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobResource, or the result of cls(response)
:rtype: ~azure.mgmt.databox.models.JobResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBox/jobs/{jobName}'} # type: ignore
def _create_initial(
self,
resource_group_name, # type: str
job_name, # type: str
job_resource, # type: "_models.JobResource"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.JobResource"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.JobResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(job_resource, 'JobResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('JobResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBox/jobs/{jobName}'} # type: ignore
def begin_create(
self,
resource_group_name, # type: str
job_name, # type: str
job_resource, # type: "_models.JobResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.JobResource"]
"""Creates a new job with the specified parameters. Existing job cannot be updated with this API
and should instead be updated with the Update job API.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param job_name: The name of the job Resource within the specified resource group. job names
must be between 3 and 24 characters in length and use any alphanumeric and underscore only.
:type job_name: str
:param job_resource: Job details from request body.
:type job_resource: ~azure.mgmt.databox.models.JobResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either JobResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.databox.models.JobResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
job_name=job_name,
job_resource=job_resource,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('JobResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBox/jobs/{jobName}'} # type: ignore
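# Sketch of driving the long-running create operation above (resource names and
# the job body are placeholders; see ~azure.mgmt.databox.models.JobResource for
# the required fields):
#
#     poller = client.jobs.begin_create(
#         "<resource-group>", "<job-name>", job_resource=job_resource_body)
#     job = poller.result()  # blocks until the LRO finishes, returns JobResource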
def _delete_initial(
self,
resource_group_name, # type: str
job_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBox/jobs/{jobName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
job_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a job.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param job_name: The name of the job Resource within the specified resource group. job names
must be between 3 and 24 characters in length and use any alphanumeric and underscore only.
:type job_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
job_name=job_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBox/jobs/{jobName}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
job_name, # type: str
job_resource_update_parameter, # type: "_models.JobResourceUpdateParameter"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Optional["_models.JobResource"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.JobResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(job_resource_update_parameter, 'JobResourceUpdateParameter')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('JobResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBox/jobs/{jobName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
job_name, # type: str
job_resource_update_parameter, # type: "_models.JobResourceUpdateParameter"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.JobResource"]
"""Updates the properties of an existing job.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param job_name: The name of the job Resource within the specified resource group. job names
must be between 3 and 24 characters in length and use any alphanumeric and underscore only.
:type job_name: str
:param job_resource_update_parameter: Job update parameters from request body.
:type job_resource_update_parameter: ~azure.mgmt.databox.models.JobResourceUpdateParameter
:param if_match: Defines the If-Match condition. The patch will be performed only if the ETag
of the job on the server matches this value.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either JobResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.databox.models.JobResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
job_name=job_name,
job_resource_update_parameter=job_resource_update_parameter,
if_match=if_match,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('JobResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBox/jobs/{jobName}'} # type: ignore
def book_shipment_pick_up(
self,
resource_group_name, # type: str
job_name, # type: str
shipment_pick_up_request, # type: "_models.ShipmentPickUpRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.ShipmentPickUpResponse"
"""Book shipment pick up.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
        :param job_name: The name of the job Resource within the specified resource group. Job names
         must be between 3 and 24 characters long and use only alphanumeric characters and underscores.
:type job_name: str
:param shipment_pick_up_request: Details of shipment pick up request.
:type shipment_pick_up_request: ~azure.mgmt.databox.models.ShipmentPickUpRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ShipmentPickUpResponse, or the result of cls(response)
:rtype: ~azure.mgmt.databox.models.ShipmentPickUpResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ShipmentPickUpResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.book_shipment_pick_up.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(shipment_pick_up_request, 'ShipmentPickUpRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ShipmentPickUpResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
book_shipment_pick_up.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBox/jobs/{jobName}/bookShipmentPickUp'} # type: ignore
def cancel(
self,
resource_group_name, # type: str
job_name, # type: str
cancellation_reason, # type: "_models.CancellationReason"
**kwargs # type: Any
):
# type: (...) -> None
"""CancelJob.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
        :param job_name: The name of the job Resource within the specified resource group. Job names
         must be between 3 and 24 characters long and use only alphanumeric characters and underscores.
:type job_name: str
:param cancellation_reason: Reason for cancellation.
:type cancellation_reason: ~azure.mgmt.databox.models.CancellationReason
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.cancel.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(cancellation_reason, 'CancellationReason')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBox/jobs/{jobName}/cancel'} # type: ignore
def list_credentials(
self,
resource_group_name, # type: str
job_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.UnencryptedCredentialsList"]
"""This method gets the unencrypted secrets related to the job.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
        :param job_name: The name of the job Resource within the specified resource group. Job names
         must be between 3 and 24 characters long and use only alphanumeric characters and underscores.
:type job_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UnencryptedCredentialsList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databox.models.UnencryptedCredentialsList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UnencryptedCredentialsList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_credentials.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\.]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('UnencryptedCredentialsList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ApiError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBox/jobs/{jobName}/listCredentials'} # type: ignore
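# ----------------------------------------------------------------------------
# Editor's usage sketch (not part of the generated module). It shows how the
# two operation shapes above are typically consumed from client code: the
# PATCH long-running operation through LROPoller, and the POST listCredentials
# pageable through ItemPaged. The client object, resource group and job names
# below are assumptions for illustration only.
def _example_usage(client, update_parameter):
    # `client` is assumed to be an already-constructed DataBoxManagementClient.
    poller = client.jobs.begin_update(
        resource_group_name="example-rg",                # hypothetical name
        job_name="example-job",                          # hypothetical name
        job_resource_update_parameter=update_parameter,
    )
    job = poller.result()  # blocks until the LRO finishes; returns JobResource
    # Iterating the ItemPaged yields individual UnencryptedCredentials entries.
    for credentials in client.jobs.list_credentials("example-rg", "example-job"):
        print(credentials)
    return job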
| {
"content_hash": "724269bc42073268c8aaa89ecc6458a6",
"timestamp": "",
"source": "github",
"line_count": 853,
"max_line_length": 192,
"avg_line_length": 49.772567409144195,
"alnum_prop": 0.6319483700772565,
"repo_name": "Azure/azure-sdk-for-python",
"id": "5f8d902f7a40375c86fb7bf558bbe5092ff52ac7",
"size": "42923",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/databox/azure-mgmt-databox/azure/mgmt/databox/v2020_11_01/operations/_jobs_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
math = vtk.vtkMath()
math.RandomSeed(22)
sphere = vtk.vtkSphereSource()
sphere.SetPhiResolution(32)
sphere.SetThetaResolution(32)
extract = vtk.vtkExtractPolyDataPiece()
extract.SetInputConnection(sphere.GetOutputPort())
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(extract.GetOutputPort())
ps = vtk.vtkPieceScalars()
ps.SetInputConnection(normals.GetOutputPort())
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(ps.GetOutputPort())
mapper.SetNumberOfPieces(2)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
sphere2 = vtk.vtkSphereSource()
sphere2.SetPhiResolution(32)
sphere2.SetThetaResolution(32)
extract2 = vtk.vtkExtractPolyDataPiece()
extract2.SetInputConnection(sphere2.GetOutputPort())
mapper2 = vtk.vtkPolyDataMapper()
mapper2.SetInputConnection(extract2.GetOutputPort())
mapper2.SetNumberOfPieces(2)
mapper2.SetPiece(1)
mapper2.SetScalarRange(0, 4)
mapper2.SetScalarModeToUseCellFieldData()
mapper2.SetColorModeToMapScalars()
mapper2.ColorByArrayComponent("vtkGhostLevels", 0)
mapper2.SetGhostLevel(4)
# check the pipeline size
extract2.UpdateInformation()
psize = vtk.vtkPipelineSize()
if (psize.GetEstimatedSize(extract2, 0, 0) > 100):
print ("ERROR: Pipeline Size increased")
pass
if (psize.GetNumberOfSubPieces(10, mapper2) != 2):
print ("ERROR: Number of sub pieces changed")
pass
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
actor2.SetPosition(1.5, 0, 0)
sphere3 = vtk.vtkSphereSource()
sphere3.SetPhiResolution(32)
sphere3.SetThetaResolution(32)
extract3 = vtk.vtkExtractPolyDataPiece()
extract3.SetInputConnection(sphere3.GetOutputPort())
ps3 = vtk.vtkPieceScalars()
ps3.SetInputConnection(extract3.GetOutputPort())
mapper3 = vtk.vtkPolyDataMapper()
mapper3.SetInputConnection(ps3.GetOutputPort())
mapper3.SetNumberOfSubPieces(8)
mapper3.SetScalarRange(0, 8)
actor3 = vtk.vtkActor()
actor3.SetMapper(mapper3)
actor3.SetPosition(0, -1.5, 0)
sphere4 = vtk.vtkSphereSource()
sphere4.SetPhiResolution(32)
sphere4.SetThetaResolution(32)
extract4 = vtk.vtkExtractPolyDataPiece()
extract4.SetInputConnection(sphere4.GetOutputPort())
ps4 = vtk.vtkPieceScalars()
ps4.RandomModeOn()
ps4.SetScalarModeToCellData()
ps4.SetInputConnection(extract4.GetOutputPort())
mapper4 = vtk.vtkPolyDataMapper()
mapper4.SetInputConnection(ps4.GetOutputPort())
mapper4.SetNumberOfSubPieces(8)
mapper4.SetScalarRange(0, 8)
actor4 = vtk.vtkActor()
actor4.SetMapper(mapper4)
actor4.SetPosition(1.5, -1.5, 0)
ren = vtk.vtkRenderer()
ren.AddActor(actor)
ren.AddActor(actor2)
ren.AddActor(actor3)
ren.AddActor(actor4)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
#iren.Start()
| {
"content_hash": "8c0ad42934fb44cae179776fdad5ae71",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 52,
"avg_line_length": 26.410714285714285,
"alnum_prop": 0.7711291413116971,
"repo_name": "timkrentz/SunTracker",
"id": "1d38c5ed05c8b6e1818c9dd6f274e032e6db7da6",
"size": "2981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IMU/VTK-6.2.0/Parallel/Core/Testing/Python/TestPolyDataPieces.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "185699"
},
{
"name": "Assembly",
"bytes": "38582"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "48362836"
},
{
"name": "C++",
"bytes": "70478135"
},
{
"name": "CMake",
"bytes": "1755036"
},
{
"name": "CSS",
"bytes": "147795"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "190912"
},
{
"name": "Groff",
"bytes": "66799"
},
{
"name": "HTML",
"bytes": "295090"
},
{
"name": "Java",
"bytes": "203238"
},
{
"name": "JavaScript",
"bytes": "1146098"
},
{
"name": "Lex",
"bytes": "47145"
},
{
"name": "Makefile",
"bytes": "5461"
},
{
"name": "Objective-C",
"bytes": "74727"
},
{
"name": "Objective-C++",
"bytes": "265817"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "178176"
},
{
"name": "Prolog",
"bytes": "4556"
},
{
"name": "Python",
"bytes": "16497901"
},
{
"name": "Shell",
"bytes": "48835"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Tcl",
"bytes": "1955829"
},
{
"name": "Yacc",
"bytes": "180651"
}
],
"symlink_target": ""
} |
"""
MoinMoin - MoinMoin.macro.FootNote Tests
@copyright: 2008 MoinMoin:ReimarBauer
@license: GNU GPL, see COPYING for details.
"""
import os
from MoinMoin import macro
from MoinMoin.macro import FootNote
from MoinMoin.Page import Page
from MoinMoin.PageEditor import PageEditor
from MoinMoin._tests import become_trusted, create_page, make_macro, nuke_page
class TestFootNote:
""" testing macro Action calling action raw """
pagename = u'AutoCreatedMoinMoinTemporaryTestPageForFootNote'
def setup_class(self):
become_trusted(self.request)
self.page = create_page(self.request, self.pagename, u"Foo!")
def teardown_class(self):
nuke_page(self.request, self.pagename)
def test_enumbering(self):
""" module_tested: enumbering of Footnotes"""
m = make_macro(self.request, self.page)
text = 'a'
FootNote.execute(m, text)
text = 'b'
FootNote.execute(m, text)
result = FootNote.emit_footnotes(m.request, m.request.formatter)
assert result.endswith('2</a>)</li></ol></div>')
coverage_modules = ['MoinMoin.macro.FootNote']
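# Editor's note (sketch, not part of the original tests): in wiki markup the
# macro exercised above is written inline and the collected notes are emitted
# at the end of the page. The page text below is a hypothetical example, not
# taken from the test suite.
def _example_footnote_markup():
    return u"Some claim.<<FootNote(Source of the claim.)>>"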
| {
"content_hash": "e1f73a9e074f6f1653fa7379f869fe45",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 32.69444444444444,
"alnum_prop": 0.6644010195412065,
"repo_name": "Glottotopia/aagd",
"id": "c901fe981993c230d9a0e4084c9ac1fb6edcd9c9",
"size": "1202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moin/local/moin/MoinMoin/macro/_tests/test_FootNote.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "152885"
},
{
"name": "CSS",
"bytes": "454208"
},
{
"name": "ColdFusion",
"bytes": "438820"
},
{
"name": "HTML",
"bytes": "1998354"
},
{
"name": "Java",
"bytes": "510468"
},
{
"name": "JavaScript",
"bytes": "6505329"
},
{
"name": "Lasso",
"bytes": "72399"
},
{
"name": "Makefile",
"bytes": "10216"
},
{
"name": "PHP",
"bytes": "259528"
},
{
"name": "Perl",
"bytes": "137186"
},
{
"name": "Python",
"bytes": "13713475"
},
{
"name": "Shell",
"bytes": "346"
},
{
"name": "XSLT",
"bytes": "15970"
}
],
"symlink_target": ""
} |
"""
Django settings for cmdb project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*f)1h7v-ed7bajus^ykj0fe5n*#ld57m@4ca=a3!%v%3@o_7p#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
#'bootstrap_admin',
#'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'device_manage',
'idcroom_manage',
'operation',
#'bootstrap3',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cmdb.urls'
WSGI_APPLICATION = 'cmdb.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'cmdb',
'USER': 'cmdb',
'PASSWORD': 'cmdb',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'zh-cn'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
try:
from local_settings import *
except ImportError:
pass
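# --- Editor's sketch (not part of the original settings): deployments are
# expected to override the values above via a local_settings.py module,
# imported by the try/except block directly above. A hypothetical
# local_settings.py might look like the string below; it is kept as a plain
# constant so that importing this settings module is unaffected.
EXAMPLE_LOCAL_SETTINGS = """
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['cmdb.example.com']
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']  # keep the real key out of source control
"""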
| {
"content_hash": "78cc59df1c4ef15795ae489191913714",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 71,
"avg_line_length": 23.31313131313131,
"alnum_prop": 0.699740034662045,
"repo_name": "hilarry/cmdb",
"id": "c6a0c39550100d1b7cb21c43ee4a06e01215fc5d",
"size": "2308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmdb/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18469"
}
],
"symlink_target": ""
} |
"""Collection of tensorflow extention methods
Keep all the tensorflow extention methods in one place
"""
import tensorflow as tf
def conv(inputs, num_filters=32, name='conv'):
"""Convolutional layer
# Args:
inputs: input layer
num_filters: number of kernels/filters to use
name: tf name_scope name
"""
with tf.name_scope(name):
return tf.layers.conv2d(
inputs=inputs,
filters=num_filters,
kernel_size=[3, 3],
padding="same",
activation=tf.nn.relu)
def maxpool(inputs, name='maxpool'):
"""Max pool layer
# Args:
inputs: input layer
name: tf name_scope name
"""
with tf.name_scope(name):
return tf.nn.max_pool(inputs, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
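# --- Editor's usage sketch (not part of the original module): chain the two
# helpers above on a dummy image batch. Assumes the TF 1.x API already used
# above; the placeholder name and shapes are illustrative only.
if __name__ == '__main__':
    images = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name='images')
    net = conv(images, num_filters=32, name='conv1')  # same padding -> [None, 28, 28, 32]
    net = maxpool(net, name='pool1')                  # 2x2/2 VALID  -> [None, 14, 14, 32]
    print(net.get_shape().as_list())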
| {
"content_hash": "eba55af1c6501165e9b37ae86829a5f1",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 96,
"avg_line_length": 23.942857142857143,
"alnum_prop": 0.5871121718377088,
"repo_name": "chasingbob/transfer-learning",
"id": "d500569104d629453035e70a48b324cad3d2b444",
"size": "838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tf_extensions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23997"
}
],
"symlink_target": ""
} |
import scrapy
class MoviespiderItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
class Cili006Item(scrapy.Item):
magnet = scrapy.Field()
ed2k = scrapy.Field()
topic_id = scrapy.Field()
    filename = scrapy.Field()
| {
"content_hash": "68f5d55abcf30b17c03c73dae45d121e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 48,
"avg_line_length": 21.846153846153847,
"alnum_prop": 0.6690140845070423,
"repo_name": "zhs007/movieSpider",
"id": "deef2da049b0e9a87d1e94de5667653a43a8e955",
"size": "436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moviespider/moviespider/items.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1668"
},
{
"name": "HTML",
"bytes": "5333"
},
{
"name": "JavaScript",
"bytes": "76650"
},
{
"name": "Python",
"bytes": "47739"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
#
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script creates a "jumbo" file which merges all incoming files
for compiling.
"""
from __future__ import print_function
import argparse
import cStringIO
import os
def write_jumbo_files(inputs, outputs, written_input_set, written_output_set):
output_count = len(outputs)
input_count = len(inputs)
written_inputs = 0
for output_index, output_file in enumerate(outputs):
written_output_set.add(output_file)
if os.path.isfile(output_file):
with open(output_file, "r") as current:
current_jumbo_file = current.read()
else:
current_jumbo_file = None
out = cStringIO.StringIO()
out.write("/* This is a Jumbo file. Don't edit. */\n\n")
out.write("/* Generated with merge_for_jumbo.py. */\n\n")
input_limit = (output_index + 1) * input_count / output_count
while written_inputs < input_limit:
filename = inputs[written_inputs]
written_inputs += 1
out.write("#include \"%s\"\n" % filename)
written_input_set.add(filename)
new_jumbo_file = out.getvalue()
out.close()
if new_jumbo_file != current_jumbo_file:
with open(output_file, "w") as out:
out.write(new_jumbo_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--outputs", nargs="+", required=True,
help='List of output files to split input into')
parser.add_argument("--file-list", required=True)
parser.add_argument("--verbose", action="store_true")
args = parser.parse_args()
lines = []
# If written with gn |write_file| each file is on its own line.
with open(args.file_list) as file_list_file:
lines = [line.strip() for line in file_list_file if line.strip()]
# If written with gn |response_file_contents| the files are space separated.
all_inputs = []
for line in lines:
all_inputs.extend(line.split())
written_output_set = set() # Just for double checking
written_input_set = set() # Just for double checking
for language_ext in (".cc", ".c", ".mm"):
if language_ext == ".cc":
ext_pattern = (".cc", ".cpp")
else:
ext_pattern = tuple([language_ext])
outputs = [x for x in args.outputs if x.endswith(ext_pattern)]
inputs = [x for x in all_inputs if x.endswith(ext_pattern)]
if not outputs:
assert not inputs
continue
write_jumbo_files(inputs, outputs, written_input_set, written_output_set)
header_files = set([x for x in all_inputs if x.endswith(".h")])
assert set(args.outputs) == written_output_set, "Did not fill all outputs"
files_not_included = set(all_inputs) - written_input_set - header_files
assert not files_not_included, ("Jumbo build left out files: %s" %
files_not_included)
if args.verbose:
print("Generated %s (%d files) based on %s" % (
str(args.outputs), len(written_input_set), args.file_list))
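# --- Editor's usage sketch (not part of the original script): drive
# write_jumbo_files() directly on a tiny, hypothetical file list to show how
# the inputs are split across the generated #include wrappers.
def _example_write_jumbo():
    inputs = ["a.cc", "b.cc", "c.cc", "d.cc"]      # hypothetical source names
    outputs = ["jumbo_1.cc", "jumbo_2.cc"]         # files written in the cwd
    seen_inputs, seen_outputs = set(), set()
    write_jumbo_files(inputs, outputs, seen_inputs, seen_outputs)
    # jumbo_1.cc now holds '#include "a.cc"' and '#include "b.cc"';
    # jumbo_2.cc holds the remaining two includes.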
if __name__ == "__main__":
main()
| {
"content_hash": "172b15be029c72e927be3ad83d237cca",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 78,
"avg_line_length": 33.376344086021504,
"alnum_prop": 0.6530283505154639,
"repo_name": "chrisdickinson/nojs",
"id": "ae435d059c72a525df38af9908fa8d4355449cf5",
"size": "3104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/config/merge_for_jumbo.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "52243"
},
{
"name": "JavaScript",
"bytes": "55472"
},
{
"name": "Python",
"bytes": "16760"
}
],
"symlink_target": ""
} |
import sympy as sp
from sympy.physics.mechanics import dynamicsymbols
from sympy.physics.mechanics.functions import find_dynamicsymbols
from simupy.utils.symbolic import (lambdify_with_vector_args, grad,
DEFAULT_LAMBDIFY_MODULES)
from simupy.array import Array, empty_array
from simupy.systems import DynamicalSystem as DynamicalSystemBase
DEFAULT_CODE_GENERATOR = lambdify_with_vector_args
DEFAULT_CODE_GENERATOR_ARGS = {
'modules': DEFAULT_LAMBDIFY_MODULES
}
class DynamicalSystem(DynamicalSystemBase):
def __init__(self, state_equation=None, state=None, input_=None,
output_equation=None, constants_values={}, dt=0,
initial_condition=None, code_generator=None,
code_generator_args={}):
"""
DynamicalSystem constructor, used to create systems from symbolic
expressions.
Parameters
----------
state_equation : array_like of sympy Expressions, optional
Vector valued expression for the derivative of the state.
state : array_like of sympy symbols, optional
Vector of symbols representing the components of the state, in the
desired order, matching state_equation.
input_ : array_like of sympy symbols, optional
Vector of symbols representing the components of the input, in the
desired order. state_equation may depend on the system input. If
the system has no state, the output_equation may depend on the
system input.
output_equation : array_like of sympy Expressions
Vector valued expression for the output of the system.
constants_values : dict
            Dictionary of constant substitutions.
dt : float
Sampling rate of system. Use 0 for continuous time systems.
initial_condition : array_like of numerical values, optional
Array or Matrix used as the initial condition of the system.
Defaults to zeros of the same dimension as the state.
code_generator : callable, optional
Function to be used as code generator.
code_generator_args : dict, optional
Dictionary of keyword args to pass to the code generator.
By default, the code generator uses a wrapper for ``sympy.lambdify``.
You can change it by passing the system initialization arguments
``code_generator`` (the function) and additional keyword arguments to
the generator in a dictionary ``code_generator_args``. You can change
the defaults for future systems by changing the module values. See the
readme or docs for an example.
"""
self.constants_values = constants_values
self.state = state
self.input = input_
self.code_generator = code_generator or DEFAULT_CODE_GENERATOR
code_gen_args_to_set = DEFAULT_CODE_GENERATOR_ARGS.copy()
code_gen_args_to_set.update(code_generator_args)
self.code_generator_args = code_gen_args_to_set
self.state_equation = state_equation
self.output_equation = output_equation
self.initial_condition = initial_condition
self.dt = dt
self.validate()
@property
def state(self):
return self._state
@state.setter
def state(self, state):
if state is None: # or other checks?
state = empty_array()
if isinstance(state, sp.Expr):
state = Array([state])
self.dim_state = len(state)
self._state = state
@property
def input(self):
return self._inputs
@input.setter
def input(self, input_):
if input_ is None: # or other checks?
input_ = empty_array()
if isinstance(input_, sp.Expr): # check it's a single dynamicsymbol?
input_ = Array([input_])
self.dim_input = len(input_)
self._inputs = input_
@property
def state_equation(self):
return self._state_equation
@state_equation.setter
def state_equation(self, state_equation):
if state_equation is None: # or other checks?
state_equation = empty_array()
else:
assert len(state_equation) == len(self.state)
assert find_dynamicsymbols(state_equation) <= (
set(self.state) | set(self.input)
)
assert state_equation.atoms(sp.Symbol) <= (
set(self.constants_values.keys())
| set([dynamicsymbols._t])
)
self._state_equation = state_equation
self.update_state_equation_function()
self.state_jacobian_equation = grad(self.state_equation, self.state)
self.update_state_jacobian_function()
self.input_jacobian_equation = grad(self.state_equation, self.input)
self.update_input_jacobian_function()
@property
def output_equation(self):
return self._output_equation
@output_equation.setter
def output_equation(self, output_equation):
if isinstance(output_equation, sp.Expr):
output_equation = Array([output_equation])
if output_equation is None and self.dim_state == 0:
output_equation = empty_array()
else:
if output_equation is None:
output_equation = self.state
assert output_equation.atoms(sp.Symbol) <= (
set(self.constants_values.keys())
| set([dynamicsymbols._t])
)
if self.dim_state:
assert find_dynamicsymbols(output_equation) <= set(self.state)
else:
assert find_dynamicsymbols(output_equation) <= set(self.input)
self.dim_output = len(output_equation)
self._output_equation = output_equation
self.update_output_equation_function()
def update_state_equation_function(self):
if not self.dim_state or self.state_equation == empty_array():
return
self.state_equation_function = self.code_generator(
[dynamicsymbols._t] + sp.flatten(self.state) +
sp.flatten(self.input),
self.state_equation.subs(self.constants_values),
**self.code_generator_args
)
def update_state_jacobian_function(self):
if not self.dim_state or self.state_equation == empty_array():
return
self.state_jacobian_equation_function = self.code_generator(
[dynamicsymbols._t] + sp.flatten(self.state) +
sp.flatten(self.input),
self.state_jacobian_equation.subs(self.constants_values),
**self.code_generator_args
)
def update_input_jacobian_function(self):
# TODO: state-less systems should have an input/output jacobian
if not self.dim_state or self.state_equation == empty_array():
return
self.input_jacobian_equation_function = self.code_generator(
[dynamicsymbols._t] + sp.flatten(self.state) +
sp.flatten(self.input),
self.input_jacobian_equation.subs(self.constants_values),
**self.code_generator_args
)
def update_output_equation_function(self):
if not self.dim_output or self.output_equation == empty_array():
return
if self.dim_state:
self.output_equation_function = self.code_generator(
[dynamicsymbols._t] + sp.flatten(self.state),
self.output_equation.subs(self.constants_values),
**self.code_generator_args
)
else:
self.output_equation_function = self.code_generator(
[dynamicsymbols._t] + sp.flatten(self.input),
self.output_equation.subs(self.constants_values),
**self.code_generator_args
)
def prepare_to_integrate(self, t0, state_or_input=None):
self.update_output_equation_function()
self.update_state_equation_function()
if not self.dim_state and self.num_events:
self.update_equation_function(t0, state_or_input)
if self.dim_state or self.dim_input:
return self.output_equation_function(t0, state_or_input)
else:
return self.output_equation_function(t0)
def copy(self):
copy = self.__class__(
state_equation=self.state_equation,
state=self.state,
input_=self.input,
output_equation=self.output_equation,
constants_values=self.constants_values,
dt=self.dt
)
copy.output_equation_function = self.output_equation_function
copy.state_equation_function = self.state_equation_function
return copy
def equilibrium_points(self, input_=None):
return sp.solve(self.state_equation, self.state, dict=True)
class MemorylessSystem(DynamicalSystem):
"""
A system with no state.
    With no input, it can represent a signal (a function of time only). For example,
a stochastic signal could interpolate points and use prepare_to_integrate
to re-seed the data.
"""
def __init__(self, input_=None, output_equation=None, **kwargs):
"""
DynamicalSystem constructor
Parameters
----------
input_ : array_like of sympy symbols
Vector of symbols representing the components of the input, in the
desired order. The output may depend on the system input.
output_equation : array_like of sympy Expressions
Vector valued expression for the output of the system.
"""
super().__init__(
input_=input_, output_equation=output_equation, **kwargs)
@property
def state(self):
return self._state
@state.setter
def state(self, state):
if state is None: # or other checks?
state = empty_array()
else:
raise ValueError("Memoryless system should not have state or " +
"state_equation")
self.dim_state = len(state)
self._state = state
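# --- Editor's usage sketch (not part of the module): a scalar linear system
# x' = -x + u with output y = x, built from the symbolic DynamicalSystem
# defined above. The final call assumes the vector-argument calling convention
# that lambdify_with_vector_args provides for the generated functions.
if __name__ == "__main__":
    import numpy as np
    x_sym, u_sym = dynamicsymbols('x u')
    example_sys = DynamicalSystem(
        state_equation=Array([-x_sym + u_sym]),
        state=Array([x_sym]),
        input_=Array([u_sym]),
        output_equation=Array([x_sym]),
    )
    # Generated callables take (t, *state, *input); expect roughly [-1.0] here.
    print(example_sys.state_equation_function(0.0, np.r_[1.0], np.r_[0.0]))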
| {
"content_hash": "b23992493df4ac8790679a71ccff0442",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 79,
"avg_line_length": 37.84014869888476,
"alnum_prop": 0.616661754592789,
"repo_name": "simupy/simupy",
"id": "2d55180406899a6954621988f3956024817accbb",
"size": "10179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simupy/systems/symbolic.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "140544"
},
{
"name": "TeX",
"bytes": "1003"
}
],
"symlink_target": ""
} |
'''
WikiLinks Extension for Python-Markdown
======================================
Converts [[WikiLinks]] to relative links. Requires Python-Markdown 2.0+
Basic usage:
>>> import markdown
>>> text = "Some text with a [[WikiLink]]."
>>> html = markdown.markdown(text, ['wikilinks'])
>>> html
u'<p>Some text with a <a class="wikilink" href="/WikiLink/">WikiLink</a>.</p>'
Whitespace behavior:
>>> markdown.markdown('[[ foo bar_baz ]]', ['wikilinks'])
u'<p><a class="wikilink" href="/foo_bar_baz/">foo bar_baz</a></p>'
>>> markdown.markdown('foo [[ ]] bar', ['wikilinks'])
u'<p>foo bar</p>'
To define custom settings the simple way:
>>> markdown.markdown(text,
... ['wikilinks(base_url=/wiki/,end_url=.html,html_class=foo)']
... )
u'<p>Some text with a <a class="foo" href="/wiki/WikiLink.html">WikiLink</a>.</p>'
Custom settings the complex way:
>>> md = markdown.Markdown(
... extensions = ['wikilinks'],
... extension_configs = {'wikilinks': [
... ('base_url', 'http://example.com/'),
... ('end_url', '.html'),
... ('html_class', '') ]},
... safe_mode = True)
>>> md.convert(text)
u'<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>'
Use MetaData with mdx_meta.py (Note the blank html_class in MetaData):
>>> text = """wiki_base_url: http://example.com/
... wiki_end_url: .html
... wiki_html_class:
...
... Some text with a [[WikiLink]]."""
>>> md = markdown.Markdown(extensions=['meta', 'wikilinks'])
>>> md.convert(text)
u'<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>'
MetaData should not carry over to next document:
>>> md.convert("No [[MetaData]] here.")
u'<p>No <a class="wikilink" href="/MetaData/">MetaData</a> here.</p>'
Define a custom URL builder:
>>> def my_url_builder(label, base, end):
... return '/bar/'
>>> md = markdown.Markdown(extensions=['wikilinks'],
... extension_configs={'wikilinks' : [('build_url', my_url_builder)]})
>>> md.convert('[[foo]]')
u'<p><a class="wikilink" href="/bar/">foo</a></p>'
From the command line:
python markdown.py -x wikilinks(base_url=http://example.com/,end_url=.html,html_class=foo) src.txt
By [Waylan Limberg](http://achinghead.com/).
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
'''
import markdown
import re
def build_url(label, base, end):
""" Build a url from the label, a base, and an end. """
clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
return '%s%s%s'% (base, clean_label, end)
class WikiLinkExtension(markdown.Extension):
def __init__(self, configs):
# set extension defaults
self.config = {
'base_url' : ['/', 'String to append to beginning or URL.'],
'end_url' : ['/', 'String to append to end of URL.'],
'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'],
'build_url' : [build_url, 'Callable formats URL from label.'],
}
# Override defaults with user settings
for key, value in configs :
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
self.md = md
# append to end of inline patterns
WIKILINK_RE = r'\[\[([A-Za-z0-9_ -]+)\]\]'
wikilinkPattern = WikiLinks(WIKILINK_RE, self.config)
wikilinkPattern.md = md
md.inlinePatterns.add('wikilink', wikilinkPattern, "<not_strong")
class WikiLinks(markdown.inlinepatterns.Pattern):
def __init__(self, pattern, config):
markdown.inlinepatterns.Pattern.__init__(self, pattern)
self.config = config
def handleMatch(self, m):
if m.group(2).strip():
base_url, end_url, html_class = self._getMeta()
label = m.group(2).strip()
url = self.config['build_url'][0](label, base_url, end_url)
a = markdown.etree.Element('a')
a.text = label
a.set('href', url)
if html_class:
a.set('class', html_class)
else:
a = ''
return a
def _getMeta(self):
""" Return meta data or config data. """
base_url = self.config['base_url'][0]
end_url = self.config['end_url'][0]
html_class = self.config['html_class'][0]
if hasattr(self.md, 'Meta'):
if 'wiki_base_url' in self.md.Meta:
base_url = self.md.Meta['wiki_base_url'][0]
if 'wiki_end_url' in self.md.Meta:
end_url = self.md.Meta['wiki_end_url'][0]
if 'wiki_html_class' in self.md.Meta:
html_class = self.md.Meta['wiki_html_class'][0]
return base_url, end_url, html_class
def makeExtension(configs=None) :
return WikiLinkExtension(configs=configs)
if __name__ == "__main__":
import doctest
doctest.testmod()
| {
"content_hash": "004208010f201484a18a40c8ae48a828",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 102,
"avg_line_length": 34.4640522875817,
"alnum_prop": 0.551868006827233,
"repo_name": "ronreiter/interactive-tutorials",
"id": "95a4a95e43d22344e1f72b60b918e4e98eded455",
"size": "5296",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "markdown/extensions/wikilinks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "58440"
},
{
"name": "Dockerfile",
"bytes": "304"
},
{
"name": "HTML",
"bytes": "38066"
},
{
"name": "JavaScript",
"bytes": "263446"
},
{
"name": "Makefile",
"bytes": "225"
},
{
"name": "Python",
"bytes": "634473"
}
],
"symlink_target": ""
} |
import os
import pickle
import numpy as np
from image_processing import batch_preprocess
__author__ = "Ronny Restrepo"
__copyright__ = "Copyright 2017, Ronny Restrepo"
__credits__ = ["Ronny Restrepo"]
__license__ = "Apache License"
__version__ = "2.0"
# MAP LABELS AND IDS
id2label = ['class_0', 'class_1', 'class_2', 'class_3', 'class_4', 'class_5', 'class_6', 'class_7', 'class_8', 'class_9']
label2id = {val:id for id,val in enumerate(id2label)}
# ==============================================================================
# MAYBE_MAKE_DIR
# ==============================================================================
def maybe_make_dir(path):
""" Checks if a directory path exists on the system, if it does not, then
it creates that directory (and any parent directories needed to
create that directory)
"""
if not os.path.exists(path):
os.makedirs(path)
# ==============================================================================
# GET_PARDIR
# ==============================================================================
def get_pardir(file):
""" Given a file path, it returns the parent directory of that file. """
return os.path.dirname(file)
# ==============================================================================
# MAYBE_MAKE_PARDIR
# ==============================================================================
def maybe_make_pardir(file):
""" Takes a path to a file, and creates the necessary directory structure
on the system to ensure that the parent directory exists (if it does
not already exist)
"""
pardir = os.path.dirname(file)
if pardir.strip() != "": # ensure pardir is not an empty string
if not os.path.exists(pardir):
os.makedirs(pardir)
# ==============================================================================
# FILE2STR
# ==============================================================================
def file2str(file):
""" Takes a file path and returns the contents of that file as a string."""
with open(file, "r") as textFile:
return textFile.read()
# ==============================================================================
# STR2FILE
# ==============================================================================
def str2file(s, file, mode="w"):
""" Writes a string to a file"""
    # Ensure parent directory and necessary file structure exists
pardir = os.path.dirname(file)
if pardir.strip() != "": # ensure pardir is not an empty string
if not os.path.exists(pardir):
os.makedirs(pardir)
with open(file, mode=mode) as textFile:
textFile.write(s)
# ==============================================================================
# OBJ2PICKLE
# ==============================================================================
def obj2pickle(obj, file, protocol=2):
""" Saves an object as a binary pickle file to the desired file path. """
    # Ensure parent directory and necessary file structure exists
pardir = os.path.dirname(file)
if pardir.strip() != "": # ensure pardir is not an empty string
if not os.path.exists(pardir):
os.makedirs(pardir)
with open(file, mode="wb") as fileObj:
pickle.dump(obj, fileObj, protocol=protocol)
# ==============================================================================
# PICKLE2OBJ
# ==============================================================================
def pickle2obj(file):
""" Loads the contents of a pickle as a python object. """
with open(file, mode = "rb") as fileObj:
obj = pickle.load(fileObj)
return obj
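# --- Editor's sketch (not part of the original module): round-trip an object
# through obj2pickle/pickle2obj. The temp path is a hypothetical example.
def _example_pickle_roundtrip(tmp_path="/tmp/data_processing_example.pkl"):
    obj2pickle({"a": 1, "b": [2, 3]}, tmp_path)
    restored = pickle2obj(tmp_path)
    assert restored == {"a": 1, "b": [2, 3]}
    return restored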
# ==============================================================================
# CREATE_DATA_DICT
# ==============================================================================
def create_data_dict(datadir):
assert False, "NOT IMPLEMENTED: create_data_dict not yet created"
# ==============================================================================
# PREPARE_DATA
# ==============================================================================
def prepare_data(data_file, valid_from_train=False, n_valid=1024, max_data=None, verbose=True):
data = pickle2obj(data_file)
# Create validation from train data
if valid_from_train:
data["X_valid"] = data["X_train"][:n_valid]
data["Y_valid"] = data["Y_train"][:n_valid]
data["X_train"] = data["X_train"][n_valid:]
data["Y_train"] = data["Y_train"][n_valid:]
if max_data:
data["X_train"] = data["X_train"][:max_data]
data["Y_train"] = data["Y_train"][:max_data]
if verbose:
# Print information about data
print("DATA SHAPES")
print("- X_valid: ", (data["X_valid"]).shape)
print("- Y_valid: ", (data["Y_valid"]).shape)
print("- X_train: ", (data["X_train"]).shape)
print("- Y_train: ", (data["Y_train"]).shape)
if "X_test" in data:
print("- X_test: ", (data["X_test"]).shape)
if "Y_test" in data:
print("- Y_test: ", (data["Y_test"]).shape)
return data
# ==============================================================================
# LOAD_BATCH_OF_IMAGES
# ==============================================================================
def load_batch_of_images(X_batch, img_shape=[299, 299]):
batch = batch_preprocess(X_batch, shape=img_shape, mode="RGB")
return batch
# ==============================================================================
# CALCULATE_CLASS_WEIGHTS
# ==============================================================================
def calculate_class_weights(Y, n_classes, method="paszke", c=1.02):
""" Given the training data labels Calculates the class weights.
Args:
Y: (numpy array) The training labels as class id integers.
The shape does not matter, as long as each element represents
a class id (ie, NOT one-hot-vectors).
n_classes: (int) Number of possible classes.
method: (str) The type of class weighting to use.
- "paszke" = use the method from from Paszke et al 2016
`1/ln(c + class_probability)`
- "eigen" = use the method from Eigen & Fergus 2014.
`median_freq/class_freq`
where `class_freq` is based only on images that
actually contain that class.
- "eigen2" = Similar to `eigen`, except that class_freq is
based on the frequency of the class in the
entire dataset, not just images where it occurs.
-"logeigen2" = takes the log of "eigen2" method, so that
incredibly rare classes do not completely overpower
other values.
c: (float) Coefficient to use, when using paszke method.
Returns:
weights: (numpy array) Array of shape [n_classes] assigning a
weight value to each class.
References:
Eigen & Fergus 2014: https://arxiv.org/abs/1411.4734
Paszke et al 2016: https://arxiv.org/abs/1606.02147
"""
# CLASS PROBABILITIES - based on empirical observation of data
ids, counts = np.unique(Y, return_counts=True)
n_pixels = Y.size
p_class = np.zeros(n_classes)
p_class[ids] = counts/n_pixels
# CLASS WEIGHTS
if method == "paszke":
weights = 1/np.log(c+p_class)
elif method == "eigen":
assert False, "TODO: Implement eigen method"
# TODO: Implement eigen method
# where class_freq is the number of pixels of class c divided by
# the total number of pixels in images where c is actually present,
# and median freq is the median of these frequencies.
elif method in {"eigen2", "logeigen2"}:
epsilon = 1e-8 # to prevent division by 0
median = np.median(p_class)
weights = median/(p_class+epsilon)
if method == "logeigen2":
weights = np.log(weights+1)
else:
assert False, "Incorrect choice for method"
return weights
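# --- Editor's worked example (not part of the original module): weights for a
# toy three-class label array using the default Paszke scheme described above.
if __name__ == "__main__":
    Y_demo = np.array([0, 0, 0, 0, 1, 1, 1, 2])             # p_class = [0.5, 0.375, 0.125]
    w_demo = calculate_class_weights(Y_demo, n_classes=3)   # 1 / ln(1.02 + p_class)
    print(w_demo)  # the rare class 2 receives the largest weight (~7.4)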
| {
"content_hash": "f3e6abfb1613172ae406496102428aa5",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 121,
"avg_line_length": 43.88235294117647,
"alnum_prop": 0.43789097408400357,
"repo_name": "ronrest/convenience_py",
"id": "7c534627e53851519b924a01c2a1630866ccb85f",
"size": "8952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ml/tf/workflow_image_classifier/data_processing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "395197"
},
{
"name": "Shell",
"bytes": "22274"
}
],
"symlink_target": ""
} |
from ScalarTransport import *
from ScalarTransportTests import *
from LevelSetTests import *
"""
test RKDG via ScalarTransport interface with quadrature for simple
advection problems
"""
def buildProblems(testFlag=0,
verbose=0):
"""
build data structures necessary for specifying test problems:
testFlag says which one to run?
0 --- LinearAD_DiracIC (1d)
1 --- rotating Cone (2d)
"""
testProblems = []
nd = {}
T = {}
coefficients = {}
getInitialConditions = {}
getDirichletConditions = {}
analyticalSolution = {}
if testFlag == 1:
test = 'RotatingCone2D'
testProblems.append(test)
nd[test]=2
getDirichletConditions[test]=getHomogeneousDBC2D
N=3.0
analyticalSolution[test] = RotatingCone2D(1.0/8.0)
#mwf correct one, but DG has problems for some reason
#coefficients[test]=UnitSquareRotation()
#mwf works better with this, still have no diffusion
coefficients[test]=UnitSquareRotationWithDiffusion(A0=0.0)
T[test]=0.5
getInitialConditions[test] = analyticalSolution[test]
coefficients[test].mass = 'linear'
coefficients[test].advection = 'linear'
#mwf correct coefficients[test].diffusion = None
#mwf correct coefficients[test].potential = None
#mwf worked better
coefficients[test].diffusion = 'constant'
#mwf worked better
coefficients[test].potential = 'linear'
coefficients[test].reaction = None
else:
#1d linear advection-diffusion with dirac initial condition:
#
#u_t + (bu - a u_x)_x = 0; u(0) = 1; u(1) = 0
#
test='LinearAD_DiracIC'
testProblems.append(test)
nd[test]=1
getDirichletConditions[test]=getDBC_hom
#a0=1.0e-4
a0=1.0e-2
#a0=1.0
#a0=0.0
A0=Numeric.array([[a0]])
b0=1.0
#b0=0.0
B0=Numeric.array([b0])
C0=1.0
M0=0.0
coefficients[test] = LinearADR_ConstantCoefficients(M=1.0,A=A0,B=B0,C=0.0)
analyticalSolution[test] = LinearAD_DiracIC(b=B0,a=a0,tStart=0.25)
T[test]=0.1 #0.5
getInitialConditions[test] = analyticalSolution[test]
coefficients[test].mass = 'linear'
coefficients[test].advection = 'linear'
coefficients[test].diffusion = 'constant'
coefficients[test].potential = 'linear'
coefficients[test].reaction = None
#end else on testFlag
#just put these in one dictionary so I know what all
#has to be specified
problems = {}
problems['testProblems'] =testProblems
problems['nd'] =nd
problems['T'] =T
problems['coefficients'] =coefficients
problems['initialConditions'] =getInitialConditions
problems['dirichletConditions']=getDirichletConditions
problems['analyticalSolution'] =analyticalSolution
return problems
#end buildProblems
def buildSimParams(test,TimeIntegrationClass,verbose=0):
"""
define the necessary flags and tolerances for performing test problems
"""
#mwf debug
print 'building simulation details for ',test
computeEigenvalues = False
if computeEigenvalues:
#set flags for eigenvalue computation
linearSolverType= levelLinearSolverType = 'DenseLU'
levelNonlinearSolverType = 'Newton'
nonlinearSolverType = 'NLNI'
else:
linearSolverType = levelLinearSolverType = 'SparseLU'
levelNonlinearSolverType = 'Newton'
nonlinearSolverType = 'Newton'
#end else on evalues
#tolerances
tolFac = 1.0e-4
linTolFac= 1.0e-2
#time stepping control
runCFL = 0.1
#order of approximation (1 unless doing SSPRK)
tOrder = 2
#pick finite element spaces
DG = True #False
if DG:
FemSpace = DG_AffineLinearOnSimplexWithNodalBasis
conservativeFlux = False
numericalFlux = True
stabilization= None
shockCapturing=None
#mwf added
shockCapturingDiffusion = None
quadratureOrder=3
preSmooths = None
postSmooths = None
cycles = None
nLevels=1
if test == 'RotatingCone2D':
nn =31
else:
nn = 51
else:
FemSpace = C0_AffineLinearOnSimplexWithNodalBasis
conservativeFlux = None#'pwc'
numericalFlux = None
stabilization='2'
shockCapturing= None#'1'
shockCapturingDiffusion = 0.15
quadratureOrder=3
preSmooths = 2
postSmooths = 2
cycles = 2
if test == 'RotatingCone2D':
nLevels = 3
else:
nLevels=6 #1d problem
nn=3 #number of nodes on the coarsest mesh
#end if on DG
#collect run parameters
par = {}
par['computeEigenvalues'] =computeEigenvalues
par['linearSolverType'] =linearSolverType
par['levelLinearSolverType'] =levelLinearSolverType
par['levelNonlinearSolverType']=levelNonlinearSolverType
par['nonlinearSolverType'] =nonlinearSolverType
par['tolFac'] =tolFac
par['linTolFac'] =linTolFac
par['runCFL'] =runCFL
par['DG'] =DG
par['FemSpace'] =FemSpace
par['conservativeFlux'] =conservativeFlux
par['numericalFlux'] =numericalFlux
par['stabilization'] =stabilization
par['shockCapturing'] =shockCapturing
par['shockCapturingDiffusion'] =shockCapturingDiffusion
par['quadratureOrder'] =quadratureOrder
par['preSmooths'] =preSmooths
par['postSmooths'] =postSmooths
par['cycles'] =cycles
par['nLevels'] =nLevels
par['nn'] =nn
par['timeIntegration'] =TimeIntegrationClass
par['fullNewtonFlag'] =False
par['timeIntOrder'] =tOrder
#par[''] =
#
return par
#end buildSimParams
def buildQuadrature(test,tpars,problems):
"""
setup numerical quadrature data structures
"""
quadrature = {}
gq = SimplexGaussQuadrature(problems['nd'][test])
gq.setOrder(tpars['quadratureOrder'])
for integral in OneLevelScalarTransport.integralKeys:
quadrature[integral] = gq
#end for
if tpars['stabilization'] is not None:
quadrature['stab'] = gq
if tpars['shockCapturing'] is not None:
quadrature['numDiff'] = gq
elementBoundaryQuadrature={}
ebgq = SimplexGaussQuadrature(problems['nd'][test]-1)
ebgq.setOrder(tpars['quadratureOrder'])
for elementBoundaryIntegral in OneLevelScalarTransport.elementBoundaryIntegralKeys:
elementBoundaryQuadrature[elementBoundaryIntegral] = ebgq
#end boundary quad integral
tpars['quadrature']= quadrature
tpars['elementBoundaryQuadrature']=elementBoundaryQuadrature
return tpars
#end build quadrature
def buildMultilevelMesh(test,tpars,problems):
mlMesh = []
nn = tpars['nn']
nLevels = tpars['nLevels']
if problems['nd'][test]==1:
mlMesh = MultiLevelEdgeMesh(nn,1,1,refinementLevels=nLevels)
elif problems['nd'][test]==2:
mlMesh = MultiLevelTriangularMesh(nn,nn,1,
refinementLevels=nLevels)
elif problems['nd'][test]==3:
mlMesh = MultiLevelTetrahedralMesh(nn,nn,nn,
refinementLevels=nLevels)
#end if on dim
return mlMesh
#end buildMultilevelMesh
def buildMultiLevelScalarTransport(test,tpars,problems,mlMesh):
"""
"""
tolList=[]
linTolList=[]
for l in range(tpars['nLevels']):
mlMesh.meshList[l].computeGeometricInfo()
tolList.append(tpars['tolFac']*(mlMesh.meshList[l].h**2))
linTolList.append(tpars['linTolFac']*(mlMesh.meshList[l].h**2))
#end l
atol = min(tolList)
lin_atol = min(linTolList)
if (tpars['computeEigenvalues'] or
tpars['linearSolverType'] == 'DenseLU'):
MatType = Mat
matType = 'dense'
else:
MatType = SparseMat
matType = 'csr'
#end if
mlScalarTransport = MultiLevelScalarTransport(
problems['nd'][test],
mlMesh,
tpars['FemSpace'],
tpars['FemSpace'],
matType,
problems['dirichletConditions'][test],
problems['coefficients'][test],
tpars['quadrature'],
tpars['elementBoundaryQuadrature'],
tpars['stabilization'],
tpars['shockCapturing'],
tpars['shockCapturingDiffusion'],
tpars['conservativeFlux'],
tpars['numericalFlux'],
tpars['timeIntegration'],
tpars['timeIntOrder'])
tpars['MatType'] =MatType
tpars['atol'] = atol
tpars['lin_atol']= lin_atol
tpars['tolList'] = tolList
tpars['linTolList']= linTolList
return mlScalarTransport,tpars
#end build mlScalarTransport
def buildSolvers(test,tpars,problems,mlScalarTransport,verbose=0):
"""
create linear and nonlinear solvers
"""
#how loud should nonlinear solver be
printNLinfo=False
if verbose > 3:
printNLinfo=True
levelLinearSolver = None
#force linearSolver to be SparseLU
if tpars['linearSolverType'] != 'SparseLU':
print 'WARNING setting linearSolverType to SparseLU'
print 'you need to check MatType to make sure SparseMat'
tpars['linearSolverType'] = 'SparseLU'
#end if
levelLinearSolverList=[]
for l in range(tpars['nLevels']):
levelLinearSolverList.append(
SparseLU(mlScalarTransport.jacobianList[l]))
#end l
levelLinearSolver = levelLinearSolverList
linearSolver = None
#do just plain Newton
linearSolver = levelLinearSolver
for l in range(tpars['nLevels']):
linearSolver[l].printInfo=False
#end l
directSolverFlag=True
#print "Setting up NonlinearSolver"
#for levelnonlinear solver to be Newton
if tpars['levelNonlinearSolverType'] != 'Newton':
print 'WARNING setting levelNonlinearSolverType to Newton'
tpars['levelNonlinearSolverType'] = 'Newton'
#end if
levelNonlinearSolverList=[]
for l in range(tpars['nLevels']):
levelNonlinearSolverList.append(
Newton(linearSolver=linearSolver[l],
F=mlScalarTransport.modelList[l],
J=mlScalarTransport.jacobianList[l],
rtol_r=tpars['tolList'][l],
atol_r=tpars['atol'],
maxIts=500,
convergenceTest = 'r',
printInfo=printNLinfo,
fullNewton=tpars['fullNewtonFlag'],
directSolver=directSolverFlag))
#end for l
#for nonlinear solver to be Newton
if tpars['nonlinearSolverType'] != 'Newton':
print 'WARNING setting nonlinearSolverType to Newton!'
tpars['nonlinearSolverType'] = 'Newton'
#end if
nonlinearSolver = levelNonlinearSolverList
return linearSolver,nonlinearSolver,levelLinearSolver
#end buildSolvers
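#
# Editor's driver sketch (not part of the original test module): how the
# build* helpers above are intended to chain together for the 1d Dirac IC
# test. BackwardEuler is assumed to be one of the TimeIntegrationClass
# options pulled in by the star imports at the top of the file.
def exampleDriver(verbose=0):
    problems = buildProblems(testFlag=0,verbose=verbose)
    test = problems['testProblems'][0]
    pars = buildSimParams(test,BackwardEuler,verbose=verbose)
    pars = buildQuadrature(test,pars,problems)
    mlMesh = buildMultilevelMesh(test,pars,problems)
    mlScalarTransport,pars = buildMultiLevelScalarTransport(test,pars,problems,mlMesh)
    linearSolver,nonlinearSolver,levelLinearSolver = buildSolvers(test,pars,problems,mlScalarTransport,verbose=verbose)
    return mlScalarTransport,linearSolver,nonlinearSolver,levelLinearSolver
#end exampleDriver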
def computeErrors(eSpace,eSpaceTime,eSpaceLast,
tn,mlScalarTransport,mlMesh,
test,pars,problems,verbose):
"""
go through and calculate errors on mesh hierarchy
"""
eCoarse=1.0
eFine=1.0
hCoarse=1.0
hFine=1.0
analyticalSolution = problems['analyticalSolution']
for m,jac,mesh in zip(mlScalarTransport.modelList,
mlScalarTransport.jacobianList,
mlMesh.meshList):
if analyticalSolution[test] is not None:
eCoarse=eFine
hCoarse=hFine
hFine = mesh.h
eFine = L2errorSFEMvsAF(analyticalSolution[test],
m.q['x'],
m.q['dx_m'],
m.q['u'],tn)
if eSpace.has_key(hFine):
eSpaceLast[hFine] = eSpace[hFine]
if eSpaceTime.has_key(hFine):
eSpaceTime[hFine] +=\
mlScalarTransport.DT*0.5*(eSpaceLast[hFine]**2 + eFine**2)
else:
eSpaceTime[hFine] =\
mlScalarTransport.DT*0.5*(eSpaceLast[hFine]**2 + eFine**2)
#end else on spaceTime
#end if on eSpace
eSpace[hFine] = eFine
#end analytical solution not none
#end for
if analyticalSolution[test] is not None:
hFine = 0
errors='||e||_{2}'
errorsSpaceTime=''
orders='|| ||e||_2 ||_2'
for mesh in mlMesh.meshList:
hCoarse=hFine
hFine = mesh.h
if hCoarse != 0:
if eSpace[hFine] != 0.0 and eSpace[hCoarse] != 0.0:
p = (log(eSpace[hFine]) - log(eSpace[hCoarse]))/(log(hFine) - log(hCoarse))
else:
p=0
else:
p = 0
#end if on hCoarse != 0
errors+="& %4.2e" % eSpace[hFine]
orders+="& %4.2e" % p
if eSpaceTime.has_key(hFine): #mwf added if
errorsSpaceTime+="& %4.2e" % sqrt(eSpaceTime[hFine])
#end for
print errors
print orders
print errorsSpaceTime
#end if analytical solution
return eSpace,eSpaceTime,eSpaceLast
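#Illustrative sketch, not used by the driver below: the convergence order
#printed by computeErrors is the standard two-mesh estimate
#p = (log(eFine)-log(eCoarse))/(log(hFine)-log(hCoarse)).  The error and mesh
#values here are hypothetical.
def exampleConvergenceOrder(eCoarse=1.0e-2,eFine=2.5e-3,hCoarse=0.1,hFine=0.05):
    return (log(eFine) - log(eCoarse))/(log(hFine) - log(hCoarse)) #approximately 2
#end exampleConvergenceOrder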
def plotInitial(tn,test,tpars,problems,
mlMesh,mlScalarTransport):
"""
plot initial conditions and analytical solutions
"""
solPlot = None
aSolPlot= None
if tpars['DG'] == False:
solPlot = Gnuplot.Gnuplot()
solPlot("set terminal x11")
aSolPlot = Gnuplot.Gnuplot()
aSolPlot("set terminal x11")
if problems['nd'][test] == 1:
if problems['analyticalSolution'][test] is not None:
solPlot.title(test)
nap=101
dxap=Numeric.array([1.0/(nap - 1.0),0.0,0.0])
P = [(i*dxap) for i in range(nap)]
Px = [x[0] for x in P]
solPlot.plot(Gnuplot.Data(mlMesh.meshList[-1].nodeArray[:,0],
mlScalarTransport.modelList[-1].u.dof,
with='linespoints',
title='numerical solution'),
Gnuplot.Data(Px,
[problems['analyticalSolution'][test].uOfXT(x,tn) for x in P],
with='lines',
title='analytical solution'))
aSolPlot.plot(Gnuplot.Data(Px,
[problems['analyticalSolution'][test].uOfXT(x,tn) for x in P],
with='lines'))
#end if on analytical solution
elif problems['nd'][test]==2:
nx = (tpars['nn']-1)*(2**(tpars['nLevels']-1))+1
ny = nx
x = Numeric.arange(nx)/float(nx-1)
y = Numeric.arange(nx)/float(nx-1)
nSol = Numeric.reshape(mlScalarTransport.modelList[-1].u.dof,
(nx,ny))
solPlot('set parametric')
solPlot('set data style lines')
solPlot('set hidden')
solPlot('set contour base')
solPlot('set cntrparam levels incremental 0.1,0.1,1.0')
solPlot.xlabel('x')
solPlot.ylabel('y')
solPlot.splot(Gnuplot.GridData(nSol,
x,
y,
binary=0,
inline=0))
if problems['analyticalSolution'][test] is not None:
aSol = Numeric.zeros((nx,ny),Numeric.Float)
for i in range(nx):
for j in range(ny):
aSol[i,j]=problems['analyticalSolution'][test].uOfXT(Numeric.array([x[i],y[j],0.0]),tn)
aSolPlot('set parametric')
aSolPlot('set data style lines')
aSolPlot('set hidden')
aSolPlot('set contour base')
aSolPlot('set cntrparam levels incremental 0.1,0.1,1.0')
aSolPlot.xlabel('x')
aSolPlot.ylabel('y')
aSolPlot.splot(Gnuplot.GridData(aSol,
x,
y,
binary=0,
inline=0))
#end if on analytical solution
#end if on nd ==2
#end if on not DG
return solPlot,aSolPlot
def plotTimeStep(solPlot,aSolPlot,tn,test,tpars,problems,
mlMesh,mlScalarTransport,testOut):
"""
    plot numerical and analytical solutions at the current time step
"""
if solPlot is None or aSolPlot is None:
return solPlot,aSolPlot
#end nothing to plot with
if tpars['DG'] == False:
if problems['nd'][test] == 1:
if problems['analyticalSolution'][test] is not None:
solPlot.title(testOut)
nap=101
dxap=Numeric.array([1.0/(nap - 1.0),0.0,0.0])
P = [(i*dxap) for i in range(nap)]
Px = [x[0] for x in P]
solPlot.plot(Gnuplot.Data(mlMesh.meshList[-1].nodeArray[:,0],
mlScalarTransport.modelList[-1].u.dof,
with='linespoints',
title='numerical solution'),
Gnuplot.Data(Px,
[problems['analyticalSolution'][test].uOfXT(x,tn) for x in P],
with='lines',
title='analytical solution'))
else:
solPlot.title(testOut)
solPlot.plot(Gnuplot.Data(mlMesh.meshList[-1].nodeArray[:,0],
mlScalarTransport.modelList[-1].u.dof,
with='linespoints',
title='numerical solution'))
#end if on analytical solution
elif problems['nd'][test]==2:
nx = (tpars['nn']-1)*(2**(tpars['nLevels']-1))+1
ny = nx
x = Numeric.arange(nx)/float(nx-1)
y = Numeric.arange(nx)/float(nx-1)
nSol = Numeric.reshape(mlScalarTransport.modelList[-1].u.dof,
(nx,ny))
solPlot('set parametric')
solPlot('set data style lines')
solPlot('set hidden')
solPlot('set contour base')
solPlot('set cntrparam levels incremental 0.1,0.1,1.0')
solPlot.xlabel('x')
solPlot.ylabel('y')
solPlot.splot(Gnuplot.GridData(nSol,
x,
y,
binary=0,
inline=0))
if problems['analyticalSolution'][test] is not None:
aSol = Numeric.zeros((nx,ny),Numeric.Float)
for i in range(nx):
for j in range(ny):
aSol[i,j]=problems['analyticalSolution'][test].uOfXT(Numeric.array([x[i],y[j],0.0]),tn)
#end j
#end i
aSolPlot('set parametric')
aSolPlot('set data style lines')
aSolPlot('set hidden')
aSolPlot('set contour base')
aSolPlot('set cntrparam levels incremental 0.1,0.1,1.0')
aSolPlot.xlabel('x')
aSolPlot.ylabel('y')
aSolPlot.splot(Gnuplot.GridData(aSol,
x,
y,
binary=0,
inline=0))
#end if on analytical solution
#end if on nd ==2
#end if on not DG
return solPlot,aSolPlot
def plotFinal(solPlot,aSolPlot,tn,test,tpars,problems,
mlMesh,mlScalarTransport,testOut):
"""
plot out solution and mesh at last step in a couple of formats
"""
if tpars['DG'] == False:
solPlot.hardcopy(testOut+'_sol.eps', eps=1,enhanced=1,color=1)
aSolPlot.hardcopy(testOut+'_asol.eps', eps=1,enhanced=1,color=1)
#end if
mlMesh.meshList[-1].writeMeshEnsight(test,test)
mlScalarTransport.modelList[-1].u.name='u'
#mlScalarTransport.modelList[-1].writeBoundaryTermsEnsight(test)
mlScalarTransport.modelList[-1].u.writeFunctionEnsight(test,append=False)
return solPlot,aSolPlot
if __name__ == '__main__':
import sys
import numpy
from ScalarTransport import *
from LinearSolvers import *
from TimeIntegrationTools import *
verbose = 5
#testFlag = 0 # LinearAD_Dirac_IC
    testFlag = 1 # rotating cone
problems = buildProblems(testFlag,verbose)
test = problems['testProblems'][0] #first test I hope
#pars = buildSimParams(test,BackwardEuler)
#pars = buildSimParams(test,ForwardEuler)
pars = buildSimParams(test,SSPRKintegration)
pars = buildQuadrature(test,pars,problems)
mlMesh = buildMultilevelMesh(test,pars,problems)
mlScalarTransport,pars = buildMultiLevelScalarTransport(test,pars,
problems,
mlMesh)
linearSolver,nonlinearSolver,levelLinearSolver = \
buildSolvers(test,pars,problems,mlScalarTransport,verbose=verbose)
#start time loop?
nstages= pars['timeIntOrder']
tn = 0.0
nSteps = 0
maxSteps= 1000
eSpace={}
eSpaceTime={}
eSpaceLast={}
mlScalarTransport.setInitialConditions(problems['initialConditions'][test],
tn)
#end ic set
solPlot,aSolPlot = plotInitial(tn,test,pars,problems,
mlMesh,mlScalarTransport)
mlScalarTransport.modelList[-1].timeIntegration.runCFL= pars['runCFL']
done = False
while not done:
mlScalarTransport.chooseDT()
dtMin = min(problems['T'][test]-tn,mlScalarTransport.DT)
mlScalarTransport.chooseDT(DTSET=dtMin)
if nSteps == 0:
mlScalarTransport.initializeTimeIntegration()
mlScalarTransport.initializeTimeIntegration()
#end if
tn += mlScalarTransport.DT
print 'taking step to t= ',tn
nSteps += 1
testOut = test + ('%4.4i' % nSteps)
#only Newton iteration for now
if pars['nonlinearSolverType'] != 'Newton':
print 'nonlinearSolverType must be Newton'
sys.exit(1)
#end if
#loop through stages
for s in range(nstages):
for l in range(pars['nLevels']):
                mlScalarTransport.modelList[l].getResidual(u=mlScalarTransport.uList[l],
                                                           r=mlScalarTransport.rList[l])
nonlinearSolver[l].solve(u = mlScalarTransport.uList[l],
r = mlScalarTransport.rList[l])
#end l loop
mlScalarTransport.updateStage()
#end s loop
print 'max u on fine= ',max(mlScalarTransport.modelList[-1].u.dof.flat)
print 'min u on fine= ',min(mlScalarTransport.modelList[-1].u.dof.flat)
mlScalarTransport.modelList[-1].u.name = test
mlScalarTransport.modelList[-1].u.writeFunctionGnuplot(test,
append=False)
if pars['conservativeFlux'] == 'pwc':
mlScalarTransport.modelList[-1].getConservationFluxPWC()
elif pars['conservativeFlux'] == 'pwl':
mlScalarTransport.modelList[-1].getConservationFluxPWL()
elif pars['numericalFlux'] is not None:
mlScalarTransport.modelList[-1].e['conservationResidual'].flat[:]=0.0
for eN in range(mlScalarTransport.modelList[-1].mesh.nElements_global):
for i in range(mlScalarTransport.modelList[-1].nDOF_element):
mlScalarTransport.modelList[-1].e['conservationResidual'][eN]+=mlScalarTransport.modelList[-1].elementResidual[eN,i]
#end for eN
#print 'consRes=',mlScalarTransport.modelList[-1].e['conservationResidual']
print "Max mass cons error "+`max(abs(mlScalarTransport.modelList[-1].e['conservationResidual']))`
#end numerical flux is not None
mlScalarTransport.updateTimeHistory()
solPlot,aSolPlot = plotTimeStep(solPlot,aSolPlot,tn,test,pars,problems,
mlMesh,mlScalarTransport,testOut)
#compute error
eSpace,eSpaceTime,eSpaceLast = computeErrors(
eSpace,eSpaceTime,eSpaceLast,
tn,mlScalarTransport,mlMesh,test,pars,problems,verbose)
#figure out if done or not
done = (abs(tn - problems['T'][test]) < 1.0e-10
or nSteps >= maxSteps)
#end while
solPlot,aSolPlot = plotFinal(solPlot,aSolPlot,tn,test,pars,problems,
mlMesh,mlScalarTransport,testOut)
| {
"content_hash": "81316e2018b1e422c85b3ae10355d7e2",
"timestamp": "",
"source": "github",
"line_count": 700,
"max_line_length": 136,
"avg_line_length": 36.78,
"alnum_prop": 0.5539112871902432,
"repo_name": "erdc/proteus",
"id": "1cb9da2f0bfc740d9dae79053fa5088ff56431b4",
"size": "25769",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "proteus/tests/sandbox/testRKDG.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "2790"
},
{
"name": "Asymptote",
"bytes": "1569"
},
{
"name": "C",
"bytes": "2827957"
},
{
"name": "C++",
"bytes": "7262408"
},
{
"name": "Cython",
"bytes": "154607"
},
{
"name": "Dockerfile",
"bytes": "2738"
},
{
"name": "Fortran",
"bytes": "51671"
},
{
"name": "Jupyter Notebook",
"bytes": "33357"
},
{
"name": "Makefile",
"bytes": "19043"
},
{
"name": "Python",
"bytes": "12534530"
},
{
"name": "Roff",
"bytes": "322"
},
{
"name": "Shell",
"bytes": "14084"
}
],
"symlink_target": ""
} |
from requests import HTTPError # noqa
| {
"content_hash": "328d1fcdebb64d6621359a04659a593e",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 38,
"avg_line_length": 39,
"alnum_prop": 0.7948717948717948,
"repo_name": "globocom/pluct",
"id": "8cb440ee6a78578a550a4bacd17f504226ccacb0",
"size": "64",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pluct/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "675"
},
{
"name": "Python",
"bytes": "44441"
}
],
"symlink_target": ""
} |
import logging
from dplace_app.models import Language, ISOCode, LanguageFamily, Society
def load_languages(repos):
languoids = {
l.id: l for l in repos.read_csv('csv', 'glottolog.csv', namedtuples=True)}
families, languages, isocodes = {}, {}, {}
societies = {s.ext_id: s for s in Society.objects.all()}
count = 0
for ds in repos.datasets:
for soc in ds.societies:
ldata = languoids.get(soc.glottocode)
if not ldata: # pragma: no cover
logging.warning("No language found for %s, skipping" % soc.glottocode)
continue
soc = societies[soc.id]
soc.language = load_language(ldata, languages, families, isocodes)
soc.save()
count += 1
return count
def load_language(ldata, languages, families, isocodes):
# get or create the language family:
# Note: If the related languoid is an isolate or a top-level family, we create a
# LanguageFamily object with the data of the languoid.
family_id = ldata.family_id or ldata.id
family = families.get(family_id)
if not family:
family_name = ldata.family_name or ldata.name
family = LanguageFamily.objects.create(name=family_name)
family.save()
families[family_id] = family
# get or create the language:
language = languages.get(ldata.id)
if not language:
language = Language.objects.create(name=ldata.name, glotto_code=ldata.id)
language.family = family
if ldata.iso_code:
isocode = isocodes.get(ldata.iso_code)
if not isocode:
isocode = ISOCode.objects.create(iso_code=ldata.iso_code)
isocodes[ldata.iso_code] = isocode
language.iso_code = isocode
language.save()
languages[ldata.id] = language
return language
| {
"content_hash": "c8373bbecf069eed61e486fc96007953",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 86,
"avg_line_length": 35.45283018867924,
"alnum_prop": 0.6253326237360298,
"repo_name": "stefelisabeth/dplace",
"id": "23ca68370f8dd666767cdad9f43696ff08c5e397",
"size": "1903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dplace_app/loader/glottocode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10863"
},
{
"name": "HTML",
"bytes": "149761"
},
{
"name": "JavaScript",
"bytes": "156539"
},
{
"name": "Makefile",
"bytes": "189"
},
{
"name": "Python",
"bytes": "124392"
},
{
"name": "Shell",
"bytes": "826"
}
],
"symlink_target": ""
} |
"""
Validate ODC dataset documents
"""
import collections
import enum
import math
import multiprocessing
import os
import sys
from datetime import datetime
from functools import partial
from pathlib import Path
from textwrap import indent
from typing import (
Counter,
Dict,
Generator,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Union,
)
from urllib.parse import urljoin, urlparse
from urllib.request import urlopen
import attr
import ciso8601
import click
import numpy as np
import rasterio
import toolz
from boltons.iterutils import get_path
from click import echo, secho, style
from datacube import Datacube
from datacube.index.eo3 import prep_eo3
from datacube.utils import InvalidDocException, changes, is_url, read_documents
from datacube.utils.documents import load_documents
from rasterio import DatasetReader
from rasterio.crs import CRS
from rasterio.errors import CRSError
from shapely.validation import explain_validity
from eodatasets3 import model, serialise, utils
from eodatasets3.model import DatasetDoc
from eodatasets3.ui import bool_style, is_absolute, uri_resolve
from eodatasets3.utils import EO3_SCHEMA, default_utc
class Level(enum.Enum):
info = 1
warning = 2
error = 3
class DocKind(enum.Enum):
# EO3 datacube dataset.
dataset = 1
# Datacube product
product = 2
# Datacube Metadata Type
metadata_type = 3
# Stac Item
stac_item = 4
# Legacy datacube ("eo1") dataset
legacy_dataset = 5
# Legacy product config for ingester
ingestion_config = 6
# What kind of document each suffix represents.
# (full suffix will also have a doc type: .yaml, .json, .yaml.gz etc)
# Example: "my-test-dataset.odc-metadata.yaml"
SUFFIX_KINDS = {
".odc-metadata": DocKind.dataset,
".odc-product": DocKind.product,
".odc-type": DocKind.metadata_type,
}
# Inverse of above
DOC_TYPE_SUFFIXES = {v: k for k, v in SUFFIX_KINDS.items()}
def filename_doc_kind(path: Union[str, Path]) -> Optional["DocKind"]:
"""
Get the expected file type for the given filename.
Returns None if it does not follow any naming conventions.
>>> filename_doc_kind('LC8_2014.odc-metadata.yaml').name
'dataset'
>>> filename_doc_kind('/tmp/something/water_bodies.odc-metadata.yaml.gz').name
'dataset'
>>> filename_doc_kind(Path('/tmp/something/ls8_fc.odc-product.yaml')).name
'product'
>>> filename_doc_kind(Path('/tmp/something/ls8_wo.odc-product.json.gz')).name
'product'
>>> filename_doc_kind(Path('/tmp/something/eo3_gqa.odc-type.yaml')).name
'metadata_type'
>>> filename_doc_kind(Path('/tmp/something/some_other_file.yaml'))
"""
for suffix in reversed(Path(path).suffixes):
suffix = suffix.lower()
if suffix in SUFFIX_KINDS:
return SUFFIX_KINDS[suffix]
return None
def guess_kind_from_contents(doc: Dict):
"""
What sort of document do the contents look like?
"""
if "$schema" in doc and doc["$schema"] == EO3_SCHEMA:
return DocKind.dataset
if "metadata_type" in doc:
if "source_type" in doc:
return DocKind.ingestion_config
return DocKind.product
if ("dataset" in doc) and ("search_fields" in doc["dataset"]):
return DocKind.metadata_type
if "id" in doc:
if ("lineage" in doc) and ("platform" in doc):
return DocKind.legacy_dataset
if ("properties" in doc) and ("datetime" in doc["properties"]):
return DocKind.stac_item
return None
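# Illustrative sketch, for exposition only (not used elsewhere in this module):
# how the content-based classification above behaves on two minimal,
# hypothetical documents.
def _example_guess_kind_from_contents():  # pragma: no cover - illustration only
    stac_like = {"id": "x", "properties": {"datetime": "2020-01-01T00:00:00Z"}}
    ingestion_like = {"metadata_type": "eo3", "source_type": "ls8_scenes"}
    assert guess_kind_from_contents(stac_like) is DocKind.stac_item
    assert guess_kind_from_contents(ingestion_like) is DocKind.ingestion_config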
@attr.s(auto_attribs=True, frozen=True)
class ValidationMessage:
level: Level
code: str
reason: str
hint: str = None
def __str__(self) -> str:
hint = ""
if self.hint:
hint = f" (Hint: {self.hint})"
return f"{self.code}: {self.reason}{hint}"
def _info(code: str, reason: str, hint: str = None):
return ValidationMessage(Level.info, code, reason, hint=hint)
def _warning(code: str, reason: str, hint: str = None):
return ValidationMessage(Level.warning, code, reason, hint=hint)
def _error(code: str, reason: str, hint: str = None):
return ValidationMessage(Level.error, code, reason, hint=hint)
ValidationMessages = Generator[ValidationMessage, None, None]
def validate_dataset(
doc: Dict,
product_definition: Optional[Dict] = None,
metadata_type_definition: Optional[Dict] = None,
thorough: bool = False,
readable_location: Union[str, Path] = None,
expect_extra_measurements: bool = False,
expect_geometry: bool = True,
nullable_fields: Iterable[str] = ("label",),
) -> ValidationMessages:
"""
    Validate a dataset document, optionally against the given product.
By default this will only look at the metadata, run with thorough=True to
open the data files too.
:param product_definition: Optionally check that the dataset matches this product definition.
:param thorough: Open the imagery too, to check that data types etc match.
:param readable_location: Dataset location to use, if not the metadata path.
:param expect_extra_measurements:
Allow some dataset measurements to be missing from the product definition.
This is (deliberately) allowed by ODC, but often a mistake.
This flag disables the warning.
"""
schema = doc.get("$schema")
if schema is None:
yield _error(
"no_schema",
f"No $schema field. "
f"You probably want an ODC dataset schema {model.ODC_DATASET_SCHEMA_URL!r}",
)
return
if schema != model.ODC_DATASET_SCHEMA_URL:
yield _error(
"unknown_doc_type",
f"Unknown doc schema {schema!r}. Only ODC datasets are supported ({model.ODC_DATASET_SCHEMA_URL!r})",
)
return
has_doc_errors = False
for error in serialise.DATASET_SCHEMA.iter_errors(doc):
has_doc_errors = True
displayable_path = ".".join(error.absolute_path)
hint = None
if displayable_path == "crs" and "not of type" in error.message:
hint = "epsg codes should be prefixed with 'epsg:1234'"
context = f"({displayable_path}) " if displayable_path else ""
yield _error("structure", f"{context}{error.message} ", hint=hint)
if has_doc_errors:
return
dataset = serialise.from_doc(doc, skip_validation=True)
if not dataset.product.href:
_info("product_href", "A url (href) is recommended for products")
yield from _validate_geo(dataset, expect_geometry=expect_geometry)
# Note that a dataset may have no measurements (eg. telemetry data).
# (TODO: a stricter mode for when we know we should have geo and measurement info)
if dataset.measurements:
for name, measurement in dataset.measurements.items():
grid_name = measurement.grid
if grid_name != "default" or dataset.grids:
if grid_name not in dataset.grids:
yield _error(
"invalid_grid_ref",
f"Measurement {name!r} refers to unknown grid {grid_name!r}",
)
if is_absolute(measurement.path):
yield _warning(
"absolute_path",
f"measurement {name!r} has an absolute path: {measurement.path!r}",
)
yield from _validate_stac_properties(dataset)
required_measurements: Dict[str, ExpectedMeasurement] = {}
if product_definition is not None:
required_measurements.update(
{
m.name: m
for m in map(
ExpectedMeasurement.from_definition,
product_definition.get("measurements") or (),
)
}
)
product_name = product_definition.get("name")
if product_name != dataset.product.name:
# This is only informational as it's possible products may be indexed with finer-grained
# categories than the original datasets: eg. a separate "nrt" product, or test product.
yield _info(
"product_mismatch",
f"Dataset product name {dataset.product.name!r} "
f"does not match the given product ({product_name!r}",
)
for name in required_measurements:
if name not in dataset.measurements.keys():
yield _error(
"missing_measurement",
f"Product {product_name} expects a measurement {name!r})",
)
measurements_not_in_product = set(dataset.measurements.keys()).difference(
{m["name"] for m in product_definition.get("measurements") or ()}
)
if (not expect_extra_measurements) and measurements_not_in_product:
things = ", ".join(sorted(measurements_not_in_product))
yield _warning(
"extra_measurements",
f"Dataset has measurements not present in product definition for {product_name!r}: {things}",
hint="This may be valid, as it's allowed by ODC. Set `expect_extra_measurements` to mute this.",
)
if metadata_type_definition:
# Datacube does certain transforms on an eo3 doc before storage.
# We need to do the same, as the fields will be read from the storage.
prepared_doc = prep_eo3(doc)
for field_name, offsets in _get_field_offsets(
metadata_type=metadata_type_definition
):
if not any(_has_offset(prepared_doc, offset) for offset in offsets):
readable_offsets = " or ".join("->".join(offset) for offset in offsets)
yield _warning(
"missing_field",
f"Dataset is missing field {field_name!r}",
hint=f"Expected at {readable_offsets}",
)
continue
if field_name not in nullable_fields:
value = None
for offset in offsets:
value = toolz.get_in(offset, prepared_doc)
if value is None:
yield _info(
"null_field",
f"Value is null for configured field {field_name!r}",
)
dataset_location = dataset.locations[0] if dataset.locations else readable_location
# If we have a location:
# For each measurement, try to load it.
# If loadable:
if thorough:
for name, measurement in dataset.measurements.items():
full_path = uri_resolve(dataset_location, measurement.path)
expected_measurement = required_measurements.get(name)
band = measurement.band or 1
with rasterio.open(full_path) as ds:
ds: DatasetReader
if band not in ds.indexes:
yield _error(
"incorrect_band",
f"Measurement {name!r} file contains no rio index {band!r}.",
hint=f"contains indexes {ds.indexes!r}",
)
continue
if not expected_measurement:
# The measurement is not in the product definition
#
# This is only informational because a product doesn't have to define all
# measurements that the datasets contain.
#
# This is historically because dataset documents reflect the measurements that
                    # are stored on disk, which can differ. But products define the set of measurements
# that are mandatory in every dataset.
#
# (datasets differ when, for example, sensors go offline, or when there's on-disk
# measurements like panchromatic that GA doesn't want in their product definitions)
if required_measurements:
yield _info(
"unspecified_measurement",
f"Measurement {name} is not in the product",
)
else:
expected_dtype = expected_measurement.dtype
band_dtype = ds.dtypes[band - 1]
# TODO: NaN handling
if expected_dtype != band_dtype:
yield _error(
"different_dtype",
f"{name} dtype: "
f"product {expected_dtype!r} != dataset {band_dtype!r}",
)
ds_nodata = ds.nodatavals[band - 1]
# If the dataset is missing 'nodata', we can allow anything in product 'nodata'.
# (In ODC, nodata might be a fill value for loading data.)
if ds_nodata is None:
continue
# Otherwise check that nodata matches.
expected_nodata = expected_measurement.nodata
if expected_nodata != ds_nodata and not (
_is_nan(expected_nodata) and _is_nan(ds_nodata)
):
yield _error(
"different_nodata",
f"{name} nodata: "
f"product {expected_nodata !r} != dataset {ds_nodata !r}",
)
def _has_offset(doc: Dict, offset: List[str]) -> bool:
"""
Is the given offset present in the document?
"""
for key in offset:
if key not in doc:
return False
doc = doc[key]
return True
def validate_product(doc: Dict) -> ValidationMessages:
"""
Check for common product mistakes
"""
# Validate it against ODC's product schema.
has_doc_errors = False
for error in serialise.PRODUCT_SCHEMA.iter_errors(doc):
has_doc_errors = True
displayable_path = ".".join(map(str, error.absolute_path))
context = f"({displayable_path}) " if displayable_path else ""
yield _error("document_schema", f"{context}{error.message} ")
# The jsonschema error message for this (common error) is garbage. Make it clearer.
measurements = doc.get("measurements")
if (measurements is not None) and not isinstance(measurements, Sequence):
yield _error(
"measurements_list",
f"Product measurements should be a list/sequence "
f"(Found a {type(measurements).__name__!r}).",
)
# There's no point checking further if the core doc structure is wrong.
if has_doc_errors:
return
if not doc.get("license", "").strip():
yield _warning(
"no_license",
f"Product {doc['name']!r} has no license field",
hint='Eg. "CC-BY-4.0" (SPDX format), "various" or "proprietary"',
)
# Check measurement name clashes etc.
if measurements is None:
# Products don't have to have measurements. (eg. provenance-only products)
...
else:
seen_names_and_aliases = collections.defaultdict(list)
for measurement in measurements:
measurement_name = measurement.get("name")
dtype = measurement.get("dtype")
nodata = measurement.get("nodata")
if not numpy_value_fits_dtype(nodata, dtype):
yield _error(
"unsuitable_nodata",
f"Measurement {measurement_name!r} nodata {nodata!r} does not fit a {dtype!r}",
)
# Were any of the names seen in other measurements?
these_names = measurement_name, *measurement.get("aliases", ())
for new_field_name in these_names:
measurements_with_this_name = seen_names_and_aliases[new_field_name]
if measurements_with_this_name:
seen_in = " and ".join(
repr(s)
for s in ([measurement_name] + measurements_with_this_name)
)
                    # If the same name is used by different measurements, it's a hard error.
yield _error(
"duplicate_measurement_name",
f"Name {new_field_name!r} is used by multiple measurements",
hint=f"It's duplicated in an alias. "
f"Seen in measurement(s) {seen_in}",
)
# Are any names duplicated within the one measurement? (not an error, but info)
for duplicate_name in _find_duplicates(these_names):
yield _info(
"duplicate_alias_name",
f"Measurement {measurement_name!r} has a duplicate alias named {duplicate_name!r}",
)
for field in these_names:
seen_names_and_aliases[field].append(measurement_name)
def validate_metadata_type(doc: Dict) -> ValidationMessages:
"""
Check for common metadata-type mistakes
"""
# Validate it against ODC's schema (there will be refused by ODC otherwise)
for error in serialise.METADATA_TYPE_SCHEMA.iter_errors(doc):
displayable_path = ".".join(map(str, error.absolute_path))
context = f"({displayable_path}) " if displayable_path else ""
yield _error("document_schema", f"{context}{error.message} ")
def _find_duplicates(values: Iterable[str]) -> Generator[str, None, None]:
"""Return any duplicate values in the given sequence
>>> list(_find_duplicates(('a', 'b', 'c')))
[]
>>> list(_find_duplicates(('a', 'b', 'b')))
['b']
>>> list(_find_duplicates(('a', 'b', 'b', 'a')))
['a', 'b']
"""
previous = None
for v in sorted(values):
if v == previous:
yield v
previous = v
def numpy_value_fits_dtype(value, dtype):
"""
Can the value be exactly represented by the given numpy dtype?
>>> numpy_value_fits_dtype(3, 'uint8')
True
>>> numpy_value_fits_dtype(3, np.dtype('uint8'))
True
>>> numpy_value_fits_dtype(-3, 'uint8')
False
>>> numpy_value_fits_dtype(3.5, 'float32')
True
>>> numpy_value_fits_dtype(3.5, 'int16')
False
>>> numpy_value_fits_dtype(float('NaN'), 'float32')
True
>>> numpy_value_fits_dtype(float('NaN'), 'int32')
False
"""
dtype = np.dtype(dtype)
if value is None:
value = 0
if _is_nan(value):
return np.issubdtype(dtype, np.floating)
else:
return np.all(np.array([value], dtype=dtype) == [value])
@attr.s(auto_attribs=True)
class ExpectedMeasurement:
name: str
dtype: str
nodata: int
@classmethod
def from_definition(cls, doc: Dict):
return ExpectedMeasurement(doc["name"], doc.get("dtype"), doc.get("nodata"))
# Name of a field and its possible offsets in the document.
FieldNameOffsetS = Tuple[str, Set[List[str]]]
def validate_paths(
paths: List[str],
thorough: bool = False,
expect_extra_measurements: bool = False,
product_definitions: Dict[str, Dict] = None,
metadata_type_definitions: Dict[str, Dict] = None,
) -> Generator[Tuple[str, List[ValidationMessage]], None, None]:
"""Validate the list of paths. Product documents can be specified before their datasets."""
products = dict(product_definitions or {})
metadata_types = dict(metadata_type_definitions or {})
for url, doc, was_specified_by_user in read_paths(paths):
messages = []
kind = filename_doc_kind(url)
if kind is None:
kind = guess_kind_from_contents(doc)
if kind and (kind in DOC_TYPE_SUFFIXES):
# It looks like an ODC doc but doesn't have the standard suffix.
messages.append(
_warning(
"missing_suffix",
f"Document looks like a {kind.name} but does not have "
f'filename extension "{DOC_TYPE_SUFFIXES[kind]}{_readable_doc_extension(url)}"',
)
)
if kind == DocKind.product:
messages.extend(validate_product(doc))
if "name" in doc:
products[doc["name"]] = doc
elif kind == DocKind.dataset:
messages.extend(
validate_eo3_doc(
doc,
url,
products,
metadata_types,
thorough,
expect_extra_measurements,
)
)
elif kind == DocKind.metadata_type:
messages.extend(validate_metadata_type(doc))
if "name" in doc:
metadata_types[doc["name"]] = doc
# Otherwise it's a file we don't support.
# If the user gave us the path explicitly, it seems to be an error.
# (if they didn't -- it was found via scanning directories -- we don't care.)
elif was_specified_by_user:
if kind is None:
raise ValueError(f"Unknown document type for {url}")
else:
raise NotImplementedError(
f"Cannot currently validate {kind.name} files"
)
else:
# Not a doc type we recognise, and the user didn't specify it. Skip it.
continue
yield url, messages
def _get_field_offsets(metadata_type: Dict) -> Iterable[FieldNameOffsetS]:
"""
Yield all fields and their possible document-offsets that are expected for this metadata type.
    Eg, if the metadata type has a region_code field expected at properties->region_code, this
    will yield ('region_code', [['properties', 'region_code']])
(Properties can have multiple offsets, where ODC will choose the first non-null one, hence the
return of multiple offsets for each field.)
"""
dataset_section = metadata_type["dataset"]
search_fields = dataset_section["search_fields"]
# The fixed fields of ODC. 'id', 'label', etc.
for field in dataset_section:
if field == "search_fields":
continue
offset = dataset_section[field]
if offset is not None:
yield field, [offset]
# The configurable search fields.
for field, spec in search_fields.items():
offsets = []
if "offset" in spec:
offsets.append(spec["offset"])
offsets.extend(spec.get("min_offset", []))
offsets.extend(spec.get("max_offset", []))
yield field, offsets
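# Illustrative sketch, for exposition only (not used elsewhere in this module):
# the field/offset pairs yielded for a minimal, hypothetical metadata type.
def _example_get_field_offsets():  # pragma: no cover - illustration only
    metadata_type = {
        "dataset": {
            "id": ["id"],
            "search_fields": {
                "region_code": {"offset": ["properties", "odc:region_code"]},
            },
        }
    }
    assert dict(_get_field_offsets(metadata_type)) == {
        "id": [["id"]],
        "region_code": [["properties", "odc:region_code"]],
    }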
def _readable_doc_extension(uri: str):
"""
>>> _readable_doc_extension('something.json.gz')
'.json.gz'
>>> _readable_doc_extension('something.yaml')
'.yaml'
>>> _readable_doc_extension('apple.odc-metadata.yaml.gz')
'.yaml.gz'
>>> _readable_doc_extension('products/tmad/tmad_product.yaml#part=1')
'.yaml'
>>> _readable_doc_extension('/tmp/human.06.tall.yml')
'.yml'
>>> # Not a doc, even though it's compressed.
>>> _readable_doc_extension('db_dump.gz')
>>> _readable_doc_extension('/tmp/nothing')
"""
path = urlparse(uri).path
compression_formats = (".gz",)
doc_formats = (
".yaml",
".yml",
".json",
)
suffix = "".join(
s.lower()
for s in Path(path).suffixes
if s.lower() in doc_formats + compression_formats
)
# If it's only compression, no doc format, it's not valid.
if suffix in compression_formats:
return None
return suffix or None
def read_paths(
input_paths: Iterable[str],
) -> Generator[Tuple[str, Union[Dict, str], bool], None, None]:
"""
Read the given input paths, returning a URL, document, and whether
it was explicitly given by the user.
When a local directory is specified, inner readable docs are returned, but will
be marked as not explicitly specified.
"""
for input_ in input_paths:
for uri, was_specified in expand_paths_as_uris([input_]):
try:
for full_uri, doc in read_documents(uri, uri=True):
yield full_uri, doc, was_specified
except InvalidDocException as e:
if was_specified:
raise
else:
echo(e, err=True)
def expand_paths_as_uris(
input_paths: Iterable[str],
) -> Generator[Tuple[Path, bool], None, None]:
"""
For any paths that are directories, find inner documents that are known.
Returns Tuples: path as a URL, and whether it was specified explicitly by user.
"""
for input_ in input_paths:
if is_url(input_):
yield input_, True
else:
path = Path(input_).resolve()
if path.is_dir():
for found_path in path.rglob("*"):
if _readable_doc_extension(found_path.as_uri()) is not None:
yield found_path.as_uri(), False
else:
yield path.as_uri(), True
def validate_eo3_doc(
doc: Dict,
location: Union[str, Path],
products: Dict[str, Dict],
metadata_types: Dict[str, Dict],
thorough: bool = False,
expect_extra_measurements=False,
) -> List[ValidationMessage]:
messages = []
# TODO: follow ODC's match rules?
matched_product = None
if products:
matched_product, messages = _match_product(doc, products)
else:
messages.append(
ValidationMessage(
Level.error if thorough else Level.info,
"no_product",
"No product provided: validating dataset information alone",
)
)
metadata_type = None
if metadata_types and matched_product:
metadata_type = matched_product["metadata_type"]
if metadata_type not in metadata_types:
messages.append(
ValidationMessage(
Level.error if thorough else Level.info,
"no_metadata_type",
f"Metadata type not provided {metadata_type}: not validating fields",
)
)
messages.extend(
validate_dataset(
doc,
product_definition=matched_product,
readable_location=location,
thorough=thorough,
metadata_type_definition=metadata_types.get(metadata_type),
expect_extra_measurements=expect_extra_measurements,
)
)
return messages
def _get_printable_differences(dict1: Dict, dict2: Dict):
"""
Get a series of lines to print that show the reason that dict1 is not a superset of dict2
"""
dict1 = dict(utils.flatten_dict(dict1))
dict2 = dict(utils.flatten_dict(dict2))
for path in dict2.keys():
v1, v2 = dict1.get(path), dict2.get(path)
if v1 != v2:
yield f"{path}: {v1!r} != {v2!r}"
def _get_product_mismatch_reasons(dataset_doc: Dict, product_definition: Dict):
"""
Which fields don't match the given dataset doc to a product definition?
Gives human-readable lines of text.
"""
yield from _get_printable_differences(dataset_doc, product_definition["metadata"])
def _match_product(
dataset_doc: Dict, product_definitions: Dict[str, Dict]
) -> Tuple[Optional[Dict], List[ValidationMessage]]:
"""Match the given dataset to a product definition"""
product = None
# EO3 datasets often put the product name directly inside.
specified_product_name = get_path(dataset_doc, ("product", "name"), default=None)
specified_product_name = specified_product_name or get_path(
dataset_doc, ("properties", "odc:product"), default=None
)
if specified_product_name and (specified_product_name in product_definitions):
product = product_definitions[specified_product_name]
matching_products = {
name: definition
for name, definition in product_definitions.items()
if changes.contains(dataset_doc, definition["metadata"])
}
    # If we have nothing, give up!
if (not matching_products) and (not product):
# Find the product that most closely matches it, to helpfully show the differences!
closest_product_name = None
closest_differences = None
for name, definition in product_definitions.items():
diffs = tuple(_get_product_mismatch_reasons(dataset_doc, definition))
if (closest_differences is None) or len(diffs) < len(closest_differences):
closest_product_name = name
closest_differences = diffs
difference_hint = _differences_as_hint(closest_differences)
return None, [
_error(
"unknown_product",
"Dataset does not match the given products",
hint=f"Closest match is {closest_product_name}, with differences:"
f"\n{difference_hint}",
)
]
messages = []
if specified_product_name not in matching_products:
if product:
difference_hint = _differences_as_hint(
_get_product_mismatch_reasons(dataset_doc, product)
)
messages.append(
_info(
"strange_product_claim",
f"Dataset claims to be product {specified_product_name!r}, but doesn't match its fields",
hint=f"{difference_hint}",
)
)
else:
messages.append(
_info(
"unknown_product_claim",
f"Dataset claims to be product {specified_product_name!r}, but it wasn't supplied.",
)
)
if len(matching_products) > 1:
matching_names = ", ".join(matching_products.keys())
messages.append(
_error(
"product_match_clash",
"Multiple products match the given dataset",
hint=f"Maybe you need more fields in the 'metadata' section?\n"
f"Claims to be a {specified_product_name!r}, and matches {matching_names!r}"
if specified_product_name
else f"Maybe you need more fields in the 'metadata' section?\n"
f"Matches {matching_names!r}",
)
)
    # (We won't pick one from the bunch here. Maybe they already matched one above to use in continuing validation.)
# Just like ODC, match rules will rule all. Even if their metadata has a "product_name" field.
if len(matching_products) == 1:
[product] = matching_products.values()
return product, messages
def _differences_as_hint(product_diffs):
return indent("\n".join(product_diffs), prefix="\t")
def _validate_stac_properties(dataset: DatasetDoc):
for name, value in dataset.properties.items():
if name not in dataset.properties.KNOWN_PROPERTIES:
yield _warning("unknown_property", f"Unknown stac property {name!r}")
else:
normaliser = dataset.properties.KNOWN_PROPERTIES.get(name)
if normaliser and value is not None:
try:
normalised_value = normaliser(value)
# A normaliser can return two values, the latter adding extra extracted fields.
if isinstance(normalised_value, tuple):
normalised_value = normalised_value[0]
# It's okay for datetimes to be strings
# .. since ODC's own loader does that.
if isinstance(normalised_value, datetime) and isinstance(
value, str
):
value = ciso8601.parse_datetime(value)
                    # Special case for dates, as "no timezone" and "utc timezone" are treated identically.
if isinstance(value, datetime):
value = default_utc(value)
if not isinstance(value, type(normalised_value)):
yield _warning(
"property_type",
f"Value {value} expected to be "
f"{type(normalised_value).__name__!r} (got {type(value).__name__!r})",
)
elif normalised_value != value:
if _is_nan(normalised_value) and _is_nan(value):
# Both are NaNs, ignore.
pass
else:
yield _warning(
"property_formatting",
f"Property {value!r} expected to be {normalised_value!r}",
)
except ValueError as e:
yield _error("invalid_property", f"{name!r}: {e.args[0]}")
if "odc:producer" in dataset.properties:
producer = dataset.properties["odc:producer"]
# We use domain name to avoid arguing about naming conventions ('ga' vs 'geoscience-australia' vs ...)
if "." not in producer:
yield _warning(
"producer_domain",
"Property 'odc:producer' should be the organisation's domain name. Eg. 'ga.gov.au'",
)
# This field is a little odd, but is expected by the current version of ODC.
# (from discussion with Kirill)
if not dataset.properties.get("odc:file_format"):
yield _warning(
"global_file_format",
"Property 'odc:file_format' is empty",
hint="Usually 'GeoTIFF'",
)
def _is_nan(v):
# Due to JSON serialisation, nan can also be represented as a string 'NaN'
if isinstance(v, str):
return v == "NaN"
return isinstance(v, float) and math.isnan(v)
def _validate_geo(dataset: DatasetDoc, expect_geometry: bool = True):
has_some_geo = _has_some_geo(dataset)
if not has_some_geo and expect_geometry:
yield _info("non_geo", "No geo information in dataset")
return
if dataset.geometry is None:
if expect_geometry:
yield _info("incomplete_geo", "Dataset has some geo fields but no geometry")
elif not dataset.geometry.is_valid:
yield _error(
"invalid_geometry",
f"Geometry is not a valid shape: {explain_validity(dataset.geometry)!r}",
)
# TODO: maybe we'll allow no grids: backwards compat with old metadata.
if not dataset.grids:
yield _error("incomplete_grids", "Dataset has some geo fields but no grids")
if not dataset.crs:
yield _error("incomplete_crs", "Dataset has some geo fields but no crs")
else:
# We only officially support epsg code (recommended) or wkt.
if dataset.crs.lower().startswith("epsg:"):
try:
CRS.from_string(dataset.crs)
except CRSError as e:
yield _error("invalid_crs_epsg", e.args[0])
if dataset.crs.lower() != dataset.crs:
yield _warning("mixed_crs_case", "Recommend lowercase 'epsg:' prefix")
else:
wkt_crs = None
try:
wkt_crs = CRS.from_wkt(dataset.crs)
except CRSError as e:
yield _error(
"invalid_crs",
f"Expect either an epsg code or a WKT string: {e.args[0]}",
)
if wkt_crs and wkt_crs.is_epsg_code:
yield _warning(
"non_epsg",
f"Prefer an EPSG code to a WKT when possible. (Can change CRS to 'epsg:{wkt_crs.to_epsg()}')",
)
def _has_some_geo(dataset):
return dataset.geometry is not None or dataset.grids or dataset.crs
def display_result_console(
url: str, is_valid: bool, messages: List[ValidationMessage], quiet=False
):
"""
Print validation messages to the Console (using colour if available).
"""
# Otherwise console output, with color if possible.
if messages or not quiet:
echo(f"{bool_style(is_valid)} {url}")
for message in messages:
hint = ""
if message.hint:
# Indent the hint if it's multi-line.
if "\n" in message.hint:
hint = "\t\tHint:\n"
hint += indent(message.hint, "\t\t" + (" " * 5))
else:
hint = f"\t\t(Hint: {message.hint})"
s = {
Level.info: dict(),
Level.warning: dict(fg="yellow"),
Level.error: dict(fg="red"),
}
displayable_code = style(f"{message.code}", **s[message.level], bold=True)
echo(f"\t{message.level.name[0].upper()} {displayable_code} {message.reason}")
if hint:
echo(hint)
def display_result_github(url: str, is_valid: bool, messages: List[ValidationMessage]):
"""
Print validation messages using Github Action's command language for warnings/errors.
"""
echo(f"{bool_style(is_valid)} {url}")
for message in messages:
hint = ""
if message.hint:
# Indent the hint if it's multi-line.
if "\n" in message.hint:
hint = "\n\nHint:\n"
hint += indent(message.hint, (" " * 5))
else:
hint = f"\n\n(Hint: {message.hint})"
if message.level == Level.error:
code = "::error"
else:
code = "::warning"
text = f"{message.reason}{hint}"
# URL-Encode any newlines
text = text.replace("\n", "%0A")
# TODO: Get the real line numbers?
echo(f"{code} file={url},line=1::{text}")
_OUTPUT_WRITERS = dict(
plain=display_result_console,
quiet=partial(display_result_console, quiet=True),
github=display_result_github,
)
@click.command(
help=__doc__
+ """
Paths can be products, dataset documents, or directories to scan (for files matching
names '*.odc-metadata.yaml' etc), either local or URLs.
Datasets are validated against matching products that have been scanned already, so specify
products first, and datasets later, to ensure they can be matched.
"""
)
@click.version_option()
@click.argument("paths", nargs=-1)
@click.option(
"--warnings-as-errors",
"-W",
"strict_warnings",
is_flag=True,
help="Fail if any warnings are produced",
)
@click.option(
"-f",
"--output-format",
help="Output format",
type=click.Choice(list(_OUTPUT_WRITERS)),
# Are we in Github Actions?
# Send any warnings/errors in its custom format
default="github" if "GITHUB_ACTIONS" in os.environ else "plain",
show_default=True,
)
@click.option(
"--thorough",
is_flag=True,
help="Attempt to read the data/measurements, and check their properties match",
)
@click.option(
"--expect-extra-measurements/--warn-extra-measurements",
is_flag=True,
default=False,
help="Allow some dataset measurements to be missing from the product definition. "
"This is (deliberately) allowed by ODC, but often a mistake. This flag disables the warning.",
)
@click.option(
"--explorer-url",
"explorer_url",
help="Use product definitions from the given Explorer URL to validate datasets. "
'Eg: "https://explorer.dea.ga.gov.au/"',
)
@click.option(
"--odc",
"use_datacube",
is_flag=True,
help="Use product definitions from datacube to validate datasets",
)
@click.option(
"-q",
"--quiet",
is_flag=True,
default=False,
help="Only print problems, one per line",
)
def run(
paths: List[str],
strict_warnings,
quiet,
thorough: bool,
expect_extra_measurements: bool,
explorer_url: str,
use_datacube: bool,
output_format: str,
):
validation_counts: Counter[Level] = collections.Counter()
invalid_paths = 0
current_location = Path(".").resolve().as_uri() + "/"
product_definitions = _load_remote_product_definitions(use_datacube, explorer_url)
if output_format == "plain" and quiet:
output_format = "quiet"
write_file_report = _OUTPUT_WRITERS[output_format]
for url, messages in validate_paths(
paths,
thorough=thorough,
expect_extra_measurements=expect_extra_measurements,
product_definitions=product_definitions,
):
if url.startswith(current_location):
url = url[len(current_location) :]
levels = collections.Counter(m.level for m in messages)
is_invalid = levels[Level.error] > 0
if strict_warnings:
is_invalid |= levels[Level.warning] > 0
if quiet:
# Errors/Warnings only. Remove info-level.
messages = [m for m in messages if m.level != Level.info]
if is_invalid:
invalid_paths += 1
for message in messages:
validation_counts[message.level] += 1
write_file_report(
url=url,
is_valid=not is_invalid,
messages=messages,
)
# Print a summary on stderr for humans.
if not quiet:
result = (
style("failure", fg="red", bold=True)
if invalid_paths > 0
else style("valid", fg="green", bold=True)
)
secho(f"\n{result}: ", nl=False, err=True)
if validation_counts:
echo(
", ".join(
f"{v} {k.name}{'s' if v > 1 else ''}"
for k, v in validation_counts.items()
),
err=True,
)
else:
secho(f"{len(paths)} paths", err=True)
sys.exit(invalid_paths)
def _load_remote_product_definitions(
from_datacube: bool = False,
from_explorer_url: Optional[str] = None,
) -> Dict[str, Dict]:
product_definitions = {}
# Load any remote products that were asked for.
if from_explorer_url:
for definition in _load_explorer_product_definitions(from_explorer_url):
product_definitions[definition["name"]] = definition
secho(f"{len(product_definitions)} Explorer products", err=True)
if from_datacube:
# The normal datacube environment variables can be used to choose alternative configs.
with Datacube(app="eo3-validate") as dc:
for product in dc.index.products.get_all():
product_definitions[product.name] = product.definition
secho(f"{len(product_definitions)} ODC products", err=True)
return product_definitions
def _load_doc(url):
return list(load_documents(url))
def _load_explorer_product_definitions(
explorer_url: str,
workers: int = 6,
) -> Generator[Dict, None, None]:
"""
Read all product yamls from the given Explorer instance,
eg: https://explorer.dea.ga.gov.au/products/ls5_fc_albers.odc-product.yaml
"""
product_urls = [
urljoin(explorer_url, f"/products/{name.strip()}.odc-product.yaml")
for name in urlopen(urljoin(explorer_url, "products.txt")) # nosec
.read()
.decode("utf-8")
.split("\n")
]
count = 0
with multiprocessing.Pool(workers) as pool:
for product_definitions in pool.imap_unordered(_load_doc, product_urls):
count += 1
echo(f"\r{count} Explorer products", nl=False)
yield from product_definitions
pool.close()
pool.join()
echo()
| {
"content_hash": "49e116187edd907c8ae7c31b92d13158",
"timestamp": "",
"source": "github",
"line_count": 1248,
"max_line_length": 119,
"avg_line_length": 34.97275641025641,
"alnum_prop": 0.5797323924300051,
"repo_name": "jeremyh/eo-datasets",
"id": "c8d491d1c9d54c7d9d40e888c0d4decdd916cde5",
"size": "43646",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "eodatasets3/validate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2134"
},
{
"name": "Makefile",
"bytes": "2035"
},
{
"name": "Python",
"bytes": "731651"
},
{
"name": "Shell",
"bytes": "788"
}
],
"symlink_target": ""
} |
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses L{PickleSerializer} to serialize objects using Python's
C{cPickle} serializer, which can serialize nearly any Python object.
Other serializers, like L{MarshalSerializer}, support fewer datatypes but can be
faster.
The serializer is chosen when creating L{SparkContext}:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
PySpark serializes objects in batches; by default, the batch size is chosen based
on the size of objects, also configurable by SparkContext's C{batchSize} parameter:
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> int(rdd._jrdd.count())
8
>>> sc.stop()
"""
import sys
from itertools import chain, product
import marshal
import struct
import types
import collections
import zlib
import itertools
if sys.version < '3':
import cPickle as pickle
protocol = 2
from itertools import izip as zip, imap as map
else:
import pickle
protocol = 3
xrange = range
from pyspark import cloudpickle
__all__ = ["PickleSerializer", "MarshalSerializer", "UTF8Deserializer"]
class SpecialLengths(object):
END_OF_DATA_SECTION = -1
PYTHON_EXCEPTION_THROWN = -2
TIMING_DATA = -3
END_OF_STREAM = -4
NULL = -5
class Serializer(object):
def dump_stream(self, iterator, stream):
"""
Serialize an iterator of objects to the output stream.
"""
raise NotImplementedError
def load_stream(self, stream):
"""
Return an iterator of deserialized objects from the input stream.
"""
raise NotImplementedError
def _load_stream_without_unbatching(self, stream):
"""
Return an iterator of deserialized batches (lists) of objects from the input stream.
        If the serializer does not operate on batches, the default implementation returns an
iterator of single element lists.
"""
return map(lambda x: [x], self.load_stream(stream))
# Note: our notion of "equality" is that output generated by
# equal serializers can be deserialized using the same serializer.
# This default implementation handles the simple cases;
# subclasses should override __eq__ as appropriate.
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
def __hash__(self):
return hash(str(self))
class FramedSerializer(Serializer):
"""
Serializer that writes objects as a stream of (length, data) pairs,
where C{length} is a 32-bit integer and data is C{length} bytes.
"""
def __init__(self):
# On Python 2.6, we can't write bytearrays to streams, so we need to convert them
# to strings first. Check if the version number is that old.
self._only_write_strings = sys.version_info[0:2] <= (2, 6)
def dump_stream(self, iterator, stream):
for obj in iterator:
self._write_with_length(obj, stream)
def load_stream(self, stream):
while True:
try:
yield self._read_with_length(stream)
except EOFError:
return
def _write_with_length(self, obj, stream):
serialized = self.dumps(obj)
if serialized is None:
raise ValueError("serialized value should not be None")
if len(serialized) > (1 << 31):
raise ValueError("can not serialize object larger than 2G")
write_int(len(serialized), stream)
if self._only_write_strings:
stream.write(str(serialized))
else:
stream.write(serialized)
def _read_with_length(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
obj = stream.read(length)
if len(obj) < length:
raise EOFError
return self.loads(obj)
def dumps(self, obj):
"""
Serialize an object into a byte array.
When batching is used, this will be called with an array of objects.
"""
raise NotImplementedError
def loads(self, obj):
"""
Deserialize an object from a byte array.
"""
raise NotImplementedError
class BatchedSerializer(Serializer):
"""
Serializes a stream of objects in batches by calling its wrapped
Serializer with streams of objects.
"""
UNLIMITED_BATCH_SIZE = -1
UNKNOWN_BATCH_SIZE = 0
def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
self.serializer = serializer
self.batchSize = batchSize
def _batched(self, iterator):
if self.batchSize == self.UNLIMITED_BATCH_SIZE:
yield list(iterator)
elif hasattr(iterator, "__len__") and hasattr(iterator, "__getslice__"):
n = len(iterator)
for i in xrange(0, n, self.batchSize):
yield iterator[i: i + self.batchSize]
else:
items = []
count = 0
for item in iterator:
items.append(item)
count += 1
if count == self.batchSize:
yield items
items = []
count = 0
if items:
yield items
def dump_stream(self, iterator, stream):
self.serializer.dump_stream(self._batched(iterator), stream)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def _load_stream_without_unbatching(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batchSize)
class FlattenedValuesSerializer(BatchedSerializer):
"""
    Serializes a stream of (key, values) pairs, splitting value lists
    that contain more than a certain number of objects so that the
    resulting batches have similar sizes.
"""
def __init__(self, serializer, batchSize=10):
BatchedSerializer.__init__(self, serializer, batchSize)
def _batched(self, iterator):
n = self.batchSize
for key, values in iterator:
for i in range(0, len(values), n):
yield key, values[i:i + n]
def load_stream(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "FlattenedValuesSerializer(%s, %d)" % (self.serializer, self.batchSize)
class AutoBatchedSerializer(BatchedSerializer):
"""
    Choose the batch size automatically based on the size of the serialized objects
"""
def __init__(self, serializer, bestSize=1 << 16):
BatchedSerializer.__init__(self, serializer, self.UNKNOWN_BATCH_SIZE)
self.bestSize = bestSize
def dump_stream(self, iterator, stream):
batch, best = 1, self.bestSize
iterator = iter(iterator)
while True:
vs = list(itertools.islice(iterator, batch))
if not vs:
break
bytes = self.serializer.dumps(vs)
write_int(len(bytes), stream)
stream.write(bytes)
size = len(bytes)
if size < best:
batch *= 2
elif size > best * 10 and batch > 1:
batch //= 2
def __repr__(self):
return "AutoBatchedSerializer(%s)" % self.serializer
class CartesianDeserializer(Serializer):
"""
Deserializes the JavaRDD cartesian() of two PythonRDDs.
    Due to pyspark batching we cannot simply use the result of the Java RDD cartesian;
we additionally need to do the cartesian within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# for correctness with repeated cartesian/zip this must be returned as one batch
yield product(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "CartesianDeserializer(%s, %s)" % \
(str(self.key_ser), str(self.val_ser))
class PairDeserializer(Serializer):
"""
Deserializes the JavaRDD zip() of two PythonRDDs.
    Due to pyspark batching we cannot simply use the result of the Java RDD zip;
we additionally need to do the zip within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
if len(key_batch) != len(val_batch):
raise ValueError("Can not deserialize PairRDD with different number of items"
" in batches: (%d, %d)" % (len(key_batch), len(val_batch)))
# for correctness with repeated cartesian/zip this must be returned as one batch
yield zip(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "PairDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class NoOpSerializer(FramedSerializer):
def loads(self, obj):
return obj
def dumps(self, obj):
return obj
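# Illustrative sketch, for exposition only (not used elsewhere in this module):
# the framed wire format (a 32-bit length prefix followed by the payload)
# round-tripped through an in-memory stream.
def _example_framed_roundtrip():  # illustration only
    import io
    ser = NoOpSerializer()
    stream = io.BytesIO()
    ser.dump_stream([b"spam", b"eggs"], stream)
    stream.seek(0)
    assert list(ser.load_stream(stream)) == [b"spam", b"eggs"]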
# Hook namedtuple, make it picklable
__cls = {}
def _restore(name, fields, value):
""" Restore an object of namedtuple"""
k = (name, fields)
cls = __cls.get(k)
if cls is None:
cls = collections.namedtuple(name, fields)
__cls[k] = cls
return cls(*value)
def _hack_namedtuple(cls):
""" Make class generated by namedtuple picklable """
name = cls.__name__
fields = cls._fields
def __reduce__(self):
return (_restore, (name, fields, tuple(self)))
cls.__reduce__ = __reduce__
cls._is_namedtuple_ = True
return cls
def _hijack_namedtuple():
""" Hack namedtuple() to make it picklable """
# hijack only one time
if hasattr(collections.namedtuple, "__hijack"):
return
global _old_namedtuple # or it will put in closure
global _old_namedtuple_kwdefaults # or it will put in closure too
def _copy_func(f):
return types.FunctionType(f.__code__, f.__globals__, f.__name__,
f.__defaults__, f.__closure__)
def _kwdefaults(f):
# __kwdefaults__ contains the default values of keyword-only arguments which are
# introduced from Python 3. The possible cases for __kwdefaults__ in namedtuple
# are as below:
#
# - Does not exist in Python 2.
# - Returns None in <= Python 3.5.x.
# - Returns a dictionary containing the default values to the keys from Python 3.6.x
# (See https://bugs.python.org/issue25628).
kargs = getattr(f, "__kwdefaults__", None)
if kargs is None:
return {}
else:
return kargs
_old_namedtuple = _copy_func(collections.namedtuple)
_old_namedtuple_kwdefaults = _kwdefaults(collections.namedtuple)
def namedtuple(*args, **kwargs):
for k, v in _old_namedtuple_kwdefaults.items():
kwargs[k] = kwargs.get(k, v)
cls = _old_namedtuple(*args, **kwargs)
return _hack_namedtuple(cls)
# replace namedtuple with new one
collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults
collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple
collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple
collections.namedtuple.__code__ = namedtuple.__code__
collections.namedtuple.__hijack = 1
# hack the cls already generated by namedtuple
# those created in other module can be pickled as normal,
# so only hack those in __main__ module
for n, o in sys.modules["__main__"].__dict__.items():
if (type(o) is type and o.__base__ is tuple
and hasattr(o, "_fields")
and "__reduce__" not in o.__dict__):
_hack_namedtuple(o) # hack inplace
_hijack_namedtuple()
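def _example_namedtuple_pickling():
    # Illustrative sketch only (not part of the original module): after
    # _hijack_namedtuple() has run, namedtuples created from now on carry the
    # _is_namedtuple_ flag and pickle through _restore().
    import pickle as _pkl
    Point = collections.namedtuple("Point", "x y")
    p = Point(1, 2)
    assert getattr(Point, "_is_namedtuple_", False)
    assert _pkl.loads(_pkl.dumps(p)) == p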
class PickleSerializer(FramedSerializer):
"""
Serializes objects using Python's pickle serializer:
http://docs.python.org/2/library/pickle.html
This serializer supports nearly any Python object, but may
not be as fast as more specialized serializers.
"""
def dumps(self, obj):
return pickle.dumps(obj, protocol)
if sys.version >= '3':
def loads(self, obj, encoding="bytes"):
return pickle.loads(obj, encoding=encoding)
else:
def loads(self, obj, encoding=None):
return pickle.loads(obj)
class CloudPickleSerializer(PickleSerializer):
def dumps(self, obj):
return cloudpickle.dumps(obj, 2)
class MarshalSerializer(FramedSerializer):
"""
Serializes objects using Python's Marshal serializer:
http://docs.python.org/2/library/marshal.html
This serializer is faster than PickleSerializer but supports fewer datatypes.
"""
def dumps(self, obj):
return marshal.dumps(obj)
def loads(self, obj):
return marshal.loads(obj)
class AutoSerializer(FramedSerializer):
"""
Choose marshal or pickle as serialization protocol automatically
"""
def __init__(self):
FramedSerializer.__init__(self)
self._type = None
def dumps(self, obj):
if self._type is not None:
return b'P' + pickle.dumps(obj, -1)
try:
return b'M' + marshal.dumps(obj)
except Exception:
self._type = b'P'
return b'P' + pickle.dumps(obj, -1)
def loads(self, obj):
_type = obj[0]
if _type == b'M':
return marshal.loads(obj[1:])
elif _type == b'P':
return pickle.loads(obj[1:])
else:
raise ValueError("invalid sevialization type: %s" % _type)
class CompressedSerializer(FramedSerializer):
"""
Compress the serialized data
"""
def __init__(self, serializer):
FramedSerializer.__init__(self)
assert isinstance(serializer, FramedSerializer), "serializer must be a FramedSerializer"
self.serializer = serializer
def dumps(self, obj):
return zlib.compress(self.serializer.dumps(obj), 1)
def loads(self, obj):
return self.serializer.loads(zlib.decompress(obj))
def __repr__(self):
return "CompressedSerializer(%s)" % self.serializer
class UTF8Deserializer(Serializer):
"""
Deserializes streams written by String.getBytes.
"""
def __init__(self, use_unicode=True):
self.use_unicode = use_unicode
def loads(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
s = stream.read(length)
return s.decode("utf-8") if self.use_unicode else s
def load_stream(self, stream):
try:
while True:
yield self.loads(stream)
except struct.error:
return
except EOFError:
return
def __repr__(self):
return "UTF8Deserializer(%s)" % self.use_unicode
def read_long(stream):
length = stream.read(8)
if not length:
raise EOFError
return struct.unpack("!q", length)[0]
def write_long(value, stream):
stream.write(struct.pack("!q", value))
def pack_long(value):
return struct.pack("!q", value)
def read_int(stream):
length = stream.read(4)
if not length:
raise EOFError
return struct.unpack("!i", length)[0]
def write_int(value, stream):
stream.write(struct.pack("!i", value))
def write_with_length(obj, stream):
write_int(len(obj), stream)
stream.write(obj)
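def _example_framing_roundtrip():
    # Illustrative sketch only (not part of the original module): the length-
    # and long-writing helpers round-trip through any file-like object, e.g.
    # an in-memory buffer.
    import io
    buf = io.BytesIO()
    write_with_length(b"spark", buf)
    write_long(42, buf)
    buf.seek(0)
    assert buf.read(read_int(buf)) == b"spark"
    assert read_long(buf) == 42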
if __name__ == '__main__':
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
exit(-1)
| {
"content_hash": "fbb099c229119f2b3af7ba62b4ace5bc",
"timestamp": "",
"source": "github",
"line_count": 578,
"max_line_length": 97,
"avg_line_length": 30.97923875432526,
"alnum_prop": 0.5989053948397185,
"repo_name": "wangyixiaohuihui/spark2-annotation",
"id": "664e59eb30af525ddb445fb6e2b20ef796cdcff4",
"size": "18706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "33815"
},
{
"name": "Batchfile",
"bytes": "24294"
},
{
"name": "C",
"bytes": "1542"
},
{
"name": "CSS",
"bytes": "23957"
},
{
"name": "HTML",
"bytes": "10012"
},
{
"name": "HiveQL",
"bytes": "1828674"
},
{
"name": "Java",
"bytes": "3737029"
},
{
"name": "JavaScript",
"bytes": "143063"
},
{
"name": "Makefile",
"bytes": "7980"
},
{
"name": "PLpgSQL",
"bytes": "9666"
},
{
"name": "PowerShell",
"bytes": "3751"
},
{
"name": "Python",
"bytes": "2248750"
},
{
"name": "R",
"bytes": "1027534"
},
{
"name": "Roff",
"bytes": "14420"
},
{
"name": "SQLPL",
"bytes": "3603"
},
{
"name": "Scala",
"bytes": "22897473"
},
{
"name": "Shell",
"bytes": "156941"
},
{
"name": "Thrift",
"bytes": "33665"
},
{
"name": "q",
"bytes": "147332"
}
],
"symlink_target": ""
} |
import time
from ..utils.log import log, INFO, ERROR, PASS
from ..utils.isaac import answer_numeric_q, open_accordion_section
from ..utils.i_selenium import assert_tab, image_div
from ..utils.i_selenium import wait_for_xpath_element
from ..tests import TestWithDependency
from selenium.common.exceptions import TimeoutException, NoSuchElementException
__all__ = ["numeric_q_known_wrong_sf_tagged"]
#####
# Test : Numeric Questions Known Wrong Answer, Wrong Sig Figs
#####
@TestWithDependency("NUMERIC_Q_KNOWN_WRONG_SF_TAGGED", ["NUMERIC_Q_ANSWER_CHANGE"])
def numeric_q_known_wrong_sf_tagged(driver, ISAAC_WEB, WAIT_DUR, **kwargs):
"""Test numeric question behaviour on content editor entered wrong answer with
significant figures tag added to explanation.
- 'driver' should be a Selenium WebDriver.
- 'ISAAC_WEB' is the string URL of the Isaac website to be tested.
- 'WAIT_DUR' is the time in seconds to wait for JavaScript to run/load.
"""
assert_tab(driver, ISAAC_WEB + "/questions/_regression_test_")
time.sleep(WAIT_DUR)
try:
open_accordion_section(driver, 3)
num_question = driver.find_element_by_xpath("//div[@ng-switch-when='isaacNumericQuestion']")
except NoSuchElementException:
log(ERROR, "Can't find the numeric question; can't continue!")
return False
log(INFO, "Attempt to enter known (content-editor specified) wrong answer, tagged as 'sig_figs'.")
if not answer_numeric_q(num_question, "12345", "None", wait_dur=WAIT_DUR):
log(ERROR, "Couldn't answer Numeric Question; can't continue!")
return False
time.sleep(WAIT_DUR)
try:
wait_for_xpath_element(driver, "//div[@ng-switch-when='isaacNumericQuestion']//h2[text()='Significant Figures']")
log(INFO, "A 'Sig Figs' banner was displayed instead of 'Incorrect'.")
wait_for_xpath_element(driver, "(//div[@ng-switch-when='isaacNumericQuestion']//p[text()='This should say \"Significant Figures\" above!'])[1]")
log(INFO, "The content editor entered message was correctly shown.")
wait_for_xpath_element(driver, "//div[@ng-switch-when='isaacNumericQuestion']//h5[text()='Please try again.']")
log(INFO, "The 'Please try again.' message was correctly shown.")
bg_colour1 = num_question.find_element_by_xpath("(.//div[@class='ru-answer-block-panel'])[1]").value_of_css_property('background-color')
assert bg_colour1 in ['#be4c4c', 'rgba(190, 76, 76, 1)', 'rgb(190, 76, 76)']
log(INFO, "Red highlighting shown around value box.")
log(INFO, "Avoid rate limiting: wait 1 minute.")
time.sleep(60)
log(PASS, "Numeric Question 'correct value, correct unit, incorrect sig fig' behavior as expected.")
return True
except TimeoutException:
image_div(driver, "ERROR_numeric_q_known_wrong_sf")
log(INFO, "The sig fig warning should not have been shown. if it was, this is likely the error.")
log(ERROR, "The messages shown for a known incorrect answer were not all displayed; see 'ERROR_numeric_q_known_wrong_sf.png'!")
return False
except AssertionError:
image_div(driver, "ERROR_numeric_q_known_wrong_sf")
log(ERROR, "The value box was not highlighted red correctly; see 'ERROR_numeric_q_known_wrong_sf.png'!")
return False
| {
"content_hash": "43fd153c5283bde9739f8fecd9eb56ee",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 152,
"avg_line_length": 55.19672131147541,
"alnum_prop": 0.6836946836946837,
"repo_name": "jsharkey13/isaac-selenium-testing",
"id": "f3e610d6b81833748a50babda121903e61d5b983",
"size": "3367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isaactest/tests/numeric_q_known_wrong_sf_tagged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "341129"
},
{
"name": "Shell",
"bytes": "1439"
}
],
"symlink_target": ""
} |
"""Runner for workflow jobs
"""
# built-in
import os
import time
# 3rd party
import yaml
class JobContext(object):
"""Represents the context under which a job is executed
"""
image = None
volumes = {}
working_dir = None
build_dir = None
def __init__(self, **kwargs):
"""Initialize defaults and customization of a context
"""
for k, v in kwargs.iteritems():
if hasattr(self, k):
setattr(self, k, v)
class Job(object):
image = None
context = {}
name = None
description = None
output = None
checks = []
pre_steps = []
steps = []
post_steps = []
build_dir = None
requires = []
def __init__(self, **kwargs):
"""Represents a job
"""
for k, v in kwargs.iteritems():
if hasattr(self, k):
setattr(self, k, v)
if not self.name:
self.name = "Job-{0}".format(time.time())
self.ctx = JobContext(**self.context)
self.build_dir = self.ctx.build_dir
def dockerize(self):
"""Generate a Dockerfile based on the job properties
"""
if not os.path.exists(self.build_dir):
os.mkdir(self.build_dir)
entrypoint_script = os.path.join(self.build_dir, 'entrypoint-{0}'.format(self.name))
docker_file = os.path.join(self.build_dir, "Dockerfile-{0}".format(self.name))
# add the steps to an entrypoint script
with open(entrypoint_script, 'w') as f:
f.write('\n'.join(self.steps))
print 'Generated entrypoint script {0}'.format(entrypoint_script)
l_entrypoint = os.path.basename(entrypoint_script)
with open(docker_file, 'w') as f:
f.write("FROM {0}\n".format(self.image))
f.write("ADD {0} /usr/local/bin/entrypoint.sh\n".format(l_entrypoint))
f.write("RUN chmod 755 /usr/local/bin/entrypoint.sh\n")
f.write("ENTRYPOINT /usr/local/bin/entrypoint.sh\n")
print 'Created Dockerfile {0}'.format(docker_file)
return docker_file
def compose(self):
"""Generate a docker-compose file based on the job properties
celery-builder:
build: .
dockerfile: Dockerfile-celery-sdist
volumes:
- /Users/cindy.cao/workspace:/workspace
- /Users/cindy.cao/builds:/builds
working_dir: /workspace
"""
compose_file = os.path.join(self.build_dir, "docker-compose-{0}.yml".format(self.name))
compose_conf = {"build": self.build_dir,
"dockerfile": os.path.basename(self.dockerize()),
"working_dir": self.ctx.working_dir
}
if self.ctx.volumes:
volumes = []
for local_dir, mnt_point in self.ctx.volumes.iteritems():
volumes.append("{0}:{1}".format(local_dir, mnt_point))
compose_conf["volumes"] = volumes
compose = {self.name: compose_conf}
with open(compose_file, 'w') as f:
yaml.dump(compose, f, indent=4, default_flow_style=False)
return compose_file
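if __name__ == '__main__':
    # Usage sketch with hypothetical values (not part of the original module):
    # the job writes its entrypoint script, Dockerfile and docker-compose file
    # into context['build_dir'] and prints the generated file paths.
    demo_job = Job(name='unit-tests',
                   image='python:2.7',
                   steps=['pip install -r requirements.txt',
                          'python -m pytest'],
                   context={'build_dir': '/tmp/builds',
                            'volumes': {'/tmp/src': '/workspace'},
                            'working_dir': '/workspace'})
    print demo_job.compose()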
| {
"content_hash": "c69005d20e9924ed00b068b44a1e2d91",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 95,
"avg_line_length": 34.04210526315789,
"alnum_prop": 0.5534941249226963,
"repo_name": "coolhacks/docker-hacks",
"id": "9625edee34697ee3b9e39eb4ec50bfd59d82eba0",
"size": "3234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/docker-flow/dockerflow/job.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "23973"
},
{
"name": "CSS",
"bytes": "1227"
},
{
"name": "Groff",
"bytes": "37179"
},
{
"name": "HTML",
"bytes": "50500"
},
{
"name": "Nginx",
"bytes": "3121"
},
{
"name": "Python",
"bytes": "10073"
},
{
"name": "Shell",
"bytes": "35683"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
from django.template import loader
from django.db.models import Avg
from sites.models import *
def summary_sum(request):
template = loader.get_template('summary/summary.html')
context = {}
sites = Sites.objects.all()
for site in sites:
site.A_value = sum([item.a_value for item in site.site_set.all()])
site.B_value = sum([item.b_value for item in site.site_set.all()])
context['sites'] = sites
context['sum'] = True
return HttpResponse(template.render(context, request))
def summary_avg(request):
template = loader.get_template('summary/summary.html')
context = {}
sites = Sites.objects.all()
for site in sites:
site.A_value = site.site_set.all().aggregate(a_avg= Avg('a_value'))['a_avg']
site.B_value = site.site_set.all().aggregate(b_avg= Avg('b_value'))['b_avg']
context['sites'] = sites
context['average'] = True
return HttpResponse(template.render(context, request)) | {
"content_hash": "498b03b8b6abafba26aa8f0f57780450",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 83,
"avg_line_length": 33.225806451612904,
"alnum_prop": 0.6485436893203883,
"repo_name": "ZhibinCH/my3MW",
"id": "ac864bcc24065b89f5d32bb88fb0527c4420bc72",
"size": "1030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "summary/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4119"
},
{
"name": "Python",
"bytes": "15872"
}
],
"symlink_target": ""
} |
"""
ex-json borrowed from Marcin Kuzminski
source: https://secure.rhodecode.org/ext-json
"""
import datetime
import functools
import decimal
import imp
__all__ = ['json', 'simplejson', 'stdlibjson']
def _is_aware(value):
"""
Determines if a given datetime.time is aware.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return (value.tzinfo is not None
and value.tzinfo.utcoffset(value) is not None)
def _obj_dump(obj):
"""
Custom function for dumping objects to JSON, if obj has __json__ attribute
or method defined it will be used for serialization
:param obj:
"""
if isinstance(obj, complex):
return [obj.real, obj.imag]
# See "Date Time String Format" in the ECMA-262 specification.
# some code borrowed from django 1.4
elif isinstance(obj, datetime.datetime):
r = obj.isoformat()
# increase precision
# if obj.microsecond:
# r = r[:23] + r[26:]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, decimal.Decimal):
return str(obj)
elif isinstance(obj, datetime.time):
if _is_aware(obj):
raise ValueError("JSON can't represent timezone-aware times.")
r = obj.isoformat()
if obj.microsecond:
r = r[:12]
return r
elif isinstance(obj, set):
return list(obj)
elif hasattr(obj, '__json__'):
if callable(obj.__json__):
return obj.__json__()
else:
return obj.__json__
else:
raise NotImplementedError
# Import simplejson
try:
# import simplejson initially
_sj = imp.load_module('_sj', *imp.find_module('simplejson'))
def extended_encode(obj):
try:
return _obj_dump(obj)
except NotImplementedError:
pass
raise TypeError("%r is not JSON serializable" % (obj,))
    # we handle decimals ourselves; this keeps the behavior of json and
    # simplejson unified
sj_version = [int(x) for x in _sj.__version__.split('.')]
major, minor = sj_version[0], sj_version[1]
if major < 2 or (major == 2 and minor < 1):
        # simplejson < 2.1 doesn't support use_decimal
_sj.dumps = functools.partial(_sj.dumps,
default=extended_encode)
_sj.dump = functools.partial(_sj.dump,
default=extended_encode)
else:
_sj.dumps = functools.partial(_sj.dumps,
default=extended_encode,
use_decimal=False)
_sj.dump = functools.partial(_sj.dump,
default=extended_encode,
use_decimal=False)
simplejson = _sj
except ImportError:
    # no simplejson; set it to None
simplejson = None
try:
    # simplejson not found; try the regular json module
_json = imp.load_module('_json', *imp.find_module('json'))
# extended JSON encoder for json
class ExtendedEncoder(_json.JSONEncoder):
def default(self, obj):
try:
return _obj_dump(obj)
except NotImplementedError:
pass
raise TypeError("%r is not JSON serializable" % (obj,))
# monkey-patch JSON encoder to use extended version
_json.dumps = functools.partial(_json.dumps, cls=ExtendedEncoder)
_json.dump = functools.partial(_json.dump, cls=ExtendedEncoder)
except ImportError:
json = None
stdlibjson = _json
# set all available json modules
if simplejson:
json = _sj
elif _json:
json = _json
else:
raise ImportError('Could not find any json modules')
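def _example_ext_json():
    # Illustrative sketch only (not part of the original module): objects
    # exposing __json__ are serialized through _obj_dump, as are datetimes,
    # decimals and sets. The Money class is purely hypothetical.
    class Money(object):
        def __init__(self, amount):
            self.amount = amount

        def __json__(self):
            return {'amount': str(self.amount)}

    payload = {'price': Money(decimal.Decimal('9.99')),
               'raw': decimal.Decimal('9.99'),                   # -> "9.99"
               'when': datetime.datetime(2015, 1, 1, 12, 0, 0),  # -> ISO 8601
               'tags': set(['a', 'b'])}                          # -> list
    return json.dumps(payload)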
| {
"content_hash": "874828d5fc372880d65ae9dc88db6a5a",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 78,
"avg_line_length": 29.984615384615385,
"alnum_prop": 0.5785017957927142,
"repo_name": "jpwilliams/appenlight-client-python",
"id": "93763ccc2995b3a95565c720bdbe0e8a4ab295fb",
"size": "3898",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "appenlight_client/ext_json.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "199009"
}
],
"symlink_target": ""
} |
r"""
======
Pyplot
======
.. currentmodule:: bqplot.pyplot
.. autosummary::
:toctree: _generate/
figure
show
axes
plot
scatter
hist
bar
ohlc
geo
clear
close
current_figure
scales
xlim
ylim
"""
from collections import OrderedDict
from IPython.display import display
from ipywidgets import VBox
from numpy import arange, issubdtype
from .figure import Figure
from .scales import Scale, LinearScale, Mercator
from .axes import Axis
from .marks import Lines, Scatter, Hist, Bars, OHLC, Pie, Map, Label
from .toolbar import Toolbar
from .interacts import (BrushIntervalSelector, FastIntervalSelector,
BrushSelector, IndexSelector, MultiSelector,
LassoSelector)
from traitlets.utils.sentinel import Sentinel
Keep = Sentinel('Keep', 'bqplot.pyplot', '''
Used in bqplot.pyplot to specify that the same scale should be used for
a certain dimension.
''')
# `_context` object contains the global information for pyplot.
# `figure`: refers to the current figure to which marks will be added.
# `scales`: The current set of scales which will be used for drawing a mark. If
# the scale for an attribute is not present, it is created based on the range
# type.
# `scale_registry`: This is a dictionary where the keys are the context names and
# the values are the set of scales which were used on the last plot in that
# context. This is useful when switching context.
# `last_mark`: refers to the last mark that has been plotted.
# `current_key`: The key for the current context figure. If there is no key,
# then the value is `None`.
_context = {
'figure': None,
'figure_registry': {},
'scales': {},
'scale_registry': {},
'last_mark': None,
'current_key': None
}
LINE_STYLE_CODES = OrderedDict([(':', 'dotted'), ('-.', 'dash_dotted'),
('--', 'dashed'), ('-', 'solid')])
COLOR_CODES = {'b': 'blue', 'g': 'green', 'r': 'red', 'c': 'cyan',
'm': 'magenta', 'y': 'yellow', 'k': 'black'}
MARKER_CODES = {'o': 'circle', 'v': 'triangle-down', '^': 'triangle-up',
's': 'square', 'd': 'diamond', '+': 'cross'}
def show(key=None, display_toolbar=True):
"""Shows the current context figure in the output area.
Parameters
----------
key : hashable, optional
Any variable that can be used as a key for a dictionary.
display_toolbar: bool (default: True)
If True, a toolbar for different mouse interaction is displayed with
the figure.
Raises
------
KeyError
When no context figure is associated with the provided key.
Examples
--------
>>> import numpy as np
    >>> from bqplot import pyplot as plt
>>> n = 100
>>> x = np.arange(n)
>>> y = np.cumsum(np.random.randn(n))
>>> plt.plot(x,y)
>>> plt.show()
"""
if key is None:
figure = current_figure()
else:
figure = _context['figure_registry'][key]
if display_toolbar:
if not hasattr(figure, 'pyplot'):
figure.pyplot = Toolbar(figure=figure)
display(VBox([figure, figure.pyplot]))
else:
display(figure)
def figure(key=None, fig=None, **kwargs):
"""Creates figures and switches between figures.
If a ``bqplot.Figure`` object is provided via the fig optional argument,
this figure becomes the current context figure.
Otherwise:
- If no key is provided, a new empty context figure is created.
- If a key is provided for which a context already exists, the
corresponding context becomes current.
- If a key is provided and no corresponding context exists, a new context
      is created for that key and becomes current.
    In addition, optional arguments can be used to set or modify attributes
    of the selected context figure.
Parameters
----------
key: hashable, optional
Any variable that can be used as a key for a dictionary
fig: Figure, optional
A bqplot Figure
"""
scales_arg = kwargs.pop('scales', {})
_context['current_key'] = key
if fig is not None: # fig provided
_context['figure'] = fig
if key is not None:
_context['figure_registry'][key] = fig
for arg in kwargs:
setattr(_context['figure'], arg, kwargs[arg])
else: # no fig provided
if key is None: # no key provided
_context['figure'] = Figure(**kwargs)
else: # a key is provided
if key not in _context['figure_registry']:
if 'title' not in kwargs:
kwargs['title'] = 'Figure' + ' ' + str(key)
_context['figure_registry'][key] = Figure(**kwargs)
_context['figure'] = _context['figure_registry'][key]
for arg in kwargs:
setattr(_context['figure'], arg, kwargs[arg])
scales(key, scales=scales_arg)
# Set the axis reference dictionary. This dictionary contains the mapping
# from the possible dimensions in the figure to the list of scales with
# respect to which axes have been drawn for this figure.
# Used to automatically generate axis.
if(getattr(_context['figure'], 'axis_registry', None) is None):
setattr(_context['figure'], 'axis_registry', {})
def close(key):
"""Closes and unregister the context figure corresponding to the key.
Parameters
----------
key: hashable
Any variable that can be used as a key for a dictionary
"""
figure_registry = _context['figure_registry']
if key not in figure_registry:
return
if _context['figure'] == figure_registry[key]:
figure()
fig = figure_registry[key]
if hasattr(fig, 'pyplot'):
fig.pyplot.close()
fig.close()
del figure_registry[key]
del _context['scale_registry'][key]
def scales(key=None, scales={}):
"""Creates and switches between context scales.
If no key is provided, a new blank context is created.
If a key is provided for which a context already exists, the existing
context is set as the current context.
If a key is provided and no corresponding context exists, a new context is
created for that key and set as the current context.
Parameters
----------
key: hashable, optional
Any variable that can be used as a key for a dictionary
scales: dictionary
Dictionary of scales to be used in the new context
Example
-------
    >>> scales(scales={
    ...    'x': Keep,
    ...    'color': ColorScale(min=0, max=1)
    ... })
This creates a new scales context, where the 'x' scale is kept from the
previous context, the 'color' scale is an instance of ColorScale
provided by the user. Other scales, potentially needed such as the 'y'
scale in the case of a line chart will be created on the fly when
needed.
Notes
-----
Every call to the function figure triggers a call to scales.
    The `scales` parameter is ignored if the `key` argument is provided and
    context scales already exist for that key.
"""
old_ctxt = _context['scales']
if key is None: # No key provided
_context['scales'] = {_get_attribute_dimension(k): scales[k] if scales[k] is not Keep
else old_ctxt[_get_attribute_dimension(k)] for k in scales}
else: # A key is provided
if key not in _context['scale_registry']:
_context['scale_registry'][key] = {_get_attribute_dimension(k): scales[k]
if scales[k] is not Keep
else old_ctxt[_get_attribute_dimension(k)]
for k in scales}
_context['scales'] = _context['scale_registry'][key]
def xlim(min, max):
"""Set the domain bounds of the current 'x' scale.
"""
return set_lim(min, max, 'x')
def ylim(min, max):
"""Set the domain bounds of the current 'y' scale.
"""
return set_lim(min, max, 'y')
def set_lim(min, max, name):
"""Set the domain bounds of the scale associated with the provided key.
Parameters
----------
name: hashable
Any variable that can be used as a key for a dictionary
Raises
------
KeyError
When no context figure is associated with the provided key.
"""
scale = _context['scales'][_get_attribute_dimension(name)]
scale.min = min
scale.max = max
return scale
def axes(mark=None, options={}, **kwargs):
"""Draws axes corresponding to the scales of a given mark.
It also returns a dictionary of drawn axes. If the mark is not provided,
the last drawn mark is used.
Parameters
----------
mark: Mark or None (default: None)
The mark to inspect to create axes. If None, the last mark drawn is
used instead.
options: dict (default: {})
Options for the axes to be created. If a scale labeled 'x' is required
for that mark, options['x'] contains optional keyword arguments for the
constructor of the corresponding axis type.
"""
if mark is None:
mark = _context['last_mark']
if mark is None:
return {}
fig = kwargs.get('figure', current_figure())
scales = mark.scales
fig_axes = [axis for axis in fig.axes]
axes = {}
for name in scales:
if name not in mark.class_trait_names(scaled=True):
# The scale is not needed.
continue
scale_metadata = mark.scales_metadata.get(name, {})
dimension = scale_metadata.get('dimension', scales[name])
axis_args = dict(scale_metadata,
**(options.get(name, {})))
axis = _fetch_axis(fig, dimension, scales[name])
if axis is not None:
# For this figure, an axis exists for the scale in the given
# dimension. Apply the properties and return back the object.
_apply_properties(axis, options.get(name, {}))
axes[name] = axis
continue
# An axis must be created. We fetch the type from the registry
# the key being provided in the scaled attribute decoration
key = mark.class_traits()[name].get_metadata('atype')
if(key is not None):
axis_type = Axis.axis_types[key]
axis = axis_type(scale=scales[name], **axis_args)
axes[name] = axis
fig_axes.append(axis)
# Update the axis registry of the figure once the axis is added
_update_fig_axis_registry(fig, dimension, scales[name], axis)
fig.axes = fig_axes
return axes
def _draw_mark(mark_type, options={}, axes_options={}, **kwargs):
"""Draw the mark of specified mark type.
Parameters
----------
mark_type: type
The type of mark to be drawn
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x' is
required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is required
for that mark, axes_options['x'] contains optional keyword arguments
for the constructor of the corresponding axis type.
"""
fig = kwargs.pop('figure', current_figure())
scales = kwargs.pop('scales', {})
# Going through the list of data attributes
for name in mark_type.class_trait_names(scaled=True):
dimension = _get_attribute_dimension(name, mark_type)
# TODO: the following should also happen if name in kwargs and
# scales[name] is incompatible.
if name not in kwargs:
            # The scaled attribute is not being passed to the mark, so there is
            # no need to create a scale for it.
continue
elif name in scales:
_context['scales'][dimension] = scales[name]
        # Scale has to be fetched from the context or created as it has not
        # been passed.
elif dimension not in _context['scales']:
# Creating a scale for the dimension if a matching scale is not
# present in _context['scales']
traitlet = mark_type.class_traits()[name]
rtype = traitlet.get_metadata('rtype')
dtype = traitlet.validate(None, kwargs[name]).dtype
# Fetching the first matching scale for the rtype and dtype of the
# scaled attributes of the mark.
compat_scale_types = [Scale.scale_types[key]
for key in Scale.scale_types
if Scale.scale_types[key].rtype == rtype and
issubdtype(dtype, Scale.scale_types[key].dtype)]
# TODO: something better than taking the FIRST compatible
# scale type.
scales[name] = compat_scale_types[0](**options.get(name, {}))
            # Adding the scale to the context scales
_context['scales'][dimension] = scales[name]
else:
scales[name] = _context['scales'][dimension]
mark = mark_type(scales=scales, **kwargs)
_context['last_mark'] = mark
fig.marks = [m for m in fig.marks] + [mark]
if kwargs.get('axes', True):
axes(mark, options=axes_options)
return mark
def plot(*args, **kwargs):
"""Draw lines in the current context figure.
    Signature: `plot(x, y, **kwargs)` or `plot(y, **kwargs)`, depending on the
    length of the list of positional arguments, i.e. on whether the `x` array
    is provided.
Parameters
----------
x: numpy.ndarray or list, 1d or 2d (optional)
The x-coordinates of the plotted line. When not provided, the function
defaults to `numpy.arange(len(y))`
x can be 1-dimensional or 2-dimensional.
y: numpy.ndarray or list, 1d or 2d
        The y-coordinates of the plotted line. If the argument `x` is
        2-dimensional, `y` must also be 2-dimensional.
marker_str: string
string representing line_style, marker and color.
For e.g. 'g--o', 'sr' etc
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x' is
required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is required
for that mark, axes_options['x'] contains optional keyword arguments
for the constructor of the corresponding axis type.
"""
marker_str = None
if len(args) == 1:
kwargs['y'] = args[0]
kwargs['x'] = arange(len(args[0]))
elif len(args) == 2:
if type(args[1]) == str:
kwargs['y'] = args[0]
kwargs['x'] = arange(len(args[0]))
marker_str = args[1].strip()
else:
kwargs['x'] = args[0]
kwargs['y'] = args[1]
elif len(args) == 3:
kwargs['x'] = args[0]
kwargs['y'] = args[1]
if type(args[2]) == str:
marker_str = args[2].strip()
if marker_str:
line_style, color, marker = _get_line_styles(marker_str)
# only marker specified => draw scatter
if marker and not line_style:
kwargs['marker'] = marker
if color:
kwargs['default_colors'] = [color]
return _draw_mark(Scatter, **kwargs)
else: # draw lines in all other cases
kwargs['line_style'] = line_style or 'solid'
if marker:
kwargs['marker'] = marker
if color:
kwargs['colors'] = [color]
return _draw_mark(Lines, **kwargs)
else:
return _draw_mark(Lines, **kwargs)
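def _example_marker_strings():
    # Usage sketch (not part of the original module, intended for a notebook):
    # the marker-string shorthand accepted by plot(), e.g. 'g--o' draws a green
    # dashed line with circle markers, while 'sr' (marker only, no line style)
    # draws red square scatter points. The key 'marker-demo' is hypothetical.
    import numpy as np
    x = np.arange(10)
    figure(key='marker-demo')
    plot(x, np.sin(x), 'g--o')   # Lines mark: green, dashed, circle markers
    plot(x, np.cos(x), 'sr')     # Scatter mark: red squares
    show(key='marker-demo')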
def ohlc(*args, **kwargs):
"""Draw OHLC bars or candle bars in the current context figure.
    Signature: `ohlc(x, y, **kwargs)` or `ohlc(y, **kwargs)`, depending on the
    length of the list of positional arguments, i.e. on whether the `x` array
    is provided.
Parameters
----------
x: numpy.ndarray or list, 1d (optional)
The x-coordinates of the plotted line. When not provided, the function
defaults to `numpy.arange(len(y))`.
y: numpy.ndarray or list, 2d
The ohlc (open/high/low/close) information. A two dimensional array. y
must have the shape (n, 4).
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x' is
required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is required
for that mark, axes_options['x'] contains optional keyword arguments
for the constructor of the corresponding axis type.
"""
if len(args) == 2:
kwargs['x'] = args[0]
kwargs['y'] = args[1]
elif len(args) == 1:
kwargs['y'] = args[0]
length = len(args[0])
kwargs['x'] = arange(length)
return _draw_mark(OHLC, **kwargs)
def scatter(x, y, **kwargs):
"""Draw a scatter in the current context figure.
Parameters
----------
x: numpy.ndarray, 1d
The x-coordinates of the data points.
y: numpy.ndarray, 1d
The y-coordinates of the data points.
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x' is
required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is required
for that mark, axes_options['x'] contains optional keyword arguments
for the constructor of the corresponding axis type.
"""
kwargs['x'] = x
kwargs['y'] = y
return _draw_mark(Scatter, **kwargs)
def hist(sample, options={}, **kwargs):
"""Draw a histogram in the current context figure.
Parameters
----------
sample: numpy.ndarray, 1d
The sample for which the histogram must be generated.
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'counts'
is required for that mark, options['counts'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'counts' is
required for that mark, axes_options['counts'] contains optional
keyword arguments for the constructor of the corresponding axis type.
"""
kwargs['sample'] = sample
scales = kwargs.pop('scales', {})
if 'count' not in scales:
dimension = _get_attribute_dimension('count', Hist)
if dimension in _context['scales']:
scales['count'] = _context['scales'][dimension]
else:
scales['count'] = LinearScale(**options.get('count', {}))
_context['scales'][dimension] = scales['count']
kwargs['scales'] = scales
return _draw_mark(Hist, options=options, **kwargs)
def bar(x, y, **kwargs):
"""Draws a bar chart in the current context figure.
Parameters
----------
x: numpy.ndarray, 1d
The x-coordinates of the data points.
y: numpy.ndarray, 1d
        The y-coordinates of the data points.
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x' is
required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is required
for that mark, axes_options['x'] contains optional keyword arguments
for the constructor of the corresponding axis type.
"""
kwargs['x'] = x
kwargs['y'] = y
return _draw_mark(Bars, **kwargs)
def pie(sizes, **kwargs):
"""Draws a Pie in the current context figure.
Parameters
----------
sizes: numpy.ndarray, 1d
The proportions to be represented by the Pie.
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x' is
required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is required
for that mark, axes_options['x'] contains optional keyword arguments
for the constructor of the corresponding axis type.
"""
kwargs['sizes'] = sizes
return _draw_mark(Pie, **kwargs)
def label(text, **kwargs):
"""Draws a Label in the current context figure.
Parameters
----------
text: string
The label to be displayed.
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x' is
required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is required
for that mark, axes_options['x'] contains optional keyword arguments
for the constructor of the corresponding axis type.
"""
kwargs['text'] = text
return _draw_mark(Label, **kwargs)
def geo(map_data, **kwargs):
"""Draw a map in the current context figure.
Parameters
----------
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x' is
required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is required
for that mark, axes_options['x'] contains optional keyword arguments
for the constructor of the corresponding axis type.
"""
scales = kwargs.pop('scales', _context['scales'])
options = kwargs.get('options', {})
if 'projection' not in scales:
scales['projection'] = Mercator(**options.get('projection', {}))
kwargs['scales'] = scales
kwargs['map_data'] = map_data
return _draw_mark(Map, **kwargs)
def _add_interaction(int_type, **kwargs):
"""Add the interaction for the specified type.
    If a figure is passed using the keyword argument `figure`, it is used.
    Otherwise the current context figure is used.
    If a list of marks is passed using the keyword argument `marks`, it is used.
    Otherwise the last mark that was drawn is used as the only mark associated
    with the selector.
Parameters
----------
int_type: type
The type of interaction to be added.
"""
fig = kwargs.pop('figure', current_figure())
marks = kwargs.pop('marks', [_context['last_mark']])
for name, traitlet in int_type.class_traits().items():
dimension = traitlet.get_metadata('dimension')
if dimension is not None:
# only scales have this attribute in interactions
kwargs[name] = _get_context_scale(dimension)
kwargs['marks'] = marks
interaction = int_type(**kwargs)
if fig.interaction is not None:
fig.interaction.close()
fig.interaction = interaction
return interaction
def _get_context_scale(dimension):
"""Return the scale instance in the current context for a given dimension.
Parameters
----------
dimension: string
The dimension along which the current context scale is to be fetched.
"""
return _context['scales'][dimension]
def _create_selector(int_type, func, trait, **kwargs):
"""Create a selector of the specified type.
Also attaches the function `func` as an `on_trait_change` listener
for the trait `trait` of the selector.
This is an internal function which should not be called by the user.
Parameters
----------
int_type: type
The type of selector to be added.
func: function
        The call back function. It should take at least two arguments. The name
of the trait and the value of the trait are passed as arguments.
trait: string
The name of the Selector trait whose change triggers the
call back function `func`.
"""
interaction = _add_interaction(int_type, **kwargs)
if func is not None:
interaction.on_trait_change(func, trait)
return interaction
def brush_int_selector(func=None, trait='selected', **kwargs):
"""Create a `BrushIntervalSelector` interaction for the `figure`.
Also attaches the function `func` as an event listener for the
specified trait.
Parameters
----------
func: function
        The call back function. It should take at least two arguments. The name
of the trait and the value of the trait are passed as arguments.
trait: string
The name of the BrushIntervalSelector trait whose change triggers the
call back function `func`.
"""
return _create_selector(BrushIntervalSelector, func, trait, **kwargs)
def int_selector(func=None, trait='selected', **kwargs):
"""Creates a `FastIntervalSelector` interaction for the `figure`.
Also attaches the function `func` as an event listener for the trait `trait`.
Parameters
----------
func: function
        The call back function. It should take at least two arguments. The name
of the trait and the value of the trait are passed as arguments.
trait: string
The name of the IntervalSelector trait whose change triggers the
call back function `func`.
"""
return _create_selector(FastIntervalSelector, func, trait, **kwargs)
def index_selector(func=None, trait='selected', **kwargs):
"""Creates an `IndexSelector` interaction for the `figure`.
Also attaches the function `func` as an event listener for the trait `trait`.
Parameters
----------
func: function
        The call back function. It should take at least two arguments. The name
of the trait and the value of the trait are passed as arguments.
trait: string
The name of the IndexSelector trait whose change triggers the
call back function `func`.
"""
return _create_selector(IndexSelector, func, trait, **kwargs)
def brush_selector(func=None, trait='selected', **kwargs):
"""Creates a `BrushSelector` interaction for the `figure`.
Also attaches the function `func` as an event listener for the trait `trait`.
Parameters
----------
func: function
        The call back function. It should take at least two arguments. The name
of the trait and the value of the trait are passed as arguments.
trait: string
The name of the BrushSelector trait whose change triggers the
call back function `func`.
"""
return _create_selector(BrushSelector, func, trait, **kwargs)
def multi_selector(func=None, trait='selected', **kwargs):
"""Creates a `MultiSelector` interaction for the `figure`.
Also attaches the function `func` as an event listener for the trait `trait`.
Parameters
----------
func: function
        The call back function. It should take at least two arguments. The name
of the trait and the value of the trait are passed as arguments.
trait: string
The name of the MultiSelector trait whose change triggers the
call back function `func`.
"""
return _create_selector(MultiSelector, func, trait, **kwargs)
def lasso_selector(func=None, trait='selected', **kwargs):
"""Creates a `LassoSelector` interaction for the `figure`.
Also attaches the function `func` as an event listener for the specified trait.
Parameters
----------
func: function
        The call back function. It should take at least two arguments. The name
of the trait and the value of the trait are passed as arguments.
trait: string
The name of the LassoSelector trait whose change triggers the
call back function `func`.
"""
return _create_selector(LassoSelector, func, trait, **kwargs)
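def _example_interval_selector():
    # Usage sketch (not part of the original module): assumes a mark has
    # already been drawn in the current context figure, so that an 'x' scale
    # exists. Prints the selected interval whenever it changes.
    def on_select(name, value):
        print(value)
    return int_selector(func=on_select, trait='selected')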
def clear():
"""Clears the current context figure of all marks axes and grid lines."""
fig = _context['figure']
if fig is not None:
fig.marks = []
fig.axes = []
setattr(fig, 'axis_registry', {})
_context['scales'] = {}
key = _context['current_key']
if key is not None:
_context['scale_registry'][key] = {}
def current_figure():
"""Returns the current context figure."""
if _context['figure'] is None:
figure()
return _context['figure']
# FOR DEBUG ONLY
def get_context():
"""Used for debug only. Return the current global context dictionary."""
return _context
def _fetch_axis(fig, dimension, scale):
# Internal utility function.
# Given a figure instance `fig`, the dimension of the scaled attribute and
# the instance of a scale, returns the axis if an axis is present for that
# combination. Else returns `None`
axis_registry = getattr(fig, 'axis_registry', {})
dimension_data = axis_registry.get(dimension, [])
dimension_scales = [dim['scale'] for dim in dimension_data]
dimension_axes = [dim['axis'] for dim in dimension_data]
try:
return dimension_axes[dimension_scales.index(scale)]
except (ValueError, IndexError):
return None
def _update_fig_axis_registry(fig, dimension, scale, axis):
axis_registry = fig.axis_registry
dimension_scales = axis_registry.get(dimension, [])
dimension_scales.append({'scale': scale, 'axis': axis})
axis_registry[dimension] = dimension_scales
setattr(fig, 'axis_registry', axis_registry)
def _get_attribute_dimension(trait_name, mark_type=None):
"""Returns the dimension for the name of the trait for the specified mark.
If `mark_type` is `None`, then the `trait_name` is returned
as is.
Returns `None` if the `trait_name` is not valid for `mark_type`.
"""
if(mark_type is None):
return trait_name
scale_metadata = mark_type.class_traits()['scales_metadata'].default_args[0]
return scale_metadata.get(trait_name, {}).get('dimension', None)
def _apply_properties(widget, properties={}):
"""Applies the specified properties to the widget.
`properties` is a dictionary with key value pairs corresponding
to the properties to be applied to the widget.
"""
with widget.hold_sync():
for key, value in properties.items():
setattr(widget, key, value)
def _get_line_styles(marker_str):
"""Return line style, color and marker type from specified marker string.
For example, if ``marker_str`` is 'g-o' then the method returns
``('solid', 'green', 'circle')``.
"""
def _extract_marker_value(marker_str, code_dict):
"""Extracts the marker value from a given marker string.
Looks up the `code_dict` and returns the corresponding marker for a
specific code.
For example if `marker_str` is 'g-o' then the method extracts
- 'green' if the code_dict is color_codes,
- 'circle' if the code_dict is marker_codes etc.
"""
val = None
for code in code_dict:
if code in marker_str:
val = code_dict[code]
break
return val
return [_extract_marker_value(marker_str, code_dict) for
code_dict in [LINE_STYLE_CODES, COLOR_CODES, MARKER_CODES]]
| {
"content_hash": "85e2d3c49a2af0b9b9cae75f07c7e720",
"timestamp": "",
"source": "github",
"line_count": 914,
"max_line_length": 93,
"avg_line_length": 35.167396061269145,
"alnum_prop": 0.6220639019382136,
"repo_name": "rmenegaux/bqplot",
"id": "553c356127945bea116ae4e854d5861e3dd00777",
"size": "32730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bqplot/pyplot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5500"
},
{
"name": "JavaScript",
"bytes": "641299"
},
{
"name": "Python",
"bytes": "166038"
}
],
"symlink_target": ""
} |
from . import grammar
from . import cps
__all__ = ['print_program']
def print_program(program):
writer = Writer()
writer(program)
return ''.join(writer._buffer)
_cases = []
def case(cls):
def register(f):
_cases.append((cls, f))
return f
return register
class Writer(object):
def __init__(self):
self._indent = ''
self._buffer = []
def __call__(self, *args):
for arg in args:
self.write(arg)
def write(self, node):
if isinstance(node, basestring):
self._buffer.append(node)
return
for (cls, handler) in _cases:
if isinstance(node, cls):
handler(self, node)
return
if isinstance(node, (list, tuple)):
write_body(self, node, '')
return
print 'not found', type(node), node
raise NotImplementedError
@case(cps.Call)
def write_call(writer, node):
writer('goto ', node.continue_label, ' with ', node.left, '(', node.right, ')')
@case(cps.CondGoto)
def write_cond_goto(writer, node):
writer('if ', node.test, ' goto ', node.true_label, ' else ', node.false_label)
@case(cps.Goto)
def write_goto(writer, node):
writer('goto ', node.label)
write_elements(writer, '(', node.args, ')')
@case(cps.Label)
def write_label(writer, node):
writer('label ', node.name)
if node.params:
write_elements(writer, '(', node.params, ')')
@case(cps.TailCall)
def write_tail_call(writer, node):
writer('return with ', node.left, '(', node.right, ')')
@case(grammar.Definition)
def write_definition(writer, node):
writer(node.left, ' = ', node.right)
@case(grammar.Func)
def write_func(writer, node):
writer('func')
if node.is_predicate:
writer(' is')
if node.name is not None:
writer(' ', node.name)
write_elements(writer, '(', node.params, ')')
if node.returns is not None:
writer(': ', node.returns)
if node.body is not None:
# writer(writer._indent, ' label @Enter()\n')
write_body(writer, node.body, indent=' ')
def write_body(writer, body, indent):
if not isinstance(body, (list, tuple)):
writer(body)
return
writer('\n')
prev = writer._indent
try:
writer._indent += indent
for line in body:
if isinstance(line, (grammar.Func, cps.Label)):
writer('\n')
if isinstance(line, cps.Label) and len(writer._indent) >= 2:
writer._indent = writer._indent[:-2]
writer(writer._indent, line, '\n')
if isinstance(line, cps.Label):
writer._indent += ' '
finally:
writer._indent = prev
def write_elements(writer, open, elements, close):
writer(open)
for index, element in enumerate(elements):
if index != 0:
writer(', ')
writer(element)
writer(close)
@case(grammar.Parameter)
def write_param(writer, param):
writer(param.name)
if param.type is not None:
writer(': ', param.type)
if param.default is not None:
writer(' = ', param.default)
@case(grammar.Record)
def write_record(writer, node):
write_elements(writer, '[', node.elements, ']')
@case(grammar.ReturnStmt)
def write_return_stmt(writer, stmt):
writer('return')
if stmt.value is not None:
writer(' ', stmt.value)
@case(grammar.Tokens.WholeNumber)
def write_whole_number(writer, num):
writer(num.content)
| {
"content_hash": "b14306ff4dac94dc33708c903d0f4dae",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 83,
"avg_line_length": 24.372413793103448,
"alnum_prop": 0.5797962648556876,
"repo_name": "jvs/stride",
"id": "61c8bf5d137fb1e6a64f46ae5863d0c971b1d804",
"size": "3534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stride/printer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "182"
},
{
"name": "Python",
"bytes": "51719"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import glob
import sys
from tempfile import mkdtemp
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics.pairwise import pairwise_distances
import dendropy
from dendropy.calculate.treesum import TreeSummarizer
if sys.version_info[0] == 2:
from ete2 import Tree as EteTree
elif sys.version_info[0] == 3:
from ete3 import Tree as EteTree
from . distance_metrics import minmax
from . clustering.cluster import VNClusterer, Clusterer
def pca(corpus, nb_dimensions=2):
"""
Apply dimension reduction to the vectorized
texts in the corpus, using Principal Components
Analysis.
Parameters
----------
corpus : string, default=None
The corpus to be analyzed.
Expects that the corpus has been vectorized.
nb_dimensions : int, default=2
The nb of components desired.
Returns
----------
(pca_matrix, pca_loadings): tuple
Returns a tuple with:
- the projection of the corpus texts in
the reduced space, [n_texts, nb_dimensions]
- the loadings of the features on each
component, [n_features, nb_dimensions]
"""
try:
X = corpus.vectorizer.X
except AttributeError:
        raise ValueError('Your corpus does not seem to have been vectorized yet.')
prin_comp = PCA(n_components=nb_dimensions)
try:
pca_matrix = prin_comp.fit_transform(X.toarray()) # unsparsify
except AttributeError:
pca_matrix = prin_comp.fit_transform(X) # input not sparse
pca_loadings = prin_comp.components_.transpose()
return pca_matrix, pca_loadings
def tsne(corpus, nb_dimensions=2):
"""
Apply dimension reduction to the vectorized
texts in the corpus, using t-Distributed
Stochastic Neighbor Embedding (t-SNE).
See: L.J.P. van der Maaten and G.E. Hinton.
Visualizing High-Dimensional Data Using t-SNE.
Journal of Machine Learning Research 9(Nov):
2579-2605, 2008.
Parameters
----------
corpus : string, default=None
The corpus to be analyzed.
Expects that the corpus has been vectorized.
nb_dimensions : int, default=2
The nb of dimensions in which to project
the corpus.
Returns
----------
tsne_matrix : array-like, [n_texts, n_dimensions]
The projection of the corpus texts in
the reduced space, [n_texts, nb_dimensions]
"""
try:
X = corpus.vectorizer.X
except AttributeError:
        raise ValueError('Your corpus does not seem to have been vectorized yet.')
tsne = TSNE(n_components=nb_dimensions)
try:
return tsne.fit_transform(X.toarray()) # unsparsify
except AttributeError:
        return tsne.fit_transform(X) # input not sparse
def distance_matrix(corpus=None, X=None, metric='manhattan'):
"""
Calculate a square distance matrix for
all the texts in the corpus.
Parameters
----------
corpus : string, default=None
The corpus to be analyzed.
Expects that the corpus has been vectorized.
metric : str, default='manhattan'
The distance metric to be used for the pairwise
distance calculations. Currently supports:
'manhattan', 'cityblock', 'euclidean',
'cosine', 'minmax'.
Returns
----------
distance_matrix : 2D-array, [n_texts, n_texts]
A square distance table holding all the
pairwise distance calculations.
Notes:
----------
For a comparison/explication of the metrics consult:
- S. Argamon, 'Interpreting Burrows's Delta: Geometric
and Probabilistic Foundations', LLC 23:3 (2008).
- Evert S. et al., Towards a better understanding of Burrows's
Delta in literary authorship attribution. Proceedings of the
Fourth Workshop on Computational Linguistics for Literature
(at NAACL HLT 2015), 2015.
- Koppel et al., Determining if two documents are written by
the same author, JASIST 2014 (minmax in particular).
"""
if not metric in ('manhattan', 'cityblock', 'euclidean', 'cosine', 'minmax'):
raise ValueError('Unsupported distance metric: %s' %(metric))
# we unsparsify here to make it easier for contributors
# to add distance functions (in `pystyl.distance_metrics.py`).
if corpus:
try:
X = corpus.vectorizer.X
try:
X = X.toarray()
except AttributeError:
pass
except AttributeError:
            raise ValueError('Your corpus does not seem to have been vectorized yet.')
if metric == 'minmax':
return pairwise_distances(X, metric=minmax)
else:
return pairwise_distances(X, metric=metric)
def hierarchical_clustering(distance_matrix, linkage):
"""
Run hierarchical cluster analysis on the texts
in the corpus.
Parameters
----------
distance_matrix : 2D-array, [n_texts, n_texts]
A square distance table holding all the
pairwise distance calculations.
linkage : string
The linkage function to be used in the corpus.
Returns
----------
cluster : A fitted `Clusterer`
"""
tree = Clusterer(distance_matrix, linkage=linkage)
tree.cluster(verbose=0)
return tree
def vnc_clustering(distance_matrix, linkage):
"""
    Run the variability-based neighbor clustering (VNC)
on the texts in the corpus. The analysis has the property
that it will respect the order of the texts in the corpus.
Useful for stylochronometry (or any other application where
    the order of the texts is relevant).
Will assume that the corpus holds the texts in
the correct chronological order.
Also see: `corpus.temporal_sort()`
    The VNC method has been described in e.g.:
Gries, S. et al., Variability-based neighbor clustering:
A bottom-up approach to periodization in historical
linguistics, The Oxford Handbook of the History of
English, OUP, 2012.
Parameters
----------
distance_matrix : 2D-array, [n_texts, n_texts]
A square distance table holding all the
pairwise distance calculations.
linkage : string
The linkage function to be used in the corpus.
Returns
----------
cluster : A fitted `VNClusterer`
"""
tree = VNClusterer(distance_matrix, linkage=linkage)
tree.cluster(verbose=0)
return tree
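def example_cluster_pipeline(corpus, metric='minmax', linkage='ward'):
    """Illustrative sketch only (not part of the original module).

    Assumes `corpus` is a vectorized pystyl corpus; the metric and linkage
    names are just example choices. Chains the helpers above into a distance
    matrix and a fitted cluster tree.
    """
    dm = distance_matrix(corpus, metric=metric)
    return hierarchical_clustering(dm, linkage=linkage)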
def bootstrapped_distance_matrices(corpus, n_iter=100, random_prop=0.50,
metric='manhattan', random_state=1985):
dms = []
try:
X = corpus.vectorizer.X
try:
X = X.toarray()
except AttributeError:
pass
except AttributeError:
        raise ValueError('Your corpus does not seem to have been vectorized yet.')
full_size = X.shape[1]
bootstrap_size = int(full_size*float(random_prop))
# set random state for replicability:
np.random.seed(random_state)
for i in range(n_iter):
rnd_indices = np.random.randint(low=0, high=full_size, size=bootstrap_size)
bootstrap_matrix = X[:,rnd_indices]
dms.append(distance_matrix(X=bootstrap_matrix, metric=metric))
return dms
def bootstrap_consensus_tree(corpus, trees=[], consensus_level=0.5):
tmp_dir = mkdtemp()
for idx, tree in enumerate(trees):
t = tree.dendrogram.to_ete(labels=corpus.titles)
t.write(outfile=tmp_dir+'/tree_'+str(idx)+'.newick')
trees = []
tns = dendropy.TaxonNamespace(corpus.titles, label="label")
for filename in glob.glob(tmp_dir+'/*.newick'):
tree = dendropy.Tree.get(path=filename,
schema='newick',
preserve_underscores=True,
taxon_namespace=tns)
trees.append(tree)
tsum = TreeSummarizer(support_as_labels=True,
support_as_edge_lengths=False,
support_as_percentages = True,
add_node_metadata = True,
weighted_splits = True)
taxon_namespace = trees[0].taxon_namespace
split_distribution = dendropy.SplitDistribution(taxon_namespace=taxon_namespace)
tsum.count_splits_on_trees(trees,
split_distribution=split_distribution,
is_bipartitions_updated=False)
tree = tsum.tree_from_splits(split_distribution,
min_freq=consensus_level,
rooted=False,
include_edge_lengths=False) # this param is crucial
ete_tree = EteTree(tree.as_string("newick").replace('[&U] ', '')+';')
return ete_tree
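def example_bootstrap_consensus(corpus, n_iter=100, linkage='ward'):
    """Illustrative sketch only (not part of the original module).

    Assumes a vectorized pystyl corpus and that each fitted Clusterer exposes
    the `dendrogram` attribute that `bootstrap_consensus_tree` above expects;
    the metric, linkage and consensus level are example choices.
    """
    dms = bootstrapped_distance_matrices(corpus, n_iter=n_iter,
                                         random_prop=0.5, metric='minmax')
    trees = [hierarchical_clustering(dm, linkage=linkage) for dm in dms]
    return bootstrap_consensus_tree(corpus, trees=trees, consensus_level=0.5)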
| {
"content_hash": "345fa282833a3abea7f28fd500d09537",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 84,
"avg_line_length": 33.76538461538462,
"alnum_prop": 0.6361772411436383,
"repo_name": "mikekestemont/PyStyl",
"id": "349707421ac161ed3709549685f6ed6a02f591b5",
"size": "8803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pystyl/analysis.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41"
},
{
"name": "HTML",
"bytes": "1666553"
},
{
"name": "JavaScript",
"bytes": "1800"
},
{
"name": "Jupyter Notebook",
"bytes": "326808"
},
{
"name": "Python",
"bytes": "93651"
}
],
"symlink_target": ""
} |
def polygon(canvas, points, line_width, line_color, fill_color):
""" Add a polygon item on the canvas """
canvas.create_polygon(points, width = int(line_width),
outline = line_color, fill = fill_color)
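# Hedged usage sketch (not part of the original module): draws one triangle on
# a bare canvas. The module name assumes Python 2 ("Tkinter"); on Python 3 it
# would be "tkinter".
def _example_polygon_usage():
    import Tkinter
    root = Tkinter.Tk()
    canvas = Tkinter.Canvas(root, width=200, height=200)
    canvas.pack()
    polygon(canvas, [10, 10, 100, 10, 55, 90], 2, 'black', 'red')
    return root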
| {
"content_hash": "ad5348ce04c06d19db9c36a6418a8323",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 66,
"avg_line_length": 40.5,
"alnum_prop": 0.6008230452674898,
"repo_name": "jem-gh/STplayer",
"id": "c0a159a1bffdf32975a55c796e936d202944de8c",
"size": "405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simplegui2tkinter_API/STmodules/STdrawing/STpolygon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49848"
}
],
"symlink_target": ""
} |
import billboard
import unittest
from nose.tools import raises
from requests.exceptions import ConnectionError
import six
class MiscTest(unittest.TestCase):
@raises(ConnectionError)
def testTimeout(self):
"""Checks that using a very small timeout prevents connection."""
billboard.ChartData("hot-100", timeout=1e-9)
@raises(billboard.BillboardNotFoundException)
def testNonExistentChart(self):
"""Checks that requesting a non-existent chart fails."""
billboard.ChartData("does-not-exist")
def testUnicode(self):
"""Checks that the Billboard website does not use Unicode characters."""
chart = billboard.ChartData("hot-100", date="2018-01-27")
self.assertEqual(
chart[97].title, six.text_type("El Bano")
) # With Unicode this should be "El Baño"
def testDifficultTitleCasing(self):
"""Checks that a difficult chart title receives proper casing."""
chart = billboard.ChartData("greatest-r-b-hip-hop-songs")
self.assertEqual(chart.title, "Greatest of All Time Hot R&B/Hip-Hop Songs")
| {
"content_hash": "c09ac92c63597b78abe645eb5070164c",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 83,
"avg_line_length": 38.310344827586206,
"alnum_prop": 0.6921692169216922,
"repo_name": "guoguo12/billboard-charts",
"id": "3c9923c76cd6c885ce5df3f8912a5f86a243295e",
"size": "1137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_misc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24764"
},
{
"name": "Shell",
"bytes": "95"
}
],
"symlink_target": ""
} |
from __future__ import division
__author__ = 'Alejandro Hdz. Cruz'
# Imports
import csv
import random
import numpy as np # install in your computer: http://stackoverflow.com/questions/1273203/cant-import-numpy-in-python
# ====== Declare global variables ======
# === Static constants ===
# change to test different simulation results
eDividedByK = .001 # to answer question 1 {.001, .01, .1}
bufferSizeInPackages = 3 # to answer question 1 {50, 100, 250 & 500 } (n)
frameMinSuccessRate = .9 # to answer question 1 {.9, .95, .99}
probServerSaturationLimit = .01 # to answer question 2 {.05, .01, .001}
fixedUsers = 15 # to answer question 2 {5, 10, 15, 20}
# fixed
simulationTime = 30 # Time the simulation will run
Lambda = 0.5 # rate of requests entering the system every second (1/tp)
requestArrivalTime = 1/Lambda # Average time between any user request (tp = 2)
waitTimeMax = 1 # U. 1 second in seconds
framesTobeServedPerUser = 2000 # number of frames that every user receives
throughputPackages = 4500 # rate of packages leaving the buffer every second (Miu)
packageLeavesBufferTime = round(1/throughputPackages, 6) # Average time in which a package leaves the buffer(.000222)
randomFrameRangeMin = 10 # Used to choose the start of the frames to be sent to any user (randrange)
randomFrameRangeMax = 87997 # Used to choose the end of the frames to be sent to any user (randrange)
wifiUserLimit = 256 #
fps = 24.0 # Each video runs at 24 frames per second (frame request per user)
videoRatePerUser = round(1/fps, 6) # Each user needs one frame 24 times per second
bandwidthMaxInPackages = 4500
# UPDATE: the limitation of 100 fps was removed of the simulation
# frameLeavesApplicationLayer = .01 # r
# throughputFrames = 1/frameLeavesApplicationLayer # number of frames going out of the application layer (1/r= 100)
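# Worked example of the derived timing constants above (values rounded):
#   requestArrivalTime      = 1 / 0.5  = 2.0 s between user-arrival events
#   packageLeavesBufferTime = 1 / 4500 ~ 0.000222 s per departing package
#   videoRatePerUser        = 1 / 24   ~ 0.041667 s between frame requests per user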
# === Variables ===
# time
time = 0 # current simulation time in seconds
serviceTimePackage = 0 # total service time in seconds (occupation time)
serviceTimeFrame = 0 # total service time in seconds (occupation time)
serviceTimeUser = 0 # total service time in seconds (occupation time)
# users
usersInSystem = 0 # number of users currently in the system
usersServed = 0 # users whose 2000 frames came through the system
usersSuccess = 0 # number of users that were well served by the system
usersFailed = 0 # users that received a corrupt video
usersAcceptedInBuffer = 0 #
usersRejectedFromBuffer = 0 #
usersDelivered = 0 #
usersNotDelivered = 0 # these users received a corrupt video (without 1 or more packages)
usersBeingServed = 0 # users being served in any moment (max 256)
currentUserIndex = 0 # position that the current user has in the users/petitions array
servedFramesOfCurrentUser = 0 #
lu = 0 # average of users in system
# frames
framesInSystem = 0 # number of frames currently in the system
framesServed = 0 # frames that went out of the system
framesSuccess = 0 # frames that were successfully served through the system
framesFailed = 0 # frames that failed due to waiting or e
framesAcceptedInBuffer = 0 # frames that waited less than 1 second
framesRejectedFromBuffer = 0 # frames that waited more than 1 second
framesDelivered = 0 #
framesNotDelivered = 0 #
currentFrameSize = 0 #
currentFrameIndex = 0 # id of the frame being sent to the user
currentFrameStreamingIndex = 0 # position of the original frame sequence being streamed to the current user
numPackagesOfCurrentFrame = 0 #
servedPackagesOfCurrentFrame = 0 #
lastFrameServed = -1 #
lf = 0 # average of frames in system
# packages
packagesInSystem = 0 # number of packages currently in the system
packagesServed = 0 # packages that went out of the system
packagesSuccess = 0 # packages that were successfully served through the system
packagesFailed = 0              # packages that failed to be delivered or accepted in the system
packagesAcceptedInBuffer = 0
packagesRejectedFromBuffer = 0
packagesDelivered = 0 # made it to their destiny
packagesNotDelivered = 0 # failed because of e
# packagesInBuffer = 0 # number of packages waiting to go out of the system
packagesPendingForCurrentFrame = 0 #
currentPackageIndex = 0 #
currentPackageSize = 0 #
lastPackageServed = -1 #
lp = 0 # average of packages in system
# probabilities
G = 0 # probability of buffer saturation
e = eDividedByK*usersInSystem # probability of package not arriving its destination
randomE = 0 # to compare e with and determine if a package arrives successfully to client
# delays
delay = 0 # total waiting time: timeInBuffer + amazonCurrentDelay
timeInBuffer = 0 # time between arrival and departure of the current package
amazonCurrentDelay = 0 #
amazonDelayIndexLimit = 52686 # determined from the csv file provided
amazonCurrentDelayIndex = 0 # used to read the delay from the delays array
# utilization
utilizationUser = 0 # percentage of utilization of the system
utilizationFrame = 0 # percentage of utilization of the system
utilizationPackage = 0 # percentage of utilization of the system
# bandwidth
currentBandwidth = 0 # bits coming out of the system
# === Arrays ===
# probabilities
probabilityServerSaturationArr = [] # G
probabilityPackageSendFailArr = [] # e
# users
arrivalTimeUsersArr = [] # time in which every request that reaches the system arrived
departureTimeUsersArr = [] # time in which every request that left the system
usersInSystemArr = [] # history of the number of users in the system
usersBeingServedArr = [] # history of the users going out of the system (not waiting in buffer)
startStreamingPositionPerUser = [] # in which position of the framesArray every user starts
framesDeliveredPerUserArr = [] # count of how many frames have been sent to every user, up to 2000
currentFramePerUserIndex = [] # to know which frame is being served to every user
usersAcceptedInBufferArr = [] # accepted or rejected
usersFullyDeliveredArr = [] # yes or no, depending on it's packages e check
# frames
arrivalTimeFramesArr = [] # time in which every frame entered the system
departureTimeFramesArr = [] # time in which every frame left the system
framesArray = [] # sequence of the frames to be served
framesOriginalArray = [] # all the video frames that can be served (from the .csv file)
framesInSystemArr = [] # history of the number of frames in the system
# framesServed = [] # frames that came out of the system
frameSizeArr = []
frameOwnersArr = [] # list of users by frame in the system (owner)
packagesPerFrameArr = [] # history of how many packages has every frame produced
packagesServedPerFrameArr = [] # to know hoy many packages of each frame have left the system
framesAcceptedInBufferArr = [] # to save which frames were taken or rejected of the buffer
framesFullyDeliveredArr = [] # yes or no, depending on the error e of its packages
# packages
arrivalTimePackagesArr = [] # time in which every package entered the system
departureTimePackagesArray = [] # time in which every package left the system
packagesArray = [] # Sequence of the packages
packagesInSystemArr = [] # history of the number of packages in the system
packagesAcceptedInBufferArr = [] # accepted or rejected, depending on the error e
packagesDeliveryStatusArr = [] # delivered or failed
packageSizeArr = [] #
packageOwnersArr = [] # user that will needs every package
frameOfEveryPackage = [] # to know of which frame every package is part of
# delay
delayArr = [] # history of the # total waiting time: timeInBuffer + amazonCurrentDelay
timeInBufferPerPackageArr = [] # history of how many seconds ech petition waited in buffer
amazonDelaysArr = [] # Amazon delays from the .csv file
# bandwidth
bandwidthArr = [] # bits transferred every second
# Read Amazon delay data from csv file
delaysReader = csv.reader(open('AmazonS3_delays-Ag-15.csv', 'rb'), delimiter= ',', quotechar='"')
for i in delaysReader:
amazonDelaysArr.append(i)
framesReader = csv.reader(open('Terse_DieHardIII.csv', 'rb'), delimiter= ',', quotechar='"')
for i in framesReader:
framesOriginalArray.append(i)
# ====== Main ======
while time < simulationTime:
# save system's status (saving this might be too much data and we should save it every .01 seconds)
usersInSystemArr.append(usersInSystem)
usersBeingServedArr.append(usersBeingServed)
framesInSystemArr.append(framesInSystem)
packagesInSystemArr.append(packagesInSystem)
bandwidthArr.append(currentBandwidth)
probabilityServerSaturationArr.append(G)
probabilityPackageSendFailArr.append(e)
timeInBufferPerPackageArr.append(timeInBuffer)
delayArr.append(delay)
time += 0.000001
time = round(time, 6)
# define amazonCurrentDelay
amazonCurrentDelay = amazonDelaysArr[amazonCurrentDelayIndex]
    # === new users arrive ===
if (time*1000000) - (requestArrivalTime*1000000) == 0:
# one user enters the system every 2 seconds
i = 0
while i < fixedUsers:
usersInSystem += 1
print time, ": user added. Total: ", usersInSystem
arrivalTimeUsersArr.append(time)
usersFullyDeliveredArr.append("no")
usersAcceptedInBufferArr.append("-")
departureTimeUsersArr.append("-")
framesDeliveredPerUserArr.append(0)
startStreamingPositionPerUser.append(random.randrange(randomFrameRangeMin, randomFrameRangeMax))
currentFramePerUserIndex.append(0)
i += 1
# === new frames needed ===
elif ((time*1000000) % (videoRatePerUser*100000) < 0.000001) and (usersInSystem > 0):
# every 1/24 seconds, every user asks for a frame
currentUserIndex = 0
if usersBeingServed < wifiUserLimit: # serve only 256 users
# find next user that needs one of her 2000 frames
for index, item in enumerate(framesDeliveredPerUserArr):
if item <= framesTobeServedPerUser and index > currentUserIndex:
currentUserIndex = index
# get index of next currentFrame for current user
# nextFrameForThisUser = framesOriginalArray[currentFramePerUserIndex[currentUserIndex] + startStreamingPositionPerUser[currentUserIndex]]
# this shouldn't fail because the randomFrameRangeMax is the frame sequence - 2000
currentFrameStreamingIndex = currentFramePerUserIndex[currentUserIndex]
if currentFrameStreamingIndex == 0:
currentFrameStreamingIndex += startStreamingPositionPerUser[currentUserIndex]
else:
currentFrameStreamingIndex += 1
# save new currentFrameIndex for this user
currentFramePerUserIndex[currentUserIndex] = currentFrameStreamingIndex
# add frame to the system
currentFrameSize = framesOriginalArray[currentFrameStreamingIndex]
currentFrameSize = np.int32(currentFrameSize)
arrivalTimeFramesArr.append(time)
departureTimeFramesArr.append("-")
packagesServedPerFrameArr.append(0)
framesFullyDeliveredArr.append("no")
frameOwnersArr.append(currentUserIndex)
frameSizeArr.append(currentFrameSize)
print time, ": CurrentFrameSize: ", currentFrameSize
usersBeingServed += 1
# divide frame in packages
while currentFrameSize >= 1500:
# subdivide frames into packages
currentFrameSize -= 1500
numPackagesOfCurrentFrame += 1
            if currentFrameSize > 0:
                numPackagesOfCurrentFrame += 1
                currentPackageSize = currentFrameSize  # remainder becomes the last, smaller package
packagesPerFrameArr.append(numPackagesOfCurrentFrame)
packs = 0
# save data for n-1 packages of the frame
while packs < (numPackagesOfCurrentFrame-1):
packageSizeArr.append(1500)
frameOfEveryPackage.append(currentFrameIndex)
packageOwnersArr.append(currentUserIndex)
arrivalTimePackagesArr.append(time)
departureTimePackagesArray.append("-")
packagesDeliveryStatusArr.append("-")
packs += 1
# save data for last package of the frame
packageSizeArr.append(currentPackageSize)
frameOfEveryPackage.append(currentFrameIndex)
packageOwnersArr.append(currentUserIndex)
arrivalTimePackagesArr.append(time)
departureTimePackagesArray.append("-")
packagesDeliveryStatusArr.append("-")
if delay > 1 and packagesInSystem+numPackagesOfCurrentFrame < bufferSizeInPackages:
# reject the frame
print time, ": Frame rejected. Total: ", framesInSystem, "..."
framesFailed += 1
framesRejectedFromBuffer += 1
framesAcceptedInBufferArr.append("rejected")
usersAcceptedInBufferArr[currentUserIndex] = "rejected"
rej = 0
# mark all the packages of this frame as rejected and failed
while rej < numPackagesOfCurrentFrame:
print time, ":", numPackagesOfCurrentFrame, " Packages rejected. Total: ", packagesInSystem
packagesRejectedFromBuffer += 1
packagesAcceptedInBufferArr.append("rejected")
rej += 1
else:
# accept the frame in the system
framesInSystem += 1
print time, ": Frame added. - Total: ", framesInSystem, "..."
framesAcceptedInBuffer += 1
framesAcceptedInBufferArr.append("accepted")
acc = 0
# mark all the packages of this frame as accepted and in the system
packagesInSystem += numPackagesOfCurrentFrame
print time, ":", numPackagesOfCurrentFrame, " packages added. Total: ", packagesInSystem
while acc < numPackagesOfCurrentFrame:
packagesAcceptedInBuffer += 1
packagesAcceptedInBufferArr.append("accepted")
packagesArray.append(currentPackageIndex)
acc += 1
# === package leaves ===
elif (time*1000000) % (packageLeavesBufferTime*1000000) < .000001 and packagesInSystem > 0:
# one package leaves the buffer every .000222 seconds
currentBandwidth = 0
isInSys = False
inx = 0
# find next package in system
while isInSys is False:
packageInSystemStatus = packagesAcceptedInBufferArr[inx]
            if packageInSystemStatus == "accepted":
currentPackageIndex = inx
isInSys = True
# Compute bandwidth
currentBandwidth = 1/bandwidthMaxInPackages # since only one package is being served
inx += 1
# remove package from system
departureTimePackagesArray[currentPackageIndex] = time
packagesInSystem -= 1
print time, ": Package served. Total: ", packagesInSystem
packagesServed += 1
packagesAcceptedInBufferArr[currentPackageIndex] = "served" # package was served
# we need cpi to get arrivalTime of package that entered the system
cpi = 0
for index, package in enumerate(packagesArray):
if package == currentPackageIndex:
currentUserIndex = index
cpi = index
# calculate service time
serviceTimePackage += time - arrivalTimePackagesArr[cpi]
servedPackagesOfCurrentFrame = packagesServedPerFrameArr[currentFrameIndex]
servedPackagesOfCurrentFrame += 1
packagesServedPerFrameArr[currentFrameIndex] = servedPackagesOfCurrentFrame
# get this package's user and frame
currentFrameIndex = frameOfEveryPackage[currentPackageIndex]
currentUserIndex = packageOwnersArr[currentPackageIndex]
numPackagesOfCurrentFrame = np.int32(packagesPerFrameArr[currentFrameIndex])
# compute waiting time in Buffer/System
timeInBuffer = 1/(4500 - numPackagesOfCurrentFrame)
delay = timeInBuffer + np.float64(amazonCurrentDelay)
# delay = departureTimePackagesArray[currentPackageIndex] - arrivalTimePackagesArr[currentPackageIndex]
# delay += amazonCurrentDelay
# compute and check e
e = eDividedByK*usersInSystem
randomE = round(random.uniform(0, 1), 4) # probability with 4 decimals
if randomE > e:
packagesDeliveryStatusArr[currentPackageIndex] = "delivered"
packagesDelivered += 1
packagesSuccess += 1
# tag package's frame
            if framesFullyDeliveredArr[currentFrameIndex] != "no":
framesFullyDeliveredArr[currentFrameIndex] = "yes"
# tag package's user
            if usersFullyDeliveredArr[currentUserIndex] != "no":
usersFullyDeliveredArr[currentUserIndex] = "yes"
else:
packagesDeliveryStatusArr[currentPackageIndex] = "failed"
packagesFailed += 1
packagesNotDelivered += 1
# tag package's frame
framesFullyDeliveredArr[currentFrameIndex] = "no"
# tag package's user
usersFullyDeliveredArr[currentUserIndex] = "no"
# === Follow-up of the packages->frames->users final departure times ===
# When all the packages of one frame are served
if servedPackagesOfCurrentFrame == packagesPerFrameArr[currentFrameIndex]:
# remove frames from the system
framesInSystem -= 1
print time, ": Frame served. Total: ", framesInSystem
departureTimeFramesArr.append(time)
serviceTimeFrame += time - arrivalTimeFramesArr[currentFrameIndex]
# up-count the good frames
framesServed += 1
            if framesFullyDeliveredArr[currentFrameIndex] == "yes":
framesDelivered += 1
framesSuccess += 1
else:
framesFailed += 1
framesNotDelivered += 1
# tag user's delivery status
usersFullyDeliveredArr[currentUserIndex] = "no"
fdpu = framesDeliveredPerUserArr[currentUserIndex]
fdpu += 1
framesDeliveredPerUserArr[currentUserIndex] = fdpu
# When the system has already sent 2000 frames to the current user
if fdpu == framesTobeServedPerUser:
print time, ": User served. Total: ", usersInSystem
# remove user from system
usersServed += 1
usersInSystem -= 1
departureTimeUsersArr[currentUserIndex] = time
# compute service time
serviceTimeUser += time - arrivalTimeUsersArr[currentUserIndex]
# tag user's delivery status
if usersFullyDeliveredArr[currentUserIndex] != "no":
usersFullyDeliveredArr[currentUserIndex] = "yes"
usersSuccess += 1
else:
usersFailed += 1
if usersAcceptedInBufferArr[currentUserIndex] != "rejected":
usersAcceptedInBufferArr[currentUserIndex] = "accepted"
usersAcceptedInBuffer += 1
# Compute G
if packagesServed > 0:
G = packagesFailed/packagesServed
if G > probServerSaturationLimit:
bufferSizeInPackages += 1
    if amazonCurrentDelayIndex < amazonDelayIndexLimit:
amazonCurrentDelayIndex += 1
else:
amazonCurrentDelayIndex = 0
# ====== Compute L & U ======
# average of objects in the system
lp = (packagesServed+packagesInSystem)/time
lf = (framesServed+framesInSystem)/time
lu = (usersServed+usersInSystem)/time
utilizationPackage = serviceTimePackage/time
utilizationFrame = serviceTimeFrame/time
utilizationUser = serviceTimeUser/time
# ======= Print all csv files ======
rows = []
with open("question2.csv", "w") as a:
writerr = csv.writer(a)
row = [bufferSizeInPackages]
rows.append(row)
writerr.writerows(rows)
with open("general_info.csv", "w") as f:
writer = csv.writer(f)
row = [time, usersInSystem, usersServed, usersSuccess, usersFailed, usersAcceptedInBuffer, usersRejectedFromBuffer, usersDelivered, usersNotDelivered, usersBeingServed, lu, lf, lp, utilizationPackage, utilizationFrame, utilizationUser]
rows.append(row)
writer.writerows(rows)
with open("users.csv", "w") as g:
writer0 = csv.writer(g)
for index, t in enumerate(arrivalTimeUsersArr):
arrTUsr = arrivalTimeUsersArr[index]
depTUsr = departureTimeUsersArr[index]
startStr = startStreamingPositionPerUser[index]
frDeliv = framesDeliveredPerUserArr[index]
currFr = currentFramePerUserIndex[index]
accBuff = usersAcceptedInBufferArr[index]
fullDeliv = usersFullyDeliveredArr[index]
row = [arrTUsr, depTUsr, startStr, frDeliv, currFr, accBuff, fullDeliv]
rows.append(row)
writer0.writerows(rows)
with open("frames.csv", "w") as h:
writer1 = csv.writer(h)
for index, t in enumerate(arrivalTimeFramesArr):
arrTFr = arrivalTimeFramesArr[index]
depTFr = departureTimeFramesArr[index]
# frameIndx = framesArray[index]
frSize = frameSizeArr[index]
owner = frameOwnersArr[index]
accBuff = framesAcceptedInBufferArr[index]
fullDeliv = framesFullyDeliveredArr[index]
packsFr = packagesPerFrameArr[index]
packsServFr = packagesServedPerFrameArr[index]
row = [arrTFr, depTFr, frSize, owner, accBuff, fullDeliv, packsFr, packsServFr]
rows.append(row)
writer1.writerows(rows)
with open("packages.csv", "w") as i:
writer2 = csv.writer(i)
for index, t in enumerate(arrivalTimePackagesArr):
arrTPack = arrivalTimePackagesArr[index]
depTPack = departureTimePackagesArray[index]
# packageIndx = packagesArray[index]
packSize = packageSizeArr[index]
owner = packageOwnersArr[index]
accBuff = packagesAcceptedInBufferArr[index]
deliverySts = packagesDeliveryStatusArr[index]
row = [arrTPack, depTPack, packSize, owner, accBuff, deliverySts]
rows.append(row)
writer2.writerows(rows)
with open("every_microsecond_data.csv", "w") as j:
writer3 = csv.writer(j)
for index, i in enumerate(usersInSystemArr):
uinSys = usersInSystemArr[index]
uBingServed = usersBeingServedArr[index]
frInSys = framesInSystemArr[index]
packInSys = packagesInSystemArr[index]
probG = probabilityServerSaturationArr[index]
probE = probabilityPackageSendFailArr[index]
tBuff = timeInBufferPerPackageArr[index]
dlay = delayArr[index]
bn = bandwidthArr[index]
row = [uinSys, uBingServed, frInSys, packInSys, probG, probE, tBuff, dlay, bn]
rows.append(row)
writer3.writerows(rows)
print "END in time: ", time
| {
"content_hash": "38cd3e2cfb0bc2e25538fc2209d9e76e",
"timestamp": "",
"source": "github",
"line_count": 533,
"max_line_length": 239,
"avg_line_length": 45.23076923076923,
"alnum_prop": 0.6531027044964327,
"repo_name": "AlejandroHCruz/VideoServerNURXSimulation",
"id": "bf6e760256570a80b4674c24f03d80de80b6c138",
"size": "24108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Main2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "19663"
},
{
"name": "Java",
"bytes": "1377"
},
{
"name": "Python",
"bytes": "106400"
}
],
"symlink_target": ""
} |
"""Unittest for ciscoasa acl rendering module."""
from absl.testing import absltest
from unittest import mock
from capirca.lib import ciscoasa
from capirca.lib import naming
from capirca.lib import policy
GOOD_HEADER = """
header {
comment:: "this is a test acl"
target:: ciscoasa test-filter
}
"""
GOOD_TERM_1 = """
term good-term-1 {
verbatim:: ciscoasa "mary had a little lamb"
verbatim:: iptables "mary had second lamb"
verbatim:: juniper "mary had third lamb"
}
"""
GOOD_TERM_2 = """
term good-term-2 {
verbatim:: ciscoasa "mary had a little lamb"
policer:: batman
}
"""
SUPPORTED_TOKENS = {
'action',
'comment',
'destination_address',
'destination_address_exclude',
'destination_port',
'expiration',
'icmp_type',
'stateless_reply',
'logging',
'name',
'option',
'owner',
'platform',
'platform_exclude',
'protocol',
'source_address',
'source_address_exclude',
'source_port',
'translated',
'verbatim',
}
SUPPORTED_SUB_TOKENS = {
'action': {'accept', 'deny', 'reject', 'next',
'reject-with-tcp-rst'},
'icmp_type': {
'alternate-address',
'certification-path-advertisement',
'certification-path-solicitation',
'conversion-error',
'destination-unreachable',
'echo-reply',
'echo-request', 'mobile-redirect',
'home-agent-address-discovery-reply',
'home-agent-address-discovery-request',
'icmp-node-information-query',
'icmp-node-information-response',
'information-request',
'inverse-neighbor-discovery-advertisement',
'inverse-neighbor-discovery-solicitation',
'mask-reply',
'mask-request', 'information-reply',
'mobile-prefix-advertisement',
'mobile-prefix-solicitation',
'multicast-listener-done',
'multicast-listener-query',
'multicast-listener-report',
'multicast-router-advertisement',
'multicast-router-solicitation',
'multicast-router-termination',
'neighbor-advertisement',
'neighbor-solicit',
'packet-too-big',
'parameter-problem',
'redirect',
'redirect-message',
'router-advertisement',
'router-renumbering',
'router-solicit',
'router-solicitation',
'source-quench',
'time-exceeded',
'timestamp-reply',
'timestamp-request',
'unreachable',
'version-2-multicast-listener-report',
},
'option': {'established', 'tcp-established'}}
# Print a info message when a term is set to expire in that many weeks.
# This is normally passed from command line.
EXP_INFO = 2
class CiscoASATest(absltest.TestCase):
def setUp(self):
super().setUp()
self.naming = mock.create_autospec(naming.Naming)
def testBuildTokens(self):
pol1 = ciscoasa.CiscoASA(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1,
self.naming), EXP_INFO)
st, sst = pol1._BuildTokens()
self.assertEqual(st, SUPPORTED_TOKENS)
self.assertEqual(sst, SUPPORTED_SUB_TOKENS)
def testBuildWarningTokens(self):
pol1 = ciscoasa.CiscoASA(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_2,
self.naming), EXP_INFO)
st, sst = pol1._BuildTokens()
self.assertEqual(st, SUPPORTED_TOKENS)
self.assertEqual(sst, SUPPORTED_SUB_TOKENS)
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "992e1fcff545e4455d322f813a632a4f",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 74,
"avg_line_length": 27.015384615384615,
"alnum_prop": 0.6173120728929385,
"repo_name": "google/capirca",
"id": "fb9b48527edf5f4e5a52f7e78fb391a41b7781e2",
"size": "4109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/lib/ciscoasa_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "368"
},
{
"name": "Dockerfile",
"bytes": "169"
},
{
"name": "Perl",
"bytes": "1590"
},
{
"name": "Python",
"bytes": "1778745"
},
{
"name": "Shell",
"bytes": "2576"
}
],
"symlink_target": ""
} |
import sys
import os.path
sys.path.append(os.path.abspath('../../../'))
from distutils.core import setup, Extension
from src.util.FileSystem import FileSystem
extDir = FileSystem.getExtDir()
jsonPath = os.path.join(extDir, 'SimpleJSON-master/src')
jsonObj1Path = os.path.join(extDir, 'SimpleJSON-master/obj/JSON.o')
jsonObj2Path = os.path.join(extDir, 'SimpleJSON-master/obj/JSONValue.o')
module1 = Extension(
    'PyMatch',
    sources=['PyMatch.cpp', 'Match.cpp'],
    extra_objects=[jsonObj1Path, jsonObj2Path],
    include_dirs=['.', jsonPath],
    library_dirs=[],
    libraries=[],
    extra_compile_args=['-fPIC'],
)
setup(name='PyMatch', version='0.1', description='Matches octave code.',
      ext_modules=[module1], packages=[])
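# Hedged build note (an assumption, not taken from the repository docs): with
# the SimpleJSON object files above already compiled, the extension would
# typically be built in place with
#     python setup.py build_ext --inplace
# after which `import PyMatch` becomes available to the matching code.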
| {
"content_hash": "7964071f501f3b43e08d1dec962a4143",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 116,
"avg_line_length": 31.708333333333332,
"alnum_prop": 0.6819973718791065,
"repo_name": "tanonev/codewebs",
"id": "bb3dc286e605f51deb611e51456adb8576cf5005",
"size": "761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/matching/pythonMatching/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "790"
},
{
"name": "C++",
"bytes": "301221"
},
{
"name": "Java",
"bytes": "479184"
},
{
"name": "Makefile",
"bytes": "5459"
},
{
"name": "Matlab",
"bytes": "50455"
},
{
"name": "Python",
"bytes": "230306"
},
{
"name": "Shell",
"bytes": "13311"
}
],
"symlink_target": ""
} |
from __future__ import (division, absolute_import, print_function,
unicode_literals)
"""Fixes file permissions after the file gets written on import. Put something
like the following in your config.yaml to configure:
permissions:
file: 644
dir: 755
"""
import os
from beets import config, util
from beets.plugins import BeetsPlugin
from beets.util import ancestry
def convert_perm(perm):
"""If the perm is a int it will first convert it to a string and back
to an oct int. Else it just converts it to oct.
"""
if isinstance(perm, int):
return int(bytes(perm), 8)
else:
return int(perm, 8)
def check_permissions(path, permission):
"""Checks the permissions of a path.
"""
return oct(os.stat(path).st_mode & 0o777) == oct(permission)
def dirs_in_library(library, item):
"""Creates a list of ancestor directories in the beets library path.
"""
return [ancestor
for ancestor in ancestry(item)
if ancestor.startswith(library)][1:]
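# Hedged illustration (not part of the plugin): how the two helpers above fit
# together. The path argument is hypothetical.
def _example_perm_roundtrip(path):
    perm = convert_perm(644)  # 0o644 (rw-r--r--), i.e. decimal 420
    return check_permissions(path, perm)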
class Permissions(BeetsPlugin):
def __init__(self):
super(Permissions, self).__init__()
# Adding defaults.
self.config.add({
u'file': 644,
u'dir': 755
})
self.register_listener('item_imported', permissions)
self.register_listener('album_imported', permissions)
def permissions(lib, item=None, album=None):
"""Running the permission fixer.
"""
# Getting the config.
file_perm = config['permissions']['file'].get()
dir_perm = config['permissions']['dir'].get()
# Converts permissions to oct.
file_perm = convert_perm(file_perm)
dir_perm = convert_perm(dir_perm)
# Create chmod_queue.
file_chmod_queue = []
if item:
file_chmod_queue.append(item.path)
elif album:
for album_item in album.items():
file_chmod_queue.append(album_item.path)
# A set of directories to change permissions for.
dir_chmod_queue = set()
for path in file_chmod_queue:
# Changing permissions on the destination file.
os.chmod(util.bytestring_path(path), file_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), file_perm):
message = 'There was a problem setting permission on {}'.format(
path)
print(message)
# Adding directories to the directory chmod queue.
dir_chmod_queue.update(
dirs_in_library(config['directory'].get(),
path))
# Change permissions for the directories.
for path in dir_chmod_queue:
        # Changing permissions on the destination directory.
os.chmod(util.bytestring_path(path), dir_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), dir_perm):
message = 'There was a problem setting permission on {}'.format(
path)
print(message)
| {
"content_hash": "d0208a0cb5a3f3edcaea3afe549b175c",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 78,
"avg_line_length": 30.85148514851485,
"alnum_prop": 0.6267650834403081,
"repo_name": "multikatt/beets",
"id": "a85bff6b50e937860f0b8aabab19c8d00fb6bade",
"size": "3116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beetsplug/permissions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "3307"
},
{
"name": "JavaScript",
"bytes": "85938"
},
{
"name": "Python",
"bytes": "1488443"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
} |
import datetime
import warnings
from contextlib import suppress
from typing import Optional, Union
import numpy as np
import pandas as pd
from pydantic import PositiveInt, confloat, validate_arguments
from toolbox_utils import tsutils
from tstoolbox import tstoolbox
from typing_extensions import Literal
from . import tdew as tdew_melo
from .melodist.melodist.humidity import (
calculate_month_hour_precip_mean,
disaggregate_humidity,
)
from .melodist.melodist.radiation import disaggregate_radiation
from .melodist.melodist.temperature import disaggregate_temperature, get_shift_by_data
from .melodist.melodist.util.util import (
calculate_mean_daily_course_by_month,
get_sun_times,
)
from .melodist.melodist.wind import disaggregate_wind
@tsutils.transform_args(source_units=tsutils.make_list, target_units=tsutils.make_list)
@validate_arguments
def single_target_units(source_units, target_units, default=None, cnt=1):
if default is None:
return source_units
if target_units is None:
return [default] * len(source_units)
tunits = set(target_units)
if len(tunits) != cnt:
raise ValueError(
tsutils.error_wrapper(
f"""
Since creating a single disaggregated time-series there can
only be a single "target_units". You gave "{target_units}".
"""
)
)
if len(source_units) == len(target_units):
return target_units
return [target_units[0]] * len(source_units)
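# Worked examples for the helper above (unit strings are illustrative):
#   single_target_units(["degF"], None, "degC")            -> ["degC"]
#   single_target_units(["degF"], ["degC"], "degC")         -> ["degC"]
#   single_target_units(["degF"], ["degC", "degK"], "degC") raises ValueError,
#   since a single disaggregated series can have only one target unit.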
@validate_arguments(config=dict(arbitrary_types_allowed=True))
def temperature(
method: Literal[
"sine_min_max", "sine_mean", "sine", "mean_course_min_max", "mean_course_mean"
],
source_units,
min_max_time: Literal["fix", "sun_loc", "sun_loc_shift"] = "fix",
mod_nighttime: bool = False,
input_ts="-",
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
print_input=False,
target_units=None,
max_delta: bool = False,
temp_min_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
temp_max_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
temp_mean_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
lat: Optional[confloat(ge=-90, le=90)] = None,
lon: Optional[confloat(ge=-180, le=180)] = None,
hourly: Optional[Union[str, pd.Series]] = None,
):
"""Disaggregate daily temperature to hourly temperature."""
target_units = single_target_units(source_units, target_units, "degC")
pd.options.display.width = 60
if (
method in ("mean_course_min_max", "mean_course_mean")
or min_max_time == "sun_loc_shift"
or max_delta
) and hourly is None:
raise ValueError(
tsutils.error_wrapper(
"""
The methods "mean_course_min", "mean_course_mean", or if
`max_delta` is True, or if `min_max_time` is "sun_loc_shift"
require a HOURLY temperature values in the CSV file specified
by the keyword `hourly`.
"""
)
)
if (
method in ("mean_course_min_max", "mean_course_mean")
or min_max_time == "sun_loc_shift"
or max_delta
):
hourly = tstoolbox.read(hourly)
mean_course = calculate_mean_daily_course_by_month(
hourly.squeeze(), normalize=True
)
else:
mean_course = None
if min_max_time == "sun_loc_shift" or max_delta:
max_delta = get_shift_by_data(hourly.squeeze(), lon, lat, round(lon / 15.0))
else:
max_delta = None
if temp_min_col is None or temp_max_col is None:
raise ValueError(
tsutils.error_wrapper(
f"""
For "temperature" disaggregation you need to supply the daily
minimum column (name or number, data column numbering starts at
1) and the daily maximum column (name or number).
Instead `temp_min_col` is {temp_min_col} and `temp_max_col` is
{temp_max_col}
"""
)
)
with suppress(TypeError):
temp_min_col = int(temp_min_col)
with suppress(TypeError):
temp_max_col = int(temp_max_col)
columns = [temp_min_col, temp_max_col]
if temp_mean_col is not None:
with suppress(TypeError):
temp_mean_col = int(temp_mean_col)
columns.append(temp_mean_col)
tsd = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts, skiprows=skiprows, names=names, index_type=index_type
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
source_units=source_units,
target_units=target_units,
clean=clean,
)
if len(tsd.columns) == 3:
tsd.columns = ("tmin", "tmax", "temp")
else:
tsd.columns = ("tmin", "tmax")
if any((tsd.tmax <= tsd.tmin).dropna()):
raise ValueError(
tsutils.error_wrapper(
f"""
On the following dates:
{tsd[tsd.tmax <= tsd.tmin].index},
minimum temperature values in column "{temp_min_col}" are
greater than or equal to the maximum temperature values in
column "{temp_max_col}".
"""
)
)
if temp_mean_col is None:
warnings.warn(
tsutils.error_wrapper(
"""
Since `temp_mean_col` is None, the average daily temperature
will be estimated by the average of `temp_min_col` and
`temp_max_col`
"""
)
)
tsd["temp"] = (tsd.tmin + tsd.tmax) / 2.0
if any((tsd.tmin >= tsd.temp).dropna()) or any((tsd.tmax <= tsd.temp).dropna()):
raise ValueError(
tsutils.error_wrapper(
f"""
On the following dates:
                {tsd[(tsd.tmin >= tsd.temp) | (tsd.tmax <= tsd.temp)]},
the daily average is either below or equal to the minimum
temperature in column {temp_min_col} or higher or equal to
the maximum temperature in column {temp_max_col}.
"""
)
)
if min_max_time == "fix":
# Not dependent on sun, just average values.
sun_times = pd.DataFrame(
index=[1], columns=("sunrise", "sunnoon", "sunset", "daylength")
)
sun_times.sunrise = 7
sun_times.sunnoon = 12
sun_times.sunset = 19
sun_times.daylength = 12
elif lat is None or lon is None:
raise ValueError(
tsutils.error_wrapper(
f"""
The `min_max_time` options other than "fix" require calculation
of sunrise, sun noon, sunset, and day length. The calculation
requires the latitude with keyword "lat" and longitude with
keyword "lon".
You gave:
lat={lat}
lon={lon}
"""
)
)
else:
sun_times = get_sun_times(tsd.index, float(lon), float(lat), round(lon / 15.0))
ntsd = pd.DataFrame(
disaggregate_temperature(
tsd,
method=method,
min_max_time=min_max_time,
mod_nighttime=mod_nighttime,
max_delta=max_delta,
mean_course=mean_course,
sun_times=sun_times,
)
)
ntsd.columns = [f"temperature:{target_units[0]}:disagg"]
return tsutils.return_input(print_input, tsd, ntsd)
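# Hedged usage sketch: the CSV file name and column numbers below are invented
# for illustration and are not part of mettoolbox's documented examples.
def _example_temperature_disagg():
    return temperature(
        "sine_min_max",
        ["degC", "degC"],
        input_ts="daily_temps.csv",  # hypothetical file holding tmin/tmax
        temp_min_col=1,
        temp_max_col=2,
        min_max_time="fix",          # the "fix" option needs no lat/lon
    )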
@validate_arguments(config=dict(arbitrary_types_allowed=True))
def prepare_hum_tdew(
method: Literal[
"equal",
"minimal",
"dewpoint_regression",
"linear_dewpoint_variation",
"min_max",
"month_hour_precip_mean",
],
source_units,
input_ts="-",
columns=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
hum_min_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
hum_max_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
hum_mean_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
temp_min_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
temp_max_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
precip_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
a0=None,
a1=None,
kr=None,
hourly_temp=None,
hourly_precip_hum=None,
preserve_daily_mean=None,
):
"""Disaggregate daily humidity to hourly humidity data."""
target_units = single_target_units(source_units, target_units, "")
if method == "equal" and hum_mean_col is None:
raise ValueError(
tsutils.error_wrapper(
"""
If `method` is "equal" then the mean daily humidity is
a required column identified with the keyword `hum_mean_col`
"""
)
)
if method == "month_hour_precip_mean" and precip_col is None:
raise ValueError(
tsutils.error_wrapper(
"""
If `method` is "month_hour_precip_mean" then the daily precip
is a required column identified with the keyword
`precip_col`
"""
)
)
if (
method in ("minimal", "dewpoint_regression", "linear_dewpoint_variation")
and temp_min_col is None
):
raise ValueError(
tsutils.error_wrapper(
"""
If `method` is "minimal", "dewpoint_regression", or
"linear_dewpoint_variation" then the minimum daily temperature
is a required column identified with the keyword
`temp_min_col`.
"""
)
)
if method == "min_max" and (
hum_min_col is None
or hum_max_col is None
or temp_min_col is None
or temp_max_col is None
):
raise ValueError(
tsutils.error_wrapper(
f"""
If `method` is "min_max" then:
Minimum daily humidity is a required column identified with the
keyword `hum_min_col`. You gave {hum_min_col}.
Maximum daily humidity is a required column identified with the
keyword `hum_max_col`. You gave {hum_max_col}.
Minimum daily temperature is a required column identified with
the keyword `temp_min_col`. You gave {temp_min_col}.
Maximum daily temperature is a required column identified with
the keyword `temp_max_col`. You gave {temp_max_col}.
"""
)
)
if method in ("dewpoint_regression", "linear_dewpoint_variation") and (
a0 is None or a1 is None
):
raise ValueError(
tsutils.error_wrapper(
"""
If `method` is "dewpoint_regression" or
"linear_dewpoint_variation" then a0 and a1 must be given.
"""
)
)
if method == "linear_dewpoint_variation" and kr is None:
raise ValueError(
tsutils.error_wrapper(
"""
If `method` is "linear_dewpoint_variation" then kr must be
given
"""
)
)
if (
method
in (
"minimal",
"dewpoint_regression",
"linear_dewpoint_variation",
"min_max",
)
and hourly_temp is None
):
raise ValueError(
tsutils.error_wrapper(
"""
If `method` is "minimal", "dewpoint_regression",
"linear_dewpoint_variation", or "min_max" then hourly
temperature is required identified by the filename in keyword
`hourly_temp`.
"""
)
)
pd.options.display.width = 60
columns = []
if method == "equal":
with suppress(TypeError):
hum_mean_col = int(hum_mean_col)
columns.append(hum_mean_col)
if method == "min_max":
with suppress(TypeError):
temp_min_col = int(temp_min_col)
columns.append(temp_min_col)
with suppress(TypeError):
temp_max_col = int(temp_max_col)
columns.append(temp_max_col)
with suppress(TypeError):
hum_min_col = int(hum_min_col)
columns.append(hum_min_col)
with suppress(TypeError):
hum_max_col = int(hum_max_col)
columns.append(hum_max_col)
if method in ("minimal", "dewpoint_regression", "linear_dewpoint_variation"):
with suppress(TypeError):
temp_min_col = int(temp_min_col)
columns.append(temp_min_col)
if method == "month_hour_precip_mean":
with suppress(TypeError):
precip_col = int(precip_col)
columns.append(precip_col)
if preserve_daily_mean is not None and method in (
"minimal",
"dewpoint_regression",
"linear_dewpoint_variation",
"min_max",
"month_hour_precip_mean",
):
with suppress(TypeError):
hum_mean_col = int(preserve_daily_mean)
columns.append(hum_mean_col)
tsd = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts, skiprows=skiprows, names=names, index_type=index_type
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
source_units=source_units,
target_units=target_units,
clean=clean,
)
if method == "equal":
tsd.columns = ["hum"]
if preserve_daily_mean is not None:
if method in ("minimal", "dewpoint_regression", "linear_dewpoint_variation"):
tsd.columns = ("tmin", "hum")
if method == "min_max":
tsd.columns = ("tmin", "tmax", "hum_min", "hum_max", "hum")
elif method == "month_hour_precip_mean":
tsd.columns = ("precip", "hum")
preserve_daily_mean = True
else:
if method in ("minimal", "dewpoint_regression", "linear_dewpoint_variation"):
tsd.columns = "tmin"
if method == "min_max":
tsd.columns = ("tmin", "tmax", "hum_min", "hum_max")
elif method == "month_hour_precip_mean":
tsd.columns = "precip"
if method in (
"minimal",
"dewpoint_regression",
"linear_dewpoint_variation",
"min_max",
):
hourly_temp = tstoolbox.read(hourly_temp)
hourly_temp = hourly_temp.astype(float).squeeze()
if method == "month_hour_precip_mean":
hourly_precip_hum = tstoolbox.read(hourly_precip_hum)
month_hour_precip_mean = calculate_month_hour_precip_mean(hourly_precip_hum)
else:
month_hour_precip_mean = "None"
return tsd, hourly_temp, month_hour_precip_mean
@validate_arguments(config=dict(arbitrary_types_allowed=True))
def humidity(
method: Literal[
"equal",
"minimal",
"dewpoint_regression",
"linear_dewpoint_variation",
"min_max",
"month_hour_precip_mean",
],
source_units,
input_ts="-",
columns=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
hum_min_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
hum_max_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
hum_mean_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
temp_min_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
temp_max_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
precip_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
a0=None,
a1=None,
kr=None,
hourly_temp=None,
hourly_precip_hum=None,
preserve_daily_mean=None,
):
"""Disaggregate daily humidity to hourly humidity data."""
target_units = single_target_units(source_units, target_units, "")
tsd, hourly_temp, month_hour_precip_mean = prepare_hum_tdew(
method,
source_units,
input_ts=input_ts,
columns=columns,
start_date=start_date,
end_date=end_date,
dropna=dropna,
clean=clean,
round_index=round_index,
skiprows=skiprows,
index_type=index_type,
names=names,
target_units=target_units,
hum_min_col=hum_min_col,
hum_max_col=hum_max_col,
hum_mean_col=hum_mean_col,
temp_min_col=temp_min_col,
temp_max_col=temp_max_col,
precip_col=precip_col,
a0=a0,
a1=a1,
kr=kr,
hourly_temp=hourly_temp,
hourly_precip_hum=hourly_precip_hum,
preserve_daily_mean=preserve_daily_mean,
)
ntsd = pd.DataFrame(
disaggregate_humidity(
tsd.astype(float),
method=method,
temp=hourly_temp,
a0=a0,
a1=a1,
kr=kr,
preserve_daily_mean=preserve_daily_mean,
month_hour_precip_mean=month_hour_precip_mean,
)
)
ntsd.columns = ["humidity:{0}:disagg"]
return tsutils.return_input(print_input, tsd, ntsd)
@validate_arguments(config=dict(arbitrary_types_allowed=True))
def dewpoint_temperature(
method: Literal[
"equal",
"minimal",
"dewpoint_regression",
"linear_dewpoint_variation",
"min_max",
"month_hour_precip_mean",
],
source_units,
input_ts="-",
columns=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
hum_min_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
hum_max_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
hum_mean_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
temp_min_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
temp_max_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
precip_col: Optional[Union[PositiveInt, str, pd.Series]] = None,
a0=None,
a1=None,
kr=None,
hourly_temp=None,
hourly_precip_hum=None,
preserve_daily_mean=None,
):
"""Disaggregate daily humidity to hourly humidity data."""
# target_units = single_target_units(source_units, target_units, "")
target_units = single_target_units(source_units, target_units, "degK")
tsd, hourly_temp, month_hour_precip_mean = prepare_hum_tdew(
method,
source_units,
input_ts=input_ts,
columns=columns,
start_date=start_date,
end_date=end_date,
dropna=dropna,
clean=clean,
round_index=round_index,
skiprows=skiprows,
index_type=index_type,
names=names,
target_units=target_units,
hum_min_col=hum_min_col,
hum_max_col=hum_max_col,
hum_mean_col=hum_mean_col,
temp_min_col=temp_min_col,
temp_max_col=temp_max_col,
precip_col=precip_col,
a0=a0,
a1=a1,
kr=kr,
hourly_temp=hourly_temp,
hourly_precip_hum=hourly_precip_hum,
preserve_daily_mean=preserve_daily_mean,
)
ntsd = pd.DataFrame(
tdew_melo.disaggregate_tdew(
tsd.astype(float),
method=method,
temp=hourly_temp,
a0=a0,
a1=a1,
kr=kr,
preserve_daily_mean=preserve_daily_mean,
month_hour_precip_mean=month_hour_precip_mean,
)
)
ntsd.columns = ["dewpoint_temp:degK:disagg"]
ntsd_units = tsutils._normalize_units(
ntsd, source_units="degK", target_units=target_units[0]
)
    return tsutils.return_input(print_input, tsd, ntsd_units)
@validate_arguments(config=dict(arbitrary_types_allowed=True))
def wind_speed(
method: Literal["equal", "cosine", "random"],
source_units,
input_ts="-",
columns=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
a=None,
b=None,
t_shift=None,
):
"""Disaggregate daily to hourly data."""
target_units = single_target_units(source_units, target_units, "m/s")
if method == "cosine" and (a is None or b is None or t_shift is None):
raise ValueError(
tsutils.error_wrapper(
f"""
For the "cosine" method, requires the `a`, `b`, and `t_shift`
keywords. You gave:
a = {a}
b = {b}
t_shift = {t_shift}
"""
)
)
if method in ("equal", "random") and not (
a is None or b is None or t_shift is None
):
warnings.warn(
tsutils.error_wrapper(
"""
The a, b, and t_shift options are ignored for the "equal" and
"random" methods.
"""
)
)
tsd = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts, skiprows=skiprows, names=names, index_type=index_type
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
source_units=source_units,
target_units=target_units,
clean=clean,
)
ndf = pd.DataFrame()
for column_name, column_data in tsd.iteritems():
df = disaggregate_wind(column_data, method=method, a=a, b=b, t_shift=t_shift)
ndf = ndf.join(df, how="outer")
return tsutils.return_input(
print_input,
tsd,
ndf,
)
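# Hedged usage sketch: the file name and the a/b/t_shift values are purely
# illustrative assumptions, not recommended settings.
def _example_wind_disagg():
    return wind_speed(
        "cosine",
        ["m/s"],
        input_ts="daily_wind.csv",  # hypothetical single-column daily series
        a=1.0,
        b=0.2,
        t_shift=3,
    )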
@validate_arguments(config=dict(arbitrary_types_allowed=True))
def radiation(
method: Literal["pot_rad", "pot_rad_via_ssd", "pot_rad_via_bc", "mean_course"],
source_units,
input_ts="-",
columns=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
pot_rad=None,
angstr_a=0.25,
angstr_b=0.5,
bristcamp_a=0.75,
bristcamp_c=2.4,
hourly_rad=None,
lat=None,
lon=None,
glob_swr_col=None,
):
"""Disaggregate daily to hourly data."""
target_units = single_target_units(source_units, target_units, "W/m2")
    target_units = [target_units[0]] * len(source_units)
pd.options.display.width = 60
if method == "mean_course" and hourly_rad is None:
raise ValueError(
tsutils.error_wrapper(
"""
If method is "mean_course" need to supply CSV filename of
hourly radiation by the `hourly_rad` keyword."""
)
)
if method in ("pot_rad", "mean_course") and glob_swr_col is None:
raise ValueError(
tsutils.error_wrapper(
"""
If method is "pot_rad" or "mean_course" need to supply the
daily global short wave radiation as column name or index with
keyword `glob_swr_col`
"""
)
)
if method == "pot_rad_via_bc" and (bristcamp_a is None or bristcamp_c is None):
raise ValueError(
tsutils.error_wrapper(
"""
If method is "pot_rad_via_bc" need to supply the keywords
`bristcamp_a` and `bristcamp_c`.
"""
)
)
tsd = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts, skiprows=skiprows, names=names, index_type=index_type
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
source_units=source_units,
target_units=target_units,
clean=clean,
)
if method in ("pot_rad", "mean_course"):
with suppress(ValueError):
glob_swr_col = glob_swr_col - 1
tsd["glob"] = tsd[glob_swr_col]
sun_times = None
if method == "pot_rad_via_ssd":
sun_times = get_sun_times(tsd.index, float(lon), float(lat), round(lon / 15.0))
return tsutils.return_input(
print_input,
tsd,
pd.DataFrame(
disaggregate_radiation(
tsd,
sun_times=sun_times,
pot_rad=pot_rad,
method=method,
angstr_a=angstr_a,
angstr_b=angstr_b,
bristcamp_a=bristcamp_a,
bristcamp_c=bristcamp_c,
mean_course=hourly_rad,
)
),
)
@validate_arguments(config=dict(arbitrary_types_allowed=True))
def precipitation(
method: Literal["equal", "cascade", "masterstation"],
source_units,
input_ts="-",
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
columns=None,
masterstation_hour_col: Optional[Union[PositiveInt, str]] = None,
):
"""Disaggregate daily to hourly data."""
target_units = single_target_units(source_units, target_units, "mm")
pd.options.display.width = 60
tsd = tsutils.common_kwds(
input_tsd=tsutils.make_list(input_ts),
skiprows=skiprows,
index_type=index_type,
start_date=start_date,
end_date=end_date,
round_index=round_index,
names=names,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
usecols=columns,
)
if method == "masterstation":
try:
# If masterstations_hour_col is a column name:
masterstation_hour_col = tsd.columns.get_loc(masterstation_hour_col)
except KeyError:
# If masterstations_hour_col is a column number:
masterstation_hour_col = int(masterstation_hour_col) - 1
try:
mhour = tsd[masterstation_hour_col].to_frame()
except:
mhour = tsutils.common_kwds(
input_tsd=tsutils.make_list(input_ts),
skiprows=skiprows,
index_type=index_type,
start_date=start_date,
end_date=end_date,
round_index=round_index,
names=names,
dropna=dropna,
clean=clean,
source_units=source_units,
target_units=target_units,
usecols=columns,
)
# Should only be one hourly column in the input.
dsum = mhour.groupby(pd.Grouper(freq="D")).sum().asfreq("H", method="ffill")
master = mhour.join(dsum, rsuffix="sum")
mask = master.iloc[:, 0] > 0.0
master = (
master.loc[mask, master.columns[0]] / master.loc[mask, master.columns[1]]
).to_frame()
# All the remaining columns are daily.
ntsd = (
tsd.loc[:, tsd.columns != masterstation_hour_col]
.asfreq("H", method="ffill")
.mul(master, axis="rows")
)
return tsutils.return_input(print_input, tsd, ntsd)
@validate_arguments(config=dict(arbitrary_types_allowed=True))
def evaporation(
method: Literal["trap", "fixed"],
source_units,
input_ts="-",
columns=None,
start_date=None,
end_date=None,
dropna="no",
clean=False,
round_index=None,
skiprows=None,
index_type="datetime",
names=None,
target_units=None,
print_input=False,
lat: Optional[confloat(ge=-90, le=90)] = None,
):
"""Disaggregate daily to hourly data."""
target_units = single_target_units(source_units, target_units)
pd.options.display.width = 60
if method == "trap" and lat is None:
raise ValueError(
tsutils.error_wrapper(
f"""
The "trap" method requires latitude with the `lat` keyword.
You gave "{lat}".
"""
)
)
tsd = tsutils.common_kwds(
tsutils.read_iso_ts(
input_ts, skiprows=skiprows, names=names, index_type=index_type
),
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna=dropna,
source_units=source_units,
target_units=target_units,
clean=clean,
)
ntsd = tsd.append(
pd.DataFrame(
columns=tsd.columns, index=[tsd.index[-1] + datetime.timedelta(days=1)]
)
)
ndata = ntsd.resample("H").ffill()
fdata = pd.DataFrame(columns=ndata.columns, index=ndata.index, dtype="f")
if method == "trap":
lrad = lat * np.pi / 180.0
ad = 0.40928 * np.cos(0.0172141 * (172 - tsd.index.dayofyear))
ss = np.sin(lrad) * np.sin(ad)
cs = np.cos(lrad) * np.cos(ad)
x2 = -ss / cs
        delt = 7.6394 * (np.pi / 2.0 - np.arctan(x2 / np.sqrt(1 - x2**2)))
sunr = 12.0 - delt / 2.0
# develop hourly distribution given sunrise,
# sunset and length of day (DELT)
dtr2 = delt / 2.0
dtr4 = delt / 4.0
tr2 = sunr + dtr4
tr3 = tr2 + dtr2
tr4 = tr3 + dtr4
for index, toss in enumerate(sunr):
cdate = ntsd.index[index]
fdata.loc[
datetime.datetime(cdate.year, cdate.month, cdate.day, int(sunr[index])),
:,
] = 0.0
fdata.loc[
datetime.datetime(
cdate.year, cdate.month, cdate.day, int(tr4[index]) + 1
),
:,
] = 0.0
fdata.loc[
datetime.datetime(
cdate.year, cdate.month, cdate.day, int(round(tr2[index]))
),
:,
] = 1.0
fdata.loc[
datetime.datetime(
cdate.year, cdate.month, cdate.day, int(round(tr3[index]))
),
:,
] = 1.0
fdata.iloc[0, :] = 0.0
fdata.iloc[-1, :] = 0.0
fdata = fdata.interpolate("linear")
fdata = fdata.fillna(0.0)
fdata = fdata / fdata.groupby(pd.Grouper(freq="D")).sum().resample("H").ffill()
fdata = fdata * ndata
fdata = fdata.iloc[:-1, :]
elif method == "fixed":
# DATA EVAPDIST / 0.000,0.000,0.000,0.000,0.000,0.000,0.019,0.041,
# $ 0.067,0.088,0.102,0.110,0.110,0.110,0.105,0.095,
# $ 0.081,0.055,0.017,0.000,0.000,0.000,0.000,0.000
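        # Note: the thirteen fixed hourly fractions below (hours 07-19) sum to
        # 1.000, so each day's total evaporation is preserved.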
fdata = fdata.fillna(0.0)
fdata[fdata.index.hour == 7] = 0.019
fdata[fdata.index.hour == 8] = 0.041
fdata[fdata.index.hour == 9] = 0.067
fdata[fdata.index.hour == 10] = 0.088
fdata[fdata.index.hour == 11] = 0.102
fdata[fdata.index.hour == 12] = 0.110
fdata[fdata.index.hour == 13] = 0.110
fdata[fdata.index.hour == 14] = 0.110
fdata[fdata.index.hour == 15] = 0.105
fdata[fdata.index.hour == 16] = 0.095
fdata[fdata.index.hour == 17] = 0.081
fdata[fdata.index.hour == 18] = 0.055
fdata[fdata.index.hour == 19] = 0.017
fdata = fdata * ndata
fdata = fdata.iloc[:-1, :]
return tsutils.print_input(print_input, tsd, fdata, None)
| {
"content_hash": "93828444a31b2513273163dfccb16652",
"timestamp": "",
"source": "github",
"line_count": 1069,
"max_line_length": 88,
"avg_line_length": 30.507015902712816,
"alnum_prop": 0.5537532196737397,
"repo_name": "timcera/mettoolbox",
"id": "5a8c43de32a920c0f60cceb67f50a1892d7bb891",
"size": "32612",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/mettoolbox/disaggregate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1077048"
},
{
"name": "Python",
"bytes": "208651"
}
],
"symlink_target": ""
} |
"""
AUTHOR : Lang
PURPOSE : Multi Self Deep Learning
"""
__author__ = 'Lang'
import tensorflow as tf, sys
import os
import shutil
# change this as you see fit
graph_path_temple = sys.argv[1]
label_path_temple = sys.argv[2]
graph_path = os.path.abspath(graph_path_temple)
label_path = os.path.abspath(label_path_temple)
# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
in tf.gfile.GFile(label_path)]
# Unpersists graph from file
with tf.gfile.FastGFile(graph_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
count = 0
tracing = open("processing.txt",'w')
tracing.close()
move_directory = "Removed_Pictures"
try:
os.stat(move_directory)
except:
os.mkdir(move_directory)
for image_dir_path in os.listdir('.'):
try:
for image_path in os.listdir(image_dir_path):
try:
# Read in the image_data
image_data = tf.gfile.FastGFile(image_dir_path+'/'+image_path, 'rb').read()
with tf.Session() as sess:
# Feed the image_data as input to the graph and get first prediction
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(softmax_tensor, \
{'DecodeJpeg/contents:0': image_data})
# Sort to show labels of first prediction in order of confidence
top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
if label_lines[top_k[0]] == "no":
shutil.move(image_dir_path+'/'+image_path, move_directory+'/'+image_dir_path + ' ' + image_path)
print('removed picture '+image_path)
else:
                        print('kept picture ' + image_path)
except:
os.remove(image_dir_path+'/'+image_path)
                print('removed picture ' + image_path)
count = count +1
tracing = open("processing.txt",'a')
tracing.write("finish " + str(count) + " kinds of removing not flower pictures\n")
tracing.close()
except:
print('error:'+ image_dir_path)
tracing = open("processing.txt",'a')
tracing.write("all finished")
tracing.close()
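# A minimal standalone sketch (not part of this script's workflow) of what the
# top_k line above computes: argsort orders the softmax scores ascending, the
# slice keeps all of them, and [::-1] reverses so top_k[0] is the index of the
# most confident label. The scores and labels below are hypothetical.
def _top_k_sketch():
    import numpy as np
    scores = np.array([0.10, 0.85, 0.05])
    labels = ["no", "rose", "tulip"]
    top_k = scores.argsort()[-len(scores):][::-1]
    assert list(top_k) == [1, 0, 2]
    return labels[top_k[0]]  # -> "rose"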
| {
"content_hash": "c119471ee64ea93ba865f91ebbaeff12",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 120,
"avg_line_length": 31.157894736842106,
"alnum_prop": 0.582347972972973,
"repo_name": "HeavenMin/PlantImageRecognition",
"id": "d4d4074133cdcf69d751385a79dbfab1e660de3a",
"size": "2369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dataset Process/Move_Anything_Not_Flower.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "119507"
},
{
"name": "Python",
"bytes": "168234"
}
],
"symlink_target": ""
} |
"""Test parsing of COUNTER 5 TRJ2 report (turnaways)"""
import datetime
def test_metric(trj2_report):
assert trj2_report.metric is None # Multiple metrics per report
def test_type(trj2_report):
assert trj2_report.report_type == u"TR_J2"
def test_data(trj2_report):
i = iter(trj2_report)
row = next(i)
item = next(iter(row))
assert item == (datetime.date(2017, 1, 1), u"Limit_Exceeded", 3)
| {
"content_hash": "a5692d49b355de3d09b2b32424e4cf23",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 68,
"avg_line_length": 23.38888888888889,
"alnum_prop": 0.672209026128266,
"repo_name": "pitthsls/pycounter",
"id": "d7e97bef42ee8a73ced3122621e89a3c51f6cc39",
"size": "421",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pycounter/test/counter5/test_trj2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "114031"
}
],
"symlink_target": ""
} |
from vispy import testing
from vispy.visuals.graphs.layouts import get_layout
from vispy.visuals.graphs.layouts.networkx_layout import NetworkxCoordinates
import numpy as np
# conditional import
try:
import networkx as nx
except ModuleNotFoundError:
nx = None
def test_networkx_layout_with_graph():
"""
Testing the various inputs to the networkx layout
"""
settings = dict(name="networkx_layout")
if nx:
# empty input
# testing.assert_raises(ValueError("Requires networkx input"), get_layout(**settings))
# define graph
graph = nx.complete_graph(5)
# define positions
layout = np.random.rand(5, 2)
settings['graph'] = graph
settings['layout'] = layout
# test numpy array input
testing.assert_true(isinstance(
get_layout(**settings), NetworkxCoordinates))
# testing string input
settings['layout'] = 'circular'
testing.assert_true(isinstance(
get_layout(**settings), NetworkxCoordinates))
# testing dict input
settings['layout'] = nx.circular_layout(graph)
testing.assert_true(isinstance(
get_layout(**settings), NetworkxCoordinates))
def test_networkx_layout_no_networkx():
settings = dict(name="networkx_layout")
testing.assert_raises(ValueError, get_layout, **settings)
| {
"content_hash": "7cac8c0af687abed034242ca76a922d8",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 94,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.6608695652173913,
"repo_name": "Eric89GXL/vispy",
"id": "4369514bb3f0e92262f57f32052bb79bf2edc91c",
"size": "1539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vispy/testing/tests/test_networkx_layout.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "195460"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1638"
},
{
"name": "PowerShell",
"bytes": "4078"
},
{
"name": "Python",
"bytes": "2461885"
}
],
"symlink_target": ""
} |
import sys
import hashlib
import time
import json
import os
import binascii
import datetime
import random
import requests # pip install requests
from requests import Request, Session
AUTH_KEY = os.environ['AUTH_KEY']
USERNAME = os.environ['USERNAME'] if 'USERNAME' in os.environ else 'joe'
PASSPHRASE = os.environ['PASSPHRASE'] if 'PASSPHRASE' in os.environ else 'clipperz'
#URLS = os.environ['URLS'] if 'URLS' in os.environ else ['https://clipperz.is', 'https://dev.clipperz.is', 'https://app2.cloud.clipperz.is']
URLS = os.environ['URLS'] if 'URLS' in os.environ else ['https://clipperz.is']
def md5(content):
hash = hashlib.md5()
hash.update(content)
result = bytearray(hash.digest())
return result
def sha256(content):
hash = hashlib.sha256()
hash.update(content)
result = bytearray(hash.digest())
return result
def shaD256(content):
return sha256(sha256(content))
def hash(content):
return shaD256(content)
def stringHash(value):
return binascii.hexlify(hash(value))
def dataToInt(data):
return int(binascii.hexlify(data), 16)
def intToHex(value):
return hex(value).rstrip("L").lstrip("0x")
def downloadApp(session, label, url):
sys.stdout.write('Downloading application version {}'.format(label))
request = Request('GET', url)
preparedRequest = session.prepare_request(request)
preparedRequest.headers['Accept'] = 'text/html'
preparedRequest.headers['Accept-Encoding'] = 'gzip,deflate,sdch'
# SNI will never be supported in Python 2 series: http://stackoverflow.com/questions/18578439/using-requests-with-tls-doesnt-give-sni-support#comment30104870_18579484
start = time.time()
response = session.send(preparedRequest, verify=False)
loadTime = time.time() - start
result = {
'url': url,
'status': response.status_code,
'etag': response.headers['etag'],
'lastModified': response.headers['last-modified'],
'timing': loadTime,
}
if response.status_code == 200:
# result['content'] = response.headers['content-encoding'],
result['size'] = len(response.content)
result['signature'] = binascii.hexlify(md5(response.content))
print(' -> signature: {} - size: {}'.format(result['signature'], str(result['size'])))
else:
print(" error: " + response.status_code)
return result
def payToll(toll):
def prefixMatchingBits(value, target):
result = 0
c = min(len(value), len(target))
i = 0
while (i < c) and (value[i] == target[i]):
result += 8
i += 1
if (i < c):
xorValue = value[i] ^ target[i]
if xorValue >= 64:
result += 1
elif xorValue >= 32:
result += 2
elif xorValue >= 16:
result += 3
elif xorValue >= 8:
result += 4
elif xorValue >= 4:
result += 5
elif xorValue >= 2:
result += 6
elif xorValue >= 1:
result += 7
return result
def increment(value):
i = len(value) - 1
done = False
while (i >= 0) and (done == False):
currentValue = value[i]
if currentValue == 0xff:
value[i] = 0x00
if i >= 0:
i -= 1
else:
done = True
else:
value[i] = currentValue + 1
done = True
return value
cost = toll['cost']
target = bytearray(toll['targetValue'].decode("hex"))
payment = bytearray(os.urandom(32))
while True:
if prefixMatchingBits(sha256(payment), target) > cost:
break
else:
payment = increment(payment)
result = binascii.hexlify(payment)
return result
def postPayload(session, url, payload):
start = time.time()
request = Request('POST', url, data=payload)
preparedRequest = session.prepare_request(request)
response = session.send(preparedRequest, verify=False)
timing = time.time() - start
result = response.json()
return timing, result
def knock(session, url):
payload = {
'method': 'knock',
'version': 'fake-app-version',
'parameters': json.dumps({
'requestType': 'CONNECT'
})
}
timing, result = postPayload(session, url, payload)
toll = result['toll']
return timing, toll
def handshake_connect(session, url, C, A, toll, payment):
payload = {
'method': 'handshake',
'version': 'fake-app-version',
'parameters': json.dumps({
"parameters": {
"message": "connect",
"version": "0.2",
"parameters": {
"C": C,
"A": A
}
},
"toll": {
"targetValue": toll['targetValue'],
"toll": payment
}
})
}
timing, result = postPayload(session, url, payload)
toll = result['toll']
challenge = result['result']
return timing, challenge, toll
def handshake_credentialCheck(session, url, M1, toll, payment):
payload = {
'method': 'handshake',
'version': 'fake-app-version',
'parameters': json.dumps({
"parameters": {
"message": "credentialCheck",
"version": "0.2",
"parameters": {
"M1": M1
}
},
"toll": {
"targetValue": toll['targetValue'],
"toll": payment
}
})
}
timing, result = postPayload(session, url, payload)
toll = result['toll']
info = result['result']
return timing, info, toll
def message_getUserDetails(session, url, sharedSecret, toll, payment):
payload = {
'method': 'message',
'version': 'fake-app-version',
'parameters': json.dumps({
"parameters": {
"message": "getUserDetails",
"srpSharedSecret": sharedSecret,
"parameters": {}
},
"toll": {
"targetValue": toll['targetValue'],
"toll": payment
}
})
}
timing, result = postPayload(session, url, payload)
toll = result['toll']
details = result['result']
return timing, details, toll
def doLogin(session, url, username, passphrase):
sys.stdout.write("Doing login ...")
try:
start = time.time()
g = 2
n = int('115b8b692e0e045692cf280b436735c77a5a9e8a9e7ed56c965f87db5b2a2ece3', 16)
k = int('64398bff522814e306a97cb9bfc4364b7eed16a8c17c5208a40a2bad2933c8e', 16)
knockTiming, toll = knock(session, url)
C = stringHash(username + passphrase)
p = stringHash(passphrase + username)
a = dataToInt(bytearray(os.urandom(32)))
A = pow(g, a, n)
connectTiming, challenge, toll = handshake_connect(session, url, C, intToHex(A), toll, payToll(toll))
B = int(challenge['B'], 16)
s = challenge['s']
u = dataToInt(hash(str(A) + str(B)))
x = dataToInt(hash(('0000000000000000000000000000000000000000000000000000000000000000' + s)[-64:] + p))
S = pow((B - k * pow(g, x, n)), (a + u * x), n)
K = stringHash(str(S))
M1 = stringHash(
"597626870978286801440197562148588907434001483655788865609375806439877501869636875571920406529" +
stringHash(C) +
str(int(s, 16)) +
str(A) +
str(B) +
K
)
credentialCheckTiming, info, toll = handshake_credentialCheck(session, url, M1, toll, payToll(toll))
sharedSecret = K
getUserDetailsTiming, details, toll = message_getUserDetails(session, url, sharedSecret, toll, payToll(toll))
result = {
'knock': knockTiming,
'connect': connectTiming,
'credentialCheck': credentialCheckTiming,
'getUserDetails': getUserDetailsTiming,
'total': time.time() - start
}
except Exception as exception:
result = {
'error': str(exception)
}
print(" done")
return result, C
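# A minimal standalone sketch of the SRP-6a relation used in doLogin() above,
# with toy parameters (tiny prime n, generator g, multiplier k) and hypothetical
# secrets a, b, x instead of the real 256-bit values: the client-side secret
# (B - k*g^x)^(a + u*x) mod n equals the server-side (A * v^u)^b mod n.
def _srp_toy_check():
    n, g, k = 23, 5, 3
    x, a, b, u = 6, 4, 7, 2
    v = pow(g, x, n)                    # verifier the server stores
    A = pow(g, a, n)                    # client public value
    B = (k * v + pow(g, b, n)) % n      # server public value
    client_S = pow(B - k * v, a + u * x, n)
    server_S = pow(A * pow(v, u, n), b, n)
    assert client_S == server_S
    return client_S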
#def collectCurrentLocationInfo():
# return {
# 'timestamp': datetime.datetime.utcnow().isoformat(),
# 'ip': requests.get('http://ifconfig.me/ip').text.rstrip().encode("ascii")
# }
def main (baseUrl, username, passphrase):
session = Session()
betaInfo = downloadApp(session, 'beta', baseUrl + '/beta')
gammaInfo = downloadApp(session, 'gamma', baseUrl + '/gamma')
deltaInfo = downloadApp(session, 'delta', baseUrl + '/delta')
connectInfo, C = doLogin(session, baseUrl + '/json', username, passphrase)
# currentLocationInfo = collectCurrentLocationInfo()
result = {
'info': {
'host': baseUrl,
'user': C
},
'beta': betaInfo,
'gamma': gammaInfo,
'delta': deltaInfo,
'timing': connectInfo
# 'info': currentLocationInfo
}
data = json.dumps(result)
print("Collected data:\n" + json.dumps(result, indent=4))
response = requests.post('http://collector.stats.clipperz.is/submit', data, auth=('x', AUTH_KEY))
# response = requests.post('http://localhost:8888/submit', data, auth=('x', AUTH_KEY))
if response.status_code != 200:
# raise Exception("failed to submit data")
print("Sorry. Failed to submit data: " + str(response.status_code))
else:
print("Data successfully submitted. Thanks!")
if __name__ == "__main__":
for url in URLS:
waitingTime = int(random.random() * 5 * 60)
print("....z.zz.. (waiting for " + str(waitingTime) + " seconds)")
time.sleep(waitingTime)
main(url, USERNAME, PASSPHRASE)
| {
"content_hash": "8e390cf472de9b9142e14a8a6e9b973f",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 167,
"avg_line_length": 24.737609329446062,
"alnum_prop": 0.6651738361814967,
"repo_name": "clipperz/stats-collector",
"id": "66622cb95bcc9063cc1688ff4d092cd5c0452f80",
"size": "8504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "collectData.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12425"
}
],
"symlink_target": ""
} |
from abc import ABCMeta, abstractmethod
from six import add_metaclass
from fuel.iterator import DataIterator
@add_metaclass(ABCMeta)
class AbstractDataStream(object):
"""A stream of data separated into epochs.
A data stream is an iterable stream of examples/minibatches. It shares
    similarities with Python file handles returned by the ``open`` method.
Data streams can be closed using the :meth:`close` method and reset
using :meth:`reset` (similar to ``f.seek(0)``).
Parameters
----------
iteration_scheme : :class:`.IterationScheme`, optional
The iteration scheme to use when retrieving data. Note that not all
        datasets support the same iteration schemes: some datasets require
        one, and others don't support any. When the data stream
wraps another data stream, the choice of supported iteration
schemes is typically even more limited. Be sure to read the
documentation of the dataset or data stream in question.
Attributes
----------
iteration_scheme : :class:`.IterationScheme`
The iteration scheme used to retrieve data. Can be ``None`` when
not used.
sources : tuple of strings
The names of the data sources returned by this data stream, as
given by the dataset.
"""
def __init__(self, iteration_scheme=None):
self.iteration_scheme = iteration_scheme
def get_data(self, request=None):
"""Request data from the dataset or the wrapped stream.
Parameters
----------
request : object
A request fetched from the `request_iterator`.
"""
pass
@abstractmethod
def reset(self):
"""Reset the data stream."""
pass
@abstractmethod
def close(self):
"""Gracefully close the data stream, e.g. releasing file handles."""
pass
@abstractmethod
def next_epoch(self):
"""Switch the data stream to the next epoch."""
pass
@abstractmethod
def get_epoch_iterator(self, as_dict=False):
return DataIterator(self, self.iteration_scheme.get_request_iterator()
if self.iteration_scheme else None,
as_dict=as_dict)
def iterate_epochs(self, as_dict=False):
"""Allow iteration through all epochs.
Notes
-----
This method uses the :meth:`get_epoch_iterator` method to retrieve
the :class:`DataIterator` for each epoch. The default
implementation of this method resets the state of the data stream
so that the new epoch can read the data from the beginning.
However, this behavior only works as long as the ``epochs``
property is iterated over using e.g. ``for epoch in
stream.epochs``. If you create the data iterators in advance (e.g.
        using ``for i, epoch in zip(range(10), stream.epochs)`` in Python 2)
you must call the :meth:`reset` method yourself.
"""
while True:
yield self.get_epoch_iterator(as_dict=as_dict)
class DataStream(AbstractDataStream):
"""A stream of data from a dataset.
Parameters
----------
dataset : instance of :class:`Dataset`
The dataset from which the data is fetched.
"""
def __init__(self, dataset, **kwargs):
super(DataStream, self).__init__(**kwargs)
self.dataset = dataset
self.data_state = self.dataset.open()
self._fresh_state = True
@property
def sources(self):
if hasattr(self, '_sources'):
return self._sources
return self.dataset.sources
@sources.setter
def sources(self, value):
self._sources = value
def close(self):
self.data_state = self.dataset.close(self.data_state)
def reset(self):
self.data_state = self.dataset.reset(self.data_state)
self._fresh_state = True
def next_epoch(self):
self.data_state = self.dataset.next_epoch(self.data_state)
def get_data(self, request=None):
"""Get data from the dataset."""
return self.dataset.get_data(self.data_state, request)
def get_epoch_iterator(self, **kwargs):
"""Get an epoch iterator for the data stream."""
if not self._fresh_state:
self.next_epoch()
else:
self._fresh_state = False
return super(DataStream, self).get_epoch_iterator(**kwargs)
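# A minimal standalone sketch of how the classes above are meant to be used.
# The toy dataset below is hypothetical and implements just the pieces of the
# dataset interface that DataStream calls (open/close/reset/next_epoch/
# get_data); get_data raises StopIteration to end the epoch.
def _datastream_sketch():
    class _ToyDataset(object):
        sources = ('features',)
        def open(self):
            return iter([0, 1, 2])
        def close(self, state):
            pass
        def reset(self, state):
            return iter([0, 1, 2])
        def next_epoch(self, state):
            return iter([0, 1, 2])
        def get_data(self, state, request):
            return (next(state),)
    stream = DataStream(_ToyDataset())
    epoch = stream.get_epoch_iterator(as_dict=True)
    return list(epoch)  # -> [{'features': 0}, {'features': 1}, {'features': 2}]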
| {
"content_hash": "1d4a28232c63d4f60574d9136963d5d4",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 78,
"avg_line_length": 32.56934306569343,
"alnum_prop": 0.628641864634693,
"repo_name": "mducoffe/fuel",
"id": "0286c20af0c21e9684ec5d931c9a0c4983b7abd9",
"size": "4462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuel/streams.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "100247"
}
],
"symlink_target": ""
} |
from wagtail.contrib.modeladmin.options import (
ModelAdmin, modeladmin_register)
from unusualbusiness.howtos.models import HowToPage
class HowToPageModelAdmin(ModelAdmin):
model = HowToPage
menu_label = 'Knowledge pools' # ditch this to use verbose_name_plural from model
menu_icon = 'radio-empty howtos' # change as required
menu_order = 200 # will put in 3rd place (000 being 1st, 100 2nd)
list_display = ('title', )
list_filter = ('live', )
search_fields = ('title',)
# Now you just need to register your customised ModelAdmin class with Wagtail
modeladmin_register(HowToPageModelAdmin)
| {
"content_hash": "02d05203d6927906e413a6a5ca7a5066",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 85,
"avg_line_length": 39.125,
"alnum_prop": 0.7348242811501597,
"repo_name": "jeremy-c/unusualbusiness",
"id": "da9b56ddaa1bd3450286c64aa289b65e7d84a5f9",
"size": "626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unusualbusiness/howtos/wagtail_hooks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "120772"
},
{
"name": "HTML",
"bytes": "136629"
},
{
"name": "JavaScript",
"bytes": "39256"
},
{
"name": "Nginx",
"bytes": "1127"
},
{
"name": "Python",
"bytes": "209333"
},
{
"name": "Shell",
"bytes": "3340"
}
],
"symlink_target": ""
} |
from mock import Mock
from allura.lib import helpers as h
from tg import tmpl_context as c
from forgetracker.tracker_main import MilestoneController
def test_unicode_lookup():
    # can't use name= in constructor, that's a special attribute for Mock
milestone = Mock()
milestone.name = 'Перспектива'
milestone_field = Mock(milestones=[milestone])
milestone_field.name = '_milestone'
app = Mock(globals=Mock(milestone_fields=[milestone_field]))
with h.push_config(c, app=app):
root = None
field = 'milestone'
# u'Перспектива'
milestone_urlparam = '%D0%9F%D0%B5%D1%80%D1%81%D0%BF%D0%B5%D0%BA%D1%82%D0%B8%D0%B2%D0%B0'
mc = MilestoneController(root, field, milestone_urlparam)
assert mc.milestone # check that it is found
assert mc.milestone.name == 'Перспектива'
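# A minimal standalone sketch (not exercised by the test above) showing how the
# percent-encoded milestone_urlparam corresponds to the unicode milestone name;
# six.moves keeps the import working on both Python 2 and 3.
def _percent_encoding_sketch():
    from six.moves.urllib.parse import quote
    encoded = quote(u'Перспектива'.encode('utf-8'))
    assert encoded == '%D0%9F%D0%B5%D1%80%D1%81%D0%BF%D0%B5%D0%BA%D1%82%D0%B8%D0%B2%D0%B0'
    return encoded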
| {
"content_hash": "18099ffa417ae20f589cba1fa7ed8cfe",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 97,
"avg_line_length": 33.48,
"alnum_prop": 0.6881720430107527,
"repo_name": "apache/allura",
"id": "9634eb62d004e4b9064047be92c5ab590def40e9",
"size": "1741",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ForgeTracker/forgetracker/tests/unit/test_milestone_controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6142"
},
{
"name": "CSS",
"bytes": "181457"
},
{
"name": "Dockerfile",
"bytes": "4748"
},
{
"name": "HTML",
"bytes": "867332"
},
{
"name": "JavaScript",
"bytes": "1191836"
},
{
"name": "Makefile",
"bytes": "6248"
},
{
"name": "Python",
"bytes": "4499987"
},
{
"name": "RAML",
"bytes": "27600"
},
{
"name": "Roff",
"bytes": "41"
},
{
"name": "Ruby",
"bytes": "1280"
},
{
"name": "SCSS",
"bytes": "27742"
},
{
"name": "Shell",
"bytes": "131207"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import ValidationError
from mezzanine.pages.page_processors import processor_for
from hs_core.models import BaseResource, ResourceManager, resource_processor, \
CoreMetaData, AbstractMetaDataElement
from .utils import get_SupportedResTypes_choices
class ToolResource(BaseResource):
objects = ResourceManager('ToolResource')
class Meta:
proxy = True
verbose_name = 'Web App Resource'
@classmethod
def get_supported_upload_file_types(cls):
# no file types are supported
return ()
@classmethod
def allow_multiple_file_upload(cls):
# no file can be uploaded
return False
@classmethod
def can_have_multiple_files(cls):
# resource can't have any files
return False
@property
def metadata(self):
md = ToolMetaData()
return self._get_metadata(md)
@property
def can_be_published(self):
return False
processor_for(ToolResource)(resource_processor)
class AppHomePageUrl(AbstractMetaDataElement):
term = 'AppHomePageUrl'
value = models.CharField(max_length=1024, blank=True, default="")
class Meta:
# AppHomePageUrl element is not repeatable
unique_together = ("content_type", "object_id")
class RequestUrlBase(AbstractMetaDataElement):
term = 'RequestUrlBase'
value = models.CharField(max_length=1024, blank=True, default="")
class Meta:
# RequestUrlBase element is not repeatable
unique_together = ("content_type", "object_id")
class ToolVersion(AbstractMetaDataElement):
term = 'AppVersion'
value = models.CharField(max_length=128, blank=True)
class Meta:
# ToolVersion element is not repeatable
unique_together = ("content_type", "object_id")
class SupportedResTypeChoices(models.Model):
description = models.CharField(max_length=128)
def __unicode__(self):
return self.description
class SupportedResTypes(AbstractMetaDataElement):
term = 'SupportedResTypes'
supported_res_types = models.ManyToManyField(SupportedResTypeChoices, blank=True)
def get_supported_res_types_str(self):
return ','.join([parameter.description for parameter in self.supported_res_types.all()])
@classmethod
def _add_supported_res_type(cls, meta_instance, supported_res_types):
for res_type in supported_res_types:
            # res_type can be either a string (during a normal create or update)
            # or an integer id (when copying a resource or creating a new version)
            if isinstance(res_type, int):
                # "copy res" or "create a new version"
                qs = SupportedResTypeChoices.objects.filter(id=res_type)
                if not qs.exists():
                    raise ValidationError("Invalid supported_res_types id: %s" % res_type)
meta_instance.supported_res_types.add(qs[0])
elif isinstance(res_type, basestring):
# create or update res
qs = SupportedResTypeChoices.objects.filter(description__iexact=res_type)
if qs.exists():
meta_instance.supported_res_types.add(qs[0])
else:
meta_instance.supported_res_types.create(description=res_type)
else:
raise ValidationError("No supported_res_types parameter "
"was found in the **kwargs list")
@classmethod
def _validate_supported_res_types(cls, supported_res_types):
for res_type in supported_res_types:
if isinstance(res_type, basestring) \
and res_type not in [res_type_choice[0]
for res_type_choice in get_SupportedResTypes_choices()]:
raise ValidationError('Invalid supported_res_types:%s' % res_type)
@classmethod
def create(cls, **kwargs):
if 'supported_res_types' in kwargs:
cls._validate_supported_res_types(kwargs['supported_res_types'])
metadata_obj = kwargs['content_object']
new_meta_instance = SupportedResTypes.objects.create(content_object=metadata_obj)
cls._add_supported_res_type(new_meta_instance, kwargs['supported_res_types'])
return new_meta_instance
else:
raise ValidationError("No supported_res_types parameter was found in the **kwargs list")
@classmethod
def update(cls, element_id, **kwargs):
meta_instance = SupportedResTypes.objects.get(id=element_id)
if 'supported_res_types' in kwargs:
cls._validate_supported_res_types(kwargs['supported_res_types'])
meta_instance.supported_res_types.clear()
cls._add_supported_res_type(meta_instance, kwargs['supported_res_types'])
meta_instance.save()
else:
raise ValidationError("No supported_res_types parameter was found in the **kwargs list")
@classmethod
def remove(cls, element_id):
raise ValidationError("SupportedResTypes element can't be deleted.")
class SupportedSharingStatusChoices(models.Model):
description = models.CharField(max_length=128)
def __unicode__(self):
return self.description
class SupportedSharingStatus(AbstractMetaDataElement):
term = 'SupportedSharingStatus'
sharing_status = models.ManyToManyField(SupportedSharingStatusChoices, blank=True)
def get_sharing_status_str(self):
return ', '.join([parameter.description for parameter in self.sharing_status.all()])
@classmethod
def _add_sharing_status(cls, meta_instance, sharing_status_list):
for sharing_status in sharing_status_list:
            # sharing_status can be either a string (during a normal create or update)
            # or an integer id (when copying a resource or creating a new version)
            if isinstance(sharing_status, int):
                # "copy res" or "create a new version"
                qs = SupportedSharingStatusChoices.objects.filter(id=sharing_status)
                if not qs.exists():
                    raise ValidationError("Invalid sharing_status id: %s" % sharing_status)
meta_instance.sharing_status.add(qs[0])
elif isinstance(sharing_status, basestring):
# create or update res
qs = SupportedSharingStatusChoices.objects.\
filter(description__iexact=sharing_status)
if qs.exists():
meta_instance.sharing_status.add(qs[0])
else:
meta_instance.sharing_status.create(description=sharing_status)
else:
raise ValidationError("No sharing_status parameter "
"was found in the **kwargs list")
@classmethod
def _validate_sharing_status(cls, sharing_status_list):
for sharing_status in sharing_status_list:
if isinstance(sharing_status, basestring) and \
sharing_status not in \
["Published", "Public", "Discoverable", "Private"]:
raise ValidationError('Invalid sharing_status:%s' % sharing_status)
@classmethod
def create(cls, **kwargs):
if 'sharing_status' in kwargs:
cls._validate_sharing_status(kwargs["sharing_status"])
metadata_obj = kwargs['content_object']
new_meta_instance = SupportedSharingStatus.objects.create(content_object=metadata_obj)
cls._add_sharing_status(new_meta_instance, kwargs['sharing_status'])
return new_meta_instance
else:
raise ValidationError("No sharing_status parameter was found in the **kwargs list")
@classmethod
def update(cls, element_id, **kwargs):
meta_instance = SupportedSharingStatus.objects.get(id=element_id)
if 'sharing_status' in kwargs:
cls._validate_sharing_status(kwargs["sharing_status"])
meta_instance.sharing_status.clear()
cls._add_sharing_status(meta_instance, kwargs['sharing_status'])
meta_instance.save()
else:
raise ValidationError("No sharing_status parameter "
"was found in the **kwargs list")
@classmethod
def remove(cls, element_id):
raise ValidationError("SupportedSharingStatus element can't be deleted.")
class ToolIcon(AbstractMetaDataElement):
term = 'ToolIcon'
value = models.CharField(max_length=1024, blank=True, default="")
class Meta:
# ToolVersion element is not repeatable
unique_together = ("content_type", "object_id")
class ToolMetaData(CoreMetaData):
url_bases = GenericRelation(RequestUrlBase)
versions = GenericRelation(ToolVersion)
supported_res_types = GenericRelation(SupportedResTypes)
tool_icon = GenericRelation(ToolIcon)
supported_sharing_status = GenericRelation(SupportedSharingStatus)
homepage_url = GenericRelation(AppHomePageUrl)
@property
def resource(self):
return ToolResource.objects.filter(object_id=self.id).first()
@classmethod
def get_supported_element_names(cls):
elements = super(ToolMetaData, cls).get_supported_element_names()
elements.append('RequestUrlBase')
elements.append('ToolVersion')
elements.append('SupportedResTypes')
elements.append('ToolIcon')
elements.append('SupportedSharingStatus')
elements.append('AppHomePageUrl')
return elements
def has_all_required_elements(self):
if self.get_required_missing_elements():
return False
return True
def get_required_missing_elements(self): # show missing required meta
missing_required_elements = super(ToolMetaData, self).get_required_missing_elements()
# At least one of the two metadata must exist: Home Page URL or App-launching URL Pattern
if (not self.url_bases.all().first() or not self.url_bases.all().first().value) \
and (not self.homepage_url.all().first() or not self.homepage_url.all().first().value):
missing_required_elements.append('App Home Page URL or App-launching URL Pattern')
else:
            # If either the App-launching URL Pattern or the Supported Resource
            # Types is present, the other must be present as well
if self.url_bases.all().first() and self.url_bases.all().first().value:
if not self.supported_res_types.all().first() \
or not self.supported_res_types.all().first().supported_res_types.count() > 0:
missing_required_elements.append('Supported Resource Types')
if self.supported_res_types.all().first() \
and self.supported_res_types.all().first().supported_res_types.count() > 0:
if not self.url_bases.all().first() or not self.url_bases.all().first().value:
missing_required_elements.append('App-launching URL Pattern')
            # if Supported Resource Types is present, Supported Sharing Status
            # must also be present (but not vice versa)
if self.supported_res_types.all().first() \
and self.supported_res_types.all().first().supported_res_types.count() > 0:
if not self.supported_sharing_status.all().first() \
or not self.supported_sharing_status.all().first().sharing_status.count() > 0:
missing_required_elements.append('Supported Sharing Status')
return missing_required_elements
def delete_all_elements(self):
super(ToolMetaData, self).delete_all_elements()
self.url_bases.all().delete()
self.versions.all().delete()
self.supported_res_types.all().delete()
self.tool_icon.all().delete()
self.supported_sharing_status.all().delete()
self.homepage_url.all().delete()
| {
"content_hash": "4b5e16cc77e9ada9282ba43131744aad",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 100,
"avg_line_length": 39.67656765676568,
"alnum_prop": 0.6397438030277824,
"repo_name": "FescueFungiShare/hydroshare",
"id": "1cdf3870c172f321245c8cb21e061bee0f80ec52",
"size": "12022",
"binary": false,
"copies": "1",
"ref": "refs/heads/FescueFungiShare-develop",
"path": "hs_tools_resource/models.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "374952"
},
{
"name": "HTML",
"bytes": "1107800"
},
{
"name": "JavaScript",
"bytes": "1822132"
},
{
"name": "Python",
"bytes": "3599347"
},
{
"name": "R",
"bytes": "4475"
},
{
"name": "Shell",
"bytes": "49970"
},
{
"name": "XSLT",
"bytes": "790987"
}
],
"symlink_target": ""
} |
import os
import base64
import urlparse
import urllib2
import BaseHTTPServer
import unittest
import hashlib
from test import test_support
mimetools = test_support.import_module('mimetools', deprecated=True)
threading = test_support.import_module('threading')
try:
import ssl
except ImportError:
ssl = None
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Loopback http server infrastructure
class LoopbackHttpServer(BaseHTTPServer.HTTPServer):
"""HTTP server w/ a few modifications that make it useful for
loopback testing purposes.
"""
def __init__(self, server_address, RequestHandlerClass):
BaseHTTPServer.HTTPServer.__init__(self,
server_address,
RequestHandlerClass)
# Set the timeout of our listening socket really low so
# that we can stop the server easily.
self.socket.settimeout(0.1)
def get_request(self):
"""BaseHTTPServer method, overridden."""
request, client_address = self.socket.accept()
# It's a loopback connection, so setting the timeout
# really low shouldn't affect anything, but should make
# deadlocks less likely to occur.
request.settimeout(10.0)
return (request, client_address)
class LoopbackHttpServerThread(threading.Thread):
"""Stoppable thread that runs a loopback http server."""
def __init__(self, request_handler):
threading.Thread.__init__(self)
self._stop = False
self.ready = threading.Event()
request_handler.protocol_version = "HTTP/1.0"
self.httpd = LoopbackHttpServer(('127.0.0.1', 0),
request_handler)
#print "Serving HTTP on %s port %s" % (self.httpd.server_name,
# self.httpd.server_port)
self.port = self.httpd.server_port
def stop(self):
"""Stops the webserver if it's currently running."""
# Set the stop flag.
self._stop = True
self.join()
def run(self):
self.ready.set()
while not self._stop:
self.httpd.handle_request()
# Authentication infrastructure
class BasicAuthHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Handler for performing Basic Authentication."""
# Server side values
USER = "testUser"
PASSWD = "testPass"
REALM = "Test"
USER_PASSWD = "%s:%s" % (USER, PASSWD)
ENCODED_AUTH = base64.b64encode(USER_PASSWD)
def __init__(self, *args, **kwargs):
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_message(self, format, *args):
# Suppress the HTTP Console log output
pass
def do_HEAD(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_AUTHHEAD(self):
self.send_response(401)
self.send_header("WWW-Authenticate", "Basic realm=\"%s\"" % self.REALM)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_GET(self):
if self.headers.getheader("Authorization") == None:
self.do_AUTHHEAD()
self.wfile.write("No Auth Header Received")
elif self.headers.getheader(
"Authorization") == "Basic " + self.ENCODED_AUTH:
self.wfile.write("It works!")
else:
# Unauthorized Request
self.do_AUTHHEAD()
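# A minimal sketch of the header value BasicAuthHandler.do_GET compares
# against: HTTP Basic auth is just base64("user:password"), with no hashing
# involved (Python 2 b64encode accepts a str directly).
def _basic_auth_header_sketch():
    credentials = base64.b64encode("testUser:testPass")
    return "Basic " + credentials  # -> 'Basic dGVzdFVzZXI6dGVzdFBhc3M='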
class DigestAuthHandler:
"""Handler for performing digest authentication."""
def __init__(self):
self._request_num = 0
self._nonces = []
self._users = {}
self._realm_name = "Test Realm"
self._qop = "auth"
def set_qop(self, qop):
self._qop = qop
def set_users(self, users):
assert isinstance(users, dict)
self._users = users
def set_realm(self, realm):
self._realm_name = realm
def _generate_nonce(self):
self._request_num += 1
nonce = hashlib.md5(str(self._request_num)).hexdigest()
self._nonces.append(nonce)
return nonce
def _create_auth_dict(self, auth_str):
first_space_index = auth_str.find(" ")
auth_str = auth_str[first_space_index+1:]
parts = auth_str.split(",")
auth_dict = {}
for part in parts:
name, value = part.split("=")
name = name.strip()
if value[0] == '"' and value[-1] == '"':
value = value[1:-1]
else:
value = value.strip()
auth_dict[name] = value
return auth_dict
def _validate_auth(self, auth_dict, password, method, uri):
final_dict = {}
final_dict.update(auth_dict)
final_dict["password"] = password
final_dict["method"] = method
final_dict["uri"] = uri
HA1_str = "%(username)s:%(realm)s:%(password)s" % final_dict
HA1 = hashlib.md5(HA1_str).hexdigest()
HA2_str = "%(method)s:%(uri)s" % final_dict
HA2 = hashlib.md5(HA2_str).hexdigest()
final_dict["HA1"] = HA1
final_dict["HA2"] = HA2
response_str = "%(HA1)s:%(nonce)s:%(nc)s:" \
"%(cnonce)s:%(qop)s:%(HA2)s" % final_dict
response = hashlib.md5(response_str).hexdigest()
return response == auth_dict["response"]
def _return_auth_challenge(self, request_handler):
request_handler.send_response(407, "Proxy Authentication Required")
request_handler.send_header("Content-Type", "text/html")
request_handler.send_header(
'Proxy-Authenticate', 'Digest realm="%s", '
'qop="%s",'
'nonce="%s", ' % \
(self._realm_name, self._qop, self._generate_nonce()))
# XXX: Not sure if we're supposed to add this next header or
# not.
#request_handler.send_header('Connection', 'close')
request_handler.end_headers()
request_handler.wfile.write("Proxy Authentication Required.")
return False
def handle_request(self, request_handler):
"""Performs digest authentication on the given HTTP request
handler. Returns True if authentication was successful, False
otherwise.
If no users have been set, then digest auth is effectively
disabled and this method will always return True.
"""
if len(self._users) == 0:
return True
if 'Proxy-Authorization' not in request_handler.headers:
return self._return_auth_challenge(request_handler)
else:
auth_dict = self._create_auth_dict(
request_handler.headers['Proxy-Authorization']
)
if auth_dict["username"] in self._users:
password = self._users[ auth_dict["username"] ]
else:
return self._return_auth_challenge(request_handler)
if not auth_dict.get("nonce") in self._nonces:
return self._return_auth_challenge(request_handler)
else:
self._nonces.remove(auth_dict["nonce"])
auth_validated = False
# MSIE uses short_path in its validation, but Python's
# urllib2 uses the full path, so we're going to see if
# either of them works here.
for path in [request_handler.path, request_handler.short_path]:
if self._validate_auth(auth_dict,
password,
request_handler.command,
path):
auth_validated = True
if not auth_validated:
return self._return_auth_challenge(request_handler)
return True
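# A minimal sketch of the RFC 2617 digest that _validate_auth() recomputes.
# The nonce, nc and cnonce values below are hypothetical; HA1 hashes the
# credentials, HA2 hashes the request, and the response ties them together.
def _digest_response_sketch():
    ha1 = hashlib.md5("tester:TestRealm:test123").hexdigest()
    ha2 = hashlib.md5("GET:/index.html").hexdigest()
    response = hashlib.md5(
        ":".join([ha1, "deadbeef", "00000001", "0a4f113b", "auth", ha2])
    ).hexdigest()
    return response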
# Proxy test infrastructure
class FakeProxyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""This is a 'fake proxy' that makes it look like the entire
    internet has gone down due to a sudden zombie invasion. Its main
utility is in providing us with authentication support for
testing.
"""
def __init__(self, digest_auth_handler, *args, **kwargs):
# This has to be set before calling our parent's __init__(), which will
# try to call do_GET().
self.digest_auth_handler = digest_auth_handler
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_message(self, format, *args):
# Uncomment the next line for debugging.
#sys.stderr.write(format % args)
pass
def do_GET(self):
(scm, netloc, path, params, query, fragment) = urlparse.urlparse(
self.path, 'http')
self.short_path = path
if self.digest_auth_handler.handle_request(self):
self.send_response(200, "OK")
self.send_header("Content-Type", "text/html")
self.end_headers()
self.wfile.write("You've reached %s!<BR>" % self.path)
self.wfile.write("Our apologies, but our server is down due to "
"a sudden zombie invasion.")
# Test cases
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test_support.threading_setup()
def tearDown(self):
self.doCleanups()
test_support.threading_cleanup(*self._threads)
class BasicAuthTests(BaseTestCase):
USER = "testUser"
PASSWD = "testPass"
INCORRECT_PASSWD = "Incorrect"
REALM = "Test"
def setUp(self):
super(BasicAuthTests, self).setUp()
# With Basic Authentication
def http_server_with_basic_auth_handler(*args, **kwargs):
return BasicAuthHandler(*args, **kwargs)
self.server = LoopbackHttpServerThread(http_server_with_basic_auth_handler)
self.server_url = 'http://127.0.0.1:%s' % self.server.port
self.server.start()
self.server.ready.wait()
self.addCleanup(self.server.stop)
def test_basic_auth_success(self):
ah = urllib2.HTTPBasicAuthHandler()
ah.add_password(self.REALM, self.server_url, self.USER, self.PASSWD)
urllib2.install_opener(urllib2.build_opener(ah))
try:
self.assertTrue(urllib2.urlopen(self.server_url))
except urllib2.HTTPError:
self.fail("Basic Auth Failed for url: %s" % self.server_url)
except Exception as e:
raise e
def test_basic_auth_httperror(self):
ah = urllib2.HTTPBasicAuthHandler()
ah.add_password(self.REALM, self.server_url, self.USER,
self.INCORRECT_PASSWD)
urllib2.install_opener(urllib2.build_opener(ah))
self.assertRaises(urllib2.HTTPError, urllib2.urlopen, self.server_url)
class ProxyAuthTests(BaseTestCase):
URL = "http://localhost"
USER = "tester"
PASSWD = "test123"
REALM = "TestRealm"
def setUp(self):
super(ProxyAuthTests, self).setUp()
# Ignore proxy bypass settings in the environment.
def restore_environ(old_environ):
os.environ.clear()
os.environ.update(old_environ)
self.addCleanup(restore_environ, os.environ.copy())
os.environ['NO_PROXY'] = ''
os.environ['no_proxy'] = ''
self.digest_auth_handler = DigestAuthHandler()
self.digest_auth_handler.set_users({self.USER: self.PASSWD})
self.digest_auth_handler.set_realm(self.REALM)
# With Digest Authentication
def create_fake_proxy_handler(*args, **kwargs):
return FakeProxyHandler(self.digest_auth_handler, *args, **kwargs)
self.server = LoopbackHttpServerThread(create_fake_proxy_handler)
self.server.start()
self.server.ready.wait()
self.addCleanup(self.server.stop)
proxy_url = "http://127.0.0.1:%d" % self.server.port
handler = urllib2.ProxyHandler({"http" : proxy_url})
self.proxy_digest_handler = urllib2.ProxyDigestAuthHandler()
self.opener = urllib2.build_opener(handler, self.proxy_digest_handler)
def test_proxy_with_bad_password_raises_httperror(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD+"bad")
self.digest_auth_handler.set_qop("auth")
self.assertRaises(urllib2.HTTPError,
self.opener.open,
self.URL)
def test_proxy_with_no_password_raises_httperror(self):
self.digest_auth_handler.set_qop("auth")
self.assertRaises(urllib2.HTTPError,
self.opener.open,
self.URL)
def test_proxy_qop_auth_works(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD)
self.digest_auth_handler.set_qop("auth")
result = self.opener.open(self.URL)
while result.read():
pass
result.close()
def test_proxy_qop_auth_int_works_or_throws_urlerror(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD)
self.digest_auth_handler.set_qop("auth-int")
try:
result = self.opener.open(self.URL)
except urllib2.URLError:
# It's okay if we don't support auth-int, but we certainly
# shouldn't receive any kind of exception here other than
# a URLError.
result = None
if result:
while result.read():
pass
result.close()
def GetRequestHandler(responses):
class FakeHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
server_version = "TestHTTP/"
requests = []
headers_received = []
port = 80
def do_GET(self):
body = self.send_head()
if body:
self.wfile.write(body)
def do_POST(self):
content_length = self.headers['Content-Length']
post_data = self.rfile.read(int(content_length))
self.do_GET()
self.requests.append(post_data)
def send_head(self):
FakeHTTPRequestHandler.headers_received = self.headers
self.requests.append(self.path)
response_code, headers, body = responses.pop(0)
self.send_response(response_code)
for (header, value) in headers:
self.send_header(header, value % self.port)
if body:
self.send_header('Content-type', 'text/plain')
self.end_headers()
return body
self.end_headers()
def log_message(self, *args):
pass
return FakeHTTPRequestHandler
class TestUrlopen(BaseTestCase):
"""Tests urllib2.urlopen using the network.
These tests are not exhaustive. Assuming that testing using files does a
good job overall of some of the basic interface features. There are no
tests exercising the optional 'data' and 'proxies' arguments. No tests
for transparent redirection have been written.
"""
def setUp(self):
proxy_handler = urllib2.ProxyHandler({})
opener = urllib2.build_opener(proxy_handler)
urllib2.install_opener(opener)
super(TestUrlopen, self).setUp()
def urlopen(self, url, data=None, **kwargs):
l = []
f = urllib2.urlopen(url, data, **kwargs)
try:
# Exercise various methods
l.extend(f.readlines(200))
l.append(f.readline())
l.append(f.read(1024))
l.append(f.read())
finally:
f.close()
return b"".join(l)
def start_server(self, responses):
handler = GetRequestHandler(responses)
self.server = LoopbackHttpServerThread(handler)
self.server.start()
self.server.ready.wait()
self.addCleanup(self.server.stop)
port = self.server.port
handler.port = port
return handler
def start_https_server(self, responses=None, **kwargs):
if not hasattr(urllib2, 'HTTPSHandler'):
self.skipTest('ssl support required')
from test.ssl_servers import make_https_server
if responses is None:
responses = [(200, [], b"we care a bit")]
handler = GetRequestHandler(responses)
server = make_https_server(self, handler_class=handler, **kwargs)
handler.port = server.port
return handler
def test_redirection(self):
expected_response = 'We got here...'
responses = [
(302, [('Location', 'http://localhost:%s/somewhere_else')], ''),
(200, [], expected_response)
]
handler = self.start_server(responses)
f = urllib2.urlopen('http://localhost:%s/' % handler.port)
data = f.read()
f.close()
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ['/', '/somewhere_else'])
def test_404(self):
expected_response = 'Bad bad bad...'
handler = self.start_server([(404, [], expected_response)])
try:
urllib2.urlopen('http://localhost:%s/weeble' % handler.port)
except urllib2.URLError, f:
pass
else:
self.fail('404 should raise URLError')
data = f.read()
f.close()
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ['/weeble'])
def test_200(self):
expected_response = 'pycon 2008...'
handler = self.start_server([(200, [], expected_response)])
f = urllib2.urlopen('http://localhost:%s/bizarre' % handler.port)
data = f.read()
f.close()
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ['/bizarre'])
def test_200_with_parameters(self):
expected_response = 'pycon 2008...'
handler = self.start_server([(200, [], expected_response)])
f = urllib2.urlopen('http://localhost:%s/bizarre' % handler.port, 'get=with_feeling')
data = f.read()
f.close()
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ['/bizarre', 'get=with_feeling'])
def test_https(self):
handler = self.start_https_server()
context = ssl.create_default_context(cafile=CERT_localhost)
data = self.urlopen("https://localhost:%s/bizarre" % handler.port, context=context)
self.assertEqual(data, b"we care a bit")
def test_https_with_cafile(self):
handler = self.start_https_server(certfile=CERT_localhost)
# Good cert
data = self.urlopen("https://localhost:%s/bizarre" % handler.port,
cafile=CERT_localhost)
self.assertEqual(data, b"we care a bit")
# Bad cert
with self.assertRaises(urllib2.URLError):
self.urlopen("https://localhost:%s/bizarre" % handler.port,
cafile=CERT_fakehostname)
# Good cert, but mismatching hostname
handler = self.start_https_server(certfile=CERT_fakehostname)
with self.assertRaises(ssl.CertificateError):
self.urlopen("https://localhost:%s/bizarre" % handler.port,
cafile=CERT_fakehostname)
def test_https_with_cadefault(self):
handler = self.start_https_server(certfile=CERT_localhost)
# Self-signed cert should fail verification with system certificate store
with self.assertRaises(urllib2.URLError):
self.urlopen("https://localhost:%s/bizarre" % handler.port,
cadefault=True)
def test_https_sni(self):
if ssl is None:
self.skipTest("ssl module required")
if not ssl.HAS_SNI:
self.skipTest("SNI support required in OpenSSL")
sni_name = [None]
def cb_sni(ssl_sock, server_name, initial_context):
sni_name[0] = server_name
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.set_servername_callback(cb_sni)
handler = self.start_https_server(context=context, certfile=CERT_localhost)
context = ssl.create_default_context(cafile=CERT_localhost)
self.urlopen("https://localhost:%s" % handler.port, context=context)
self.assertEqual(sni_name[0], "localhost")
def test_sending_headers(self):
handler = self.start_server([(200, [], "we don't care")])
req = urllib2.Request("http://localhost:%s/" % handler.port,
headers={'Range': 'bytes=20-39'})
urllib2.urlopen(req)
self.assertEqual(handler.headers_received['Range'], 'bytes=20-39')
def test_basic(self):
handler = self.start_server([(200, [], "we don't care")])
open_url = urllib2.urlopen("http://localhost:%s" % handler.port)
for attr in ("read", "close", "info", "geturl"):
self.assertTrue(hasattr(open_url, attr), "object returned from "
"urlopen lacks the %s attribute" % attr)
try:
self.assertTrue(open_url.read(), "calling 'read' failed")
finally:
open_url.close()
def test_info(self):
handler = self.start_server([(200, [], "we don't care")])
open_url = urllib2.urlopen("http://localhost:%s" % handler.port)
info_obj = open_url.info()
self.assertIsInstance(info_obj, mimetools.Message,
"object returned by 'info' is not an "
"instance of mimetools.Message")
self.assertEqual(info_obj.getsubtype(), "plain")
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
handler = self.start_server([(200, [], "we don't care")])
open_url = urllib2.urlopen("http://localhost:%s" % handler.port)
url = open_url.geturl()
self.assertEqual(url, "http://localhost:%s" % handler.port)
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
# as indicated by the comment below, this might fail with some ISP,
# so we run the test only when -unetwork/-uall is specified to
# mitigate the problem a bit (see #17564)
test_support.requires('network')
self.assertRaises(IOError,
# Given that both VeriSign and various ISPs have in
# the past or are presently hijacking various invalid
# domain name requests in an attempt to boost traffic
# to their own sites, finding a domain name to use
# for this test is difficult. RFC2606 leads one to
# believe that '.invalid' should work, but experience
# seemed to indicate otherwise. Single character
# TLDs are likely to remain invalid, so this seems to
# be the best choice. The trailing '.' prevents a
# related problem: The normal DNS resolver appends
# the domain names from the search path if there is
# no '.' the end and, and if one of those domains
# implements a '*' rule a result is returned.
# However, none of this will prevent the test from
# failing if the ISP hijacks all invalid domain
# requests. The real solution would be to be able to
# parameterize the framework with a mock resolver.
urllib2.urlopen, "http://sadflkjsasf.i.nvali.d./")
def test_iteration(self):
expected_response = "pycon 2008..."
handler = self.start_server([(200, [], expected_response)])
data = urllib2.urlopen("http://localhost:%s" % handler.port)
for line in data:
self.assertEqual(line, expected_response)
def ztest_line_iteration(self):
lines = ["We\n", "got\n", "here\n", "verylong " * 8192 + "\n"]
expected_response = "".join(lines)
handler = self.start_server([(200, [], expected_response)])
data = urllib2.urlopen("http://localhost:%s" % handler.port)
for index, line in enumerate(data):
self.assertEqual(line, lines[index],
"Fetched line number %s doesn't match expected:\n"
" Expected length was %s, got %s" %
(index, len(lines[index]), len(line)))
self.assertEqual(index + 1, len(lines))
def test_main():
# We will NOT depend on the network resource flag
# (Lib/test/regrtest.py -u network) since all tests here are only
# localhost. However, if this is a bad rationale, then uncomment
# the next line.
#test_support.requires("network")
test_support.run_unittest(BasicAuthTests, ProxyAuthTests, TestUrlopen)
if __name__ == "__main__":
test_main()
| {
"content_hash": "9fccdfa72d300e52e26ce996cf6f6bb5",
"timestamp": "",
"source": "github",
"line_count": 684,
"max_line_length": 93,
"avg_line_length": 37.244152046783626,
"alnum_prop": 0.5867713444553484,
"repo_name": "IronLanguages/ironpython2",
"id": "932b57223a5652c373d896a4ccba09ae833aa1a2",
"size": "25475",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Src/StdLib/Lib/test/test_urllib2_localnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4080"
},
{
"name": "C",
"bytes": "20290"
},
{
"name": "C#",
"bytes": "12424667"
},
{
"name": "C++",
"bytes": "69156"
},
{
"name": "Classic ASP",
"bytes": "2117"
},
{
"name": "HTML",
"bytes": "13181412"
},
{
"name": "JavaScript",
"bytes": "1656"
},
{
"name": "Makefile",
"bytes": "332"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "67035"
},
{
"name": "Python",
"bytes": "27860071"
},
{
"name": "Roff",
"bytes": "21"
},
{
"name": "Shell",
"bytes": "193"
},
{
"name": "Smalltalk",
"bytes": "3"
},
{
"name": "VBScript",
"bytes": "974"
},
{
"name": "XSLT",
"bytes": "2058"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import os.path
from git_code_debt.server.app import create_metric_config_if_not_exists
from git_code_debt.server.app import main
from testing.utilities.cwd import cwd
def test_file_does_not_exist():
assert main(argv=['i_dont_exist.db']) == 1
def test_create_metric_config_if_not_exists_existing(tmpdir):
with cwd(tmpdir.strpath):
with io.open(
'metric_config.yaml', 'w',
) as metric_config_file: # pragma: no cover (PY26 derps on `with`)
metric_config_file.write('Groups: []\nColorOverrides: []\n')
create_metric_config_if_not_exists()
after_contents = io.open('metric_config.yaml').read()
assert after_contents == 'Groups: []\nColorOverrides: []\n'
def test_create_metric_config_if_not_exists_not_existing(tmpdir):
with cwd(tmpdir.strpath):
create_metric_config_if_not_exists()
assert os.path.exists('metric_config.yaml')
| {
"content_hash": "db03af8c2e6313dd0b89f2d8c5376c1f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 75,
"avg_line_length": 30.78787878787879,
"alnum_prop": 0.6761811023622047,
"repo_name": "ucarion/git-code-debt",
"id": "c3844008917b2252808f8276cbf8b55bc46d7af2",
"size": "1016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/server/app_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2514"
},
{
"name": "HTML",
"bytes": "1617"
},
{
"name": "JavaScript",
"bytes": "5161"
},
{
"name": "Makefile",
"bytes": "384"
},
{
"name": "Mako",
"bytes": "5510"
},
{
"name": "Python",
"bytes": "135268"
}
],
"symlink_target": ""
} |
from setuptools import setup
from beewarn import VERSION
setup(name='beewarn',
version=VERSION,
description='Utility for warning about bees',
author='Alistair Lynn',
author_email='[email protected]',
license='MIT',
url='https://github.com/prophile/beewarn',
zip_safe=True,
setup_requires=['nose >=1.0, <2.0'],
entry_points = {
'console_scripts': [
'beewarn=beewarn.cli:run_cli'
]
},
packages=['beewarn'],
test_suite='nose.collector')
| {
"content_hash": "bdb9771f80f2928b40c204036fa49890",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 51,
"avg_line_length": 25.857142857142858,
"alnum_prop": 0.5874769797421732,
"repo_name": "prophile/beewarn",
"id": "f6b3be7c9a1548e3e0e943e73713d158323b6aa4",
"size": "543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "762"
}
],
"symlink_target": ""
} |
"""
Default experiment for clean smp
Should be self-contained, requiring no external packages or processes.
Robot: Point mass
World: open linear (1D) space
Task: go to a goal position and stay there
Brain: taxis
Loss: mean squared error / goal distance
"""
import time
from smq.utils import make_column_names_numbered, make_expr_id, make_robot_name
from smq.worlds import RobotWorld
from smq.robots import SimpleRandomRobot, PointmassRobot
from smq.plot import PlotTimeseries
from smq.tasks import NullTask, SetpointTask, GoalTask, GoalTaskTaxis
from smq.brains import NullBrain, TaxisBrain
# local variables for re-use
numsteps = 1000
motors = 1
name = "taxis_1d"
expr_id = make_expr_id(name)
# using dict convention seemed to be the best over yaml and friends
conf = {
# first level corresponds to experiment
"numsteps": numsteps,
"id": "%s-%s" % (name, time.strftime("%Y%m%d-%H%M%S")),
# these are arrays of dicts specifying components
"robots": [
{
"class": PointmassRobot, # SimpleRandomRobot,
"type": "explauto",
"name": make_robot_name(expr_id, "pm", 0),
# dimensions of different subparts of sm vector
# make that more compact / automatically inferred
# actually: make that lists of names whose length is the dim
"dim_s_proprio": make_column_names_numbered("acc", motors),
"dim_s_extero": make_column_names_numbered("vel", motors),
"dim_s_intero": make_column_names_numbered("vel_", motors) + make_column_names_numbered("pos_", motors) + make_column_names_numbered("vel_goal", motors) + make_column_names_numbered("vel_error", motors),
"dim_s_reward": make_column_names_numbered("dist_goal", 1),
"dim_s_pred": make_column_names_numbered("acc_pred", motors),
"dim_s_motor": make_column_names_numbered("m", motors),
# above is not enough, need either full dict / access variable by column name
# OR dict mapping variable name to numerical index (HACK?)
"numsteps": numsteps,
"control": "force",
"ros": False,
"brains": [
{
"class": TaxisBrain,
"name": "taxisbrain",
"dim_s_motor": motors,
"variant": "binary_threshold", # "continuous_linear"
"continuous_gain": 1.5,
"binary_threshold": 0.2,
"binary_high_range": 1.0,
"binary_low_range": 0.01,
                # tasks must be of length either one or the same as len(robots)
"tasks": [
{
"class": GoalTaskTaxis,
"name": "goaltasktaxis",
"goalspace": "s_extero",
"goaldim": motors,
"goalinteroindex": 2,
"loss": "mse",
}
],
},
],
}
],
"worlds": [
{"class": RobotWorld,
"dt": 0.1,
"map": "open_planar_isotropic",
"dim": 0,
"numsteps": numsteps,
}
],
"loss": "mse",
"analyses": [
{
"class": PlotTimeseries,
"name": "plottimeseries",
"type": "seaborn" # "pyplot"
},
],
}
| {
"content_hash": "d857769d7fc6f91de56ba1eebd827f2c",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 215,
"avg_line_length": 37.287234042553195,
"alnum_prop": 0.5289586305278174,
"repo_name": "x75/smq",
"id": "6fbe532ead88afe961a46ae4a56fe55f36949f49",
"size": "3505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/conf/taxis_1d_pm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "168491"
}
],
"symlink_target": ""
} |
""" losses for training neural networks """
from __future__ import absolute_import
__all__ = ['Loss', 'L2Loss', 'L1Loss',
'SigmoidBinaryCrossEntropyLoss', 'SigmoidBCELoss',
'SoftmaxCrossEntropyLoss', 'SoftmaxCELoss',
'KLDivLoss', 'CTCLoss', 'HuberLoss', 'HingeLoss',
'SquaredHingeLoss', 'LogisticLoss', 'TripletLoss', 'PoissonNLLLoss', 'CosineEmbeddingLoss']
import numpy as np
from .. import ndarray
from ..base import numeric_types
from .block import HybridBlock
def _apply_weighting(F, loss, weight=None, sample_weight=None):
"""Apply weighting to loss.
Parameters
----------
loss : Symbol
The loss to be weighted.
weight : float or None
Global scalar weight for loss.
sample_weight : Symbol or None
Per sample weighting. Must be broadcastable to
the same shape as loss. For example, if loss has
shape (64, 10) and you want to weight each sample
in the batch separately, `sample_weight` should have
shape (64, 1).
Returns
-------
loss : Symbol
Weighted loss
"""
if sample_weight is not None:
loss = F.broadcast_mul(loss, sample_weight)
if weight is not None:
assert isinstance(weight, numeric_types), "weight must be a number"
loss = loss * weight
return loss
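# Illustrative sketch (added, not part of the upstream module): how _apply_weighting
# combines a global weight with a per-sample weight. Assumes an MXNet install;
# the values are made up.
def _example_apply_weighting():
    import mxnet as mx
    loss = mx.nd.ones((2, 3))                    # element-wise loss, shape (2, 3)
    sample_weight = mx.nd.array([[1.0], [0.0]])  # mask out the second sample
    # the per-sample weight is broadcast first, then the global weight 2.0 is applied
    return _apply_weighting(mx.nd, loss, weight=2.0, sample_weight=sample_weight)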
def _reshape_like(F, x, y):
"""Reshapes x to the same shape as y."""
return x.reshape(y.shape) if F is ndarray else F.reshape_like(x, y)
class Loss(HybridBlock):
"""Base class for loss.
Parameters
----------
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
"""
def __init__(self, weight, batch_axis, **kwargs):
super(Loss, self).__init__(**kwargs)
self._weight = weight
self._batch_axis = batch_axis
def __repr__(self):
s = '{name}(batch_axis={_batch_axis}, w={_weight})'
return s.format(name=self.__class__.__name__, **self.__dict__)
def hybrid_forward(self, F, x, *args, **kwargs):
"""Overrides to construct symbolic graph for this `Block`.
Parameters
----------
x : Symbol or NDArray
The first input tensor.
*args : list of Symbol or list of NDArray
Additional input tensors.
"""
# pylint: disable= invalid-name
raise NotImplementedError
class L2Loss(Loss):
r"""Calculates the mean squared error between `pred` and `label`.
.. math:: L = \frac{1}{2} \sum_i \vert {pred}_i - {label}_i \vert^2.
`pred` and `label` can have arbitrary shape as long as they have the same
number of elements.
Parameters
----------
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape
- **label**: target tensor with the same size as pred.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, weight=1., batch_axis=0, **kwargs):
super(L2Loss, self).__init__(weight, batch_axis, **kwargs)
def hybrid_forward(self, F, pred, label, sample_weight=None):
label = _reshape_like(F, label, pred)
loss = F.square(pred - label)
loss = _apply_weighting(F, loss, self._weight/2, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
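# Illustrative usage sketch (added, not part of the upstream module): L2Loss
# averages 0.5 * (pred - label)^2 over all non-batch axes. Assumes an MXNet
# install; the values are made up.
def _example_l2_loss():
    import mxnet as mx
    loss_fn = L2Loss()
    pred = mx.nd.array([[0.0, 2.0], [1.0, 1.0]])
    label = mx.nd.array([[1.0, 1.0], [1.0, 1.0]])
    return loss_fn(pred, label)  # shape (2,): one loss value per sample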
class L1Loss(Loss):
r"""Calculates the mean absolute error between `pred` and `label`.
.. math:: L = \sum_i \vert {pred}_i - {label}_i \vert.
`pred` and `label` can have arbitrary shape as long as they have the same
number of elements.
Parameters
----------
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape
- **label**: target tensor with the same size as pred.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, weight=None, batch_axis=0, **kwargs):
super(L1Loss, self).__init__(weight, batch_axis, **kwargs)
def hybrid_forward(self, F, pred, label, sample_weight=None):
label = _reshape_like(F, label, pred)
loss = F.abs(pred - label)
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
class SigmoidBinaryCrossEntropyLoss(Loss):
r"""The cross-entropy loss for binary classification. (alias: SigmoidBCELoss)
BCE loss is useful when training logistic regression. If `from_sigmoid`
is False (default), this loss computes:
.. math::
prob = \frac{1}{1 + \exp(-{pred})}
L = - \sum_i {label}_i * \log({prob}_i) +
(1 - {label}_i) * \log(1 - {prob}_i)
If `from_sigmoid` is True, this loss computes:
.. math::
L = - \sum_i {label}_i * \log({pred}_i) +
(1 - {label}_i) * \log(1 - {pred}_i)
`pred` and `label` can have arbitrary shape as long as they have the same
number of elements.
Parameters
----------
from_sigmoid : bool, default is `False`
        Whether the input is from the output of sigmoid. Setting this to False will make
        the loss compute the sigmoid and BCE together, which is more numerically
        stable through the log-sum-exp trick.
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape
- **label**: target tensor with values in range `[0, 1]`. Must have the
same size as `pred`.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, from_sigmoid=False, weight=None, batch_axis=0, **kwargs):
super(SigmoidBinaryCrossEntropyLoss, self).__init__(weight, batch_axis, **kwargs)
self._from_sigmoid = from_sigmoid
def hybrid_forward(self, F, pred, label, sample_weight=None):
label = _reshape_like(F, label, pred)
if not self._from_sigmoid:
# We use the stable formula: max(x, 0) - x * z + log(1 + exp(-abs(x)))
loss = F.relu(pred) - pred * label + F.Activation(-F.abs(pred), act_type='softrelu')
else:
loss = -(F.log(pred+1e-12)*label + F.log(1.-pred+1e-12)*(1.-label))
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
SigmoidBCELoss = SigmoidBinaryCrossEntropyLoss
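# Illustrative usage sketch (added, not part of the upstream module): with
# from_sigmoid=False (the default) the inputs are raw logits and the sigmoid and
# BCE are fused for numerical stability. Assumes an MXNet install; values are made up.
def _example_sigmoid_bce_loss():
    import mxnet as mx
    loss_fn = SigmoidBCELoss()
    logits = mx.nd.array([[2.0, -1.0], [0.5, 0.0]])
    labels = mx.nd.array([[1.0, 0.0], [1.0, 1.0]])
    return loss_fn(logits, labels)  # shape (2,)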
class SoftmaxCrossEntropyLoss(Loss):
r"""Computes the softmax cross entropy loss. (alias: SoftmaxCELoss)
If `sparse_label` is `True` (default), label should contain integer
category indicators:
.. math::
        \DeclareMathOperator{\softmax}{softmax}
p = \softmax({pred})
L = -\sum_i \log p_{i,{label}_i}
`label`'s shape should be `pred`'s shape with the `axis` dimension removed.
i.e. for `pred` with shape (1,2,3,4) and `axis = 2`, `label`'s shape should
be (1,2,4).
If `sparse_label` is `False`, `label` should contain probability distribution
and `label`'s shape should be the same with `pred`:
.. math::
p = \softmax({pred})
L = -\sum_i \sum_j {label}_j \log p_{ij}
Parameters
----------
axis : int, default -1
The axis to sum over when computing softmax and entropy.
sparse_label : bool, default True
Whether label is an integer array instead of probability distribution.
from_logits : bool, default False
Whether input is a log probability (usually from log_softmax) instead
of unnormalized numbers.
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: the prediction tensor, where the `batch_axis` dimension
ranges over batch size and `axis` dimension ranges over the number
of classes.
- **label**: the truth tensor. When `sparse_label` is True, `label`'s
shape should be `pred`'s shape with the `axis` dimension removed.
i.e. for `pred` with shape (1,2,3,4) and `axis = 2`, `label`'s shape
should be (1,2,4) and values should be integers between 0 and 2. If
`sparse_label` is False, `label`'s shape must be the same as `pred`
and values should be floats in the range `[0, 1]`.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as label. For example, if label has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, axis=-1, sparse_label=True, from_logits=False, weight=None,
batch_axis=0, **kwargs):
super(SoftmaxCrossEntropyLoss, self).__init__(weight, batch_axis, **kwargs)
self._axis = axis
self._sparse_label = sparse_label
self._from_logits = from_logits
def hybrid_forward(self, F, pred, label, sample_weight=None):
if not self._from_logits:
pred = F.log_softmax(pred, self._axis)
if self._sparse_label:
loss = -F.pick(pred, label, axis=self._axis, keepdims=True)
else:
label = _reshape_like(F, label, pred)
loss = -F.sum(pred*label, axis=self._axis, keepdims=True)
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
SoftmaxCELoss = SoftmaxCrossEntropyLoss
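# Illustrative usage sketch (added, not part of the upstream module): with
# sparse_label=True (the default) the label holds integer class indices.
# Assumes an MXNet install; values are made up.
def _example_softmax_ce_loss():
    import mxnet as mx
    loss_fn = SoftmaxCELoss()
    logits = mx.nd.array([[1.0, 2.0, 0.5], [0.1, 0.2, 3.0]])  # (batch, classes)
    labels = mx.nd.array([1, 2])                              # class indices
    return loss_fn(logits, labels)  # shape (2,)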
class KLDivLoss(Loss):
r"""The Kullback-Leibler divergence loss.
    KL divergence measures the distance between probability distributions. It
can be used to minimize information loss when approximating a distribution.
If `from_logits` is True (default), loss is defined as:
.. math::
L = \sum_i {label}_i * \big[\log({label}_i) - {pred}_i\big]
If `from_logits` is False, loss is defined as:
.. math::
        \DeclareMathOperator{\softmax}{softmax}
prob = \softmax({pred})
        L = \sum_i {label}_i * \big[\log({label}_i) - \log({prob}_i)\big]
`pred` and `label` can have arbitrary shape as long as they have the same
number of elements.
Parameters
----------
from_logits : bool, default is `True`
Whether the input is log probability (usually from log_softmax) instead
of unnormalized numbers.
axis : int, default -1
The dimension along with to compute softmax. Only used when `from_logits`
is False.
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape. If `from_logits` is
True, `pred` should be log probabilities. Otherwise, it should be
unnormalized predictions, i.e. from a dense layer.
- **label**: truth tensor with values in range `(0, 1)`. Must have
the same size as `pred`.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
References
----------
`Kullback-Leibler divergence
<https://en.wikipedia.org/wiki/Kullback-Leibler_divergence>`_
"""
def __init__(self, from_logits=True, axis=-1, weight=None, batch_axis=0,
**kwargs):
super(KLDivLoss, self).__init__(weight, batch_axis, **kwargs)
self._from_logits = from_logits
self._axis = axis
def hybrid_forward(self, F, pred, label, sample_weight=None):
if not self._from_logits:
pred = F.log_softmax(pred, self._axis)
loss = label * (F.log(label+1e-12) - pred)
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
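# Illustrative usage sketch (added, not part of the upstream module): with
# from_logits=True (the default) `pred` must already be log-probabilities.
# Assumes an MXNet install; values are made up.
def _example_kl_div_loss():
    import mxnet as mx
    loss_fn = KLDivLoss()
    pred = mx.nd.log_softmax(mx.nd.array([[1.0, 2.0, 3.0]]))  # log-probabilities
    label = mx.nd.array([[0.2, 0.3, 0.5]])                    # target distribution
    return loss_fn(pred, label)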
class CTCLoss(Loss):
r"""Connectionist Temporal Classification Loss.
Parameters
----------
layout : str, default 'NTC'
        Layout of prediction tensor. 'N', 'T', 'C' stand for batch size,
sequence length, and alphabet_size respectively.
label_layout : str, default 'NT'
        Layout of the labels. 'N', 'T' stand for batch size and sequence
length respectively.
weight : float or None
Global scalar weight for loss.
Inputs:
- **pred**: unnormalized prediction tensor (before softmax).
Its shape depends on `layout`. If `layout` is 'TNC', pred
should have shape `(sequence_length, batch_size, alphabet_size)`.
Note that in the last dimension, index `alphabet_size-1` is reserved
for internal use as blank label. So `alphabet_size` is one plus the
actual alphabet size.
- **label**: zero-based label tensor. Its shape depends on `label_layout`.
If `label_layout` is 'TN', `label` should have shape
`(label_sequence_length, batch_size)`.
- **pred_lengths**: optional (default None), used for specifying the
length of each entry when different `pred` entries in the same batch
have different lengths. `pred_lengths` should have shape `(batch_size,)`.
- **label_lengths**: optional (default None), used for specifying the
length of each entry when different `label` entries in the same batch
have different lengths. `label_lengths` should have shape `(batch_size,)`.
Outputs:
- **loss**: output loss has shape `(batch_size,)`.
**Example**: suppose the vocabulary is `[a, b, c]`, and in one batch we
have three sequences 'ba', 'cbb', and 'abac'. We can index the labels as
`{'a': 0, 'b': 1, 'c': 2, blank: 3}`. Then `alphabet_size` should be 4,
where label 3 is reserved for internal use by `CTCLoss`. We then need to
pad each sequence with `-1` to make a rectangular `label` tensor::
[[1, 0, -1, -1],
[2, 1, 1, -1],
[0, 1, 0, 2]]
References
----------
`Connectionist Temporal Classification: Labelling Unsegmented
Sequence Data with Recurrent Neural Networks
<http://www.cs.toronto.edu/~graves/icml_2006.pdf>`_
"""
def __init__(self, layout='NTC', label_layout='NT', weight=None, **kwargs):
assert layout in ['NTC', 'TNC'],\
"Only 'NTC' and 'TNC' layouts for pred are supported. Got: %s"%layout
assert label_layout in ['NT', 'TN'],\
"Only 'NT' and 'TN' layouts for label are supported. Got: %s"%label_layout
self._layout = layout
self._label_layout = label_layout
batch_axis = label_layout.find('N')
super(CTCLoss, self).__init__(weight, batch_axis, **kwargs)
def hybrid_forward(self, F, pred, label,
pred_lengths=None, label_lengths=None, sample_weight=None):
if self._layout == 'NTC':
pred = F.swapaxes(pred, 0, 1)
if self._batch_axis == 1:
label = F.swapaxes(label, 0, 1)
loss = F.CTCLoss(pred, label, pred_lengths, label_lengths,
use_data_lengths=pred_lengths is not None,
use_label_lengths=label_lengths is not None,
blank_label='last')
return _apply_weighting(F, loss, self._weight, sample_weight)
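# Illustrative usage sketch (added, not part of the upstream module): mirrors the
# docstring example with vocabulary [a, b, c], alphabet_size 4 (last index is the
# blank) and labels padded with -1. Assumes an MXNet install; predictions are random.
def _example_ctc_loss():
    import mxnet as mx
    loss_fn = CTCLoss(layout='NTC', label_layout='NT')
    pred = mx.nd.random.uniform(shape=(3, 10, 4))  # (batch, seq_len, alphabet_size)
    label = mx.nd.array([[1, 0, -1, -1],
                         [2, 1, 1, -1],
                         [0, 1, 0, 2]])
    return loss_fn(pred, label)  # shape (3,)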
class HuberLoss(Loss):
r"""Calculates smoothed L1 loss that is equal to L1 loss if absolute error
exceeds rho but is equal to L2 loss otherwise. Also called SmoothedL1 loss.
.. math::
L = \sum_i \begin{cases} \frac{1}{2 {rho}} ({pred}_i - {label}_i)^2 &
\text{ if } |{pred}_i - {label}_i| < {rho} \\
|{pred}_i - {label}_i| - \frac{{rho}}{2} &
\text{ otherwise }
\end{cases}
`pred` and `label` can have arbitrary shape as long as they have the same
number of elements.
Parameters
----------
rho : float, default 1
Threshold for trimmed mean estimator.
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape
- **label**: target tensor with the same size as pred.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, rho=1, weight=None, batch_axis=0, **kwargs):
super(HuberLoss, self).__init__(weight, batch_axis, **kwargs)
self._rho = rho
def hybrid_forward(self, F, pred, label, sample_weight=None):
label = _reshape_like(F, label, pred)
loss = F.abs(pred - label)
loss = F.where(loss > self._rho, loss - 0.5 * self._rho,
(0.5/self._rho) * F.square(loss))
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
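# Illustrative usage sketch (added, not part of the upstream module): errors
# smaller than rho are penalised quadratically, larger ones linearly. Assumes an
# MXNet install; values are made up.
def _example_huber_loss():
    import mxnet as mx
    loss_fn = HuberLoss(rho=1.0)
    pred = mx.nd.array([[0.0, 3.0]])
    label = mx.nd.array([[0.5, 0.0]])  # one small error, one large error
    return loss_fn(pred, label)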
class HingeLoss(Loss):
r"""Calculates the hinge loss function often used in SVMs:
.. math::
L = \sum_i max(0, {margin} - {pred}_i \cdot {label}_i)
where `pred` is the classifier prediction and `label` is the target tensor
containing values -1 or 1. `pred` and `label` must have the same number of
elements.
Parameters
----------
margin : float
The margin in hinge loss. Defaults to 1.0
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape.
- **label**: truth tensor with values -1 or 1. Must have the same size
as pred.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, margin=1, weight=None, batch_axis=0, **kwargs):
super(HingeLoss, self).__init__(weight, batch_axis, **kwargs)
self._margin = margin
def hybrid_forward(self, F, pred, label, sample_weight=None):
label = _reshape_like(F, label, pred)
loss = F.relu(self._margin - pred * label)
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
class SquaredHingeLoss(Loss):
r"""Calculates the soft-margin loss function used in SVMs:
.. math::
L = \sum_i max(0, {margin} - {pred}_i \cdot {label}_i)^2
where `pred` is the classifier prediction and `label` is the target tensor
containing values -1 or 1. `pred` and `label` can have arbitrary shape as
long as they have the same number of elements.
Parameters
----------
margin : float
The margin in hinge loss. Defaults to 1.0
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape
- **label**: truth tensor with values -1 or 1. Must have the same size
as pred.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, margin=1, weight=None, batch_axis=0, **kwargs):
super(SquaredHingeLoss, self).__init__(weight, batch_axis, **kwargs)
self._margin = margin
def hybrid_forward(self, F, pred, label, sample_weight=None):
label = _reshape_like(F, label, pred)
loss = F.square(F.relu(self._margin - pred * label))
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
class LogisticLoss(Loss):
r"""Calculates the logistic loss (for binary losses only):
.. math::
L = \sum_i \log(1 + \exp(- {pred}_i \cdot {label}_i))
where `pred` is the classifier prediction and `label` is the target tensor
containing values -1 or 1 (0 or 1 if `label_format` is binary).
`pred` and `label` can have arbitrary shape as long as they have the same number of elements.
Parameters
----------
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
label_format : str, default 'signed'
Can be either 'signed' or 'binary'. If the label_format is 'signed', all label values should
be either -1 or 1. If the label_format is 'binary', all label values should be either
0 or 1.
Inputs:
- **pred**: prediction tensor with arbitrary shape.
- **label**: truth tensor with values -1/1 (label_format is 'signed')
or 0/1 (label_format is 'binary'). Must have the same size as pred.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, weight=None, batch_axis=0, label_format='signed', **kwargs):
super(LogisticLoss, self).__init__(weight, batch_axis, **kwargs)
self._label_format = label_format
if self._label_format not in ["signed", "binary"]:
raise ValueError("label_format can only be signed or binary, recieved %s."
% label_format)
def hybrid_forward(self, F, pred, label, sample_weight=None):
label = _reshape_like(F, label, pred)
if self._label_format == 'signed':
label = (label + 1.0) / 2.0 # Transform label to be either 0 or 1
# Use a stable formula in computation
loss = F.relu(pred) - pred * label + F.Activation(-F.abs(pred), act_type='softrelu')
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
class TripletLoss(Loss):
r"""Calculates triplet loss given three input tensors and a positive margin.
Triplet loss measures the relative similarity between prediction, a positive
example and a negative example:
.. math::
L = \sum_i \max(\Vert {pred}_i - {pos_i} \Vert_2^2 -
\Vert {pred}_i - {neg_i} \Vert_2^2 + {margin}, 0)
`pred`, `positive` and `negative` can have arbitrary shape as long as they
have the same number of elements.
Parameters
----------
margin : float
Margin of separation between correct and incorrect pair.
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
Inputs:
- **pred**: prediction tensor with arbitrary shape
- **positive**: positive example tensor with arbitrary shape. Must have
the same size as pred.
        - **negative**: negative example tensor with arbitrary shape. Must have
the same size as pred.
Outputs:
- **loss**: loss tensor with shape (batch_size,).
"""
def __init__(self, margin=1, weight=None, batch_axis=0, **kwargs):
super(TripletLoss, self).__init__(weight, batch_axis, **kwargs)
self._margin = margin
def hybrid_forward(self, F, pred, positive, negative):
positive = _reshape_like(F, positive, pred)
negative = _reshape_like(F, negative, pred)
loss = F.sum(F.square(pred-positive) - F.square(pred-negative),
axis=self._batch_axis, exclude=True)
loss = F.relu(loss + self._margin)
return _apply_weighting(F, loss, self._weight, None)
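# Illustrative usage sketch (added, not part of the upstream module): the loss is
# zero once the anchor is closer to the positive than to the negative by at least
# `margin`. Assumes an MXNet install; values are made up.
def _example_triplet_loss():
    import mxnet as mx
    loss_fn = TripletLoss(margin=1.0)
    anchor = mx.nd.array([[0.0, 0.0]])
    positive = mx.nd.array([[0.1, 0.0]])
    negative = mx.nd.array([[2.0, 0.0]])
    return loss_fn(anchor, positive, negative)  # shape (1,)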
class PoissonNLLLoss(Loss):
r"""For a target (Random Variable) in a Poisson distribution, the function calculates the Negative
Log likelihood loss.
PoissonNLLLoss measures the loss accrued from a poisson regression prediction made by the model.
.. math::
L = \text{pred} - \text{target} * \log(\text{pred}) +\log(\text{target!})
`pred`, `target` can have arbitrary shape as long as they have the same number of elements.
Parameters
----------
from_logits : boolean, default True
        Indicates whether the log of the predicted value has already been computed. If True, the loss is
        computed as :math:`\exp(\text{pred}) - \text{target} * \text{pred}`; if False, the loss is computed as
        :math:`\text{pred} - \text{target} * \log(\text{pred}+\text{epsilon})`. The default value is True.
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
compute_full: boolean, default False
        Indicates whether to add an approximation (Stirling factor) for the factorial term in the formula for the loss.
The Stirling factor is:
:math:`\text{target} * \log(\text{target}) - \text{target} + 0.5 * \log(2 * \pi * \text{target})`
epsilon: float, default 1e-08
This is to avoid calculating log(0) which is not defined.
Inputs:
- **pred**: Predicted value
- **target**: Random variable(count or number) which belongs to a Poisson distribution.
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
- **loss**: Average loss (shape=(1,1)) of the loss tensor with shape (batch_size,).
"""
def __init__(self, weight=None, from_logits=True, batch_axis=0, compute_full=False, **kwargs):
super(PoissonNLLLoss, self).__init__(weight, batch_axis, **kwargs)
self._from_logits = from_logits
self._compute_full = compute_full
def hybrid_forward(self, F, pred, target, sample_weight=None, epsilon=1e-08):
target = _reshape_like(F, target, pred)
if self._from_logits:
loss = F.exp(pred) - target * pred
else:
loss = pred - target * F.log(pred + epsilon)
if self._compute_full:
# Using numpy's pi value
            stirling_factor = target * F.log(target) - target + 0.5 * F.log(2 * target * np.pi)
target_gt_1 = target > 1
stirling_factor *= target_gt_1
loss += stirling_factor
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
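# Illustrative usage sketch (added, not part of the upstream module): with
# from_logits=True (the default) `pred` is interpreted as the log of the Poisson
# rate. Assumes an MXNet install; values are made up.
def _example_poisson_nll_loss():
    import mxnet as mx
    loss_fn = PoissonNLLLoss()
    log_rate = mx.nd.array([[0.5, 1.0]])
    counts = mx.nd.array([[1.0, 2.0]])  # observed event counts
    return loss_fn(log_rate, counts)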
class CosineEmbeddingLoss(Loss):
r"""For a target label 1 or -1, vectors input1 and input2, the function computes the cosine distance
between the vectors. This can be interpreted as how similar/dissimilar two input vectors are.
.. math::
L = \sum_i \begin{cases} 1 - {cos\_sim({input1}_i, {input2}_i)} & \text{ if } {label}_i = 1\\
{cos\_sim({input1}_i, {input2}_i)} & \text{ if } {label}_i = -1 \end{cases}\\
        cos\_sim(input1, input2) = \frac{{input1}_i \cdot {input2}_i}{\Vert {input1}_i \Vert \, \Vert {input2}_i \Vert}
`input1`, `input2` can have arbitrary shape as long as they have the same number of elements.
Parameters
----------
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
margin : float
Margin of separation between correct and incorrect pair.
Inputs:
- **input1**: a tensor with arbitrary shape
- **input2**: another tensor with same shape as pred to which input1 is
compared for similarity and loss calculation
- **label**: A 1-D tensor indicating for each pair input1 and input2, target label is 1 or -1
- **sample_weight**: element-wise weighting tensor. Must be broadcastable
to the same shape as input1. For example, if input1 has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).
Outputs:
- **loss**: The loss tensor with shape (batch_size,).
"""
def __init__(self, weight=None, batch_axis=0, margin=0, **kwargs):
super(CosineEmbeddingLoss, self).__init__(weight, batch_axis, **kwargs)
self._margin = margin
def hybrid_forward(self, F, input1, input2, label, sample_weight=None):
input1 = _reshape_like(F, input1, input2)
label = label.reshape((-1, 1))
cos_sim = self._cosine_similarity(F, input1, input2)
y_1 = label == 1
y_minus_1 = label == -1
cos_sim_a = (1 - cos_sim) * y_1
if F is ndarray:
z_array = F.array([0])
else:
z_array = F.zeros((1, 1))
cos_sim_b = F.broadcast_maximum(z_array, y_minus_1 * (cos_sim - self._margin), axis=1)
loss = cos_sim_a + cos_sim_b
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return loss
def _cosine_similarity(self, F, x, y, axis=-1):
# Calculates the cosine similarity between 2 vectors
x_norm = F.norm(x, axis=axis).reshape(-1, 1)
y_norm = F.norm(y, axis=axis).reshape(-1, 1)
x_dot_y = F.sum(x*y, axis=axis).reshape(-1, 1)
if F is ndarray:
eps_arr = F.array([1e-12])
else:
eps_arr = F.full((1, 1), 1e-12)
return (x_dot_y / F.broadcast_maximum(x_norm * y_norm, eps_arr))
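# Illustrative usage sketch (added, not part of the upstream module): label +1
# pulls a pair together in cosine similarity, label -1 pushes it apart beyond
# `margin`. Assumes an MXNet install; values are made up.
def _example_cosine_embedding_loss():
    import mxnet as mx
    loss_fn = CosineEmbeddingLoss(margin=0.5)
    input1 = mx.nd.array([[1.0, 0.0], [0.0, 1.0]])
    input2 = mx.nd.array([[1.0, 0.1], [1.0, 0.0]])
    label = mx.nd.array([1, -1])  # first pair similar, second pair dissimilar
    return loss_fn(input1, input2, label)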
| {
"content_hash": "d2c3b98e6074d558669a8cf0ef69db0a",
"timestamp": "",
"source": "github",
"line_count": 818,
"max_line_length": 118,
"avg_line_length": 39.43520782396088,
"alnum_prop": 0.6110422220844441,
"repo_name": "mlperf/training_results_v0.6",
"id": "238b300ba37bc187c45a01d3108121ad78981a7f",
"size": "33095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/python/mxnet/gluon/loss.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
} |
import graphene
from .....checkout import calculations
from .....checkout.fetch import fetch_checkout_info, fetch_checkout_lines
from .....payment.error_codes import PaymentErrorCode
from .....payment.models import ChargeStatus, Payment
from .....plugins.manager import get_plugins_manager
from ....tests.utils import get_graphql_content
DUMMY_GATEWAY = "mirumee.payments.dummy"
CREATE_PAYMENT_MUTATION = """
mutation CheckoutPaymentCreate(
$checkoutId: ID, $token: UUID, $input: PaymentInput!
) {
checkoutPaymentCreate(checkoutId: $checkoutId, token: $token, input: $input) {
payment {
chargeStatus
}
errors {
code
field
}
}
}
"""
def test_checkout_add_payment_by_checkout_id(
user_api_client, checkout_without_shipping_required, address
):
checkout = checkout_without_shipping_required
checkout.billing_address = address
checkout.save()
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
manager = get_plugins_manager()
lines, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
total = calculations.checkout_total(
manager=manager, checkout_info=checkout_info, lines=lines, address=address
)
variables = {
"checkoutId": checkout_id,
"input": {
"gateway": DUMMY_GATEWAY,
"token": "sample-token",
"amount": total.gross.amount,
},
}
response = user_api_client.post_graphql(CREATE_PAYMENT_MUTATION, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutPaymentCreate"]
assert not data["errors"]
payment = Payment.objects.get()
assert payment.checkout == checkout
assert payment.is_active
assert payment.token == "sample-token"
assert payment.total == total.gross.amount
assert payment.currency == total.gross.currency
assert payment.charge_status == ChargeStatus.NOT_CHARGED
assert payment.billing_address_1 == checkout.billing_address.street_address_1
assert payment.billing_first_name == checkout.billing_address.first_name
assert payment.billing_last_name == checkout.billing_address.last_name
def test_checkout_add_payment_neither_token_and_id_given(
user_api_client, checkout_without_shipping_required, address
):
checkout = checkout_without_shipping_required
checkout.billing_address = address
checkout.save()
manager = get_plugins_manager()
lines, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
total = calculations.checkout_total(
manager=manager, checkout_info=checkout_info, lines=lines, address=address
)
variables = {
"input": {
"gateway": DUMMY_GATEWAY,
"token": "sample-token",
"amount": total.gross.amount,
},
}
response = user_api_client.post_graphql(CREATE_PAYMENT_MUTATION, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutPaymentCreate"]
assert len(data["errors"]) == 1
assert not data["payment"]
assert data["errors"][0]["code"] == PaymentErrorCode.GRAPHQL_ERROR.name
def test_checkout_add_payment_both_token_and_id_given(
user_api_client, checkout_without_shipping_required, address
):
checkout = checkout_without_shipping_required
checkout.billing_address = address
checkout.save()
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
manager = get_plugins_manager()
lines, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
total = calculations.checkout_total(
manager=manager, checkout_info=checkout_info, lines=lines, address=address
)
variables = {
"checkoutId": checkout_id,
"token": checkout.token,
"input": {
"gateway": DUMMY_GATEWAY,
"token": "sample-token",
"amount": total.gross.amount,
},
}
response = user_api_client.post_graphql(CREATE_PAYMENT_MUTATION, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutPaymentCreate"]
assert len(data["errors"]) == 1
assert not data["payment"]
assert data["errors"][0]["code"] == PaymentErrorCode.GRAPHQL_ERROR.name
| {
"content_hash": "575785b9021d609a37e222f4232c96c1",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 86,
"avg_line_length": 36.24390243902439,
"alnum_prop": 0.6639748766262898,
"repo_name": "mociepka/saleor",
"id": "53672ce5038affbeca0816f407b2efdd081c1511",
"size": "4458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/graphql/payment/tests/deprecated/test_checkout_payment_create.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
from django.conf import settings
# noinspection PyProtectedMember
from django.core.cache import caches
from django.utils.six import iteritems
from logging import getLogger
import requests
import requests.exceptions
import string
import random
import socket
logger = getLogger(__name__)
def random_password(minlength=20, maxlength=30):
"""
Generate random string used as password.
"""
length = random.randint(minlength, maxlength)
letters = string.ascii_letters + string.digits
return ''.join([random.choice(letters) for _ in range(length)])
def _test_vnc(host, port, timeout=3):
"""
Test VNC connection.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
try:
sock.connect((host, port))
if sock.recv(1024).startswith('RFB'):
return True
except (socket.error, socket.timeout, socket.herror, socket.gaierror) as err:
logger.warning('Error "%s" when testing VNC on "%s:%s"', err, host, port)
finally:
sock.close()
return False
class Guacamole(object):
"""
Manipulate guacamole authorization from django.
"""
def __init__(self, request, vm=None, username=None, password=None, save_password=False, load_password=False):
"""
:param request: django request object.
:param vm: vm object or list of vm objects (queryset). If it's a object
it will be turned into a list.
:param username: if not specified it will be set to the username
attribute of request.user object.
:param password: if not specified it will be auto generated.
:param save_password: if True, then save the password in the
request.session object.
:param load_password: if True, then load the password from the
request.session object.
"""
self.auth = None
self.tree = None
self.request = request
self.vm = vm
if self.vm and not hasattr(self.vm, '__iter__'):
self.vm = [vm]
self.usr = username
if not self.usr:
self.usr = request.user.username
self.key = settings.GUACAMOLE_KEY + self.usr
self.pwd = password
if not self.pwd:
if load_password:
self.pwd = self.request.session.get(self.key, random_password())
else:
self.pwd = random_password()
if save_password:
self.request.session[self.key] = self.pwd
def __set_tree(self):
self.tree = {}
def __set_auth(self):
self.tree['password'] = self.pwd
def __set_vm(self):
for i in self.vm:
self.tree[i.hostname] = {
'protocol': 'vnc',
'hostname': i.node.address,
'port': i.vnc_port
}
@classmethod
def test_vnc(cls, vm, timeout=2):
"""
Test VNC connection on VM.
"""
return _test_vnc(vm.node.address, vm.vnc_port, timeout=timeout)
def usermap(self):
"""
Generate the user-mapping XML and return it along with the key string.
"""
logger.debug('Creating guacamole user-mapping for user %s.', self.usr)
self.__set_tree()
self.__set_auth()
if self.vm:
self.__set_vm()
return self.key, self.tree
def login(self, save_cookie=True):
"""
Perform a login to guacamole by issuing a POST request to /api/tokens.
"""
logger.info('Performing guacamole login of user %s.', self.usr)
exc = None
r = None
try:
r = requests.post(
settings.GUACAMOLE_URI + '/api/tokens',
data={'username': self.usr, 'password': self.pwd},
headers={'User-Agent': settings.GUACAMOLE_USERAGENT},
timeout=settings.GUACAMOLE_TIMEOUT,
allow_redirects=False
)
except requests.exceptions.RequestException as exc:
logger.exception(exc)
status = None
else:
status = r.status_code
if status == 200 and settings.GUACAMOLE_COOKIE in r.cookies:
token = r.json().get('authToken', '')
cookie = r.cookies[settings.GUACAMOLE_COOKIE]
logger.info('User %s got guacamole cookie=%s and token=%s.', self.usr, cookie, token)
if save_cookie:
self.request.session[settings.GUACAMOLE_COOKIE] = cookie
self.request.session[settings.GUACAMOLE_TOKEN] = token
res = {
'token': token,
'cookie': {
'key': settings.GUACAMOLE_COOKIE,
'value': cookie,
'path': settings.GUACAMOLE_COOKIEPATH,
'domain': settings.GUACAMOLE_COOKIEDOMAIN,
'httponly': False
}
}
else:
logger.error('User %s could not login to guacamole, status=%s, response="%r".',
self.usr, status, exc or r.text)
res = {}
return res
def logout(self):
"""
Perform a logout from guacamole by issuing a DELETE request to /api/tokens/<token>.
"""
session = self.request.session
token = ''
logger.info('Performing guacamole logout of user %s.', self.usr)
if settings.GUACAMOLE_COOKIE in session and settings.GUACAMOLE_TOKEN in session:
token = session[settings.GUACAMOLE_TOKEN]
try:
r = requests.delete(
settings.GUACAMOLE_URI + '/api/tokens/' + token,
cookies={settings.GUACAMOLE_COOKIE: session[settings.GUACAMOLE_COOKIE]},
headers={'User-Agent': settings.GUACAMOLE_USERAGENT},
timeout=settings.GUACAMOLE_TIMEOUT,
allow_redirects=False
)
r.raise_for_status()
except requests.exceptions.RequestException as exc:
if exc.response and exc.response.status_code == 404:
logger.warning('User %s could not logout from guacamole because the token "%s" '
'does not exist anymore', self.usr, token)
else:
logger.exception(exc)
logger.error('User %s could not logout from guacamole (%r).', self.usr, exc)
else:
logger.info('User %s has no guacamole cookie and/or token.', self.usr)
return {
'token': token,
'cookie': {
'key': settings.GUACAMOLE_COOKIE,
'path': settings.GUACAMOLE_COOKIEPATH,
}
}
class GuacamoleAuth(Guacamole):
"""
Manipulate guacamole-auth-redis keys.
"""
redis = caches['redis'].master_client
def set_auth(self):
"""
Create Guacamole usermap and store it in redis.
"""
username, configs = self.usermap()
pipe = self.redis.pipeline()
pipe.hset(username, 'password', configs.pop('password', None))
for key, cfg in iteritems(configs):
val = '\n'.join([str(i) + '=' + str(j) for i, j in iteritems(cfg)])
pipe.hset(username, key, val)
return pipe.execute()
def del_auth(self):
"""
Remove Guacamole usermap from redis.
"""
return self.redis.delete(self.key)
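# Illustrative note (added, not part of the original module): a rough sketch of
# what GuacamoleAuth.set_auth() stores in redis for a user "alice" with one VM;
# a hash named GUACAMOLE_KEY + username holding the generated password plus one
# "key=value"-per-line VNC config per hostname. All names and values below are
# made up for illustration only.
EXAMPLE_GUACAMOLE_USERMAP = {
    'guacamole:alice': {
        'password': 'generated-random-password',
        'vm1.example.com': 'protocol=vnc\nhostname=10.0.0.5\nport=5901',
    },
}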
| {
"content_hash": "89304966f8a285b323f9cfa01fcf06bb",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 113,
"avg_line_length": 31.953191489361704,
"alnum_prop": 0.5591956319083766,
"repo_name": "erigones/esdc-ce",
"id": "d3ad7519736beb8391b8973acd2c09cb798b8bdb",
"size": "7509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/vm/guacamole.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2728"
},
{
"name": "C",
"bytes": "8581"
},
{
"name": "CSS",
"bytes": "146461"
},
{
"name": "DTrace",
"bytes": "2250"
},
{
"name": "Erlang",
"bytes": "18842"
},
{
"name": "HTML",
"bytes": "473343"
},
{
"name": "JavaScript",
"bytes": "679240"
},
{
"name": "Jinja",
"bytes": "29584"
},
{
"name": "PLpgSQL",
"bytes": "17954"
},
{
"name": "Perl",
"bytes": "93955"
},
{
"name": "Python",
"bytes": "3124524"
},
{
"name": "Ruby",
"bytes": "56"
},
{
"name": "SCSS",
"bytes": "82814"
},
{
"name": "Shell",
"bytes": "281885"
}
],
"symlink_target": ""
} |
""" Testing suite for the PyTorch M2M100 model. """
import copy
import tempfile
import unittest
from transformers import is_torch_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_generation_utils import GenerationTesterMixin
from .test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import M2M100Config, M2M100ForConditionalGeneration, M2M100Model, M2M100Tokenizer
from transformers.models.m2m_100.modeling_m2m_100 import M2M100Decoder, M2M100Encoder
def prepare_m2m_100_inputs_dict(
config,
input_ids,
decoder_input_ids,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
if head_mask is None:
head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
if decoder_head_mask is None:
decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
if cross_attn_head_mask is None:
cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
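# Illustrative sketch (added, not part of the original test file): how the default
# masks above are built. Each attention mask is just `ne(pad_token_id)` over the
# ids and each head mask is an all-ones tensor of shape (num_layers, num_heads).
# The values are made up and the snippet only assumes torch is available.
def _example_default_masks():
    pad_token_id = 1
    input_ids = torch.tensor([[5, 7, 9, pad_token_id]])
    attention_mask = input_ids.ne(pad_token_id)  # tensor([[True, True, True, False]])
    head_mask = torch.ones(2, 4)                 # (encoder_layers, encoder_attention_heads)
    return attention_mask, head_mask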
@require_torch
class M2M100ModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="relu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
input_ids = input_ids.clamp(self.pad_token_id + 1)
decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
config = M2M100Config(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
encoder_layerdrop=self.encoder_layerdrop,
decoder_layerdrop=self.decoder_layerdrop,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
)
inputs_dict = prepare_m2m_100_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = M2M100Model(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
head_mask = inputs_dict["head_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and attention_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = M2M100Model(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = M2M100Encoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = M2M100Decoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (
(
M2M100Model,
M2M100ForConditionalGeneration,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (M2M100ForConditionalGeneration,) if is_torch_available() else ()
is_encoder_decoder = True
test_pruning = False
test_missing_keys = False
def setUp(self):
self.model_tester = M2M100ModelTester(self)
self.config_tester = ConfigTester(self, config_class=M2M100Config)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (M2M100Model, M2M100ForConditionalGeneration):
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = M2M100ForConditionalGeneration(config).eval().to(torch_device)
if torch_device == "cuda":
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class M2M100ModelIntegrationTests(unittest.TestCase):
@cached_property
def default_tokenizer(self):
return M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
def test_inference_no_head(self):
model = M2M100Model.from_pretrained("facebook/m2m100_418M").to(torch_device)
input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
with torch.no_grad():
output = model(**inputs_dict)[0]
expected_shape = torch.Size((1, 11, 1024))
self.assertEqual(output.shape, expected_shape)
# change to expected output here
expected_slice = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
def test_inference_head(self):
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
# change to intended input
input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
with torch.no_grad():
output = model(**inputs_dict)[0]
expected_shape = torch.Size((1, 11, model.config.vocab_size))
self.assertEqual(output.shape, expected_shape)
# change to expected output here
expected_slice = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
def test_seq_to_seq_generation(self):
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
src_fr = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
dct = tokenizer(src_fr, padding=True, return_tensors="pt")
hypotheses_batch = model.generate(
input_ids=dct["input_ids"].to(torch_device),
attention_mask=dct["attention_mask"].to(torch_device),
num_beams=5,
forced_bos_token_id=tokenizer.get_lang_id("en"),
)
expected_en = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S. Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all communications in France.",
]
generated = tokenizer.batch_decode(
hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
)
assert generated == expected_en
| {
"content_hash": "112cc1458510425b51c21e8e709c72c4",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 305,
"avg_line_length": 43.917355371900825,
"alnum_prop": 0.652866641575712,
"repo_name": "huggingface/pytorch-transformers",
"id": "e39876e4ee7cecd996470b3c2ecb2e7130ce7b19",
"size": "16582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_modeling_m2m_100.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "194"
},
{
"name": "Jupyter Notebook",
"bytes": "535623"
},
{
"name": "Python",
"bytes": "897445"
}
],
"symlink_target": ""
} |
import warnings
from functools import partial
from typing import ( # pylint: disable=unused-import
Any, AnyStr, Dict, IO, Iterable, List, Optional, overload, Tuple, Union,
TYPE_CHECKING
)
from azure.core.async_paging import AsyncItemPaged
from azure.core.exceptions import ResourceNotFoundError, HttpResponseError, ResourceExistsError
from azure.core.pipeline import AsyncPipeline
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
from .._shared.policies_async import ExponentialRetry
from .._shared.response_handlers import return_response_headers, process_storage_error
from .._generated.aio import AzureBlobStorage
from .._generated.models import CpkInfo
from .._blob_client import BlobClient as BlobClientBase
from .._deserialize import (
deserialize_blob_properties,
deserialize_pipeline_response_into_cls,
get_page_ranges_result,
parse_tags
)
from .._encryption import StorageEncryptionMixin
from .._models import BlobType, BlobBlock, BlobProperties, PageRange
from .._serialize import get_modify_conditions, get_api_version, get_access_conditions
from ._download_async import StorageStreamDownloader
from ._lease_async import BlobLeaseClient
from ._models import PageRangePaged
from ._upload_helpers import (
upload_block_blob,
upload_append_blob,
upload_page_blob
)
if TYPE_CHECKING:
from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
from datetime import datetime
from .._models import ( # pylint: disable=unused-import
ContentSettings,
ImmutabilityPolicy,
PremiumPageBlobTier,
StandardBlobTier,
SequenceNumberAction
)
class BlobClient(AsyncStorageAccountHostsMixin, BlobClientBase, StorageEncryptionMixin): # pylint: disable=too-many-public-methods
"""A client to interact with a specific blob, although that blob may not yet exist.
:param str account_url:
The URI to the storage account. In order to create a client given the full URI to the blob,
use the :func:`from_blob_url` classmethod.
:param container_name: The container name for the blob.
:type container_name: str
:param blob_name: The name of the blob with which to interact. If specified, this value will override
a blob value specified in the blob URL.
:type blob_name: str
:param str snapshot:
The optional blob snapshot on which to operate. This can be the snapshot ID string
or the response returned from :func:`create_snapshot`.
:param credential:
The credentials with which to authenticate. This is optional if the
account URL already has a SAS token. The value can be a SAS token string,
an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
an account shared access key, or an instance of a TokenCredentials class from azure.identity.
If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
- except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
should be the storage account key.
:keyword str api_version:
The Storage API version to use for requests. Default value is the most recent service version that is
compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
.. versionadded:: 12.2.0
:keyword str secondary_hostname:
The hostname of the secondary endpoint.
:keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
Defaults to 4*1024*1024, or 4MB.
:keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be
uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
:keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
:keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
:keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
:keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
:keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
or 4MB.
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_authentication_async.py
:start-after: [START create_blob_client]
:end-before: [END create_blob_client]
:language: python
:dedent: 8
:caption: Creating the BlobClient from a URL to a public blob (no auth needed).
.. literalinclude:: ../samples/blob_samples_authentication_async.py
:start-after: [START create_blob_client_sas_url]
:end-before: [END create_blob_client_sas_url]
:language: python
:dedent: 8
:caption: Creating the BlobClient from a SAS URL to a blob.
"""
def __init__(
self, account_url: str,
container_name: str,
blob_name: str,
snapshot: Optional[Union[str, Dict[str, Any]]] = None,
credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long
**kwargs: Any
) -> None:
kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
super(BlobClient, self).__init__(
account_url,
container_name=container_name,
blob_name=blob_name,
snapshot=snapshot,
credential=credential,
**kwargs)
self._client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline)
self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access
self._configure_encryption(kwargs)
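# Illustrative sketch (not part of the SDK source): constructing this async client directly.
# The account URL, container/blob names and SAS token are placeholders, and the import path
# assumes the usual azure-storage-blob package layout.
#
#   from azure.storage.blob.aio import BlobClient
#
#   blob_client = BlobClient(
#       account_url="https://myaccount.blob.core.windows.net",
#       container_name="mycontainer",
#       blob_name="myblob.txt",
#       credential="<sas-token>")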
@distributed_trace_async
async def get_account_information(self, **kwargs): # type: ignore
# type: (**Any) -> Dict[str, str]
"""Gets information related to the storage account in which the blob resides.
The information can also be retrieved if the user has a SAS to a container or blob.
The keys in the returned dictionary include 'sku_name' and 'account_kind'.
:returns: A dict of account information (SKU and account type).
:rtype: dict(str, str)
"""
try:
return await self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
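# Usage sketch (assumes a `blob_client` instance as constructed above); the returned dict
# carries the keys named in the docstring.
#
#   info = await blob_client.get_account_information()
#   print(info["sku_name"], info["account_kind"])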
@distributed_trace_async
async def upload_blob_from_url(self, source_url, **kwargs):
# type: (str, Any) -> Dict[str, Any]
"""
Creates a new Block Blob where the content of the blob is read from a given URL.
The content of an existing blob is overwritten with the new blob.
:param str source_url:
A URL of up to 2 KB in length that specifies a file or blob.
The value should be URL-encoded as it would appear in a request URI.
If the source is in another account, the source must either be public
or must be authenticated via a shared access signature. If the source
is public, no authentication is required.
Examples:
https://myaccount.blob.core.windows.net/mycontainer/myblob
https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
:keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
If True, upload_blob will overwrite the existing data. If set to False, the
operation will fail with ResourceExistsError.
:keyword bool include_source_blob_properties:
Indicates if properties from the source blob should be copied. Defaults to True.
:keyword tags:
Name-value pairs associated with the blob as tag. Tags are case-sensitive.
The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
and tag values must be between 0 and 256 characters.
Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
:paramtype tags: dict(str, str)
:keyword bytearray source_content_md5:
Specify the md5 that is used to verify the integrity of the source bytes.
:keyword ~datetime.datetime source_if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the source resource has been modified since the specified time.
:keyword ~datetime.datetime source_if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the source resource has not been modified since the specified date/time.
:keyword str source_etag:
The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions source_match_condition:
The source match condition to use upon the etag.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The destination match condition to use upon the etag.
:keyword destination_lease:
The lease ID specified for this header must match the lease ID of the
destination blob. If the request does not include the lease ID or it is not
valid, the operation fails with status code 412 (Precondition Failed).
:paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str
:keyword int timeout:
The timeout parameter is expressed in seconds.
:keyword ~azure.storage.blob.ContentSettings content_settings:
ContentSettings object used to set blob properties. Used to set content type, encoding,
language, disposition, md5, and cache control.
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword str encryption_scope:
A predefined encryption scope used to encrypt the data on the service. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
:keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
A standard blob tier value to set the blob to. For this version of the library,
this is only applicable to block blobs on standard storage accounts.
:keyword str source_authorization:
Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
the prefix of the source_authorization string.
"""
options = self._upload_blob_from_url_options(
source_url=self._encode_source_url(source_url),
**kwargs)
try:
return await self._client.block_blob.put_blob_from_url(**options)
except HttpResponseError as error:
process_storage_error(error)
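# Usage sketch (hypothetical source URL): creating a block blob from an existing blob's
# contents and overwriting any data already at the destination.
#
#   await blob_client.upload_blob_from_url(
#       "https://otheraccount.blob.core.windows.net/src-container/src-blob?<sastoken>",
#       overwrite=True)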
@distributed_trace_async
async def upload_blob(
self, data, # type: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
blob_type=BlobType.BlockBlob, # type: Union[str, BlobType]
length=None, # type: Optional[int]
metadata=None, # type: Optional[Dict[str, str]]
**kwargs
):
# type: (...) -> Any
"""Creates a new blob from a data source with automatic chunking.
:param data: The blob data to upload.
:param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
:param int length:
Number of bytes to read from the stream. This is optional, but
should be supplied for optimal performance.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict(str, str)
:keyword tags:
Name-value pairs associated with the blob as tag. Tags are case-sensitive.
The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
and tag values must be between 0 and 256 characters.
Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
.. versionadded:: 12.4.0
:paramtype tags: dict(str, str)
:keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
If True, upload_blob will overwrite the existing data. If set to False, the
operation will fail with ResourceExistsError. The exception to the above is with Append
blob types: if set to False and the data already exists, an error will not be raised
and the data will be appended to the existing blob. If overwrite=True is set, then the existing
append blob will be deleted, and a new one created. Defaults to False.
:keyword ~azure.storage.blob.ContentSettings content_settings:
ContentSettings object used to set blob properties. Used to set content type, encoding,
language, disposition, md5, and cache control.
:keyword bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https, as https (the default), will
already validate. Note that this MD5 hash is not stored with the
blob. Also note that if enabled, the memory-efficient upload algorithm
will not be used because computing the MD5 hash requires buffering
entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
:keyword lease:
If specified, upload_blob only succeeds if the
blob's lease is active and matches this ID.
Required if the blob has an active lease.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
A page blob tier value to set the blob to. The tier correlates to the size of the
blob and number of allowed IOPS. This is only applicable to page blobs on
premium storage accounts.
:keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
Specifies the immutability policy of a blob, blob snapshot or blob version.
Currently this parameter of the upload_blob() API is supported for BlockBlob only.
.. versionadded:: 12.10.0
This was introduced in API version '2020-10-02'.
:keyword bool legal_hold:
Specified if a legal hold should be set on the blob.
Currently this parameter of the upload_blob() API is supported for BlockBlob only.
.. versionadded:: 12.10.0
This was introduced in API version '2020-10-02'.
:keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
A standard blob tier value to set the blob to. For this version of the library,
this is only applicable to block blobs on standard storage accounts.
:keyword int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:keyword int max_concurrency:
Maximum number of parallel connections to use when the blob size exceeds
64MB.
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword str encryption_scope:
A predefined encryption scope used to encrypt the data on the service. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
.. versionadded:: 12.2.0
:keyword str encoding:
Defaults to UTF-8.
:keyword progress_hook:
An async callback to track the progress of a long running upload. The signature is
function(current: int, total: Optional[int]) where current is the number of bytes transferred
so far, and total is the size of the blob or None if the size is unknown.
:paramtype progress_hook: Callable[[int, Optional[int]], Awaitable[None]]
:keyword int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:returns: Blob-updated property dict (Etag and last modified)
:rtype: dict[str, Any]
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_hello_world_async.py
:start-after: [START upload_a_blob]
:end-before: [END upload_a_blob]
:language: python
:dedent: 16
:caption: Upload a blob to the container.
"""
options = self._upload_blob_options(
data,
blob_type=blob_type,
length=length,
metadata=metadata,
**kwargs)
if blob_type == BlobType.BlockBlob:
return await upload_block_blob(**options)
if blob_type == BlobType.PageBlob:
return await upload_page_blob(**options)
return await upload_append_blob(**options)
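# Usage sketch: uploading in-memory bytes as a block blob, replacing any existing data.
# `blob_client` is assumed to be an instance of this class; BlobType is imported above.
#
#   data = b"hello, world"
#   await blob_client.upload_blob(data, blob_type=BlobType.BlockBlob, overwrite=True)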
@overload
async def download_blob(
self, offset: Optional[int] = None,
length: Optional[int] = None,
*,
encoding: str,
**kwargs) -> StorageStreamDownloader[str]:
...
@overload
async def download_blob(
self, offset: Optional[int] = None,
length: Optional[int] = None,
*,
encoding: None = None,
**kwargs) -> StorageStreamDownloader[bytes]:
...
@distributed_trace_async
async def download_blob(
self, offset: Optional[int] = None,
length: Optional[int] = None,
*,
encoding: Optional[str] = None,
**kwargs) -> StorageStreamDownloader:
"""Downloads a blob to the StorageStreamDownloader. The readall() method must
be used to read all the content or readinto() must be used to download the blob into
a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks.
:param int offset:
Start of byte range to use for downloading a section of the blob.
Must be set if length is provided.
:param int length:
Number of bytes to read from the stream. This is optional, but
should be supplied for optimal performance.
:keyword str version_id:
The version id parameter is an opaque DateTime
value that, when present, specifies the version of the blob to download.
.. versionadded:: 12.4.0
This keyword argument was introduced in API version '2019-12-12'.
:keyword bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https, as https (the default), will
already validate. Note that this MD5 hash is not stored with the
blob. Also note that if enabled, the memory-efficient upload algorithm
will not be used because computing the MD5 hash requires buffering
entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
:keyword lease:
Required if the blob has an active lease. If specified, download_blob only
succeeds if the blob's lease is active and matches this ID. Value can be a
BlobLeaseClient object or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword int max_concurrency:
The number of parallel connections with which to download.
:keyword str encoding:
Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
:keyword progress_hook:
An async callback to track the progress of a long running download. The signature is
function(current: int, total: int) where current is the number of bytes transferred
so far, and total is the total size of the download.
:paramtype progress_hook: Callable[[int, int], Awaitable[None]]
:keyword int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:returns: A streaming object (StorageStreamDownloader)
:rtype: ~azure.storage.blob.aio.StorageStreamDownloader
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_hello_world_async.py
:start-after: [START download_a_blob]
:end-before: [END download_a_blob]
:language: python
:dedent: 16
:caption: Download a blob.
"""
options = self._download_blob_options(
offset=offset,
length=length,
encoding=encoding,
**kwargs)
downloader = StorageStreamDownloader(**options)
await downloader._setup() # pylint: disable=protected-access
return downloader
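# Usage sketch: downloading the whole blob as text. Passing `encoding` selects the
# str-typed overload; omit it to receive raw bytes from readall().
#
#   stream = await blob_client.download_blob(encoding="utf-8")
#   text = await stream.readall()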
@distributed_trace_async
async def delete_blob(self, delete_snapshots=None, **kwargs):
# type: (Optional[str], Any) -> None
"""Marks the specified blob for deletion.
The blob is later deleted during garbage collection.
Note that in order to delete a blob, you must delete all of its
snapshots. You can delete both at the same time with the delete_blob()
operation.
If a delete retention policy is enabled for the service, then this operation soft deletes the blob
and retains the blob for a specified number of days.
After the specified number of days, the blob's data is removed from the service during garbage collection.
Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']`
option. Soft-deleted blob can be restored using :func:`undelete` operation.
:param str delete_snapshots:
Required if the blob has associated snapshots. Values include:
- "only": Deletes only the blobs snapshots.
- "include": Deletes the blob along with all snapshots.
:keyword str version_id:
The version id parameter is an opaque DateTime
value that, when present, specifies the version of the blob to delete.
.. versionadded:: 12.4.0
This keyword argument was introduced in API version '2019-12-12'.
:keyword lease:
Required if the blob has an active lease. If specified, delete_blob only
succeeds if the blob's lease is active and matches this ID. Value can be a
BlobLeaseClient object or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_hello_world_async.py
:start-after: [START delete_blob]
:end-before: [END delete_blob]
:language: python
:dedent: 16
:caption: Delete a blob.
"""
options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs)
try:
await self._client.blob.delete(**options)
except HttpResponseError as error:
process_storage_error(error)
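# Usage sketch: marking a blob for deletion together with all of its snapshots.
#
#   await blob_client.delete_blob(delete_snapshots="include")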
@distributed_trace_async
async def undelete_blob(self, **kwargs):
# type: (Any) -> None
"""Restores soft-deleted blobs or snapshots.
Operation will only be successful if used within the specified number of days
set in the delete retention policy.
If blob versioning is enabled, the base blob cannot be restored using this
method. Instead use :func:`start_copy_from_url` with the URL of the blob version
you wish to promote to the current version.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_common_async.py
:start-after: [START undelete_blob]
:end-before: [END undelete_blob]
:language: python
:dedent: 12
:caption: Undeleting a blob.
"""
try:
await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs)
except HttpResponseError as error:
process_storage_error(error)
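# Usage sketch: restoring a soft-deleted blob while the delete retention window is still open.
#
#   await blob_client.undelete_blob()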
@distributed_trace_async
async def exists(self, **kwargs):
# type: (**Any) -> bool
"""
Returns True if a blob exists with the defined parameters, and returns
False otherwise.
:keyword str version_id:
The version id parameter is an opaque DateTime
value that, when present, specifies the version of the blob to check for existence.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: boolean
"""
try:
await self._client.blob.get_properties(
snapshot=self.snapshot,
**kwargs)
return True
# A blob encrypted with a customer-provided key rejects property reads without that key
# (HTTP 409), which azure-core surfaces as ResourceExistsError; the blob does exist.
except ResourceExistsError:
return True
except HttpResponseError as error:
try:
process_storage_error(error)
except ResourceNotFoundError:
return False
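# Usage sketch: checking for existence before reading.
#
#   if await blob_client.exists():
#       stream = await blob_client.download_blob()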
@distributed_trace_async
async def get_blob_properties(self, **kwargs):
# type: (Any) -> BlobProperties
"""Returns all user-defined metadata, standard HTTP properties, and
system properties for the blob. It does not return the content of the blob.
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword str version_id:
The version id parameter is an opaque DateTime
value that, when present, specifies the version of the blob whose properties to retrieve.
.. versionadded:: 12.4.0
This keyword argument was introduced in API version '2019-12-12'.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: BlobProperties
:rtype: ~azure.storage.blob.BlobProperties
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_common_async.py
:start-after: [START get_blob_properties]
:end-before: [END get_blob_properties]
:language: python
:dedent: 12
:caption: Getting the properties for a blob.
"""
access_conditions = get_access_conditions(kwargs.pop('lease', None))
mod_conditions = get_modify_conditions(kwargs)
cpk = kwargs.pop('cpk', None)
cpk_info = None
if cpk:
if self.scheme.lower() != 'https':
raise ValueError("Customer provided encryption key must be used over HTTPS.")
cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
encryption_algorithm=cpk.algorithm)
try:
cls_method = kwargs.pop('cls', None)
if cls_method:
kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method)
blob_props = await self._client.blob.get_properties(
timeout=kwargs.pop('timeout', None),
version_id=kwargs.pop('version_id', None),
snapshot=self.snapshot,
lease_access_conditions=access_conditions,
modified_access_conditions=mod_conditions,
cls=kwargs.pop('cls', None) or deserialize_blob_properties,
cpk_info=cpk_info,
**kwargs)
except HttpResponseError as error:
process_storage_error(error)
blob_props.name = self.blob_name
if isinstance(blob_props, BlobProperties):
blob_props.container = self.container_name
blob_props.snapshot = self.snapshot
return blob_props # type: ignore
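# Usage sketch: reading standard properties of the blob.
#
#   props = await blob_client.get_blob_properties()
#   print(props.name, props.size, props.last_modified)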
@distributed_trace_async
async def set_http_headers(self, content_settings=None, **kwargs):
# type: (Optional[ContentSettings], Any) -> None
"""Sets system properties on the blob.
If one property is set for the content_settings, all properties will be overridden.
:param ~azure.storage.blob.ContentSettings content_settings:
ContentSettings object used to set blob properties. Used to set content type, encoding,
language, disposition, md5, and cache control.
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Blob-updated property dict (Etag and last modified)
:rtype: Dict[str, Any]
"""
options = self._set_http_headers_options(content_settings=content_settings, **kwargs)
try:
return await self._client.blob.set_http_headers(**options) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
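# Usage sketch: because setting any field of ContentSettings replaces all HTTP properties,
# unchanged values should be re-supplied. ContentSettings is assumed to be imported from
# azure.storage.blob.
#
#   await blob_client.set_http_headers(
#       content_settings=ContentSettings(content_type="text/plain", cache_control="no-cache"))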
@distributed_trace_async
async def set_blob_metadata(self, metadata=None, **kwargs):
# type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]]
"""Sets user-defined metadata for the blob as one or more name-value pairs.
:param metadata:
Dict containing name and value pairs. Each call to this operation
replaces all existing metadata attached to the blob. To remove all
metadata from the blob, call this operation with no metadata headers.
:type metadata: dict(str, str)
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword str encryption_scope:
A predefined encryption scope used to encrypt the data on the service. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
.. versionadded:: 12.2.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Blob-updated property dict (Etag and last modified)
"""
options = self._set_blob_metadata_options(metadata=metadata, **kwargs)
try:
return await self._client.blob.set_metadata(**options) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
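# Usage sketch: each call replaces all existing metadata on the blob.
#
#   await blob_client.set_blob_metadata({"category": "reports", "owner": "team-a"})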
@distributed_trace_async
async def set_immutability_policy(self, immutability_policy, **kwargs):
# type: (ImmutabilityPolicy, **Any) -> Dict[str, str]
"""The Set Immutability Policy operation sets the immutability policy on the blob.
.. versionadded:: 12.10.0
This operation was introduced in API version '2020-10-02'.
:param ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
Specifies the immutability policy of a blob, blob snapshot or blob version.
.. versionadded:: 12.10.0
This was introduced in API version '2020-10-02'.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Blob-updated property dict (Etag and last modified).
:rtype: Dict[str, str]
"""
kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time
kwargs['immutability_policy_mode'] = immutability_policy.policy_mode
return await self._client.blob.set_immutability_policy(cls=return_response_headers, **kwargs)
@distributed_trace_async()
async def delete_immutability_policy(self, **kwargs):
# type: (**Any) -> None
"""The Delete Immutability Policy operation deletes the immutability policy on the blob.
.. versionadded:: 12.10.0
This operation was introduced in API version '2020-10-02'.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
"""
await self._client.blob.delete_immutability_policy(**kwargs)
@distributed_trace_async
async def set_legal_hold(self, legal_hold, **kwargs):
# type: (bool, **Any) -> Dict[str, Union[str, datetime, bool]]
"""The Set Legal Hold operation sets a legal hold on the blob.
.. versionadded:: 12.10.0
This operation was introduced in API version '2020-10-02'.
:param bool legal_hold:
Specified if a legal hold should be set on the blob.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Blob-updated property dict (Etag, last modified, and legal hold status).
:rtype: Dict[str, Union[str, datetime, bool]]
"""
return await self._client.blob.set_legal_hold(legal_hold, cls=return_response_headers, **kwargs)
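# Usage sketch for the immutability-related operations above (requires an account with
# version-level immutability support; ImmutabilityPolicy is assumed to come from
# azure.storage.blob, and `some_future_datetime` is a placeholder).
#
#   policy = ImmutabilityPolicy(expiry_time=some_future_datetime, policy_mode="Unlocked")
#   await blob_client.set_immutability_policy(policy)
#   await blob_client.set_legal_hold(True)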
@distributed_trace_async
async def create_page_blob( # type: ignore
self, size, # type: int
content_settings=None, # type: Optional[ContentSettings]
metadata=None, # type: Optional[Dict[str, str]]
premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]]
**kwargs
):
# type: (...) -> Dict[str, Union[str, datetime]]
"""Creates a new Page Blob of the specified size.
:param int size:
This specifies the maximum size for the page blob, up to 1 TB.
The page blob size must be aligned to a 512-byte boundary.
:param ~azure.storage.blob.ContentSettings content_settings:
ContentSettings object used to set blob properties. Used to set content type, encoding,
language, disposition, md5, and cache control.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict(str, str)
:param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
A page blob tier value to set the blob to. The tier correlates to the size of the
blob and number of allowed IOPS. This is only applicable to page blobs on
premium storage accounts.
:keyword tags:
Name-value pairs associated with the blob as tag. Tags are case-sensitive.
The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
and tag values must be between 0 and 256 characters.
Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
.. versionadded:: 12.4.0
:paramtype tags: dict(str, str)
:keyword int sequence_number:
Only for Page blobs. The sequence number is a user-controlled value that you can use to
track requests. The value of the sequence number must be between 0
and 2^63 - 1. The default value is 0.
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
Specifies the immutability policy of a blob, blob snapshot or blob version.
.. versionadded:: 12.10.0
This was introduced in API version '2020-10-02'.
:keyword bool legal_hold:
Specified if a legal hold should be set on the blob.
.. versionadded:: 12.10.0
This was introduced in API version '2020-10-02'.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword str encryption_scope:
A predefined encryption scope used to encrypt the data on the service. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
.. versionadded:: 12.2.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Blob-updated property dict (Etag and last modified).
:rtype: dict[str, Any]
"""
options = self._create_page_blob_options(
size,
content_settings=content_settings,
metadata=metadata,
premium_page_blob_tier=premium_page_blob_tier,
**kwargs)
try:
return await self._client.page_blob.create(**options) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
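# Usage sketch: creating an empty 1 MiB page blob (the size must be a multiple of 512 bytes).
#
#   await blob_client.create_page_blob(size=1024 * 1024)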
@distributed_trace_async
async def create_append_blob(self, content_settings=None, metadata=None, **kwargs):
# type: (Optional[ContentSettings], Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]]
"""Creates a new Append Blob.
:param ~azure.storage.blob.ContentSettings content_settings:
ContentSettings object used to set blob properties. Used to set content type, encoding,
language, disposition, md5, and cache control.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict(str, str)
:keyword tags:
Name-value pairs associated with the blob as tag. Tags are case-sensitive.
The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
and tag values must be between 0 and 256 characters.
Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
.. versionadded:: 12.4.0
:paramtype tags: dict(str, str)
:keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
Specifies the immutability policy of a blob, blob snapshot or blob version.
.. versionadded:: 12.10.0
This was introduced in API version '2020-10-02'.
:keyword bool legal_hold:
Specified if a legal hold should be set on the blob.
.. versionadded:: 12.10.0
This was introduced in API version '2020-10-02'.
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword str encryption_scope:
A predefined encryption scope used to encrypt the data on the service. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
.. versionadded:: 12.2.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Blob-updated property dict (Etag and last modified).
:rtype: dict[str, Any]
"""
options = self._create_append_blob_options(
content_settings=content_settings,
metadata=metadata,
**kwargs)
try:
return await self._client.append_blob.create(**options) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
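# Usage sketch: creating an empty append blob that later append operations can target.
#
#   await blob_client.create_append_blob()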
@distributed_trace_async
async def create_snapshot(self, metadata=None, **kwargs):
# type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]]
"""Creates a snapshot of the blob.
A snapshot is a read-only version of a blob that's taken at a point in time.
It can be read, copied, or deleted, but not modified. Snapshots provide a way
to back up a blob as it appears at a moment in time.
A snapshot of a blob has the same name as the base blob from which the snapshot
is taken, with a DateTime value appended to indicate the time at which the
snapshot was taken.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict(str, str)
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword str encryption_scope:
A predefined encryption scope used to encrypt the data on the service. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
.. versionadded:: 12.2.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Blob-updated property dict (Snapshot ID, Etag, and last modified).
:rtype: dict[str, Any]
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_common_async.py
:start-after: [START create_blob_snapshot]
:end-before: [END create_blob_snapshot]
:language: python
:dedent: 12
:caption: Create a snapshot of the blob.
"""
options = self._create_snapshot_options(metadata=metadata, **kwargs)
try:
return await self._client.blob.create_snapshot(**options) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
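# Usage sketch: taking a snapshot; per the docstring the returned dict carries the new
# snapshot id along with Etag and last modified.
#
#   snapshot_info = await blob_client.create_snapshot()
#   snapshot_id = snapshot_info["snapshot"]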
@distributed_trace_async
async def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs):
# type: (str, Optional[Dict[str, str]], bool, Any) -> Dict[str, Union[str, datetime]]
"""Copies a blob from the given URL.
This operation returns a dictionary containing `copy_status` and `copy_id`,
which can be used to check the status of or abort the copy operation.
`copy_status` will be 'success' if the copy completed synchronously or
'pending' if the copy has been started asynchronously. For asynchronous copies,
the status can be checked by polling the :func:`get_blob_properties` method and
checking the copy status. Set `requires_sync` to True to force the copy to be synchronous.
The Blob service copies blobs on a best-effort basis.
The source blob for a copy operation may be a block blob, an append blob,
or a page blob. If the destination blob already exists, it must be of the
same blob type as the source blob. Any existing destination blob will be
overwritten. The destination blob cannot be modified while a copy operation
is in progress.
When copying from a page blob, the Blob service creates a destination page
blob of the source blob's length, initially containing all zeroes. Then
the source page ranges are enumerated, and non-empty ranges are copied.
For a block blob or an append blob, the Blob service creates a committed
blob of zero length before returning from this operation. When copying
from a block blob, all committed blocks and their block IDs are copied.
Uncommitted blocks are not copied. At the end of the copy operation, the
destination blob will have the same committed block count as the source.
When copying from an append blob, all committed blocks are copied. At the
end of the copy operation, the destination blob will have the same committed
block count as the source.
:param str source_url:
A URL of up to 2 KB in length that specifies a file or blob.
The value should be URL-encoded as it would appear in a request URI.
If the source is in another account, the source must either be public
or must be authenticated via a shared access signature. If the source
is public, no authentication is required.
Examples:
https://myaccount.blob.core.windows.net/mycontainer/myblob
https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
:param metadata:
Name-value pairs associated with the blob as metadata. If no name-value
pairs are specified, the operation will copy the metadata from the
source blob or file to the destination blob. If one or more name-value
pairs are specified, the destination blob is created with the specified
metadata, and metadata is not copied from the source blob or file.
:type metadata: dict(str, str)
:param bool incremental_copy:
Copies the snapshot of the source page blob to a destination page blob.
The snapshot is copied such that only the differential changes between
the previously copied snapshot and the current snapshot are transferred to the destination.
The copied snapshots are complete copies of the original snapshot and
can be read or copied from as usual. Defaults to False.
:keyword tags:
Name-value pairs associated with the blob as tag. Tags are case-sensitive.
The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
and tag values must be between 0 and 256 characters.
Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_).
The (case-sensitive) literal "COPY" can instead be passed to copy tags from the source blob.
This option is only available when `incremental_copy=False` and `requires_sync=True`.
.. versionadded:: 12.4.0
:paramtype tags: dict(str, str) or Literal["COPY"]
:keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
Specifies the immutability policy of a blob, blob snapshot or blob version.
.. versionadded:: 12.10.0
This was introduced in API version '2020-10-02'.
:keyword bool legal_hold:
Specified if a legal hold should be set on the blob.
.. versionadded:: 12.10.0
This was introduced in API version '2020-10-02'.
:keyword ~datetime.datetime source_if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this conditional header to copy the blob only if the source
blob has been modified since the specified date/time.
:keyword ~datetime.datetime source_if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this conditional header to copy the blob only if the source blob
has not been modified since the specified date/time.
:keyword str source_etag:
The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions source_match_condition:
The source match condition to use upon the etag.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this conditional header to copy the blob only
if the destination blob has been modified since the specified date/time.
If the destination blob has not been modified, the Blob service returns
status code 412 (Precondition Failed).
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this conditional header to copy the blob only
if the destination blob has not been modified since the specified
date/time. If the destination blob has been modified, the Blob service
returns status code 412 (Precondition Failed).
:keyword str etag:
The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The destination match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword destination_lease:
The lease ID specified for this header must match the lease ID of the
destination blob. If the request does not include the lease ID or it is not
valid, the operation fails with status code 412 (Precondition Failed).
:paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword source_lease:
Specify this to perform the Copy Blob operation only if
the lease ID given matches the active lease ID of the source blob.
:paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword int timeout:
The timeout parameter is expressed in seconds.
:keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
A page blob tier value to set the blob to. The tier correlates to the size of the
blob and number of allowed IOPS. This is only applicable to page blobs on
premium storage accounts.
:keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
A standard blob tier value to set the blob to. For this version of the library,
this is only applicable to block blobs on standard storage accounts.
:keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
Indicates the priority with which to rehydrate an archived blob
:keyword bool seal_destination_blob:
            Seal the destination append blob. This operation applies only to append blobs.
.. versionadded:: 12.4.0
:keyword bool requires_sync:
Enforces that the service will not return a response until the copy is complete.
:keyword str source_authorization:
Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
the prefix of the source_authorization string. This option is only available when `incremental_copy` is
set to False and `requires_sync` is set to True.
.. versionadded:: 12.9.0
:keyword str encryption_scope:
A predefined encryption scope used to encrypt the data on the sync copied blob. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
.. versionadded:: 12.10.0
:returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status).
:rtype: dict[str, Union[str, ~datetime.datetime]]
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_common_async.py
:start-after: [START copy_blob_from_url]
:end-before: [END copy_blob_from_url]
:language: python
:dedent: 16
:caption: Copy a blob from a URL.
"""
options = self._start_copy_from_url_options(
source_url=self._encode_source_url(source_url),
metadata=metadata,
incremental_copy=incremental_copy,
**kwargs)
try:
if incremental_copy:
return await self._client.page_blob.copy_incremental(**options)
return await self._client.blob.start_copy_from_url(**options)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def abort_copy(self, copy_id, **kwargs):
# type: (Union[str, Dict[str, Any], BlobProperties], Any) -> None
"""Abort an ongoing copy operation.
This will leave a destination blob with zero length and full metadata.
This will raise an error if the copy operation has already ended.
:param copy_id:
The copy operation to abort. This can be either an ID, or an
instance of BlobProperties.
:type copy_id: str or ~azure.storage.blob.BlobProperties
:rtype: None
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_common_async.py
:start-after: [START abort_copy_blob_from_url]
:end-before: [END abort_copy_blob_from_url]
:language: python
:dedent: 16
:caption: Abort copying a blob from URL.
"""
options = self._abort_copy_options(copy_id, **kwargs)
try:
await self._client.blob.abort_copy_from_url(**options)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs):
# type: (int, Optional[str], Any) -> BlobLeaseClient
"""Requests a new lease.
If the blob does not have an active lease, the Blob
Service creates a lease on the blob and returns a new lease.
:param int lease_duration:
Specifies the duration of the lease, in seconds, or negative one
(-1) for a lease that never expires. A non-infinite lease can be
between 15 and 60 seconds. A lease duration cannot be changed
using renew or change. Default is -1 (infinite lease).
:param str lease_id:
Proposed lease ID, in a GUID string format. The Blob Service
returns 400 (Invalid request) if the proposed lease ID is not
in the correct format.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: A BlobLeaseClient object.
:rtype: ~azure.storage.blob.aio.BlobLeaseClient
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_common_async.py
:start-after: [START acquire_lease_on_blob]
:end-before: [END acquire_lease_on_blob]
:language: python
:dedent: 12
:caption: Acquiring a lease on a blob.
"""
lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore
await lease.acquire(lease_duration=lease_duration, **kwargs)
return lease
@distributed_trace_async
async def set_standard_blob_tier(self, standard_blob_tier, **kwargs):
# type: (Union[str, StandardBlobTier], Any) -> None
"""This operation sets the tier on a block blob.
A block blob's tier determines Hot/Cool/Archive storage type.
This operation does not update the blob's ETag.
:param standard_blob_tier:
Indicates the tier to be set on the blob. Options include 'Hot', 'Cool',
'Archive'. The hot tier is optimized for storing data that is accessed
frequently. The cool storage tier is optimized for storing data that
is infrequently accessed and stored for at least a month. The archive
tier is optimized for storing data that is rarely accessed and stored
for at least six months with flexible latency requirements.
:type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
:keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
Indicates the priority with which to rehydrate an archived blob
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:rtype: None
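        .. admonition:: Example:
            A hedged sketch (not one of the shipped samples); ``blob`` is assumed to be an
            existing ``azure.storage.blob.aio.BlobClient`` pointing at a block blob.
            .. code-block:: python
                async def example_set_standard_tier(blob):
                    # Move the block blob to the Cool tier; a StandardBlobTier
                    # enum value could be passed instead of the string.
                    await blob.set_standard_blob_tier("Cool")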
"""
access_conditions = get_access_conditions(kwargs.pop('lease', None))
mod_conditions = get_modify_conditions(kwargs)
if standard_blob_tier is None:
raise ValueError("A StandardBlobTier must be specified")
try:
await self._client.blob.set_tier(
tier=standard_blob_tier,
timeout=kwargs.pop('timeout', None),
modified_access_conditions=mod_conditions,
lease_access_conditions=access_conditions,
**kwargs)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def stage_block(
self, block_id, # type: str
data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
length=None, # type: Optional[int]
**kwargs
):
# type: (...) -> None
"""Creates a new block to be committed as part of a blob.
:param str block_id: A string value that identifies the block.
The string should be less than or equal to 64 bytes in size.
For a given blob, the block_id must be the same size for each block.
:param data: The blob data.
:param int length: Size of the block.
:keyword bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https, as https (the default), will
already validate. Note that this MD5 hash is not stored with the
blob. Also note that if enabled, the memory-efficient upload algorithm
will not be used because computing the MD5 hash requires buffering
entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword str encoding:
Defaults to UTF-8.
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword str encryption_scope:
A predefined encryption scope used to encrypt the data on the service. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
.. versionadded:: 12.2.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:rtype: None
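        .. admonition:: Example:
            An illustrative sketch only; ``blob`` is assumed to be an existing
            ``azure.storage.blob.aio.BlobClient`` for a block blob.
            .. code-block:: python
                import uuid
                async def example_stage_block(blob):
                    # Stage a single block; the block id is an arbitrary string that is
                    # unique within the blob (a UUID is a common choice).
                    block_id = str(uuid.uuid4())
                    await blob.stage_block(block_id, b"block data")
                    return block_id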
"""
options = self._stage_block_options(
block_id,
data,
length=length,
**kwargs)
try:
return await self._client.block_blob.stage_block(**options)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def stage_block_from_url(
self, block_id, # type: Union[str, int]
source_url, # type: str
source_offset=None, # type: Optional[int]
source_length=None, # type: Optional[int]
source_content_md5=None, # type: Optional[Union[bytes, bytearray]]
**kwargs
):
# type: (...) -> None
"""Creates a new block to be committed as part of a blob where
the contents are read from a URL.
:param str block_id: A string value that identifies the block.
The string should be less than or equal to 64 bytes in size.
For a given blob, the block_id must be the same size for each block.
        :param str source_url:
            The URL of the source data. The source blob must either be public or be
            authorized, for example with a shared access signature or the
            source_authorization keyword.
:param int source_offset:
Start of byte range to use for the block.
Must be set if source length is provided.
:param int source_length: The size of the block in bytes.
:param bytearray source_content_md5:
Specify the md5 calculated for the range of
bytes that must be read from the copy source.
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword str encryption_scope:
A predefined encryption scope used to encrypt the data on the service. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
.. versionadded:: 12.2.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:keyword str source_authorization:
Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
the prefix of the source_authorization string.
:rtype: None
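        .. admonition:: Example:
            A hedged sketch; ``blob`` is assumed to be an ``azure.storage.blob.aio.BlobClient``
            and ``source_url`` a blob URL that is public or carries a SAS token.
            .. code-block:: python
                import uuid
                async def example_stage_block_from_url(blob, source_url):
                    # Stage the first 512 bytes of the source blob as a new block.
                    block_id = str(uuid.uuid4())
                    await blob.stage_block_from_url(
                        block_id, source_url, source_offset=0, source_length=512)
                    return block_id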
"""
options = self._stage_block_from_url_options(
block_id,
source_url=self._encode_source_url(source_url),
source_offset=source_offset,
source_length=source_length,
source_content_md5=source_content_md5,
**kwargs)
try:
return await self._client.block_blob.stage_block_from_url(**options)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def get_block_list(self, block_list_type="committed", **kwargs):
# type: (Optional[str], Any) -> Tuple[List[BlobBlock], List[BlobBlock]]
"""The Get Block List operation retrieves the list of blocks that have
been uploaded as part of a block blob.
:param str block_list_type:
Specifies whether to return the list of committed
blocks, the list of uncommitted blocks, or both lists together.
Possible values include: 'committed', 'uncommitted', 'all'
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: A tuple of two lists - committed and uncommitted blocks
:rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock))
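        .. admonition:: Example:
            A minimal sketch; ``blob`` is assumed to be an ``azure.storage.blob.aio.BlobClient``
            for a block blob.
            .. code-block:: python
                async def example_get_block_list(blob):
                    # Retrieve both committed and uncommitted blocks.
                    committed, uncommitted = await blob.get_block_list("all")
                    return [block.id for block in committed], [block.id for block in uncommitted]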
"""
access_conditions = get_access_conditions(kwargs.pop('lease', None))
mod_conditions = get_modify_conditions(kwargs)
try:
blocks = await self._client.block_blob.get_block_list(
list_type=block_list_type,
snapshot=self.snapshot,
timeout=kwargs.pop('timeout', None),
lease_access_conditions=access_conditions,
modified_access_conditions=mod_conditions,
**kwargs)
except HttpResponseError as error:
process_storage_error(error)
return self._get_block_list_result(blocks)
@distributed_trace_async
async def commit_block_list( # type: ignore
self, block_list, # type: List[BlobBlock]
content_settings=None, # type: Optional[ContentSettings]
metadata=None, # type: Optional[Dict[str, str]]
**kwargs
):
# type: (...) -> Dict[str, Union[str, datetime]]
"""The Commit Block List operation writes a blob by specifying the list of
block IDs that make up the blob.
:param list block_list:
            List of BlobBlock entries identifying the blocks to commit.
:param ~azure.storage.blob.ContentSettings content_settings:
ContentSettings object used to set blob properties. Used to set content type, encoding,
language, disposition, md5, and cache control.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: dict[str, str]
:keyword tags:
            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
and tag values must be between 0 and 256 characters.
Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
.. versionadded:: 12.4.0
:paramtype tags: dict(str, str)
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
Specifies the immutability policy of a blob, blob snapshot or blob version.
.. versionadded:: 12.10.0
This was introduced in API version '2020-10-02'.
:keyword bool legal_hold:
Specified if a legal hold should be set on the blob.
.. versionadded:: 12.10.0
This was introduced in API version '2020-10-02'.
:keyword bool validate_content:
If true, calculates an MD5 hash of the page content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https, as https (the default),
will already validate. Note that this MD5 hash is not stored with the
blob.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
A standard blob tier value to set the blob to. For this version of the library,
this is only applicable to block blobs on standard storage accounts.
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword str encryption_scope:
A predefined encryption scope used to encrypt the data on the service. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
.. versionadded:: 12.2.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Blob-updated property dict (Etag and last modified).
:rtype: dict(str, Any)
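        .. admonition:: Example:
            A hedged end-to-end sketch combining stage_block and this call;
            ``blob`` is assumed to be an ``azure.storage.blob.aio.BlobClient``.
            .. code-block:: python
                import uuid
                from azure.storage.blob import BlobBlock
                async def example_commit_blocks(blob, chunks):
                    # Stage each chunk, then commit the blocks in order.
                    block_ids = []
                    for chunk in chunks:
                        block_id = str(uuid.uuid4())
                        await blob.stage_block(block_id, chunk)
                        block_ids.append(block_id)
                    return await blob.commit_block_list(
                        [BlobBlock(block_id=b) for b in block_ids])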
"""
options = self._commit_block_list_options(
block_list,
content_settings=content_settings,
metadata=metadata,
**kwargs)
try:
return await self._client.block_blob.commit_block_list(**options) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs):
# type: (Union[str, PremiumPageBlobTier], **Any) -> None
"""Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts.
:param premium_page_blob_tier:
A page blob tier value to set the blob to. The tier correlates to the size of the
blob and number of allowed IOPS. This is only applicable to page blobs on
premium storage accounts.
:type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:rtype: None
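        .. admonition:: Example:
            A hedged sketch; ``blob`` is assumed to be an ``azure.storage.blob.aio.BlobClient``
            for a page blob in a premium storage account.
            .. code-block:: python
                from azure.storage.blob import PremiumPageBlobTier
                async def example_set_premium_tier(blob):
                    # P10 is used purely for illustration; pick the tier that
                    # matches the blob size and required IOPS.
                    await blob.set_premium_page_blob_tier(PremiumPageBlobTier.P10)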
"""
access_conditions = get_access_conditions(kwargs.pop('lease', None))
mod_conditions = get_modify_conditions(kwargs)
if premium_page_blob_tier is None:
raise ValueError("A PremiumPageBlobTiermust be specified")
try:
await self._client.blob.set_tier(
tier=premium_page_blob_tier,
timeout=kwargs.pop('timeout', None),
lease_access_conditions=access_conditions,
modified_access_conditions=mod_conditions,
**kwargs)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def set_blob_tags(self, tags=None, **kwargs):
# type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any]
"""The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot.
Each call to this operation replaces all existing tags attached to the blob. To remove all
tags from the blob, call this operation with no tags set.
.. versionadded:: 12.4.0
This operation was introduced in API version '2019-12-12'.
:param tags:
            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
and tag values must be between 0 and 256 characters.
Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
:type tags: dict(str, str)
:keyword str version_id:
            The version id parameter is an opaque DateTime
            value that, when present, specifies the version of the blob to set tags on.
:keyword bool validate_content:
If true, calculates an MD5 hash of the tags content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https, as https (the default),
will already validate. Note that this MD5 hash is not stored with the
blob.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Blob-updated property dict (Etag and last modified)
:rtype: Dict[str, Any]
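        .. admonition:: Example:
            A minimal sketch; ``blob`` is assumed to be an ``azure.storage.blob.aio.BlobClient``.
            .. code-block:: python
                async def example_set_tags(blob):
                    # Replaces any existing tag set on the blob.
                    await blob.set_blob_tags({"project": "demo", "stage": "test"})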
"""
options = self._set_blob_tags_options(tags=tags, **kwargs)
try:
return await self._client.blob.set_tags(**options)
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def get_blob_tags(self, **kwargs):
# type: (**Any) -> Dict[str, str]
"""The Get Tags operation enables users to get tags on a blob or specific blob version, but not snapshot.
.. versionadded:: 12.4.0
This operation was introduced in API version '2019-12-12'.
:keyword str version_id:
The version id parameter is an opaque DateTime
            value that, when present, specifies the version of the blob to get tags from.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Key value pairs of blob tags.
:rtype: Dict[str, str]
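        .. admonition:: Example:
            A minimal sketch; ``blob`` is assumed to be an ``azure.storage.blob.aio.BlobClient``.
            .. code-block:: python
                async def example_get_tags(blob):
                    tags = await blob.get_blob_tags()
                    # `tags` is a plain dict of tag name to tag value.
                    return tags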
"""
options = self._get_blob_tags_options(**kwargs)
try:
_, tags = await self._client.blob.get_tags(**options)
return parse_tags(tags) # pylint: disable=protected-access
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def get_page_ranges( # type: ignore
self, offset=None, # type: Optional[int]
length=None, # type: Optional[int]
previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]]
**kwargs
):
# type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
"""DEPRECATED: Returns the list of valid page ranges for a Page Blob or snapshot
of a page blob.
:param int offset:
Start of byte range to use for getting valid page ranges.
If no length is given, all bytes after the offset will be searched.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the length must be a modulus of
512.
:param int length:
Number of bytes to use for getting valid page ranges.
If length is given, offset must be provided.
This range will return valid page ranges from the offset start up to
the specified length.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the length must be a modulus of
512.
:param str previous_snapshot_diff:
The snapshot diff parameter that contains an opaque DateTime value that
specifies a previous blob snapshot to be compared
against a more recent snapshot or the current blob.
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns:
A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
            The first element is the list of filled page ranges, the second element
            is the list of cleared page ranges.
        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
"""
warnings.warn(
"get_page_ranges is deprecated, use list_page_ranges instead",
DeprecationWarning
)
options = self._get_page_ranges_options(
offset=offset,
length=length,
previous_snapshot_diff=previous_snapshot_diff,
**kwargs)
try:
if previous_snapshot_diff:
ranges = await self._client.page_blob.get_page_ranges_diff(**options)
else:
ranges = await self._client.page_blob.get_page_ranges(**options)
except HttpResponseError as error:
process_storage_error(error)
return get_page_ranges_result(ranges)
@distributed_trace
def list_page_ranges(
self,
*,
offset: Optional[int] = None,
length: Optional[int] = None,
previous_snapshot: Optional[Union[str, Dict[str, Any]]] = None,
**kwargs: Any
) -> AsyncItemPaged[PageRange]:
"""Returns the list of valid page ranges for a Page Blob or snapshot
of a page blob. If `previous_snapshot` is specified, the result will be
a diff of changes between the target blob and the previous snapshot.
:keyword int offset:
Start of byte range to use for getting valid page ranges.
If no length is given, all bytes after the offset will be searched.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the length must be a modulus of
512.
:keyword int length:
Number of bytes to use for getting valid page ranges.
If length is given, offset must be provided.
This range will return valid page ranges from the offset start up to
the specified length.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the length must be a modulus of
512.
:keyword previous_snapshot:
A snapshot value that specifies that the response will contain only pages that were changed
between target blob and previous snapshot. Changed pages include both updated and cleared
pages. The target blob may be a snapshot, as long as the snapshot specified by `previous_snapshot`
is the older of the two.
:paramtype previous_snapshot: str or Dict[str, Any]
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword int results_per_page:
The maximum number of page ranges to retrieve per API call.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: An iterable (auto-paging) of PageRange.
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.PageRange]
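        .. admonition:: Example:
            A hedged sketch; ``blob`` is assumed to be an ``azure.storage.blob.aio.BlobClient``
            for a page blob.
            .. code-block:: python
                async def example_list_page_ranges(blob):
                    # Iterate the async pager; each item is a PageRange.
                    ranges = []
                    async for page_range in blob.list_page_ranges():
                        ranges.append((page_range.start, page_range.end))
                    return ranges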
"""
results_per_page = kwargs.pop('results_per_page', None)
options = self._get_page_ranges_options(
offset=offset,
length=length,
previous_snapshot_diff=previous_snapshot,
**kwargs)
if previous_snapshot:
command = partial(
self._client.page_blob.get_page_ranges_diff,
**options)
else:
command = partial(
self._client.page_blob.get_page_ranges,
**options)
return AsyncItemPaged(
command, results_per_page=results_per_page,
page_iterator_class=PageRangePaged)
@distributed_trace_async
async def get_page_range_diff_for_managed_disk(
self, previous_snapshot_url, # type: str
offset=None, # type: Optional[int]
length=None, # type: Optional[int]
**kwargs
):
# type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
"""Returns the list of valid page ranges for a managed disk or snapshot.
.. note::
This operation is only available for managed disk accounts.
.. versionadded:: 12.2.0
This operation was introduced in API version '2019-07-07'.
        :param str previous_snapshot_url:
Specifies the URL of a previous snapshot of the managed disk.
The response will only contain pages that were changed between the target blob and
its previous snapshot.
:param int offset:
Start of byte range to use for getting valid page ranges.
If no length is given, all bytes after the offset will be searched.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the length must be a modulus of
512.
:param int length:
Number of bytes to use for getting valid page ranges.
If length is given, offset must be provided.
This range will return valid page ranges from the offset start up to
the specified length.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the length must be a modulus of
512.
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns:
A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
            The first element is the list of filled page ranges, the second element
            is the list of cleared page ranges.
        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
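        .. admonition:: Example:
            A hedged sketch; ``blob`` is assumed to be an ``azure.storage.blob.aio.BlobClient``
            for a managed disk, and ``previous_snapshot_url`` a SAS-authenticated snapshot URL.
            .. code-block:: python
                async def example_managed_disk_diff(blob, previous_snapshot_url):
                    changed, cleared = await blob.get_page_range_diff_for_managed_disk(
                        previous_snapshot_url)
                    # Each entry is a dict with 'start' and 'end' keys.
                    return changed, cleared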
"""
options = self._get_page_ranges_options(
offset=offset,
length=length,
prev_snapshot_url=previous_snapshot_url,
**kwargs)
try:
ranges = await self._client.page_blob.get_page_ranges_diff(**options)
except HttpResponseError as error:
process_storage_error(error)
return get_page_ranges_result(ranges)
@distributed_trace_async
async def set_sequence_number( # type: ignore
self, sequence_number_action, # type: Union[str, SequenceNumberAction]
sequence_number=None, # type: Optional[str]
**kwargs
):
# type: (...) -> Dict[str, Union[str, datetime]]
"""Sets the blob sequence number.
:param str sequence_number_action:
This property indicates how the service should modify the blob's sequence
number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
:param str sequence_number:
This property sets the blob's sequence number. The sequence number is a
user-controlled property that you can use to track requests and manage
concurrency issues.
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Blob-updated property dict (Etag and last modified).
:rtype: dict(str, Any)
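        .. admonition:: Example:
            A hedged sketch; ``blob`` is assumed to be an ``azure.storage.blob.aio.BlobClient``
            for a page blob.
            .. code-block:: python
                async def example_set_sequence_number(blob):
                    # "update" sets the sequence number to the supplied value; see
                    # SequenceNumberAction for the other actions ("max", "increment").
                    return await blob.set_sequence_number("update", sequence_number="7")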
"""
options = self._set_sequence_number_options(
sequence_number_action, sequence_number=sequence_number, **kwargs)
try:
return await self._client.page_blob.update_sequence_number(**options) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def resize_blob(self, size, **kwargs):
# type: (int, Any) -> Dict[str, Union[str, datetime]]
"""Resizes a page blob to the specified size.
If the specified value is less than the current size of the blob,
then all pages above the specified value are cleared.
:param int size:
Size used to resize blob. Maximum size for a page blob is up to 1 TB.
The page blob size must be aligned to a 512-byte boundary.
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
A page blob tier value to set the blob to. The tier correlates to the size of the
blob and number of allowed IOPS. This is only applicable to page blobs on
premium storage accounts.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Blob-updated property dict (Etag and last modified).
:rtype: dict(str, Any)
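        .. admonition:: Example:
            A minimal sketch; ``blob`` is assumed to be an ``azure.storage.blob.aio.BlobClient``
            for a page blob.
            .. code-block:: python
                async def example_resize(blob):
                    # The new size must be a multiple of 512 bytes.
                    return await blob.resize_blob(4 * 1024)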
"""
options = self._resize_blob_options(size, **kwargs)
try:
return await self._client.page_blob.resize(**options) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def upload_page( # type: ignore
self, page, # type: bytes
offset, # type: int
length, # type: int
**kwargs
):
# type: (...) -> Dict[str, Union[str, datetime]]
"""The Upload Pages operation writes a range of pages to a page blob.
:param bytes page:
Content of the page.
:param int offset:
Start of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the length must be a modulus of
512.
:param int length:
Number of bytes to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the length must be a modulus of
512.
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword bool validate_content:
If true, calculates an MD5 hash of the page content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https, as https (the default),
will already validate. Note that this MD5 hash is not stored with the
blob.
:keyword int if_sequence_number_lte:
If the blob's sequence number is less than or equal to
the specified value, the request proceeds; otherwise it fails.
:keyword int if_sequence_number_lt:
If the blob's sequence number is less than the specified
value, the request proceeds; otherwise it fails.
:keyword int if_sequence_number_eq:
If the blob's sequence number is equal to the specified
value, the request proceeds; otherwise it fails.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword str encryption_scope:
A predefined encryption scope used to encrypt the data on the service. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
.. versionadded:: 12.2.0
:keyword str encoding:
Defaults to UTF-8.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Blob-updated property dict (Etag and last modified).
:rtype: dict(str, Any)
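        .. admonition:: Example:
            A hedged sketch; ``blob`` is assumed to be an ``azure.storage.blob.aio.BlobClient``
            for an existing page blob of at least 512 bytes.
            .. code-block:: python
                async def example_upload_page(blob):
                    # Write one 512-byte page at the start of the blob; offset and
                    # length must both be 512-byte aligned.
                    data = b"0" * 512
                    return await blob.upload_page(data, offset=0, length=512)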
"""
options = self._upload_page_options(
page=page,
offset=offset,
length=length,
**kwargs)
try:
return await self._client.page_blob.upload_pages(**options) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def upload_pages_from_url(self, source_url, # type: str
offset, # type: int
length, # type: int
source_offset, # type: int
**kwargs
):
# type: (...) -> Dict[str, Any]
"""
The Upload Pages operation writes a range of pages to a page blob where
the contents are read from a URL.
:param str source_url:
            The URL of the source data. It can point to any Azure Blob or File that is either public or has a
shared access signature attached.
:param int offset:
Start of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the length must be a modulus of
512.
:param int length:
Number of bytes to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the length must be a modulus of
512.
:param int source_offset:
            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
The service will read the same number of bytes as the destination range (length-offset).
:keyword bytes source_content_md5:
If given, the service will calculate the MD5 hash of the block content and compare against this value.
:keyword ~datetime.datetime source_if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the source resource has been modified since the specified time.
:keyword ~datetime.datetime source_if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the source resource has not been modified since the specified date/time.
:keyword str source_etag:
The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions source_match_condition:
The source match condition to use upon the etag.
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword int if_sequence_number_lte:
If the blob's sequence number is less than or equal to
the specified value, the request proceeds; otherwise it fails.
:keyword int if_sequence_number_lt:
If the blob's sequence number is less than the specified
value, the request proceeds; otherwise it fails.
:keyword int if_sequence_number_eq:
If the blob's sequence number is equal to the specified
value, the request proceeds; otherwise it fails.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The destination match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword str encryption_scope:
A predefined encryption scope used to encrypt the data on the service. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
.. versionadded:: 12.2.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:keyword str source_authorization:
Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
the prefix of the source_authorization string.
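        .. admonition:: Example:
            A hedged sketch; ``blob`` is assumed to be an ``azure.storage.blob.aio.BlobClient``
            for a page blob, and ``source_url`` a page blob URL that is public or carries a SAS token.
            .. code-block:: python
                async def example_upload_pages_from_url(blob, source_url):
                    # Copy the first 512-byte page of the source into the first
                    # page of the destination; all values must be 512-byte aligned.
                    return await blob.upload_pages_from_url(
                        source_url, offset=0, length=512, source_offset=0)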
"""
options = self._upload_pages_from_url_options(
source_url=self._encode_source_url(source_url),
offset=offset,
length=length,
source_offset=source_offset,
**kwargs
)
try:
return await self._client.page_blob.upload_pages_from_url(**options) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def clear_page(self, offset, length, **kwargs):
# type: (int, int, Any) -> Dict[str, Union[str, datetime]]
"""Clears a range of pages.
:param int offset:
Start of byte range to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the length must be a modulus of
512.
:param int length:
Number of bytes to use for writing to a section of the blob.
Pages must be aligned with 512-byte boundaries, the start offset
must be a modulus of 512 and the length must be a modulus of
512.
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword int if_sequence_number_lte:
If the blob's sequence number is less than or equal to
the specified value, the request proceeds; otherwise it fails.
:keyword int if_sequence_number_lt:
If the blob's sequence number is less than the specified
value, the request proceeds; otherwise it fails.
:keyword int if_sequence_number_eq:
If the blob's sequence number is equal to the specified
value, the request proceeds; otherwise it fails.
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Blob-updated property dict (Etag and last modified).
:rtype: dict(str, Any)
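        .. admonition:: Example:
            A minimal sketch; ``blob`` is assumed to be an ``azure.storage.blob.aio.BlobClient``
            for a page blob.
            .. code-block:: python
                async def example_clear_page(blob):
                    # Clear the first 512-byte page; offset and length must be
                    # 512-byte aligned.
                    return await blob.clear_page(offset=0, length=512)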
"""
options = self._clear_page_options(offset, length, **kwargs)
try:
return await self._client.page_blob.clear_pages(**options) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async
async def append_block( # type: ignore
self, data, # type: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
length=None, # type: Optional[int]
**kwargs
):
# type: (...) -> Dict[str, Union[str, datetime, int]]
"""Commits a new block of data to the end of the existing append blob.
:param data:
Content of the block.
:param int length:
Size of the block in bytes.
:keyword bool validate_content:
If true, calculates an MD5 hash of the block content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https, as https (the default),
will already validate. Note that this MD5 hash is not stored with the
blob.
:keyword int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:keyword int appendpos_condition:
Optional conditional header, used only for the Append Block operation.
A number indicating the byte offset to compare. Append Block will
succeed only if the append position is equal to this number. If it
is not, the request will fail with the AppendPositionConditionNotMet error
(HTTP status code 412 - Precondition Failed).
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword str encoding:
Defaults to UTF-8.
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword str encryption_scope:
A predefined encryption scope used to encrypt the data on the service. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
.. versionadded:: 12.2.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
:rtype: dict(str, Any)
"""
options = self._append_block_options(
data,
length=length,
**kwargs
)
try:
return await self._client.append_blob.append_block(**options) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async()
async def append_block_from_url(self, copy_source_url, # type: str
source_offset=None, # type: Optional[int]
source_length=None, # type: Optional[int]
**kwargs):
# type: (...) -> Dict[str, Union[str, datetime, int]]
"""
Creates a new block to be committed as part of a blob, where the contents are read from a source url.
:param str copy_source_url:
The URL of the source data. It can point to any Azure Blob or File, that is either public or has a
shared access signature attached.
:param int source_offset:
This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
:param int source_length:
This indicates the end of the range of bytes that has to be taken from the copy source.
:keyword bytearray source_content_md5:
If given, the service will calculate the MD5 hash of the block content and compare against this value.
:keyword int maxsize_condition:
Optional conditional header. The max length in bytes permitted for
the append blob. If the Append Block operation would cause the blob
to exceed that limit or if the blob size is already greater than the
value specified in this header, the request will fail with
MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
:keyword int appendpos_condition:
Optional conditional header, used only for the Append Block operation.
A number indicating the byte offset to compare. Append Block will
succeed only if the append position is equal to this number. If it
is not, the request will fail with the
AppendPositionConditionNotMet error
(HTTP status code 412 - Precondition Failed).
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The destination match condition to use upon the etag.
:keyword str if_tags_match_condition:
Specify a SQL where clause on blob tags to operate only on blob with a matching value.
eg. ``\"\\\"tagname\\\"='my tag'\"``
.. versionadded:: 12.4.0
:keyword ~datetime.datetime source_if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the source resource has been modified since the specified time.
:keyword ~datetime.datetime source_if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the source resource has not been modified since the specified date/time.
:keyword str source_etag:
The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions source_match_condition:
The source match condition to use upon the etag.
:keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:keyword str encryption_scope:
A predefined encryption scope used to encrypt the data on the service. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
.. versionadded:: 12.2.0
:keyword int timeout:
The timeout parameter is expressed in seconds.
:keyword str source_authorization:
Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
the prefix of the source_authorization string.
"""
options = self._append_block_from_url_options(
copy_source_url=self._encode_source_url(copy_source_url),
source_offset=source_offset,
source_length=source_length,
**kwargs
)
try:
return await self._client.append_blob.append_block_from_url(**options) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
@distributed_trace_async()
async def seal_append_blob(self, **kwargs):
# type: (...) -> Dict[str, Union[str, datetime, int]]
"""The Seal operation seals the Append Blob to make it read-only.
.. versionadded:: 12.4.0
:keyword int appendpos_condition:
Optional conditional header, used only for the Append Block operation.
A number indicating the byte offset to compare. Append Block will
succeed only if the append position is equal to this number. If it
is not, the request will fail with the AppendPositionConditionNotMet error
(HTTP status code 412 - Precondition Failed).
:keyword lease:
Required if the blob has an active lease. Value can be a BlobLeaseClient object
or the lease ID as a string.
:paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
:keyword ~datetime.datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:keyword ~datetime.datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:keyword str etag:
An ETag value, or the wildcard character (*). Used to check if the resource has changed,
and act according to the condition specified by the `match_condition` parameter.
:keyword ~azure.core.MatchConditions match_condition:
The match condition to use upon the etag.
:keyword int timeout:
The timeout parameter is expressed in seconds.
:returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
:rtype: dict(str, Any)
"""
options = self._seal_append_blob_options(**kwargs)
try:
return await self._client.append_blob.seal(**options) # type: ignore
except HttpResponseError as error:
process_storage_error(error)
def _get_container_client(self): # pylint: disable=client-method-missing-kwargs
# type: (...) -> ContainerClient
"""Get a client to interact with the blob's parent container.
The container need not already exist. Defaults to current blob's credentials.
:returns: A ContainerClient.
:rtype: ~azure.storage.blob.ContainerClient
.. admonition:: Example:
.. literalinclude:: ../samples/blob_samples_containers_async.py
:start-after: [START get_container_client_from_blob_client]
:end-before: [END get_container_client_from_blob_client]
:language: python
:dedent: 12
:caption: Get container client from blob object.
"""
from ._container_client_async import ContainerClient
if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access
_pipeline = AsyncPipeline(
transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
policies=self._pipeline._impl_policies # pylint: disable = protected-access
)
else:
_pipeline = self._pipeline # pylint: disable = protected-access
return ContainerClient(
"{}://{}".format(self.scheme, self.primary_hostname), container_name=self.container_name,
credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
_pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
require_encryption=self.require_encryption, encryption_version=self.encryption_version,
key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function)
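# Illustrative usage sketch (not part of the SDK source itself): a minimal
# append-blob workflow using the methods documented above. ``blob_client`` is
# assumed to be an async BlobClient pointing at an existing append blob.
async def _example_append_and_seal(blob_client, chunk):
    # Append one block, then seal the blob so no further blocks can be added;
    # the returned dict carries the updated properties (ETag, last modified,
    # append offset, committed block count).
    props = await blob_client.append_block(chunk, length=len(chunk))
    await blob_client.seal_append_blob()
    return props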
| {
"content_hash": "a21997f7a201bce8f9f5db8e6c12cc07",
"timestamp": "",
"source": "github",
"line_count": 2764,
"max_line_length": 169,
"avg_line_length": 54.27496382054993,
"alnum_prop": 0.6448778796928327,
"repo_name": "Azure/azure-sdk-for-python",
"id": "3807e8f9fdadf1cb97fbf4eef49d74e3bb80439e",
"size": "150387",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/storage/azure-storage-blob/azure/storage/blob/aio/_blob_client_async.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import os
class PsicrawlerPipeline(object):
def get_base_path(self):
return os.path.dirname(os.path.realpath(__file__)) + "/../xmlfiles"
def write_item(self, item):
if len(item['topics']) > 0:
targetDir = self.get_base_path()
firstLetter = item['title'][0].lower()
targetDir += '/' + item['source'] + '/' + firstLetter
if not os.path.exists(targetDir):
os.makedirs(targetDir)
fn = item['title'].replace(' ', '_') + '.xml'
with open(targetDir + '/' + fn, 'w') as f:
f.write(item.asXml())
def process_item(self, item, spider):
self.write_item(item)
return item
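# Illustrative sketch (not part of the original pipeline): a stand-in item
# showing the directory layout write_item() produces; asXml() below is a stub
# for the real scrapy item's serializer.
class _ExampleItem(dict):
    def asXml(self):
        return '<page><title>%s</title></page>' % self['title']
# An item like _ExampleItem(title='Anxiety', source='wikipedia', topics=['x'])
# would be written to <base_path>/wikipedia/a/Anxiety.xml by write_item().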
| {
"content_hash": "fd6b18f2b8e07d4984e0bdca53a439fe",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 28.346153846153847,
"alnum_prop": 0.5115332428765265,
"repo_name": "psiopic2/psicrawler",
"id": "ff321fca705e925e6e8e1de4f89424a7a0bf96bf",
"size": "761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "psicrawler/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21963"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
} |
import minecraft.minecraft as minecraft
#import minecraft block module
import minecraft.block as block
#import time, so delays can be used
import time
if __name__ == "__main__":
time.sleep(2)
#Connect to minecraft by creating the minecraft object
# - minecraft needs to be running and in a game
mc = minecraft.Minecraft.create()
#Post a message to the minecraft chat window
mc.postToChat("Hi, Minecraft API, the basics, what can you do? ")
time.sleep(5)
#Find out your player's position
playerPos = mc.player.getPos()
mc.postToChat("Find your position - its x=" + str(playerPos.x) + ", y=" + str(playerPos.y) + ", z=" + str(playerPos.z))
time.sleep(5)
#Using your player's position
# - the player's position is an x,y,z coordinate of floats (e.g. 23.59,12.00,-45.32)
# - in order to use the player's position in other commands we need integers (e.g. 23,12,-45)
# - so round the player's position
playerPos = minecraft.Vec3(int(playerPos.x), int(playerPos.y), int(playerPos.z))
#Changing your player's position
mc.postToChat("Move your player - 30 blocks UP!")
time.sleep(2)
mc.player.setPos(playerPos.x,playerPos.y + 30,playerPos.z)
# - wait for you to fall!
time.sleep(5)
#Interacting with a block
# - get the type of the block directly below you
blockType = mc.getBlock(playerPos.x,playerPos.y - 1,playerPos.z)
mc.postToChat("Interact with blocks - the block below you is of type - " + str(blockType))
time.sleep(5)
# - change the block below you to wood planks
mc.setBlock(playerPos.x,playerPos.y-1,playerPos.z,block.WOOD_PLANKS)
mc.postToChat("Change blocks - the block below you is now wood planks")
time.sleep(5)
#Creating many blocks
mc.postToChat("Create blocks - making a diamond tower")
# - loop 20 times
for up in range(0, 20):
mc.setBlock(playerPos.x + 1, playerPos.y + up, playerPos.z, block.DIAMOND_BLOCK)
time.sleep(0.3)
time.sleep(2)
# - put you on top of the tower
mc.postToChat("Dont look down, because Im putting you on top of it!")
time.sleep(1)
mc.player.setPos(playerPos.x + 1, playerPos.y + 20, playerPos.z)
time.sleep(5)
mc.postToChat("www.stuffaboutcode.com")
#Setting multiple blocks at once (only works for 2 blocks - weird!)
# - create list of arguments
#towerArgs = []
# - append co-ord to list
#for up in range(0, 20):
# towerArgs.append(playerPos.x + 2)
# towerArgs.append(playerPos.y + up)
#    towerArgs.append(playerPos.z)
# - append block type to list
#towerArgs.append(5)
# - call method
#mc.setBlocks(*towerArgs)
# move player
#mc.player.setPos(playerPos.x + 1, playerPos.y + 21, playerPos.z)
| {
"content_hash": "1ebb014dec9633be5ff9457ab2e68c44",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 123,
"avg_line_length": 32.73255813953488,
"alnum_prop": 0.6550621669626998,
"repo_name": "martinohanlon/minecraft-api-tutorial",
"id": "381386daaf5a24c5350609fa567f2c1da2cb199b",
"size": "2943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "minecraft-api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "1023"
},
{
"name": "Python",
"bytes": "16490"
}
],
"symlink_target": ""
} |
import unittest
import threading
import time
import IECore
import Gaffer
import GafferTest
class NodeTest( GafferTest.TestCase ) :
def testParenting( self ) :
c = Gaffer.GraphComponent()
n = Gaffer.Node()
self.assertEqual( n.acceptsParent( c ), False )
self.assertRaises( RuntimeError, c.addChild, n )
n2 = Gaffer.Node()
self.assertEqual( n.acceptsParent( n2 ), True )
n2.addChild( n )
p = Gaffer.Plug()
self.assert_( n.acceptsChild( p ) )
self.assert_( not n.acceptsParent( p ) )
n.addChild( p )
self.assert_( p.parent().isSame( n ) )
def testNaming( self ) :
n = Gaffer.Node()
self.assertEqual( n.getName(), "Node" )
def testScriptNode( self ) :
n = Gaffer.Node()
n2 = Gaffer.Node()
self.assertEqual( n.scriptNode(), None )
self.assertEqual( n2.scriptNode(), None )
sn = Gaffer.ScriptNode()
sn.addChild( n )
n.addChild( n2 )
self.assert_( n.scriptNode().isSame( sn ) )
self.assert_( n2.scriptNode().isSame( sn ) )
def testExtendedConstructor( self ) :
n = Gaffer.Node()
self.assertEqual( n.getName(), "Node" )
n = Gaffer.Node( "a" )
self.assertEqual( n.getName(), "a" )
self.assertRaises( Exception, Gaffer.Node, "too", "many" )
def testDynamicPlugSerialisationOrder( self ) :
n = Gaffer.Node()
n["p1"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n["p2"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n["p3"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertEqual( n.children()[0].getName(), "user" )
self.assertEqual( n.children()[1].getName(), "p1" )
self.assertEqual( n.children()[2].getName(), "p2" )
self.assertEqual( n.children()[3].getName(), "p3" )
s = Gaffer.ScriptNode()
s["n"] = n
ss = s.serialise()
s = Gaffer.ScriptNode()
s.execute( ss )
self.assertEqual( s["n"].children()[0].getName(), "user" )
self.assertEqual( s["n"].children()[1].getName(), "p1" )
self.assertEqual( s["n"].children()[2].getName(), "p2" )
self.assertEqual( s["n"].children()[3].getName(), "p3" )
def testSerialiseDynamicStringPlugs( self ) :
n = Gaffer.Node()
n["p1"] = Gaffer.StringPlug( defaultValue = "default", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n["p1"].setValue( "value" )
self.assertEqual( n["p1"].getValue(), "value" )
s = Gaffer.ScriptNode()
s["n"] = n
ss = s.serialise()
s = Gaffer.ScriptNode()
s.execute( ss )
self.assertEqual( s["n"]["p1"].defaultValue(), "default" )
self.assertEqual( s["n"]["p1"].getValue(), "value" )
def testSerialiseDynamicBoolPlugs( self ) :
n = Gaffer.Node()
n["p1"] = Gaffer.BoolPlug( defaultValue = True, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n["p1"].setValue( False )
s = Gaffer.ScriptNode()
s["n"] = n
ss = s.serialise()
s = Gaffer.ScriptNode()
s.execute( ss )
self.assertEqual( s["n"]["p1"].defaultValue(), True )
self.assertEqual( s["n"]["p1"].getValue(), False )
def testUnparentingRemovesConnections( self ) :
s = Gaffer.ScriptNode()
n1 = GafferTest.AddNode( "n1" )
n2 = GafferTest.AddNode( "n2" )
s.addChild( n1 )
s.addChild( n2 )
n2["op1"].setInput( n1["sum"] )
self.failUnless( n2["op1"].getInput().isSame( n1["sum"] ) )
del s["n2"]
self.assertEqual( n2["op1"].getInput(), None )
s.addChild( n2 )
n2["op1"].setInput( n1["sum"] )
self.failUnless( n2["op1"].getInput().isSame( n1["sum"] ) )
del s["n1"]
self.assertEqual( n2["op1"].getInput(), None )
def testOverrideAcceptsInput( self ) :
class AcceptsInputTestNode( Gaffer.Node ) :
def __init__( self, name = "AcceptsInputTestNode" ) :
Gaffer.Node.__init__( self, name )
self.addChild( Gaffer.IntPlug( "in" ) )
self.addChild( Gaffer.IntPlug( "out", Gaffer.Plug.Direction.Out ) )
def acceptsInput( self, plug, inputPlug ) :
if plug.isSame( self["in"] ) :
return isinstance( inputPlug.source().node(), AcceptsInputTestNode )
return True
n1 = AcceptsInputTestNode()
n2 = AcceptsInputTestNode()
n3 = GafferTest.AddNode()
self.assertEqual( n1["in"].acceptsInput( n2["out"] ), True )
self.assertEqual( n1["in"].acceptsInput( n3["sum"] ), False )
# check that we can't use a pass-through connection as
# a loophole.
# this particular connection makes no sense but breaks
# no rules - we're just using it to test the loophole.
n2["out"].setInput( n3["sum"] )
self.assertEqual( n1["in"].acceptsInput( n2["out"] ), False )
def testPlugFlagsChangedSignal( self ) :
n = Gaffer.Node()
n["p"] = Gaffer.Plug()
cs = GafferTest.CapturingSlot( n.plugFlagsChangedSignal() )
self.assertEqual( len( cs ), 0 )
n["p"].setFlags( Gaffer.Plug.Flags.ReadOnly, True )
self.assertEqual( len( cs ), 1 )
self.failUnless( cs[0][0].isSame( n["p"] ) )
# second time should have no effect because they're the same
n["p"].setFlags( Gaffer.Plug.Flags.ReadOnly, True )
self.assertEqual( len( cs ), 1 )
n["p"].setFlags( Gaffer.Plug.Flags.ReadOnly, False )
self.assertEqual( len( cs ), 2 )
self.failUnless( cs[1][0].isSame( n["p"] ) )
def testUserPlugs( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["test"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n"]["user"]["test"].setValue( 10 )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( s2["n"]["user"]["test"].getValue(), 10 )
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "3e4ebfc509ba8c3f100204c64b91839a",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 120,
"avg_line_length": 26.60747663551402,
"alnum_prop": 0.6283807516684229,
"repo_name": "paulondc/gaffer",
"id": "2d39c7024137cdc07f338e8358e5c88f412af7aa",
"size": "7578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferTest/NodeTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "15447"
},
{
"name": "C++",
"bytes": "2630344"
},
{
"name": "COBOL",
"bytes": "64449"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "Objective-C",
"bytes": "107529"
},
{
"name": "Python",
"bytes": "2745422"
},
{
"name": "Shell",
"bytes": "6943"
},
{
"name": "Slash",
"bytes": "32856"
}
],
"symlink_target": ""
} |
import re
class FilePermissions(object):
"""
Class for parsing an `ls -l` line targeted at a concrete file and handling the parsed properties.
It is useful for checking file permissions and owner.
Attributes:
perms_owner (str): Owner permissions, e.g. 'rwx'
perms_group (str): Group permissions
perms_other (str): Other permissions
owner (str): Owner user name
group (str): Owner group name
path (str): Full path to file
Note:
This class does not support Access Control Lists (ACLs). If that is needed in the future,
it would be preferable to create another class rather than extend this one.
Advanced File Permissions - SUID, SGID and Sticky Bit - are not yet correctly parsed.
"""
_PERMISSIONS_PATTERN = re.compile('''
^
.([-rwxsS]{3})([-rwxsS]{3})([-rwxsS]{3}) # -rwxrwxrwx
# -rw-------. 1 root root 4308 Apr 22 15:57 /etc/ssh/sshd_config
# ^^^^^^^^^^
# Valid characters are -rwxsS
# s == execute bit and sticky bit
# S == sticky bit without execute bit
\S* # the character(s) after rwxrwxrwx for ACLs/xattrs
# -rw-------. 1 root root 4308 Apr 22 15:57 /etc/ssh/sshd_config
# ^
\s+\S+\s+ # the number of hardlinks and spaces around
# -rw-------. 1 root root 4308 Apr 22 15:57 /etc/ssh/sshd_config
# ^^^
([^\s:]+)\s+([^\s:]+) # owner, spaces, group
# -rw-------. 1 root root 4308 Apr 22 15:57 /etc/ssh/sshd_config
# ^^^^^^^^^
# Username and group name are strings without whitespace \s and without colon :.
\s+\S+\s+ # size and spaces around
# -rw-------. 1 root root 4308 Apr 22 15:57 /etc/ssh/sshd_config
# ^^^^^^
\S+\s+\S+ # month and day
# -rw-------. 1 root root 4308 Apr 22 15:57 /etc/ssh/sshd_config
# ^^^^^^
\s+\S+\s+ # time/year and spaces around
# -rw-------. 1 root root 4308 Apr 22 2009 /etc/ssh/sshd_config
# ^^^^^^
# -rw-------. 1 root root 4308 Apr 22 15:57 /etc/ssh/sshd_config
# ^^^^^^^
(.*) # file name or path
# -rw-------. 1 root root 4308 Apr 22 15:57 /etc/ssh/sshd_config
# ^^^^^^^^^^^^^^^^^^^^
# -rw-------. 1 root root 4308 Apr 22 15:57 file_name_without_path
# ^^^^^^^^^^^^^^^^^^^^^^
$
''', re.VERBOSE)
def __init__(self, line):
"""
Args:
line (str): A line from `ls -l /concrete/file` execution. Such as:
-rw-------. 1 root root 762 Sep 23 002 /etc/ssh/sshd_config
-rw-------. 1 root root 4308 Apr 22 15:57 /etc/ssh/sshd_config
-rw-r--r--. 1 root root 4179 Dec 1 2014 /boot/grub2/grub.cfg
Raises:
ValueError: If line is malformed
"""
self.line = line
r = self._PERMISSIONS_PATTERN.search(self.line)
if r:
(self.perms_owner, self.perms_group, self.perms_other,
self.owner, self.group, self.path) = r.groups()
else:
raise ValueError('Invalid `ls -l` line "{}"'.format(self.line))
@classmethod
def from_dict(self, dirent):
"""
Create a new FilePermissions object from the given dictionary. This
works with the FileListing parser class, which has already done the
hard work of pulling many of these fields out. We create an object
with all the dictionary keys available as properties, and also split
the ``perms`` string up into the owner, group and other permission triplets.
"""
# Check that we have at least as much data as the __init__ requires
for k in ['perms', 'owner', 'group', 'name', 'dir']:
if k not in dirent:
raise ValueError("Need required key '{k}'".format(k=k))
# Copy all values across
for k in dirent:
setattr(self, k, dirent[k])
# Create perms parts
self.perms_owner = self.perms[0:3]
self.perms_group = self.perms[3:6]
self.perms_other = self.perms[6:9]
return self
def owned_by(self, owner, also_check_group=False):
"""
Checks if the specified user or user and group own the file.
Args:
owner (str): the user (or group) name for which we ask about ownership
also_check_group (bool): if set to True, both user owner and group owner checked
if set to False, only user owner checked
Returns:
bool: True if owner of the file is the specified owner
"""
if also_check_group:
return self.owner == owner and self.group == owner
else:
return self.owner == owner
def owner_can_read(self):
"""
Checks if owner can read the file. Write and execute bits are not evaluated.
Returns:
bool: True if owner can read the file.
"""
return 'r' in self.perms_owner
def group_can_read(self):
"""
Checks if group can read the file. Write and execute bits are not evaluated.
Returns:
bool: True if group can read the file.
"""
return 'r' in self.perms_group
def others_can_read(self):
"""
Checks if 'others' can read the file. Write and execute bits are not evaluated. ('others' in
the sense of unix permissions that know about user, group, others.)
Returns:
bool: True if 'others' can read the file.
"""
return 'r' in self.perms_other
def owner_can_only_read(self):
"""
Checks if owner has read-only permissions for the file.
Therefore, write and execute bits for owner must be unset and read bit must be set.
Returns:
bool: True if owner can only read the file.
"""
return 'r--' == self.perms_owner
def group_can_only_read(self):
"""
Checks if group has read-only permissions for the file.
Therefore, write and execute bits for group must be unset and read bit must be set.
Returns:
bool: True if group can only read the file.
"""
return 'r--' == self.perms_group
def others_can_only_read(self):
"""
Checks if 'others' has read-only permissions for the file.
Therefore, write and execute bits for 'others' must be unset and read bit must be set.
('others' in the sense of unix permissions that know about user, group, others.)
Returns:
bool: True if 'others' can only read the file.
"""
return 'r--' == self.perms_other
def owner_can_write(self):
"""
Checks if owner can write the file. Read and execute bits are not evaluated.
Returns:
bool: True if owner can write the file.
"""
return 'w' in self.perms_owner
def group_can_write(self):
"""
Checks if group can write the file. Read and execute bits are not evaluated.
Returns:
bool: True if group can write the file.
"""
return 'w' in self.perms_group
def others_can_write(self):
"""
Checks if 'others' can write the file. Read and execute bits are not evaluated. ('others' in
the sense of unix permissions that know about user, group, others.)
Returns:
bool: True if 'others' can write the file.
"""
return 'w' in self.perms_other
def only_root_can_read(self, root_group_can_read=True):
"""
Checks if only root is allowed to read the file (and anyone else is
forbidden from reading). Write and execute bits are not checked. The
read bits for root user/group are not checked because root can
read/write anything regardless of the read/write permissions.
When called with ``root_group_can_read`` = ``True``:
* owner must be root
* and 'others' permissions must not contain read
* and if group owner is not root, the 'group' permissions must not
contain read
Valid cases::
rwxrwxrwx owner ownergroup
-------------------------------
???-??-?? root nonroot
??????-?? root root
r--r----- root root
r-------- root nonroot
rwxrwx--- root root
rwxrwx-wx root root
Specifically, these cases are NOT valid because the owner can chmod
permissions and grant themselves permissions without root's
knowledge::
rwxrwxrwx owner ownergroup
-------------------------------
-??-??-?? nonroot nonroot
-??r??-?? nonroot root
--------- nonroot nonroot
When called with ``root_group_can_read`` = ``False``:
* owner must be root
* and 'group' and 'others' permissions must not contain read
Valid cases::
rwxrwxrwx owner ownergroup
-------------------------------
???-??-?? root ?
r-------- root root
r-------- root nonroot
rwx-wx--- root root
rwx-wx--- root nonroot
rwx-wxrwx root nonroot
Specifically, these cases are NOT valid because the owner can chmod
permissions and grant themselves permissions without root's
knowledge::
rwxrwxrwx owner ownergroup
-------------------------------
-??-??-?? nonroot nonroot
--------- nonroot nonroot
Args:
root_group_can_read (bool): if set to True, this tests whether the
'root' group can also read the file.
Returns:
bool: True if only root user (or optionally root group) can read
the file.
"""
requirements = True # The final answer is progressively assembled in this variable.
requirements &= self.owner == 'root'
requirements &= not self.others_can_read()
if root_group_can_read:
if self.group != 'root':
# if group is not root, group must not be able to read
requirements &= not self.group_can_read()
else: # root_group_can_read == False
requirements &= not self.group_can_read()
return requirements
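# Worked example: a hypothetical '-r--r----- root adm' file fails this check
# with root_group_can_read=True, because the non-root group 'adm' keeps the
# read bit, while the same permissions owned by 'root root' pass.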
def only_root_can_write(self, root_group_can_write=True):
"""
Checks if only root is allowed to write the file (and anyone else is
barred from writing). Read and execute bits are not checked. The
write bits for root user/group are not checked because root can
read/write anything regardless of the read/write permissions.
When called with ``root_group_can_write`` = ``True``:
* owner must be root
* and 'others' permissions must not contain write
* and if group owner is not root, the 'group' permissions must not contain write
Valid cases::
rwxrwxrwx owner ownergroup
-------------------------------
????-??-? root nonroot
???????-? root root
-w--w---- root root
-w------- root root
rwxrwx--- root root
rwxrwxr-x root root
Specifically, these cases are NOT valid because the owner can chmod
permissions and grant themselves permissions without root's
knowledge::
rwxrwxrwx owner ownergroup
-------------------------------
?-??-??-? nonroot nonroot
?-??w??-? nonroot root
--------- nonroot nonroot
When called with ``root_group_can_write`` = ``False``:
* owner must be root
* and 'group' and 'others' permissions must not contain write
Valid cases::
rwxrwxrwx owner ownergroup
-------------------------------
????-??-? root ?
-w------- root root
-w------- root nonroot
rwxr-x--- root root
rwxr-x--- root nonroot
rwxr-xr-x root nonroot
Specifically, these cases are NOT valid because the owner can chmod
permissions and grant themselves permissions without root's
knowledge::
rwxrwxrwx owner ownergroup
-------------------------------
?-??-??-? nonroot nonroot
--------- nonroot nonroot
Args:
root_group_can_write (bool): if set to True, this tests whether
'root' group can also write to the file.
Returns:
bool: True if only root user (or optionally root group) can write
the file.
"""
requirements = True # The final answer is progressively assembled in this variable.
requirements &= self.owner == 'root'
requirements &= not self.others_can_write()
if root_group_can_write:
if self.group != 'root':
# if group is not root, group must not be able to write
requirements &= not self.group_can_write()
else: # root_group_can_write == False
requirements &= not self.group_can_write()
return requirements
def all_zero(self):
"""
Checks that all permissions are zero ('---------' in ls -l) - nobody but root can read,
write, exec.
Returns:
bool: True if all permissions are zero ('---------')
"""
_PERM_NOTHING = '---'
return all((
self.perms_owner == _PERM_NOTHING,
self.perms_group == _PERM_NOTHING,
self.perms_other == _PERM_NOTHING,
))
def __repr__(self):
return 'FilePermissions(' + self.path + ')'
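# Illustrative sketch (not part of the module itself), reusing the sample line
# from the class docstring above.
def _example_usage():
    # The calls return (True, False, True) for owner-read, others-read and
    # root-only-write on the parsed sample line.
    fp = FilePermissions('-rw-------. 1 root root 4308 Apr 22 15:57 /etc/ssh/sshd_config')
    return fp.owner_can_read(), fp.others_can_read(), fp.only_root_can_write()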
| {
"content_hash": "0150d8704fc40fe1f510ec78eb275d29",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 100,
"avg_line_length": 37.03350515463917,
"alnum_prop": 0.5220961792748278,
"repo_name": "RedHatInsights/insights-core",
"id": "26fbc25905bd3eab3c83e4172b3456890f001bf0",
"size": "14369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insights/util/file_permissions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "220"
},
{
"name": "Python",
"bytes": "8219046"
},
{
"name": "Shell",
"bytes": "1754"
}
],
"symlink_target": ""
} |
import os
import tarfile
import zipfile
from .basefile import BaseFile
from ...errors.fe import FileContainerFileNotFound as CNF
from ...errors.fe import FileContainerFailedExtraction as CFE
class ContainerFile(BaseFile):
"""
The **ContainerFile** :py:class:`pynion.Multiton` is a file management object
created directly through the :py:class:`pynion.File` factory.
Extends :py:class:`pynion.filesystem._filetypes.BaseFile`
It specifically manages compacted or compressed files with multiple files
within.
"""
def __init__(self, file_name, action, ctype):
super(ContainerFile, self).__init__(file_name, action)
self.ctype = ctype
if self.is_tarfile:
self.action = self.action + '|*'
############
# BOOLEANS #
############
@property
def is_gzipped(self):
"""
:return: Check if compression is gzip
:rtype: bool
"""
return self.ctype.endswith('gzip')
@property
def is_bzipped(self):
"""
:return: Check if compression is bzip
:rtype: bool
"""
return self.ctype.endswith('bzip')
@property
def is_zipped(self):
"""
:return: Check if compression is zip
:rtype: bool
"""
return self.ctype == 'zip'
@property
def is_tarfile(self):
"""
:return: Check if compression is tar
:rtype: bool
"""
return self.ctype.startswith('tar')
####################
# METHODS: ON FILE #
####################
def open(self):
"""
Open the file in the previously defined action type.
:rtype: self
"""
if self.is_open:
return self
if self.is_tarfile:
self._fd = tarfile.open(self.full)
if self.is_zipped:
self._fd = zipfile.ZipFile(self.full)
return self
def read_file(self, file_name):
"""
Get a specific file from the file bundle.
:param str file_name: Name of the query internal file.
:raise: :py:class:`pynion.errors.fe.FileContainerFileNotFound` if query
file is not in the bundle.
:rtype: str
"""
if self.is_zipped:
if file_name not in self._fd.namelist():
raise CNF(self.full, file_name)
return self._fd.read(file_name).split('\n')
if self.is_tarfile:
if file_name not in self._fd.getnames():
raise CNF(self.full, file_name)
return self._fd.extractfile(self._fd.getmember(file_name)).readlines()
def has_file(self, file_name):
"""
Query if file exists in the bundle.
:rtype: bool
"""
if self.is_zipped:
return file_name in self._fd.namelist()
if self.is_tarfile:
return file_name in self._fd.getnames()
def extract(self, target_file = None, target_dir = os.getcwd()):
"""
Extract a specific file from the bundle.
:param str target_file: Query file to extract from the bundle. If
:py:data:`None`, all files contained in the bundle are extracted.
:param str target_dir: Directory to which to write the file. By default
that is the current working directory
:raise: :py:class:`pynion.errors.fe.FileContainerFailedExtraction` if
target_dir does not exist or is not writable.
"""
if not os.path.isdir(target_dir) or not os.access(target_dir, os.W_OK):
raise CFE(self.full, target_dir)
if target_file is not None and not self.has_file(target_file):
raise CNF(self.full, target_file)
if target_file is None:
self._fd.extractall(path = target_dir)
else:
self._fd.extract(target_file, target_dir)
def list_files(self):
"""
Names of all files in the bundle.
:rtype: list
"""
if self.is_tarfile: return self._fd.getnames()
if self.is_zipped: return self._fd.namelist()
def length(self):
"""
Number of files in the bundle.
:rtype: int
"""
return len(self.list_files())
def __new__(self):
raise NotImplementedError
def __exit__(self, type, value, traceback):
raise NotImplementedError
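# Illustrative usage sketch (not part of pynion itself): instances are normally
# obtained through the pynion ``File`` factory mentioned in the class docstring,
# so the read-side API is shown here as comments for a hypothetical tar.gz bundle.
#
# bundle.open()
# if bundle.has_file('summary.txt'):
#     lines = bundle.read_file('summary.txt')
# bundle.extract(target_dir='/tmp/results')   # or bundle.extract('summary.txt')
# print(bundle.list_files())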
| {
"content_hash": "ac28935bd431ba1aa237f83c9c1c1106",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 82,
"avg_line_length": 29.428571428571427,
"alnum_prop": 0.5665742024965326,
"repo_name": "jaumebonet/pynion",
"id": "043d127b3d1ad50d6b4efd6540cedda63e8c386b",
"size": "4326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pynion/filesystem/_filetypes/containerfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80950"
}
],
"symlink_target": ""
} |
import functools
from stronghold import decorators
import django
if django.VERSION[:2] < (1, 9):
from django.utils import unittest
else:
import unittest
class StrongholdDecoratorTests(unittest.TestCase):
def test_public_decorator_sets_attr(self):
@decorators.public
def function():
pass
self.assertTrue(function.STRONGHOLD_IS_PUBLIC)
def test_public_decorator_sets_attr_with_nested_decorators(self):
def stub_decorator(func):
return func
@decorators.public
@stub_decorator
def inner_function():
pass
self.assertTrue(inner_function.STRONGHOLD_IS_PUBLIC)
def test_public_decorator_works_with_partials(self):
def function():
pass
partial = functools.partial(function)
decorators.public(partial)
self.assertTrue(function.STRONGHOLD_IS_PUBLIC)
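# Illustrative sketch (not part of the test suite): the decorator only tags a
# view callable; Stronghold's middleware later checks STRONGHOLD_IS_PUBLIC to
# skip the login requirement for it. The view below is hypothetical.
@decorators.public
def _example_public_view(request):
    return None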
| {
"content_hash": "f9c95708f340e74b0c303414cd17e1cf",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 69,
"avg_line_length": 23.46153846153846,
"alnum_prop": 0.6579234972677596,
"repo_name": "klenks/jobsportal",
"id": "8726b9b36b0d77a74dced7686763fd9087310192",
"size": "915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/stronghold/tests/testdecorators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "138702"
},
{
"name": "HTML",
"bytes": "158529"
},
{
"name": "JavaScript",
"bytes": "250743"
},
{
"name": "Python",
"bytes": "7450092"
},
{
"name": "Shell",
"bytes": "3234"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from nova.conf import serial_console
CONF = cfg.CONF
serial_console.register_opts(CONF)
| {
"content_hash": "4053e8075244ac70ed62c5eec8a017c0",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 36,
"avg_line_length": 17,
"alnum_prop": 0.7899159663865546,
"repo_name": "devendermishrajio/nova",
"id": "33426f4aa20e45e43c5791fe2499e1a98fff39cc",
"size": "964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/conf/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16836881"
},
{
"name": "Shell",
"bytes": "24210"
},
{
"name": "Smarty",
"bytes": "351433"
}
],
"symlink_target": ""
} |
import cupy
import numpy as np
from cupy._core import internal
from cupy import _util
from cupyx.scipy.sparse import _base
from cupyx.scipy.sparse import _coo
from cupyx.scipy.sparse import _sputils
_ufuncs = [
'arcsin', 'arcsinh', 'arctan', 'arctanh', 'ceil', 'deg2rad', 'expm1',
'floor', 'log1p', 'rad2deg', 'rint', 'sign', 'sin', 'sinh', 'sqrt', 'tan',
'tanh', 'trunc',
]
class _data_matrix(_base.spmatrix):
def __init__(self, data):
self.data = data
@property
def dtype(self):
"""Data type of the matrix."""
return self.data.dtype
def _with_data(self, data, copy=True):
raise NotImplementedError
def __abs__(self):
"""Elementwise abosulte."""
return self._with_data(abs(self.data))
def __neg__(self):
"""Elementwise negative."""
return self._with_data(-self.data)
def astype(self, t):
"""Casts the array to given data type.
Args:
dtype: Type specifier.
Returns:
A copy of the array with a given type.
"""
return self._with_data(self.data.astype(t))
def conj(self, copy=True):
if cupy.issubdtype(self.dtype, cupy.complexfloating):
return self._with_data(self.data.conj(), copy=copy)
elif copy:
return self.copy()
else:
return self
conj.__doc__ = _base.spmatrix.conj.__doc__
def copy(self):
return self._with_data(self.data.copy(), copy=True)
copy.__doc__ = _base.spmatrix.copy.__doc__
def count_nonzero(self):
"""Returns number of non-zero entries.
.. note::
This method counts the actual number of non-zero entries, which
does not include explicit zero entries.
Instead ``nnz`` returns the number of entries including explicit
zeros.
Returns:
Number of non-zero entries.
"""
return cupy.count_nonzero(self.data)
def mean(self, axis=None, dtype=None, out=None):
"""Compute the arithmetic mean along the specified axis.
Args:
axis (int or ``None``): Axis along which the mean is computed.
If it is ``None``, it computes the average of all the elements.
Select from ``{None, 0, 1, -2, -1}``.
Returns:
cupy.ndarray: Mean along the given axis.
.. seealso::
:meth:`scipy.sparse.spmatrix.mean`
"""
_sputils.validateaxis(axis)
nRow, nCol = self.shape
data = self.data.copy()
if axis is None:
n = nRow * nCol
elif axis in (0, -2):
n = nRow
else:
n = nCol
return self._with_data(data / n).sum(axis, dtype, out)
def power(self, n, dtype=None):
"""Elementwise power function.
Args:
n: Exponent.
dtype: Type specifier.
"""
if dtype is None:
data = self.data.copy()
else:
data = self.data.astype(dtype, copy=True)
data **= n
return self._with_data(data)
def _find_missing_index(ind, n):
positions = cupy.arange(ind.size)
diff = ind != positions
return cupy.where(
diff.any(),
diff.argmax(),
cupy.asarray(ind.size if ind.size < n else -1))
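# Worked example: for ind = [0, 1, 3] and n = 5 the first missing flattened
# index is 2; for ind = [0, 1, 2] the helper returns ind.size (3), and -1 is
# returned only when every one of the n positions is already present.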
def _non_zero_cmp(mat, am, zero, m):
size = np.prod(mat.shape)
if size == mat.nnz:
return am
else:
ind = mat.row * mat.shape[1] + mat.col
zero_ind = _find_missing_index(ind, size)
return cupy.where(
m == zero,
cupy.minimum(zero_ind, am),
zero_ind)
class _minmax_mixin(object):
"""Mixin for min and max methods.
These are not implemented for dia_matrix, hence the separate class.
"""
def _min_or_max_axis(self, axis, min_or_max, explicit):
N = self.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = self.shape[1 - axis]
mat = self.tocsc() if axis == 0 else self.tocsr()
mat.sum_duplicates()
# Do the reduction
value = mat._minor_reduce(min_or_max, axis, explicit)
major_index = cupy.arange(M)
mask = value != 0
major_index = cupy.compress(mask, major_index)
value = cupy.compress(mask, value)
if axis == 0:
return _coo.coo_matrix(
(value, (cupy.zeros(len(value)), major_index)),
dtype=self.dtype, shape=(1, M))
else:
return _coo.coo_matrix(
(value, (major_index, cupy.zeros(len(value)))),
dtype=self.dtype, shape=(M, 1))
def _min_or_max(self, axis, out, min_or_max, explicit):
if out is not None:
raise ValueError(("Sparse matrices do not support "
"an 'out' parameter."))
_sputils.validateaxis(axis)
if axis is None:
if 0 in self.shape:
raise ValueError("zero-size array to reduction operation")
zero = cupy.zeros((), dtype=self.dtype)
if self.nnz == 0:
return zero
self.sum_duplicates()
m = min_or_max(self.data)
if explicit:
return m
if self.nnz != internal.prod(self.shape):
if min_or_max is cupy.min:
m = cupy.minimum(zero, m)
elif min_or_max is cupy.max:
m = cupy.maximum(zero, m)
else:
assert False
return m
if axis < 0:
axis += 2
return self._min_or_max_axis(axis, min_or_max, explicit)
def _arg_min_or_max_axis(self, axis, op):
if self.shape[axis] == 0:
raise ValueError("Can't apply the operation along a zero-sized "
"dimension.")
mat = self.tocsc() if axis == 0 else self.tocsr()
mat.sum_duplicates()
# Do the reduction
value = mat._arg_minor_reduce(op, axis)
if axis == 0:
return value[None, :]
else:
return value[:, None]
def _arg_min_or_max(self, axis, out, op, compare):
if out is not None:
raise ValueError("Sparse matrices do not support "
"an 'out' parameter.")
_sputils.validateaxis(axis)
if axis is None:
if 0 in self.shape:
raise ValueError("Can't apply the operation to "
"an empty matrix.")
if self.nnz == 0:
return 0
else:
zero = cupy.asarray(self.dtype.type(0))
mat = self.tocoo()
mat.sum_duplicates()
am = op(mat.data)
m = mat.data[am]
return cupy.where(
compare(m, zero), mat.row[am] * mat.shape[1] + mat.col[am],
_non_zero_cmp(mat, am, zero, m))
if axis < 0:
axis += 2
return self._arg_min_or_max_axis(axis, op)
def max(self, axis=None, out=None, *, explicit=False):
"""Returns the maximum of the matrix or maximum along an axis.
Args:
axis (int): {-2, -1, 0, 1, ``None``} (optional)
Axis along which the maximum is computed. The default is to
compute the maximum over all the matrix elements, returning
a scalar (i.e. ``axis`` = ``None``).
out (None): (optional)
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except
for the default value, as this argument is not used.
explicit (bool): Return the maximum value explicitly specified and
ignore all implicit zero entries. If the dimension has no
explicit values, a zero is then returned to indicate that it is
the only implicit value. This parameter is experimental and may
change in the future.
Returns:
(cupy.ndarray or float): Maximum of ``a``. If ``axis`` is
``None``, the result is a scalar value. If ``axis`` is given,
the result is an array of dimension ``a.ndim - 1``. This
differs from numpy for computational efficiency.
.. seealso:: min : The minimum value of a sparse matrix along a given
axis.
.. seealso:: numpy.matrix.max : NumPy's implementation of ``max`` for
matrices
"""
if explicit:
api_name = 'explicit of cupyx.scipy.sparse.{}.max'.format(
self.__class__.__name__)
_util.experimental(api_name)
return self._min_or_max(axis, out, cupy.max, explicit)
def min(self, axis=None, out=None, *, explicit=False):
"""Returns the minimum of the matrix or maximum along an axis.
Args:
axis (int): {-2, -1, 0, 1, ``None``} (optional)
Axis along which the minimum is computed. The default is to
compute the minimum over all the matrix elements, returning
a scalar (i.e. ``axis`` = ``None``).
out (None): (optional)
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
explicit (bool): Return the minimum value explicitly specified and
ignore all implicit zero entries. If the dimension has no
explicit values, a zero is then returned to indicate that it is
the only implicit value. This parameter is experimental and may
change in the future.
Returns:
(cupy.ndarray or float): Minimum of ``a``. If ``axis`` is
None, the result is a scalar value. If ``axis`` is given, the
result is an array of dimension ``a.ndim - 1``. This differs
from numpy for computational efficiency.
.. seealso:: max : The maximum value of a sparse matrix along a given
axis.
.. seealso:: numpy.matrix.min : NumPy's implementation of 'min' for
matrices
"""
if explicit:
api_name = 'explicit of cupyx.scipy.sparse.{}.min'.format(
self.__class__.__name__)
_util.experimental(api_name)
return self._min_or_max(axis, out, cupy.min, explicit)
def argmax(self, axis=None, out=None):
"""Returns indices of maximum elements along an axis.
Implicit zero elements are taken into account. If there are several
maximum values, the index of the first occurrence is returned. If
``NaN`` values occur in the matrix, the output defaults to a zero entry
for the row/column in which the NaN occurs.
Args:
axis (int): {-2, -1, 0, 1, ``None``} (optional)
Axis along which the argmax is computed. If ``None`` (default),
index of the maximum element in the flatten data is returned.
out (None): (optional)
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
Returns:
(cupy.ndarray or int): Indices of maximum elements. If array,
its size along ``axis`` is 1.
"""
return self._arg_min_or_max(axis, out, cupy.argmax, cupy.greater)
def argmin(self, axis=None, out=None):
"""
Returns indices of minimum elements along an axis.
Implicit zero elements are taken into account. If there are several
minimum values, the index of the first occurrence is returned. If
``NaN`` values occur in the matrix, the output defaults to a zero entry
for the row/column in which the NaN occurs.
Args:
axis (int): {-2, -1, 0, 1, ``None``} (optional)
Axis along which the argmin is computed. If ``None`` (default),
index of the minimum element in the flatten data is returned.
out (None): (optional)
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
Returns:
(cupy.ndarray or int): Indices of minimum elements. If matrix,
its size along ``axis`` is 1.
"""
return self._arg_min_or_max(axis, out, cupy.argmin, cupy.less)
def _install_ufunc(func_name):
def f(self):
ufunc = getattr(cupy, func_name)
result = ufunc(self.data)
return self._with_data(result)
f.__doc__ = 'Elementwise %s.' % func_name
f.__name__ = func_name
setattr(_data_matrix, func_name, f)
def _install_ufuncs():
for func_name in _ufuncs:
_install_ufunc(func_name)
_install_ufuncs()
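# Illustrative usage sketch (not part of CuPy itself): the mixins above are
# exercised through a concrete format such as csr_matrix, which inherits both
# _data_matrix and _minmax_mixin. Requires a CUDA device to run.
def _example_data_and_minmax():
    from cupyx.scipy import sparse
    m = sparse.csr_matrix(cupy.array([[0.0, -2.0], [3.0, 0.0]]))
    # abs/power go through _data_matrix._with_data; max/argmin through the mixin.
    return abs(m).toarray(), m.power(2).toarray(), float(m.max()), int(m.argmin())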
| {
"content_hash": "e2143de949f9e83296b86471d43f075a",
"timestamp": "",
"source": "github",
"line_count": 398,
"max_line_length": 79,
"avg_line_length": 33.14572864321608,
"alnum_prop": 0.5489690721649485,
"repo_name": "cupy/cupy",
"id": "1615aea2db15f5b1a46cc5cd7f6a28061990781f",
"size": "13192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cupyx/scipy/sparse/_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "38"
},
{
"name": "C",
"bytes": "712019"
},
{
"name": "C++",
"bytes": "895316"
},
{
"name": "Cuda",
"bytes": "151799"
},
{
"name": "Cython",
"bytes": "1996454"
},
{
"name": "Dockerfile",
"bytes": "40251"
},
{
"name": "PowerShell",
"bytes": "7361"
},
{
"name": "Python",
"bytes": "4841354"
},
{
"name": "Shell",
"bytes": "24521"
}
],
"symlink_target": ""
} |
"""
pyshtools
=========
pyshtools is an archive of scientific routines that can be used to
perform spherical harmonic transforms and reconstructions, rotations
of data expressed in spherical harmonics, and multitaper spectral
analyses on the sphere.
This module imports the following classes and subpackages into the
main namespace:
SHCoeffs - A high level class for spherical harmonic coefficients.
SHGrid - A high level classes for global grids.
SHWindow - A high level classes for localization windows.
shclasses - All pyshtools classes and subclasses.
shtools - All Python-wrapped Fortran 95 routines.
constant - pyshtools constants.
legendre - Legendre functions.
expand - Spherical harmonic expansion routines.
shio - Spherical harmonic I/O, storage, and conversion routines.
spectralanalysis - Global and localized spectral analysis routines.
rotate - Spherical harmonic rotation routines.
gravmag - Gravity and magnetics routines.
utils - pyshtools utilities.
For further information, consult the web documentation at
https://shtools.oca.eu/
and the GitHub project page at
https://github.com/SHTOOLS/SHTOOLS
"""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
__version__ = '4.2'
__author__ = 'SHTOOLS developers'
import os as _os
import numpy as _np
# ---- Import shtools subpackages ----
from . import shtools
from . import constant
from . import shclasses
from . import legendre
from . import expand
from . import shio
from . import spectralanalysis
from . import rotate
from . import gravmag
from . import utils
# ---- Import classes into pyshtools namespace
from .shclasses import SHCoeffs, SHGrid, SHWindow
# ---- Define __all__ for use with: from pyshtools import * ----
__all__ = ['constant', 'shclasses', 'SHCoeffs', 'SHGrid', 'SHWindow',
'legendre', 'expand', 'shio', 'spectralanalysis',
'rotate', 'gravmag', 'utils']
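# Illustrative usage sketch (not part of the package itself): a minimal round
# trip with the classes imported above, assuming the SHCoeffs.from_random
# constructor that takes a per-degree power spectrum.
def _example_roundtrip():
    coeffs = SHCoeffs.from_random(_np.ones(11))  # random coefficients, degrees 0-10
    grid = coeffs.expand()                       # expand onto a global grid
    return grid.expand()                         # and back to spherical harmonics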
| {
"content_hash": "2eb2757d0c74b2cc86310bea8f7eae02",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 71,
"avg_line_length": 31.90625,
"alnum_prop": 0.732615083251714,
"repo_name": "ioshchepkov/SHTOOLS",
"id": "a1203d186778eb0948671ea65ef6d35a5a2b39ae",
"size": "2042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyshtools/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Fortran",
"bytes": "1204574"
},
{
"name": "Makefile",
"bytes": "26801"
},
{
"name": "Python",
"bytes": "290300"
}
],
"symlink_target": ""
} |
"""
This script builds and runs a graph with miniflow.
There is no need to change anything to solve this quiz!
However, feel free to play with the network! Can you also
build a network that solves the equation below?
(x + y) + y
"""
from miniflow import *
x, y = Input(), Input()
f = Add(x, y)
feed_dict = {x: 10, y: 5}
sorted_nodes = topological_sort(feed_dict)
output = forward_pass(f, sorted_nodes)
# NOTE: because topological_sort set the values for the `Input` nodes we could also access
# the value for x with x.value (same goes for y).
print("{} + {} = {} (according to miniflow)".format(feed_dict[x], feed_dict[y], output))
| {
"content_hash": "75c5c246a3fb21429bf6510b4f7a0bc5",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 90,
"avg_line_length": 25.64,
"alnum_prop": 0.6911076443057722,
"repo_name": "nehal96/Deep-Learning-ND-Exercises",
"id": "bcde0b8316a3647afc0ae880cd34ee9a71dbde3c",
"size": "641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MiniFlow/1 - Forward Propagation/nn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1116048"
},
{
"name": "Jupyter Notebook",
"bytes": "11694856"
},
{
"name": "Python",
"bytes": "33916"
}
],
"symlink_target": ""
} |
"""This bot will move pages out of redirected categories.
The bot will look for categories that are marked with a category redirect
template, take the first parameter of the template as the target of the
redirect, and move all pages and subcategories of the category there. It
also changes hard redirects into soft redirects, and fixes double redirects.
A log is written under <userpage>/category_redirect_log. Only category pages
that haven't been edited for a certain cooldown period (currently 7 days)
are taken into account.
-delay:# Set an amount of days. If the category is edited more recenty
than given days, ignore it. Default is 7.
-tiny Only loops over Category:Non-empty_category_redirects and
moves all images, pages and categories in redirect categories
to the target category.
Usage:
python pwb.py category_redirect [options]
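Example invocations (illustrative):
    python pwb.py category_redirect -delay:14
    python pwb.py category_redirect -tiny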
"""
#
# (C) Pywikibot team, 2008-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import re
import sys
import time
from datetime import timedelta
import pywikibot
from pywikibot import i18n, pagegenerators, config
if sys.version_info[0] > 2:
import pickle as cPickle
else:
import cPickle
class CategoryRedirectBot(pywikibot.Bot):
"""Page category update bot."""
def __init__(self, **kwargs):
"""Constructor."""
self.availableOptions.update({
'tiny': False, # use Non-empty category redirects only
'delay': 7, # cool down delay in days
})
super(CategoryRedirectBot, self).__init__(**kwargs)
self.cooldown = self.getOption('delay')
self.site = pywikibot.Site()
self.catprefix = self.site.namespace(14) + ":"
self.log_text = []
self.edit_requests = []
self.problems = []
self.template_list = []
self.cat = None
self.log_page = pywikibot.Page(self.site,
u"User:%(user)s/category redirect log"
% {'user': self.site.username()})
# Localization:
# Category that contains all redirected category pages
self.cat_redirect_cat = {
'commons': "Category:Category redirects",
'meta': 'Category:Maintenance of categories/Soft redirected categories',
'ar': u"تصنيف:تحويلات تصنيفات ويكيبيديا",
'cs': 'Kategorie:Údržba:Zastaralé kategorie',
'da': "Kategori:Omdirigeringskategorier",
'en': "Category:Wikipedia soft redirected categories",
'es': "Categoría:Wikipedia:Categorías redirigidas",
'fa': u"رده:ردههای منتقلشده",
'hu': "Kategória:Kategóriaátirányítások",
'ja': "Category:移行中のカテゴリ",
'no': "Kategori:Wikipedia omdirigertekategorier",
'pl': "Kategoria:Przekierowania kategorii",
'pt': "Categoria:!Redirecionamentos de categorias",
'ru': "Категория:Википедия:Категории-дубликаты",
'sco': "Category:Wikipaedia soft redirectit categories",
'simple': "Category:Category redirects",
'sh': u"Kategorija:Preusmjerene kategorije Wikipedije",
'vi': u"Thể loại:Thể loại đổi hướng",
'zh': u"Category:已重定向的分类",
'ro': 'Categorie:Categorii de redirecționare',
}
# Category that contains non-empty redirected category pages
self.tiny_cat_redirect_cat = 'Q8099903'
self.move_comment = 'category_redirect-change-category'
self.redir_comment = 'category_redirect-add-template'
self.dbl_redir_comment = 'category_redirect-fix-double'
self.maint_comment = 'category_redirect-comment'
self.edit_request_text = i18n.twtranslate(
self.site, 'category_redirect-edit-request') + u'\n~~~~'
self.edit_request_item = i18n.twtranslate(
self.site, 'category_redirect-edit-request-item')
def get_cat(self):
"""Specify the category page."""
if self.getOption('tiny'):
self.cat = self.site.page_from_repository(
self.tiny_cat_redirect_cat)
else:
cat_title = pywikibot.translate(self.site, self.cat_redirect_cat)
if cat_title:
self.cat = pywikibot.Category(pywikibot.Link(cat_title,
self.site))
return self.cat is not None
def move_contents(self, oldCatTitle, newCatTitle, editSummary):
"""The worker function that moves pages out of oldCat into newCat."""
while True:
try:
oldCat = pywikibot.Category(self.site,
self.catprefix + oldCatTitle)
newCat = pywikibot.Category(self.site,
self.catprefix + newCatTitle)
param = {
'oldCatLink': oldCat.title(),
'oldCatTitle': oldCatTitle,
'newCatLink': newCat.title(),
'newCatTitle': newCatTitle,
}
summary = editSummary % param
# Move articles
found, moved = 0, 0
for article in oldCat.members():
found += 1
changed = article.change_category(oldCat, newCat,
summary=summary)
if changed:
moved += 1
# pass 2: look for template doc pages
for item in pywikibot.data.api.ListGenerator(
"categorymembers", cmtitle=oldCat.title(),
cmprop="title|sortkey", cmnamespace="10",
cmlimit="max"):
doc = pywikibot.Page(pywikibot.Link(item['title'] +
"/doc", self.site))
try:
doc.get()
except pywikibot.Error:
continue
changed = doc.change_category(oldCat, newCat,
summary=summary)
if changed:
moved += 1
if found:
pywikibot.output(u"%s: %s found, %s moved"
% (oldCat.title(), found, moved))
return (found, moved)
except pywikibot.ServerError:
pywikibot.output(u"Server error: retrying in 5 seconds...")
time.sleep(5)
continue
except KeyboardInterrupt:
raise
except:
return (None, None)
def readyToEdit(self, cat):
"""Return True if cat not edited during cooldown period, else False."""
today = pywikibot.Timestamp.now()
deadline = today + timedelta(days=-self.cooldown)
if cat.editTime() is None:
raise RuntimeError
return (deadline > cat.editTime())
def get_log_text(self):
"""Rotate log text and return the most recent text."""
LOG_SIZE = 7 # Number of items to keep in active log
try:
log_text = self.log_page.get()
except pywikibot.NoPage:
log_text = u""
log_items = {}
header = None
for line in log_text.splitlines():
if line.startswith("==") and line.endswith("=="):
header = line[2:-2].strip()
if header is not None:
log_items.setdefault(header, [])
log_items[header].append(line)
if len(log_items) < LOG_SIZE:
return log_text
# sort by keys and keep the first (LOG_SIZE-1) values
keep = [text for (key, text) in
sorted(log_items.items(), reverse=True)[:LOG_SIZE - 1]]
log_text = "\n".join("\n".join(line for line in text) for text in keep)
# get permalink to older logs
history = list(self.log_page.revisions(total=LOG_SIZE))
# get the id of the newest log being archived
rotate_revid = history[-1].revid
# append permalink
log_text += ("\n\n'''[%s Older logs]'''"
% self.log_page.permalink(oldid=rotate_revid))
return log_text
def check_hard_redirect(self):
"""
Check for hard-redirected categories.
Check categories that are not already marked with an appropriate
        soft-redirect template.
"""
pywikibot.output("Checking hard-redirect category pages.")
comment = i18n.twtranslate(self.site, self.redir_comment)
# generator yields all hard redirect pages in namespace 14
for page in pagegenerators.PreloadingGenerator(
self.site.allpages(namespace=14, filterredir=True),
groupsize=250):
if page.isCategoryRedirect():
# this is already a soft-redirect, so skip it (for now)
continue
try:
target = page.getRedirectTarget()
except pywikibot.CircularRedirect:
target = page
self.problems.append(u"# %s is a self-linked redirect"
% page.title(asLink=True, textlink=True))
except RuntimeError:
# race condition: someone else removed the redirect while we
# were checking for it
continue
if target.is_categorypage():
# this is a hard-redirect to a category page
newtext = (u"{{%(template)s|%(cat)s}}"
% {'cat': target.title(withNamespace=False),
'template': self.template_list[0]})
try:
page.text = newtext
page.save(comment)
self.log_text.append(u"* Added {{tl|%s}} to %s"
% (self.template_list[0],
page.title(asLink=True,
textlink=True)))
except pywikibot.Error:
self.log_text.append(u"* Failed to add {{tl|%s}} to %s"
% (self.template_list[0],
page.title(asLink=True,
textlink=True)))
else:
self.problems.append(u"# %s is a hard redirect to %s"
% (page.title(asLink=True, textlink=True),
target.title(asLink=True, textlink=True)))
def run(self):
"""Run the bot."""
# validate L10N
self.template_list = self.site.category_redirects()
if not self.template_list:
pywikibot.warning(u"No redirect templates defined for %s"
% self.site)
return
if not self.get_cat():
pywikibot.warning(u"No redirect category found for %s" % self.site)
return
user = self.site.user() # invokes login()
newredirs = []
l = time.localtime()
today = "%04d-%02d-%02d" % l[:3]
edit_request_page = pywikibot.Page(
self.site, u"User:%s/category edit requests" % user)
datafile = pywikibot.config.datafilepath("%s-catmovebot-data"
% self.site.dbName())
try:
with open(datafile, "rb") as inp:
record = cPickle.load(inp)
except IOError:
record = {}
if record:
with open(datafile + ".bak", "wb") as f:
cPickle.dump(record, f, protocol=config.pickle_protocol)
# regex to match soft category redirects
# TODO: enhance and use textlib._MultiTemplateMatchBuilder
# note that any templates containing optional "category:" are
# incorrect and will be fixed by the bot
template_regex = re.compile(
r"""{{\s*(?:%(prefix)s\s*:\s*)? # optional "template:"
(?:%(template)s)\s*\| # catredir template name
(\s*%(catns)s\s*:\s*)? # optional "category:"
([^|}]+) # redirect target cat
(?:\|[^|}]*)*}} # optional arguments 2+, ignored
""" % {'prefix': self.site.namespace(10).lower(),
'template': "|".join(item.replace(" ", "[ _]+")
for item in self.template_list),
'catns': self.site.namespace(14)},
re.I | re.X)
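        # Illustrative match (hypothetical template name): if the wiki's first
        # redirect template is "Category redirect", this regex matches
        # "{{Category redirect|Category:New name}}" and captures the redirect
        # target "New name" in group 2.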
self.check_hard_redirect()
comment = i18n.twtranslate(self.site, self.move_comment)
counts = {}
nonemptypages = []
redircat = self.cat
pywikibot.output(u"\nChecking %d category redirect pages"
% redircat.categoryinfo['subcats'])
catpages = set()
for cat in redircat.subcategories():
catpages.add(cat)
cat_title = cat.title(withNamespace=False)
if "category redirect" in cat_title:
self.log_text.append(u"* Ignoring %s"
% cat.title(asLink=True, textlink=True))
continue
if hasattr(cat, "_catinfo"):
# skip empty categories that don't return a "categoryinfo" key
catdata = cat.categoryinfo
if "size" in catdata and int(catdata['size']):
# save those categories that have contents
nonemptypages.append(cat)
if cat_title not in record:
# make sure every redirect has a record entry
record[cat_title] = {today: None}
try:
newredirs.append("*# %s -> %s"
% (cat.title(asLink=True, textlink=True),
cat.getCategoryRedirectTarget().title(
asLink=True, textlink=True)))
except pywikibot.Error:
pass
# do a null edit on cat
try:
cat.save()
except:
pass
# delete record entries for non-existent categories
        for cat_name in list(record.keys()):  # snapshot keys; entries are deleted while iterating
if pywikibot.Category(self.site,
self.catprefix + cat_name) not in catpages:
del record[cat_name]
pywikibot.output(u"\nMoving pages out of %s redirected categories."
% len(nonemptypages))
for cat in pagegenerators.PreloadingGenerator(nonemptypages):
try:
if not cat.isCategoryRedirect():
self.log_text.append(u"* False positive: %s"
% cat.title(asLink=True,
textlink=True))
continue
except pywikibot.Error:
self.log_text.append(u"* Could not load %s; ignoring"
% cat.title(asLink=True, textlink=True))
continue
cat_title = cat.title(withNamespace=False)
if not self.readyToEdit(cat):
counts[cat_title] = None
self.log_text.append(u"* Skipping %s; in cooldown period."
% cat.title(asLink=True, textlink=True))
continue
dest = cat.getCategoryRedirectTarget()
if not dest.exists():
self.problems.append("# %s redirects to %s"
% (cat.title(asLink=True, textlink=True),
dest.title(asLink=True, textlink=True)))
# do a null edit on cat to update any special redirect
# categories this wiki might maintain
try:
cat.save()
except:
pass
continue
if dest.isCategoryRedirect():
double = dest.getCategoryRedirectTarget()
if double == dest or double == cat:
self.log_text.append(u"* Redirect loop from %s"
% dest.title(asLink=True,
textlink=True))
# do a null edit on cat
try:
cat.save()
except:
pass
else:
self.log_text.append(
u"* Fixed double-redirect: %s -> %s -> %s"
% (cat.title(asLink=True, textlink=True),
dest.title(asLink=True, textlink=True),
double.title(asLink=True, textlink=True)))
oldtext = cat.text
# remove the old redirect from the old text,
# leaving behind any non-redirect text
oldtext = template_regex.sub("", oldtext)
newtext = (u"{{%(redirtemp)s|%(ncat)s}}"
% {'redirtemp': self.template_list[0],
'ncat': double.title(withNamespace=False)})
newtext = newtext + oldtext.strip()
try:
cat.text = newtext
cat.save(i18n.twtranslate(self.site,
self.dbl_redir_comment))
except pywikibot.Error as e:
self.log_text.append("** Failed: %s" % e)
continue
found, moved = self.move_contents(cat_title,
dest.title(withNamespace=False),
editSummary=comment)
if found is None:
self.log_text.append(
u"* [[:%s%s]]: error in move_contents"
% (self.catprefix, cat_title))
elif found:
record[cat_title][today] = found
self.log_text.append(
u"* [[:%s%s]]: %d found, %d moved"
% (self.catprefix, cat_title, found, moved))
counts[cat_title] = found
# do a null edit on cat
try:
cat.save()
except:
pass
with open(datafile, "wb") as f:
cPickle.dump(record, f, protocol=config.pickle_protocol)
self.log_text.sort()
self.problems.sort()
newredirs.sort()
comment = i18n.twtranslate(self.site, self.maint_comment)
self.log_page.text = (u"\n== %i-%02i-%02iT%02i:%02i:%02iZ ==\n"
% time.gmtime()[:6] +
u'\n'.join(self.log_text) +
u'\n* New redirects since last report:\n' +
u'\n'.join(newredirs) +
u'\n' + u'\n'.join(self.problems) +
u'\n' + self.get_log_text())
self.log_page.save(comment)
if self.edit_requests:
edit_request_page.text = (self.edit_request_text
% {'itemlist': u"\n" + u"\n".join(
(self.edit_request_item % item)
for item in self.edit_requests)})
edit_request_page.save(comment)
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
options = {}
for arg in pywikibot.handle_args(args):
if arg.startswith('-delay:'):
pos = arg.find(':')
options[arg[1:pos]] = int(arg[pos + 1:])
else:
            # generic handling of boolean options
options[arg[1:]] = True
bot = CategoryRedirectBot(**options)
bot.run()
if __name__ == "__main__":
main()
| {
"content_hash": "e7fe7078d366bad262c93fcc3f3f4f42",
"timestamp": "",
"source": "github",
"line_count": 479,
"max_line_length": 84,
"avg_line_length": 42.937369519832984,
"alnum_prop": 0.4948218019156902,
"repo_name": "npdoty/pywikibot",
"id": "a401fd9d499f3c5c4370fdfc4b1f84ca8aea5cc2",
"size": "20750",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/category_redirect.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "HTML",
"bytes": "1365"
},
{
"name": "Python",
"bytes": "4485564"
}
],
"symlink_target": ""
} |
from django.db import models, migrations
import evennia.accounts.manager
import django.core.validators
class Migration(migrations.Migration):
dependencies = [("accounts", "0003_auto_20150209_2234")]
operations = [
migrations.DeleteModel(name="DefaultGuest"),
migrations.DeleteModel(name="DefaultAccount"),
migrations.AlterModelManagers(
name="accountdb", managers=[("objects", evennia.accounts.manager.AccountDBManager())]
),
migrations.AlterField(
model_name="accountdb",
name="email",
field=models.EmailField(max_length=254, verbose_name="email address", blank=True),
),
migrations.AlterField(
model_name="accountdb",
name="groups",
field=models.ManyToManyField(
related_query_name="user",
related_name="user_set",
to="auth.Group",
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
verbose_name="groups",
),
),
migrations.AlterField(
model_name="accountdb",
name="last_login",
field=models.DateTimeField(null=True, verbose_name="last login", blank=True),
),
migrations.AlterField(
model_name="accountdb",
name="username",
field=models.CharField(
error_messages={"unique": "A user with that username already exists."},
max_length=30,
validators=[
django.core.validators.RegexValidator(
"^[\\w.@+-]+$",
"Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.",
"invalid",
)
],
help_text="Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.",
unique=True,
verbose_name="username",
),
),
]
| {
"content_hash": "93504dbe79163780b731057bbb85c0cc",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 126,
"avg_line_length": 38.05357142857143,
"alnum_prop": 0.5344908493664946,
"repo_name": "jamesbeebop/evennia",
"id": "51217a83e5c2c148274cf1203d94761b7cb6d2d5",
"size": "2157",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "evennia/accounts/migrations/0004_auto_20150403_2339.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19127"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "HTML",
"bytes": "13558"
},
{
"name": "JavaScript",
"bytes": "24398"
},
{
"name": "Python",
"bytes": "2143170"
}
],
"symlink_target": ""
} |
__author__ = 'olesya'
import unittest
from models.User import User
from DAL import DAL
class UserDetails(unittest.TestCase):
def test_get_name(self):
d = DAL()
self.assertEqual(d.get_user_details("user_name"),"oles_ka")
def test_set_name(self):
d = DAL()
us_list = d.set_user_details("itamar", "[email protected]", "123")
self.assertEqual(us_list.pop().user_name ,"itamar")
def main():
unittest.main()
if __name__ == "__main__":
main() | {
"content_hash": "2a02d217726f103b2f32f0d1167634fe",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 67,
"avg_line_length": 23.181818181818183,
"alnum_prop": 0.5764705882352941,
"repo_name": "shaipeer/ATTENDER",
"id": "64b2b9c1d5f3803e57e23eac7111beeef8e52d11",
"size": "510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/attender-mobile/tests/DBTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "14507"
},
{
"name": "Python",
"bytes": "12376"
}
],
"symlink_target": ""
} |
"""
Progress handler.
The old progress functions need to be deprecated; ProgressIter and ProgChunks
are pretty much the only useful things here.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import math
import datetime
from functools import partial
from utool import util_logging
from utool import util_inject
from utool import util_arg
from utool import util_time
from utool import util_iter
from utool import util_cplat
from six.moves import range, zip
import collections
import six # NOQA
print, rrr, profile = util_inject.inject2(__name__)
default_timer = util_time.default_timer
SILENT = util_arg.SILENT
VERBOSE = util_arg.VERBOSE
VALID_PROGRESS_TYPES = ['none', 'dots', 'fmtstr', 'simple']
AGGROFLUSH = util_arg.get_argflag('--aggroflush')
PROGGRESS_BACKSPACE = not util_arg.get_argflag(('--screen', '--progress-backspace'))
NO_PROGRESS = util_arg.get_argflag(('--no-progress', '--noprogress'))
FORCE_ALL_PROGRESS = util_arg.get_argflag(('--force-all-progress',))
#('--screen' not in sys.argv and '--progress-backspace' not in sys.argv)
DEBUG_FREQ_ADJUST = util_arg.get_argflag('--debug-adjust-freq')
def test_progress():
"""
CommandLine:
python -m utool.util_progress --test-test_progress
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_progress import * # NOQA
>>> test_progress()
"""
import utool as ut
#import time
#ut.rrrr()
print('_________________')
#numiter = 50
#sleeptime = 1E-4
#sleeptime2 = 1E-2
numiter = 20
sleeptime = 1E-7
sleeptime2 = 1E-7
with ut.Timer():
for x in ut.ProgressIter(range(0, numiter), freq=8, adjust=True):
time.sleep(sleeptime)
print('_________________')
numiter = 50
sleeptime = 1E-4
with ut.Timer():
for x in ut.ProgressIter(range(0, numiter), freq=8, adjust=True):
time.sleep(sleeptime)
print('_________________')
    print('No frequency run:')
with ut.Timer():
for x in range(0, numiter):
time.sleep(sleeptime)
print('_________________')
numiter = 500
sleeptime = 8E-7
with ut.Timer():
for x in ut.ProgressIter(range(0, numiter), freq=8, adjust=True):
time.sleep(sleeptime)
print('_________________')
with ut.Timer():
for x in ut.ProgressIter(range(0, numiter), freq=200):
time.sleep(sleeptime)
print('_________________')
    print('No frequency run:')
with ut.Timer():
for x in range(0, numiter):
time.sleep(sleeptime)
print('_________________')
# Test nested iter
# progiter1 = ut.ProgressIter(range(0, 10), lbl='prog1', freq=1, adjust=False)
# for count1 in progiter1:
# progiter_partials = progiter1.get_subindexers(1)
# progiter2 = progiter_partials[0](range(0, 7), lbl='sub_prog1', freq=1, adjust=False)
# for count2 in progiter2:
# pass
for x in ut.ProgressIter(zip(range(10), range(10)), freq=8, adjust=True):
time.sleep(sleeptime)
#progiter3 = progiter_partials[1](range(0, 3), lbl='sub_prog2', freq=1, adjust=False)
#for count3 in progiter3:
# pass
print('Double backspace progress 1')
progiter1 = ut.ProgressIter(range(0, 10), lbl='prog1', freq=1, adjust=False, backspace=False)
for count1 in progiter1:
progiter2 = ut.ProgressIter(range(0, 10), lbl='prog2', freq=1, adjust=False, backspace=True)
for count2 in progiter2:
time.sleep(sleeptime2)
print('Double backspace progress 2')
progiter1 = ut.ProgressIter(range(0, 10), lbl='prog1', freq=1, adjust=False, backspace=True)
for count1 in progiter1:
progiter2 = ut.ProgressIter(range(0, 10), lbl='prog2', freq=1, adjust=False, backspace=True)
for count2 in progiter2:
time.sleep(sleeptime2)
def get_num_chunks(length, chunksize):
r"""
Returns the number of chunks that a list will be split into given a
chunksize.
Args:
length (int):
chunksize (int):
Returns:
int: n_chunks
CommandLine:
python -m utool.util_progress --exec-get_num_chunks:0
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_progress import * # NOQA
>>> length = 2000
>>> chunksize = 256
>>> n_chunks = get_num_chunks(length, chunksize)
>>> result = ('n_chunks = %s' % (six.text_type(n_chunks),))
>>> print(result)
n_chunks = 8
"""
n_chunks = int(math.ceil(length / chunksize))
return n_chunks
def ProgChunks(list_, chunksize, nInput=None, **kwargs):
"""
    Yields the input in chunks and reports progress
Progress version of ut.ichunks
Args:
list_ (list):
chunksize (?):
nInput (None): (default = None)
Kwargs:
length, freq
Returns:
ProgressIter: progiter_
CommandLine:
python -m utool.util_progress ProgChunks --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_progress import * # NOQA
>>> import utool as ut
>>> list_ = range(100)
>>> chunksize = 10
>>> nInput = None
>>> progiter_ = ProgChunks(list_, chunksize, nInput)
>>> iter_ = iter(progiter_)
>>> chunk = six.next(iter_)
>>> assert len(chunk) == 10
>>> rest = ut.flatten(list(progiter_))
>>> assert len(rest) == 90
"""
if nInput is None:
nInput = len(list_)
n_chunks = get_num_chunks(nInput, chunksize)
kwargs['length'] = n_chunks
if 'freq' not in kwargs:
kwargs['freq'] = 1
chunk_iter = util_iter.ichunks(list_, chunksize)
progiter_ = ProgressIter(chunk_iter, **kwargs)
return progiter_
def ProgPartial(*args, **kwargs):
return partial(ProgressIter, *args, **kwargs)
class ProgressIter(object):
"""
Wraps a for loop with progress reporting
lbl='Progress: ', length=0, flushfreq=4, startafter=-1, start=True,
repl=False, approx=False, disable=False, writefreq=1, with_time=False,
backspace=True, pad_stdout=False, wfreq=None, ffreq=None, freq=None,
total=None, num=None, with_totaltime=None
    References:
https://github.com/verigak/progress/blob/master/progress/__init__.py
Args:
iterable (): iterable normally passed to for loop
lbl (str): progress label
length (int):
flushfreq (int):
startafter (int):
start (bool):
repl (bool):
approx (bool):
enabled (bool):
writefreq (int):
with_totaltime (bool):
backspace (bool):
pad_stdout (bool):
        autoadjust (bool): automatically adjust the reporting frequency if True (default False)
wfreq (None): alias for write_freq
ffreq (None): alias for flush_freq
total (None): alias for length
num (None): alias for length
Timeit::
import utool as ut
setup = ut.codeblock(
'''
import utool as ut
from six.moves import range, zip
import time
def time_append(size):
start_time = time.time()
last_time = start_time
list2 = []
for x in range(size):
now_time = time.time()
between = now_time - last_time
last_time = now_time
list2.append(between)
def time_assign(size):
start_time = time.time()
last_time = start_time
list1 = ut.alloc_nones(size)
for x in range(size):
now_time = time.time()
between = now_time - last_time
last_time = now_time
list1[x] = between
def time_baseline(size):
start_time = time.time()
last_time = start_time
for x in range(size):
now_time = time.time()
between = now_time - last_time
last_time = now_time
def time_null(size):
for x in range(size):
pass
''')
input_sizes = [2 ** count for count in range(7, 12)]
stmt_list = ['time_assign', 'time_append', 'time_baseline', 'time_null']
input_sizes=[100, 1000, 10000]
ut.timeit_grid(stmt_list, setup, input_sizes=input_sizes, show=True)
CommandLine:
python -m utool.util_progress --test-ProgressIter
python -m utool.util_progress --test-ProgressIter:0
python -m utool.util_progress --test-ProgressIter:1
python -m utool.util_progress --test-ProgressIter:2
python -m utool.util_progress --test-ProgressIter:3
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> from six.moves import range
>>> num = 1000
>>> num2 = 10001
>>> results1 = [x for x in ut.ProgressIter(range(num), wfreq=10, adjust=True)]
>>> results4 = [x for x in ut.ProgressIter(range(num), wfreq=1, adjust=True)]
>>> results2 = [x for x in range(num)]
>>> results3 = [x for x in ut.progiter((y + 1 for y in range(num2)),
>>> ntotal=num2, wfreq=1000,
>>> backspace=True, adjust=True)]
>>> assert results1 == results2
Example1:
>>> # DISABLE_DOCTEST
>>> # SLOW_DOCTEST
>>> import utool as ut
>>> from six.moves import range
>>> num2 = 10001
>>> progiter = ut.ProgressIter(range(num2), lbl='testing primes',
>>> report_unit='seconds', freq=1,
>>> time_thresh=.1, adjust=True)
>>> [ut.get_nth_prime_bruteforce(29) for x in progiter]
Example2:
>>> # DISABLE_DOCTEST
>>> # SLOW_DOCTEST
>>> import utool as ut
>>> from six.moves import range
>>> num2 = 100001
>>> progiter = ut.ProgressIter(range(num2), lbl='testing primes',
>>> report_unit='seconds', freq=1,
>>> time_thresh=3, adjust=True, bs=True)
>>> [ut.get_nth_prime_bruteforce(29) for x in progiter]
Example3:
>>> # DISABLE_DOCTEST
>>> # SLOW_DOCTEST
>>> import utool as ut
>>> from six.moves import range
>>> import time
>>> crazy_time_list = [.001, .01, .0001] * 1000
>>> crazy_time_iter = (time.sleep(x) for x in crazy_time_list)
>>> progiter = ut.ProgressIter(crazy_time_iter, lbl='crazy times', length=len(crazy_time_list), freq=10)
>>> list(progiter)
"""
def __init__(self, iterable=None, *args, **kwargs):
self.iterable = iterable
if len(args) < 2 and 'nTotal' not in kwargs and 'length' not in kwargs:
try:
length = len(iterable)
kwargs['length'] = length
except Exception:
pass
self.use_rate = kwargs.pop('use_rate', True)
self.use_rate = True # Force
self.lbl = kwargs.get('lbl', 'lbl')
self.lbl = kwargs.get('label', self.lbl)
self.length = kwargs.get('nTotal', kwargs.get('length', 0))
#self.backspace = kwargs.get('backspace', True)
self.backspace = kwargs.get('backspace', kwargs.get('bs', False))
self.freq = kwargs.get('freq', 1)
self.invert_rate = kwargs.get('invert_rate', False)
self.auto_invert_rate = kwargs.get('auto_invert_rate', True)
self.verbose = kwargs.pop('verbose', True) # VERBOSE
#self.report_unit = kwargs.get('report_unit', 'minutes')
self.enabled = kwargs.get('enabled', True)
self.report_unit = kwargs.get('report_unit', 'seconds')
# autoadjust frequency of reporting
self.autoadjust = kwargs.get('autoadjust', kwargs.get('adjust', False))
self.time_thresh = kwargs.pop('time_thresh', None)
self.prog_hook = kwargs.pop('prog_hook', None)
self.prehack = kwargs.pop('prehack', None)
self.freq_est_strat = kwargs.pop('freq_est', 'between')
if 'separate' in kwargs:
print('WARNING separate no longer supported by ProgIter')
# FIXME: get these subinder things working
# ~/code/guitool/guitool/guitool_components.py
#self.substep_min = kwargs.pop('substep_min', 0)
#self.substep_size = kwargs.pop('substep_size', 1)
#self.level = kwargs.pop('level', 0)
self.parent_index = kwargs.pop('parent_index', 0)
self.parent_length = kwargs.pop('parent_length', 1)
self.parent_offset = self.parent_index * self.length
self._cursor_at_newline = True
# Window sizes for estimates
self.est_window = kwargs.pop('est_window', 64)
#self.start_offset = self.substep_min
self.stream = kwargs.pop('stream', None)
self.extra = ''
if FORCE_ALL_PROGRESS:
self.freq = 1
self.autoadjust = False
if self.prog_hook is not None:
# Sets the label of a progress bar to the ProgIter label
self.prog_hook.register_progiter(self)
#self.time_thresh_growth = kwargs.pop('time_thresh_growth', 1.0)
self.time_thresh_growth = kwargs.pop('time_thresh_growth', 1.0)
self.with_totaltime = False
if self.freq is None:
self.freq = 1
if self.use_rate:
            # Hacky, so hacky. This needs major cleanup.
            # Saving args and kwargs so the log_progress call can be deferred;
            # not sure where it is called and don't want to break things.
self.args = args
self.kwargs = kwargs
self.mark = None
self.end = None
#else:
# self.mark, self.end = log_progress(*args, **kwargs)
self.count = 0
def __call__(self, iterable):
self.iterable = iterable
return self
def __iter__(self):
if not self.enabled:
return iter(self.iterable)
if NO_PROGRESS:
# IF PROGRESS IS TURNED OFF
msg = 'Iterating ' + self.lbl + ' with no progress'
if self.verbose:
print(msg)
#with ut.Timer(msg):
return iter(self.iterable)
else:
#if self.use_rate:
# STANDARD CALL CASE
return self.iter_rate()
#else:
# return self.iter_without_rate()
#def get_subindexers(prog_iter, num_substeps):
# # FIXME and make this a method of progiter
# step_min = (((prog_iter.count - 1) / prog_iter.length) *
# prog_iter.substep_size + prog_iter.substep_min)
# step_size = (1.0 / prog_iter.length) * prog_iter.substep_size
# substep_size = step_size / num_substeps
# substep_min_list = [(step * substep_size) + step_min
# for step in range(num_substeps)]
# #level = prog_iter.level + 1
# DEBUG = False
# if DEBUG:
# with ut.Indenter(' ' * 4 * prog_iter.level):
# print('\n')
# print('+____<NEW SUBSTEPS>____')
# print('Making %d substeps for prog_iter.lbl = %s' % (
# num_substeps, prog_iter.lbl,))
# print(' * step_min = %.2f' % (step_min,))
# print(' * step_size = %.2f' % (step_size,))
# print(' * substep_size = %.2f' % (substep_size,))
# print(' * substep_min_list = %r' % (substep_min_list,))
# print(r'L____</NEW SUBSTEPS>____')
# print('\n')
# subprog_partial_list = [
# partial(ProgressIter,
# parent_length=prog_iter.length * num_substeps,
# parent_index=(prog_iter.count - 1) + (prog_iter.length * step))
# for step in range(num_substeps)]
# return subprog_partial_list
#def build_msg_fmtstr_time(self, lbl, invert_rate, backspace):
# with_wall = True
# tzname = time.tzname[0]
# if util_cplat.WIN32:
# tzname = tzname.replace('Eastern Standard Time', 'EST')
# msg_fmtstr_time = ''.join((
# 'rate=%3.3f seconds/iter, ' if invert_rate else 'rate=%4.2f Hz,',
# ' etr=%s,',
# ' ellapsed=%s,',
# ' wall=%s ' + tzname if with_wall else '',
# #'' if backspace else '\n',
# '\n' if backspace else '',
# ))
# return msg_fmtstr_time
@staticmethod
def build_msg_fmtstr_head_cols(length, lbl):
nTotal_ = '?' if length == 0 else six.text_type(length)
msg_head_columns = ['', lbl, ' {count:4d}/', nTotal_ , '... ']
return msg_head_columns
@staticmethod
def build_msg_fmtstr2(lbl, length, invert_rate, backspace):
r"""
Args:
lbl (str):
invert_rate (bool):
backspace (bool):
Returns:
str: msg_fmtstr_time
CommandLine:
python -m utool.util_progress --exec-ProgressIter.build_msg_fmtstr2
Setup:
>>> from utool.util_progress import * # NOQA
>>> lbl = 'foo'
>>> invert_rate = True
>>> backspace = False
>>> length = None
Example:
>>> # DISABLE_DOCTEST
>>> msg_fmtstr_time = ProgressIter.build_msg_fmtstr2(lbl, length, invert_rate, backspace)
>>> result = ('%s' % (ut.repr2(msg_fmtstr_time),))
>>> print(result)
"""
with_wall = True
tzname = time.tzname[0]
if util_cplat.WIN32:
tzname = tzname.replace('Eastern Standard Time', 'EST')
# ansii/vt100 code for clearline
# CLEARLINE_L2 = '\33[2K'
# BEFORE_PROG = '\r\033[?25l'
CLEARLINE_EL0 = '\33[0K' # clear line to right
# CLEARLINE_EL1 = '\33[1K' # clear line to left
CLEARLINE_EL2 = '\33[2K' # clear line
# DECTCEM_HIDE = '\033[?25l' # hide cursor
CLEAR_BEFORE = '\r' + CLEARLINE_EL2 # + DECTCEM_HIDE
        # FIXME: hiding the cursor persists if the program crashes
CLEAR_AFTER = CLEARLINE_EL0
msg_head = ProgressIter.build_msg_fmtstr_head_cols(length, lbl)
if backspace:
msg_head = [CLEAR_BEFORE] + msg_head
msg_tail = [
(
'rate={rate:4.2f} sec/iter, '
if invert_rate else
'rate={rate:4.2f} Hz,'
),
(
''
if length == 0 else
' etr={etr},'
),
' ellapsed={ellapsed},',
(
' wall={wall} ' + tzname
if with_wall
else ''
),
            # backslash-r is a carriage return and undoes all previous output on
# a written line
(' {extra}'),
CLEAR_AFTER if backspace else '\n',
]
msg_fmtstr_time = ''.join((msg_head + msg_tail))
return msg_fmtstr_time
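    # Illustrative result (assumed inputs lbl='foo', length=100, invert_rate=False,
    # backspace=False; the trailing timezone name depends on the host):
    #   'foo {count:4d}/100... rate={rate:4.2f} Hz, etr={etr}, ellapsed={ellapsed}, wall={wall} EST {extra}\n'
    # display_message() later fills these fields in via str.format.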
def iter_rate(self):
"""
pun not intended
# TODO: record iteration times for analysis
# TODO Incorporate this better
# FIXME; pad_stdout into subfunctions
import dis
dis.dis(ut.ProgressIter.iter_rate)
"""
#class IterState(object):
# def __init__(state):
# state.freq = 1
# state.freq = 1
# pass
adjust = self.autoadjust
self._cursor_at_newline = not self.backspace
# SETUP VARIABLES
        # HACK: reacquire logging print funcs in case they have changed
if self.stream is None:
self.write = util_logging._utool_write()
self.flush = util_logging._utool_flush()
else:
self.write = lambda msg: self.stream.write(msg) # NOQA
self.flush = lambda: self.stream.flush() # NOQA
length = self.length * self.parent_length # hack
freq = self.freq
self.count = 0
between_count = 0
last_count = 0
# how long iterations should be before a flush
# (used for freq adjustment)
time_thresh = (self._get_timethresh_heuristics()
if self.time_thresh is None else
self.time_thresh)
time_thresh_growth = self.time_thresh_growth
if time_thresh_growth > 1:
# time_thresh_growth is specified for very long processes
# print out the starting timestamp in that case
timestamp = time.strftime('%Y-%m-%d %H:%M:%S') + ' ' + time.tzname[0]
print('Start progress lbl= %s at %s' % (self.lbl, timestamp,))
#time_thresh = 0.5
max_between_time = -1.0
max_between_count = -1.0 # why is this different? # because frequency varies
# TODO: should be kept as a statistic that uses the max time from a
# list of iterations divided by the size of that list that will account
# for buffering issues
iters_per_second = 0
self.iters_per_second = float('nan')
self.est_seconds_left = 0
self.total_seconds = 0
# Write initial message
#force_newlines = not self.backspace
start_msg_fmt = ''.join(self.build_msg_fmtstr_head_cols(length, self.lbl))
self.msg_fmtstr = self.build_msg_fmtstr2(self.lbl, length,
self.invert_rate,
self.backspace)
try:
util_logging._utool_flush()()
except IOError as ex:
# There is some weird error when doing progress in IPython notebook
if util_arg.VERBOSE:
print('IOError flushing %s' % (ex,))
if not self.prehack:
if self.backspace:
self.display_message()
elif self.verbose:
start_msg = start_msg_fmt.format(count=self.parent_offset)
util_logging._utool_write()(start_msg + '\n')
self._cursor_at_newline = not self.backspace
try:
util_logging._utool_flush()()
except IOError as ex:
# There is some weird error when doing progress in IPython notebook
if util_arg.VERBOSE:
print('IOError flushing %s' % (ex,))
else:
self._cursor_at_newline = True
if self.prog_hook is not None:
self.prog_hook(self.count, length)
# TODO: on windows is time.clock better?
# http://exnumerus.blogspot.com/2011/02/how-to-quickly-plot-multiple-line.html
start_time = default_timer()
last_time = start_time
start = 1 + self.parent_offset
if self.freq_est_strat == 'between':
FREQ_EST = 0
elif self.freq_est_strat == 'absolute':
FREQ_EST = 1
else:
FREQ_EST = 1
USE_RECORD = True
# use last 64 times to compute a more stable average rate
measure_between_time = collections.deque([], maxlen=self.est_window)
# Wrap the for loop with a generator
for self.count, item in enumerate(self.iterable, start=start):
if self.prehack:
                # hack to print before yielding
# so much for efficiency
self.set_extra((self.lbl + '=' + self.prehack) % item)
self.display_message()
self.ensure_newline()
# GENERATE
yield item
if self.prehack or (self.count) % freq == 0:
now_time = default_timer()
between_time = (now_time - last_time)
between_count = self.count - last_count
total_seconds = (now_time - start_time)
self.total_seconds = total_seconds
if FREQ_EST == 0:
if USE_RECORD:
measure_between_time.append(between_count / (float(between_time) + 1E-9))
iters_per_second = sum(measure_between_time) / len(measure_between_time)
else:
iters_per_second = between_count / (float(between_time) + 1E-9)
elif FREQ_EST == 1:
iters_per_second = (now_time - start_time) / self.count
self.iters_per_second = iters_per_second
# If the future is known
if length is None:
est_seconds_left = -1
else:
iters_left = length - self.count
est_seconds_left = iters_left / (iters_per_second + 1E-9)
self.est_seconds_left = est_seconds_left
# /future
last_count = self.count
last_time = now_time
# ADJUST FREQ IF NEEDED
# Adjust frequency if printing too quickly
                # so progress doesn't slow down the actual function
# TODO: better adjust algorithm
time_thresh *= time_thresh_growth
if adjust and (between_time < time_thresh or between_time > time_thresh * 2.0):
max_between_time = max(max(max_between_time, between_time),
1E-9)
max_between_count = max(max_between_count, between_count)
# If progress was uniform and all time estimates were
# perfect this would be the new freq to achieve time_thresh
new_freq = max(int(time_thresh * max_between_count /
max_between_time), 1)
if DEBUG_FREQ_ADJUST:
print('\n+---')
print('[prog] between_count = %r' % between_count)
print('[prog] between_time = %.8r' % between_time)
print('[prog] time_thresh = %r' % time_thresh)
print('[prog] max_between_count = %r' % max_between_count)
print('[prog] max_between_time = %.8r' % max_between_time)
                        print('[prog] Adjusting frequency from: %r' % freq)
                        print('[prog] Adjusting frequency to: %r' % new_freq)
print('L___')
# But things are not perfect. So, don't make drastic changes
max_freq_change_up = max(256, freq * 2)
max_freq_change_down = freq // 2
if (new_freq - freq) > max_freq_change_up:
freq += max_freq_change_up
elif (freq - new_freq) > max_freq_change_down:
freq -= max_freq_change_down
else:
freq = new_freq
if not self.prehack:
self.display_message()
# DO PROGRESS INFO
if self.prog_hook is not None:
# From the point of view of the progress iter, we are about
# to enter the body of a for loop. (But we may have
                    # executed the body implicitly in the yield... so it is
                    # ambiguous. In the second case 0 will be executed twice.)
self.prog_hook(self.count, length)
if self.prehack:
self.set_extra('')
# --- end of main loop
# cleanup
if (self.count) % freq != 0:
# If the final line of progress was not written in the loop, write
# it here
self.est_seconds_left = 0
self.total_seconds = (default_timer() - start_time)
self.display_message()
if self.prog_hook is not None:
# From the point of view of the progress iter, we are about to
# enter the body of a for loop. (But we may have executed the
            # body implicitly in the yield... so it is ambiguous. In the
            # second case 0 will be executed twice.)
self.prog_hook(self.count, length)
self.ensure_newline()
def display_message(self):
        # HACK to be more like the sklearn.externals ProgIter version
if self.verbose:
instant_invert_rate = self.iters_per_second < 0.1
if self.auto_invert_rate and self.invert_rate != instant_invert_rate:
self.invert_rate = instant_invert_rate
length = self.length * self.parent_length # hack
self.msg_fmtstr = self.build_msg_fmtstr2(self.lbl, length,
self.invert_rate,
self.backspace)
rate = 1.0 / (self.iters_per_second + 1E-9) if self.invert_rate else self.iters_per_second
msg = self.msg_fmtstr.format(
count=self.count,
rate=rate,
etr=six.text_type(datetime.timedelta(seconds=int(self.est_seconds_left))),
ellapsed=six.text_type(datetime.timedelta(seconds=int(self.total_seconds))),
wall=time.strftime('%H:%M'),
extra=self.extra
)
self.write(msg)
self._cursor_at_newline = not self.backspace
try:
self.flush()
except IOError as ex:
if util_arg.VERBOSE:
print('IOError flushing %s' % (ex,))
#print('self.flush = %r' % (self.flush,))
#import utool as ut
#ut.debug_logging_iostreams()
#ut.printex(ex)
#raise
pass
def set_extra(self, extra):
"""
specify a custom info appended to the end of the next message
TODO: come up with a better name and rename
"""
self.extra = extra
def ensure_newline(self):
"""
use before any custom printing when using the progress iter to ensure
your print statement starts on a new line instead of at the end of a
progress line
"""
DECTCEM_SHOW = '\033[?25h' # show cursor
AT_END = DECTCEM_SHOW + '\n'
if not self._cursor_at_newline:
self.write(AT_END)
self._cursor_at_newline = True
def _get_timethresh_heuristics(self):
"""
        Reasonably decent heuristics for how much time to wait before
updating progress.
"""
if self.length > 1E5:
time_thresh = 2.5
elif self.length > 1E4:
time_thresh = 2.0
elif self.length > 1E3:
time_thresh = 1.0
else:
time_thresh = 0.5
return time_thresh
progiter = ProgressIter
class ProgIter(ProgressIter):
""" Thin wrapper with better arg positions """
def __init__(self, iterable, lbl='Prog', adjust=True, freq=1, bs=True,
**kwargs):
import utool as ut
super(ut.ProgIter, self).__init__(iterable, lbl=lbl, adjust=adjust,
freq=freq, bs=bs, **kwargs)
def progress_str(max_val, lbl='Progress: ', repl=False, approx=False,
backspace=PROGGRESS_BACKSPACE):
r""" makes format string that prints progress: %Xd/MAX_VAL with backspaces
NOTE: \r can be used instead of backspaces. This function is not very
relevant because of that.
"""
# string that displays max value
max_str = six.text_type(max_val)
if approx:
# denote approximate maximum
max_str = '~' + max_str
dnumstr = six.text_type(len(max_str))
# string that displays current progress
cur_str = '%' + dnumstr + 'd'
# If user passed in the label
if repl:
_fmt_str = lbl.replace('<cur_str>', cur_str).replace('<max_str>', max_str)
else:
_fmt_str = lbl + cur_str + '/' + max_str
if backspace:
# put backspace characters into the progress string
# (looks nice on normal terminals)
#nBackspaces = len(_fmt_str) - len(dnumstr) + len(max_str)
#backspaces = '\b' * nBackspaces
#fmt_str = backspaces + _fmt_str
        # FIXME: USE CARRIAGE RETURN INSTEAD OF BACKSPACES
fmt_str = '\r' + _fmt_str
else:
        # FIXME: USE CARRIAGE RETURN INSTEAD OF BACKSPACES
# this looks better on terminals without backspaces
fmt_str = _fmt_str + '\n'
return fmt_str
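# Illustrative example for progress_str (assumed call): progress_str(100, lbl='Progress: ')
# returns '\rProgress: %3d/100'; writing fmt_str % count repeatedly redraws the same
# terminal line because of the leading carriage return.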
def log_progress(lbl='Progress: ', length=0, flushfreq=4, startafter=-1,
start=True, repl=False, approx=False, disable=False,
writefreq=1, with_time=False, backspace=True,
pad_stdout=False, wfreq=None, ffreq=None, freq=None, total=None,
num=None, with_totaltime=None):
"""
    DEPRECATE
    FIXME: deprecate in favor of ProgressIter.
    Still used in util_dev.
"""
global AGGROFLUSH
# Alias kwargs with simpler names
if num is not None:
length = num
if total is not None:
length = total
if wfreq is not None:
writefreq = wfreq
if ffreq is not None:
flushfreq = ffreq
if freq is not None:
writefreq = flushfreq = freq
if with_totaltime is not None:
with_time = with_totaltime
# flush frequency must be a multiple of write frequency
flushfreq = max(int(round(flushfreq / writefreq)), 1) * writefreq
if length < startafter or disable:
# Do not mark progress if only executing a small number of tasks
def mark_progress(*args):
pass
def end_progress(*args):
pass
return mark_progress, end_progress
else:
write_fn = util_logging._utool_write()
flush_fn = util_logging._utool_flush()
# build format string for displaying progress
fmt_str = progress_str(length, lbl=lbl, repl=repl, approx=approx,
backspace=backspace)
if AGGROFLUSH:
# Progress function which automatically flushes
def mark_progress(count, flush_fn=flush_fn):
count_ = count + 1
write_fn(fmt_str % (count_))
flush_fn()
else:
# Progress function flushes every <flushfreq> times
def mark_progress(count, fmt_str=fmt_str, flushfreq=flushfreq,
writefreq=writefreq, write_fn=write_fn,
flush_fn=flush_fn):
count_ = count + 1
if count_ % writefreq == 0:
write_fn(fmt_str % count_)
if count_ % flushfreq == 0:
flush_fn()
if pad_stdout:
write_fn('\n')
write_fn('\n')
flush_fn()
if with_time:
tt = util_time.tic(lbl)
def end_progress(count_=length, write_fn=write_fn, flush_fn=flush_fn):
write_fn(fmt_str % (count_))
write_fn('\n')
flush_fn()
if with_time:
util_time.toc(tt)
if pad_stdout:
write_fn('\n\n')
flush_fn()
#mark_progress(0)
if start:
mark_progress(-1)
return mark_progress, end_progress
if __name__ == '__main__':
"""
CommandLine:
python -c "import utool, utool.util_progress; utool.doctest_funcs(utool.util_progress, allexamples=True)"
python -c "import utool, utool.util_progress; utool.doctest_funcs(utool.util_progress)"
python -m utool.util_progress
python -m utool.util_progress --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| {
"content_hash": "c7895f345908dde409546d7658bc0158",
"timestamp": "",
"source": "github",
"line_count": 961,
"max_line_length": 113,
"avg_line_length": 37.733610822060356,
"alnum_prop": 0.5336164580001103,
"repo_name": "Erotemic/utool",
"id": "2654077b7e1eefca2017b3333102d189b02bfc24",
"size": "36286",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "utool/util_progress.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "2272952"
},
{
"name": "Shell",
"bytes": "37605"
}
],
"symlink_target": ""
} |
__author__ = 'radlab'
import ctypes as ct
# Enums
_ARUCO_FAILURE = -1
_ARUCO_SUCCESS = 0
_ARUCO_FALSE = 0
_ARUCO_TRUE = 1
# Structures
class _Size(ct.Structure):
_fields_ = [
('width', ct.c_int),
('height', ct.c_int),
]
class _Image(ct.Structure):
_fields_ = [
('data', ct.POINTER(ct.c_uint8)),
('size', _Size),
]
# Basic types
def _Status(status):
"""A type wrapper callable that converts an aruco staus into an exception.
"""
if status != _ARUCO_SUCCESS:
raise ArucoError(
_dll.aruco_error_last_str(),
_dll.aruco_error_last_code())
return status
_Handle = ct.POINTER(ct.c_int32)
_SizeT = ct.c_uint64  # integer size/index type; renamed so it does not shadow the _Size struct above
_ImagePtr = ct.POINTER(_Image)
# Load the DLL
import ctypes
import ctypes.util
name = ctypes.util.find_library(r'C:\Users\radlab\drone_demo\native\build32\third-party\aruco-1.0.0\bin\Release\caruco')
_dll = ctypes.CDLL(name)
if _dll is None:
raise ImportError('Could not load caruco native library.')
else:
print _dll
# Function return and argument types
_dll.aruco_error_last_str.restype = ct.c_char_p
_dll.aruco_board_new.restype = _Handle
_dll.aruco_board_free.argtypes = ( _Handle, )
_dll.aruco_board_draw_3d_axis.argtypes = ( _Handle, _ImagePtr, _Handle )
_dll.aruco_board_draw_3d_cube.argtypes = ( _Handle, _ImagePtr, _Handle )
_dll.aruco_board_get_extrinsics.argtypes = ( _Handle, ct.POINTER(ct.c_float), ct.POINTER(ct.c_float) )
_dll.aruco_board_configuration_new.restype = _Handle
_dll.aruco_board_configuration_free.argtypes = ( _Handle, )
_dll.aruco_board_configuration_save_to_file.restype = _Status
_dll.aruco_board_configuration_save_to_file.argtypes = ( _Handle, ct.c_char_p )
_dll.aruco_board_configuration_read_from_file.restype = _Status
_dll.aruco_board_configuration_read_from_file.argtypes = ( _Handle, ct.c_char_p )
_dll.aruco_board_configuration_marker_ids.argtypes = ( _Handle, ct.POINTER(ct.c_int) )
_dll.aruco_detect_board.restype = _Status
_dll.aruco_detect_board.argtypes = ( _Handle, _Handle, _Handle, _Handle, ct.c_float, ct.POINTER(ct.c_float) )
_dll.aruco_camera_parameters_new.restype = _Handle
_dll.aruco_camera_parameters_free.argtypes = ( _Handle, )
_dll.aruco_camera_parameters_is_valid.argtypes = ( _Handle, )
_dll.aruco_camera_parameters_save_to_file.restype = _Status
_dll.aruco_camera_parameters_save_to_file.argtypes = ( _Handle, ct.c_char_p )
_dll.aruco_camera_parameters_read_from_file.restype = _Status
_dll.aruco_camera_parameters_read_from_file.argtypes = ( _Handle, ct.c_char_p )
_dll.aruco_camera_parameters_read_from_xml_file.restype = _Status
_dll.aruco_camera_parameters_read_from_xml_file.argtypes = ( _Handle, ct.c_char_p )
_dll.aruco_camera_parameters_resize.argtypes = ( _Handle, ct.POINTER(_Size) )
_dll.aruco_camera_parameters_get_camera_matrix.argtypes = ( _Handle, ct.POINTER(ct.c_float) )
_dll.aruco_camera_parameters_get_distortion_coeffs.argtypes = ( _Handle, ct.POINTER(ct.c_float) )
_dll.aruco_marker_new.restype = _Handle
_dll.aruco_marker_free.argtypes = ( _Handle, )
_dll.aruco_marker_copy_from.argtypes = ( _Handle, _Handle )
_dll.aruco_marker_is_valid.argtypes = ( _Handle, )
_dll.aruco_marker_id.argtypes = ( _Handle, )
_dll.aruco_marker_calculate_extrinsics.restype = _Status
_dll.aruco_marker_calculate_extrinsics.argtypes = ( _Handle, ct.c_float, _Handle )
_dll.aruco_marker_draw.argtypes = (
_Handle, _ImagePtr,
ct.c_float, ct.c_float, ct.c_float, ct.c_int, ct.c_int
)
_dll.aruco_marker_centroid_x.argtypes = (_Handle, )
_dll.aruco_marker_centroid_y.argtypes = (_Handle, )
_dll.aruco_marker_draw_3d_axis.argtypes = ( _Handle, _ImagePtr, _Handle )
_dll.aruco_marker_draw_3d_cube.argtypes = ( _Handle, _ImagePtr, _Handle )
_dll.aruco_detect_markers.restype = _Status
_dll.aruco_detect_markers.argtypes = ( _ImagePtr, _Handle )
_dll.aruco_detect_markers_full.restype = _Status
_dll.aruco_detect_markers_full.argtypes = ( _ImagePtr, _Handle, _Handle, ct.c_float )
_dll.aruco_marker_vector_new.restype = _Handle
_dll.aruco_marker_vector_free.argtypes = ( _Handle, )
_dll.aruco_marker_vector_clear.argtypes = ( _Handle, )
_dll.aruco_marker_vector_size.argtypes = ( _Handle, )
_dll.aruco_marker_vector_element.restype = _Handle
_dll.aruco_marker_vector_element.argtypes = ( _Handle, _SizeT )
_dll.aruco_marker_vector_push_back.argtypes = ( _Handle, _Handle )
class ArucoError(Exception):
"""An exception which wraps an error returned from the aruco library.
*msg* is a description of the error.
*code* is an integer code for the error.
"""
def __init__(self, msg, code=-1):
self.code = code
self.msg = msg
def __str__(self):
if self.code == -1:
return str(self.msg)
return "%s: %s" % (self.code, self.msg)
# Internal classes
class _HandleWrapper(object):
"""A wrapper around an aruco handle. Set the class attributes ``_new``,
``_free`` and (optionally) ``_copy`` in a derived class.
"""
def __init__(self):
assert _dll is not None
self.handle = self.__class__._new()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __del__(self):
self.close()
def copy_from(self, other_handle):
if hasattr(self.__class__, '_copy'):
self.__class__._copy(self.handle, other_handle)
else:
raise NotImplementedError('Copy not implemented')
def close(self):
if self.handle is None:
return
self.__class__._free(self.handle)
self.handle = None
def _to_image(image, allow_read_only=True):
"""Converts an object whcih exports the array interface to one which can be
passed as an image pointer to a caruco API function.
The array interface is defined at
http://docs.scipy.org/doc/numpy/reference/arrays.interface.html
"""
# Check the input image supports the array interface
if not hasattr(image, '__array_interface__'):
raise ValueError('Input must support the array interface: ' +
'http://docs.scipy.org/doc/numpy/reference/arrays.interface.html')
array = image.__array_interface__
if 'version' in array and array['version'] < 3:
raise ValueError('Input must support >= version 3 of the array interface.')
shape = array['shape']
typestr = array['typestr']
# Check array type and shape
if len(shape) != 3 or shape[2] != 3:
raise ValueError('Input must be an array with three colour channels, i.e. wxhx3 in shape.')
if typestr != '|u1':
raise ValueError('Input must be an array of bytes.')
# Check packing
if 'strides' in array and array['strides'] is not None:
strides = array['strides']
if strides != (shape[1]*shape[2],shape[2],1):
raise ValueError('Input must be packed.')
data_ptr = None
# Do we have a data tuple?
if 'data' in array and array['data'] is not None:
data = array['data']
if isinstance(data, tuple):
# Check for read-only images
if data[1] and not allow_read_only:
raise ValueError('Input must not be read-only.')
# Extract pointer
data_ptr = ct.pointer(ct.c_uint8.from_address(data[0]))
else:
      # data object must export the buffer interface.
# An explicit check for string types.
if isinstance(data, str):
if not allow_read_only:
raise ValueError('Input must not be read-only. Strings are read-only.')
data_ptr = ct.cast(ct.c_char_p(data), ct.POINTER(ct.c_uint8))
else:
data_ptr = ct.pointer(ct.c_uint8.from_buffer(data))
else:
# If data entry is not present, use the buffer interface of the input
data_ptr = ct.pointer(ct.c_uint8.from_buffer(image))
assert data_ptr is not None
# Create an image structure
im = _Image()
#im.data = np.array(image, copy=False).ctypes.data_as(ct.POINTER(ct.c_uint8))
im.data = data_ptr
im.size.width = shape[0]
im.size.height = shape[1]
return ct.byref(im)
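# Illustrative usage (assumes a numpy array; numpy itself is not required by this module):
#   frame = numpy.zeros((640, 480, 3), dtype=numpy.uint8)   # packed w x h x 3 bytes
#   image_ptr = _to_image(frame, allow_read_only=False)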
class _MarkerVector(_HandleWrapper):
_new = _dll.aruco_marker_vector_new
_free = _dll.aruco_marker_vector_free
def clear(self):
_dll.aruco_marker_vector_clear(self.handle)
def size(self):
return _dll.aruco_marker_vector_size(self.handle)
def contents(self):
contents = []
for idx in range(self.size()):
m = Marker()
m.copy_from(_dll.aruco_marker_vector_element(self.handle, idx))
contents.append(m)
return contents
def push_back(self, m):
if not isinstance(m, Marker):
raise ValueError('Expected instance of ardrone.aruco.Marker')
_dll.aruco_marker_vector_push_back(self.handle, m.handle)
# Public classes
class Board(_HandleWrapper):
"""This class encapsulates the orientation and position of a detected board.
"""
_new = _dll.aruco_board_new
_free = _dll.aruco_board_free
def draw_3d_axis(self, image, params):
"""Draw the 3d axis of this object into an image.
*image* is an image to draw into (see Marker.draw for this object's type).
*params* is an instance of CameraParameters.
"""
_dll.aruco_board_draw_3d_axis(self.handle, _to_image(image, allow_read_only=False), params.handle)
def draw_3d_cube(self, image, params):
"""Draw the 3d cube of this object into an image.
*image* is an image to draw into (see Marker.draw for this object's type).
*params* is an instance of CameraParameters.
"""
_dll.aruco_board_draw_3d_cube(self.handle, _to_image(image, allow_read_only=False), params.handle)
def get_extrinsics(self):
"""Return a pair describing the extrinsics of the board.
The first element is a triple giving the Rodrigues rotation.
The second element is a triple giving the translation vector for the board.
"""
r = (ct.c_float * 3)()
t = (ct.c_float * 3)()
_dll.aruco_board_get_extrinsics(self.handle, r, t)
return (tuple([float(x) for x in r]), tuple([float(x) for x in t]))
class BoardConfiguration(_HandleWrapper):
"""This class defines a board with several markers.
"""
_new = _dll.aruco_board_configuration_new
_free = _dll.aruco_board_configuration_free
_copy = _dll.aruco_board_configuration_copy_from
def save_to_file(self, path):
"""Save the board configuration to a file.
*path* is a filesystem path
Raises an ArucoError if there is a file I/O error.
"""
_dll.aruco_board_configuration_save_to_file(self.handle, path)
def read_from_file(self, path):
"""Read the board configuration from a file previously saved via
save_to_file.
*path* is a filesystem path
Raises an ArucoError if there is a file I/O error.
"""
_dll.aruco_board_configuration_read_from_file(self.handle, path)
def marker_ids(self):
"""Return a sequence of integer marker ids for this board."""
sz = _dll.aruco_board_configuration_marker_ids(self.handle, None)
ids = (ct.c_int * sz)()
_dll.aruco_board_configuration_marker_ids(self.handle, ids)
return ids[:]
class CameraParameters(_HandleWrapper):
"""Parameters of the camera.
"""
_new = _dll.aruco_camera_parameters_new
_free = _dll.aruco_camera_parameters_free
_copy = _dll.aruco_camera_parameters_copy_from
def is_valid(self):
"""Return True iff the parameters are valid."""
return _dll.aruco_camera_parameters_is_valid(self.handle) == _ARUCO_TRUE
def save_to_file(self, path):
"""Save the camera parameters to a file.
*path* is a filesystem path
Raises an ArucoError if there is a file I/O error.
"""
_dll.aruco_camera_parameters_save_to_file(self.handle, path)
def read_from_file(self, path):
"""Read the camera parameters from a file previously saved via
save_to_file.
*path* is a filesystem path
Raises an ArucoError if there is a file I/O error.
"""
_dll.aruco_camera_parameters_read_from_file(self.handle, path)
def read_from_xml_file(self, path):
"""Read the camera parameters from an XML or YAML file as generated by
OpenCV's calibration program.
*path* is a filesystem path
Raises an ArucoError if there is a file I/O error.
"""
_dll.aruco_camera_parameters_read_from_xml_file(self.handle, path)
def resize(self, size):
"""Adjust the parameters to the size of the image indicated.
*size* is a pair specifying the width and height of the image in pixels.
"""
sz = _Size()
sz.width, sz.height = size
_dll.aruco_camera_parameters_resize(self.handle, ct.byref(sz))
def get_camera_matrix(self):
m = (ct.c_float * 9)()
_dll.aruco_camera_parameters_get_camera_matrix(self.handle, m)
return (
tuple([float(x) for x in m[0:3]]),
tuple([float(x) for x in m[3:6]]),
tuple([float(x) for x in m[6:9]]) )
def get_distortion_coeffs(self):
m = (ct.c_float * 4)()
_dll.aruco_camera_parameters_get_distortion_coeffs(self.handle, m)
return tuple([float(x) for x in m])
class Marker(_HandleWrapper):
"""This class represents a marker.
"""
_new = _dll.aruco_marker_new
_free = _dll.aruco_marker_free
_copy = _dll.aruco_marker_copy_from
def is_valid(self):
"""Return True iff the marker is valid."""
return _dll.aruco_marker_is_valid(self.handle) == _ARUCO_TRUE
def id(self):
"""Return an integer id for the marker."""
return _dll.aruco_marker_id(self.handle)
def centroid_x(self):
"""Return a cv::Point with location of centroid of marker"""
return _dll.aruco_marker_centroid_x(self.handle)
def centroid_y(self):
"""Return a cv::Point with location of centroid of marker"""
return _dll.aruco_marker_centroid_y(self.handle)
def draw(self, image,
color = tuple((1.0, 0.0, 0.0)),
line_width = 1, write_id = True):
"""Draw the marker into an image.
*image* is an object which supports the array interface. It must be width x
height x 3 in shape and have a datatype of ``u1`` (i.e. a byte).
    *color* is a red, green, blue triplet where each element is on the interval
[0,1].
*line_width* is the width of the line used to draw the marker in the image.
*write_id* is a flag indicating whether the integer marker id should be
drawn into the image.
"""
_dll.aruco_marker_draw(self.handle, _to_image(image, allow_read_only=False),
color[0], color[1], color[2], line_width,
_ARUCO_TRUE if write_id else _ARUCO_FALSE)
def draw_3d_axis(self, image, params):
"""Draw the 3d axis of this object into an image.
*image* is an image to draw into (see Marker.draw for this object's type).
*params* is an instance of CameraParameters.
"""
_dll.aruco_marker_draw_3d_axis(self.handle, _to_image(image, allow_read_only=False), params.handle)
def draw_3d_cube(self, image, params):
"""Draw the 3d cube of this object into an image.
*image* is an image to draw into (see Marker.draw for this object's type).
*params* is an instance of CameraParameters.
"""
_dll.aruco_marker_draw_3d_cube(self.handle, _to_image(image, allow_read_only=False), params.handle)
def detect_board(markers, configuration, params, marker_size):
"""Detects a board given some markers.
*markers* is a sequence of markers as returned from the MarkerDetector.
*configuration* is a BoardConfiguration for the board.
*params* is an instance of CameraParameters which must have been
initialised to the camera intrinsics.
*marker_size* is the size of the marker images in metres.
Returns *board, lik*, an instance of the Board class describing the detected
  board and a float giving a measure of its likelihood of being in the image.
"""
mv = _MarkerVector()
[mv.push_back(m) for m in markers]
b = Board()
lik = ct.c_float(0)
_dll.aruco_detect_board(mv.handle, configuration.handle, b.handle, params.handle, marker_size, ct.byref(lik))
return (b, lik.value)
def detect_markers(image, params=None, marker_size=None):
"""Detects the markers in the image passed.
If you provide information about the camera parameters and the size of the
  marker, then the extrinsics of the markers are detected.
If one of *params* or *marker_size* is not None and the other is None, an
ArucoError is raised.
*params* is an instance of CameraParameters which must have been
initialised to the camera intrinsics.
*marker_size* is the size of the marker images in metres.
Returns a sequence of Marker objects, one for each detected marker.
"""
v = _MarkerVector()
if (params is None) and (marker_size is None):
_dll.aruco_detect_markers(_to_image(image), v.handle)
elif (params is not None) and (marker_size is not None):
_dll.aruco_detect_markers_full(_to_image(image), v.handle,
params.handle, marker_size)
else:
raise ArucoError('Both params and marker_size must be None or ' +
'both must not be None.')
return v.contents()
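# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a minimal end-to-end call of
# the detection API defined above.  It assumes this file is importable as
# ``pyaruco`` and that frames are height x width x 3 ``u1`` arrays (as required
# by Marker.draw); the numpy frame below is only a placeholder.
#
#   import numpy as np
#   import pyaruco
#   frame = np.zeros((480, 640, 3), dtype="u1")    # placeholder image
#   markers = pyaruco.detect_markers(frame)        # pass params + marker_size
#                                                  # to recover extrinsics too
#   ids = [m.id() for m in markers if m.is_valid()]
#   for m in markers:
#       m.draw(frame, color=(0.0, 1.0, 0.0), line_width=2)
# ---------------------------------------------------------------------------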
| {
"content_hash": "6335f5075c092878812576e289a90391",
"timestamp": "",
"source": "github",
"line_count": 517,
"max_line_length": 120,
"avg_line_length": 32.359767891682786,
"alnum_prop": 0.6805738194859534,
"repo_name": "richstoner/pyaruco",
"id": "b915beb0802ad03c3849aa4851d9db6a5c4f333d",
"size": "16730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyaruco.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8451"
},
{
"name": "C++",
"bytes": "139195"
},
{
"name": "Python",
"bytes": "18398"
}
],
"symlink_target": ""
} |
from jinja2 import Environment, FunctionLoader, ChoiceLoader, FileSystemLoader
from itertools import chain
from collections import OrderedDict
class TemplateError(Exception):
def __init__(self, message):
self.message = message
class _TemplateFunc:
def __init__(self, *params):
self.ids = params
self.values = []
def _getParamDict(self, *args):
return OrderedDict((v,args[k]) for k,v in enumerate(self.ids))
def addValues(self, *args):
if len(args) != len(self.ids):
raise TemplateError("Wrong number of template arguments")
self.values.append(args)
return self._getParamDict(*args)
def getParamDictList(self):
return list(self._getParamDict(*val) for val in self.values)
def __str__(self):
return "ids:" + str(self.ids) + "\nvalues:" + str(self.values)
def _tfrealID(name, paramdic):
return name + "_" + "_".join(chain(*zip(paramdic.keys(), paramdic.values())))
class _TemplateFuncDict:
def __init__(self):
self.d = {}
def define(self, name, *params):
if name in self.d:
raise TemplateError("Redefinition of template function \"" + name + "\"")
self.d[name] = _TemplateFunc(*params)
def call(self, name, *values):
if name not in self.d:
raise TemplateError(name + " is not defined")
paramdic = self.d[name].addValues(*values)
return _tfrealID(name, paramdic)
def get(self, name):
return self.d[name].getParamDictList()
def _load_internal_template_module(name):
if name == "tmplFunc.tmpl":
return """\
{% macro defTmplFunc(name) %}
{% if GLSLJinja_IsInstanciate %}
{% for d in _tfget(name) %}
`caller(_tfrealID(name, d), *d.values())`
{% endfor %}
{% else %}
`_tfdef(name, *varargs)`
{% endif %}
{% endmacro %}
"""
else:
return None
class _PreprocessChoiceLoader(ChoiceLoader):
def __init__(self, loaders):
super().__init__(loaders)
def get_source(self, environment, template):
ret = super().get_source(environment, template)
text = ret[0]
lines = text.split("\n")
lineNo = ["#line %d" % i for i in range(2, len(lines)+2)]
        # The last line after the final "#line %d" directive must not be empty, or Boost.Wave will crash.
newText = "\n".join(chain(*zip(lines, lineNo))) + "\n\n"
return (newText,) + (ret[1:])
def load(self, environment, name, globals=None):
return super(ChoiceLoader, self).load(environment, name, globals)
class _GLSLJinaTempl:
def __init__(self, templ):
self.templ = templ
def render(self, *args, **kwargs):
tmplDic = _TemplateFuncDict()
d = dict(
_tfdef = tmplDic.define,
tfcall = tmplDic.call,
_tfget = tmplDic.get,
_tfrealID = _tfrealID)
self.templ.render(GLSLJinja_IsInstanciate = False, *args, **dict(d, **kwargs))
return self.templ.render(GLSLJinja_IsInstanciate = True, *args, **dict(d, **kwargs))
class GLSLJinjaLoader:
def __init__(self, searchpath=""):
self.env = Environment(
loader=_PreprocessChoiceLoader([
FunctionLoader(_load_internal_template_module),
FileSystemLoader(searchpath)]),
variable_start_string="`", variable_end_string="`")
def get_template(self, filename):
return _GLSLJinaTempl(self.env.get_template(filename))
def get_includable_template_from_string(self, source):
return self.env.from_string(source)
if __name__ == "__main__":
def _tftest():
tmplDic = _TemplateFuncDict()
tmplDic.define("testFunc", "T", "U")
tmplDic.define("testFunc2", "X", "Y", "Z")
tmplDic.call("testFunc", "int", "vec3")
tmplDic.call("testFunc", "float", "sometype")
for k,v in tmplDic.d.items():
print(k)
print(v)
d = v.getParamDictList()
for j in d:
print(j)
_tftest()
env = GLSLJinjaLoader()
tmpl = env.get_template("testjinja2.tmpl")
print(tmpl.render(delta="0.01", func="distanceFieldSphere"))
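# Note (usage sketch, not part of the original script): the template file
# loaded above ("testjinja2.tmpl") is not included here.  Given the loader
# configuration, a hypothetical template would use backticks as variable
# delimiters, e.g. a line such as
#
#   float d = `delta` * `func`(p);
#
# which the render() call above would expand with delta="0.01" and
# func="distanceFieldSphere".  Backticks were presumably chosen so the
# template markup does not collide with GLSL's own braces.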
| {
"content_hash": "0b1112bd7d38a6b966f00b328f3c2560",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 92,
"avg_line_length": 34.046875,
"alnum_prop": 0.5686094538779256,
"repo_name": "demotomohiro/Reflection-Refraction-less-Ronpa-Raytracing-Renderer",
"id": "e54d18bce2130cb6d6e627dc9b864487c86c1bdc",
"size": "4358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anim_script/GLSLJinja.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "14209"
},
{
"name": "Batchfile",
"bytes": "314"
},
{
"name": "C++",
"bytes": "50262"
},
{
"name": "CMake",
"bytes": "5019"
},
{
"name": "GLSL",
"bytes": "19273"
},
{
"name": "Python",
"bytes": "21372"
}
],
"symlink_target": ""
} |
"""
github3.repos.contents
======================
This module contains the Contents object pertaining to READMEs and other files
that can be accessed via the GitHub API.
"""
from __future__ import unicode_literals
from json import dumps
from base64 import b64decode, b64encode
from ..git import Commit
from ..models import GitHubCore
from ..decorators import requires_auth
class Contents(GitHubCore):
"""The :class:`Contents <Contents>` object. It holds the information
concerning any content in a repository requested via the API.
Two content instances can be checked like so::
c1 == c2
c1 != c2
And is equivalent to::
c1.sha == c2.sha
c1.sha != c2.sha
See also: http://developer.github.com/v3/repos/contents/
"""
def _update_attributes(self, content):
# links
self._api = content.get('url')
#: Dictionary of links
self.links = content.get('_links')
#: URL of the README on github.com
self.html_url = content.get('html_url')
#: URL for the git api pertaining to the README
self.git_url = content.get('git_url')
#: git:// URL of the content if it is a submodule
self.submodule_git_url = content.get('submodule_git_url')
# should always be 'base64'
#: Returns encoding used on the content.
self.encoding = content.get('encoding', '')
# content, base64 encoded and decoded
#: Base64-encoded content of the file.
self.content = content.get('content')
#: Decoded content of the file as a bytes object. If we try to decode
        #: to a character set for you, we might encounter an exception which
#: will prevent the object from being created. On python2 this is the
#: same as a string, but on python3 you should call the decode method
#: with the character set you wish to use, e.g.,
#: ``content.decoded.decode('utf-8')``.
#: .. versionchanged:: 0.5.2
self.decoded = self.content
if self.encoding == 'base64' and self.content:
self.decoded = b64decode(self.content.encode())
# file name, path, and size
#: Name of the content.
self.name = content.get('name', '')
#: Path to the content.
self.path = content.get('path', '')
#: Size of the content
self.size = content.get('size', 0)
#: SHA string.
self.sha = content.get('sha', '')
#: Type of content. ('file', 'symlink', 'submodule')
self.type = content.get('type', '')
        #: Target will only be set if the type is a symlink. This is what the
        #: link points to.
self.target = content.get('target', '')
self._uniq = self.sha
def _repr(self):
return '<Content [{0}]>'.format(self.path)
def __eq__(self, other):
        return self.sha == other
def __ne__(self, other):
return self.sha != other
@requires_auth
def delete(self, message, branch=None, committer=None, author=None):
"""Delete this file.
:param str message: (required), commit message to describe the removal
:param str branch: (optional), branch where the file exists.
Defaults to the default branch of the repository.
:param dict committer: (optional), if no information is given the
authenticated user's information will be used. You must specify
both a name and email.
:param dict author: (optional), if omitted this will be filled in with
committer information. If passed, you must specify both a name and
email.
:returns: dictionary of new content and associated commit
:rtype: :class:`~github3.repos.contents.Contents` and
:class:`~github3.git.Commit`
"""
json = {}
if message:
data = {'message': message, 'sha': self.sha, 'branch': branch,
'committer': validate_commmitter(committer),
'author': validate_commmitter(author)}
self._remove_none(data)
json = self._json(self._delete(self._api, data=dumps(data)), 200)
if json and 'commit' in json:
json['commit'] = Commit(json['commit'], self)
if json and 'content' in json:
json['content'] = self._instance_or_null(Contents,
json['content'])
return json
@requires_auth
def update(self, message, content, branch=None, committer=None,
author=None):
"""Update this file.
:param str message: (required), commit message to describe the update
:param str content: (required), content to update the file with
:param str branch: (optional), branch where the file exists.
Defaults to the default branch of the repository.
:param dict committer: (optional), if no information is given the
authenticated user's information will be used. You must specify
both a name and email.
:param dict author: (optional), if omitted this will be filled in with
committer information. If passed, you must specify both a name and
email.
:returns: dictionary containing the updated contents object and the
commit in which it was changed.
:rtype: dictionary of :class:`~github3.repos.contents.Contents` and
:class:`~github3.git.Commit`
"""
if content and not isinstance(content, bytes):
raise ValueError( # (No coverage)
'content must be a bytes object') # (No coverage)
json = None
if message and content:
content = b64encode(content).decode('utf-8')
data = {'message': message, 'content': content, 'branch': branch,
'sha': self.sha,
'committer': validate_commmitter(committer),
'author': validate_commmitter(author)}
self._remove_none(data)
json = self._json(self._put(self._api, data=dumps(data)), 200)
if json and 'content' in json:
self._update_attributes(json['content'])
json['content'] = self
if json and 'commit' in json:
json['commit'] = Commit(json['commit'], self)
return json
def validate_commmitter(d):
if d and d.get('name') and d.get('email'):
return d
return None
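# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module), assuming a Contents instance
# has already been obtained from an authenticated Repository object (the exact
# retrieval call depends on the github3.py version in use):
#
#   new = contents.update('Fix typo in README',
#                         b'# My Project\nUpdated text\n')
#   commit = new['commit']            # github3.git.Commit for the change
#   result = contents.delete('Remove obsolete file')
#
# Both calls require authentication and return a dict holding the Contents
# object and the Commit that recorded the change, as documented above.
# ---------------------------------------------------------------------------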
| {
"content_hash": "8761e4e9ff5fdf38bc8c13fa92829357",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 78,
"avg_line_length": 38.72781065088758,
"alnum_prop": 0.5911382734912146,
"repo_name": "christophelec/github3.py",
"id": "e143d34753dfcdd17ad86a30e816df5f54b14443",
"size": "6569",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "github3/repos/contents.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "97717"
},
{
"name": "Makefile",
"bytes": "362"
},
{
"name": "Python",
"bytes": "693579"
}
],
"symlink_target": ""
} |
import sys
import os
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_root)
import argparse
import astropy
import astropy.units as units
from astropy.coordinates import SkyCoord
import numpy
import numpy.linalg
from arl.test_support import import_visibility_from_oskar, export_visibility_to_fits
from arl.visibility_operations import *
import gc
# Parse arguments
parser = argparse.ArgumentParser(description='Collect and coalesce visibilities for a baseline from OSKAR files.')
parser.add_argument('input', metavar='files', type=argparse.FileType('r'),
nargs='*',
help='input files')
parser.add_argument('--out', dest='pre', metavar='pre', type=str, required=True,
help='output prefix')
args = parser.parse_args()
# Loop through files
for i, inp in enumerate(args.input):
# Make sure we are going into this with a clean slate
gc.collect()
# Read. First one will already be loaded
print("Reading", inp.name, "...")
vis = import_visibility_from_oskar(inp.name)
gc.collect()
# Loop through visibilities
print("Grouping...")
data_by_antenna = vis.data.group_by(['antenna1', 'antenna2'])
gc.collect()
# Loop through baselines
print("Collecting...", end="", flush=True)
last_a1 = -1
for j, key in enumerate(data_by_antenna.groups.keys):
# Interested in this baseline?
a1 = key['antenna1']
a2 = key['antenna2']
if a1 != last_a1:
print(" %d" % a1, end="", flush=True)
last_a1 = a1
# Concatenate visibilities
v = data_by_antenna.groups[j]['vis']
with open(args.pre + "%d-%d.bin" % (a1, a2), "ab") as f:
f.write(v.tobytes())
print(" done")
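# Usage sketch (not part of the original script): each output file written
# above is a flat byte dump of the per-baseline visibility arrays.  Assuming
# the visibilities are complex64 (an assumption -- the actual dtype is
# whatever vis.data['vis'] uses), they could be read back with numpy, e.g.:
#
#   import numpy
#   vis = numpy.fromfile(args.pre + "0-1.bin", dtype=numpy.complex64)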
| {
"content_hash": "a5b38b1e666ad4e0e55ce0da74b795d2",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 114,
"avg_line_length": 30.083333333333332,
"alnum_prop": 0.6393351800554017,
"repo_name": "SKA-ScienceDataProcessor/crocodile",
"id": "6b0bbee06e3d7c36be1493402ee0a813c8a008e9",
"size": "1825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/bin_dataset.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "161449"
}
],
"symlink_target": ""
} |
import MySQLdb
import string
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import ConfigParser
def get_item(data_dict,item):
try:
item_value = data_dict[item]
return item_value
except:
pass
def get_parameters(conn):
try:
curs=conn.cursor()
data=curs.execute('select name,value from v$parameter');
data_list=curs.fetchall()
parameters={}
for item in data_list:
parameters[item[0]] = item[1]
return parameters
except Exception,e:
print e
finally:
curs.close()
def get_sysstat(conn):
try:
curs=conn.cursor()
data=curs.execute('select name,value value from v$sysstat');
data_list=curs.fetchall()
sysstat={}
for item in data_list:
sysstat[item[0]] = item[1]
return sysstat
except Exception,e:
print e
finally:
curs.close()
def get_instance(conn,field):
try:
curs=conn.cursor()
curs.execute("select %s from v$instance" %(field) );
result = curs.fetchone()[0]
return result
except Exception,e:
result = ''
print e
finally:
curs.close()
def get_database(conn,field):
try:
curs=conn.cursor()
curs.execute("select %s from v$database" %(field) );
result = curs.fetchone()[0]
return result
except Exception,e:
result = ''
print e
finally:
curs.close()
def get_version(conn):
try:
curs=conn.cursor()
curs.execute("select product,version from product_component_version where product like '%Database%'");
result = curs.fetchone()[1]
return result
except Exception,e:
print e
finally:
curs.close()
def get_current_snap_id(conn, inst_id):
try:
curs=conn.cursor()
curs.execute("select max(snap_id) from wrm$_snapshot where instance_number = %s" %(inst_id));
result = curs.fetchone()[0]
return result
except Exception,e:
print e
finally:
curs.close()
def get_end_interval_time(conn, inst_id):
try:
curs=conn.cursor()
curs.execute("""select to_char(t.end_interval_time, 'yyyy-mm-dd hh24:mi:ss') from wrm$_snapshot t
where t.snap_id in (select max(snap_id) from wrm$_snapshot)
and t.instance_number = %s """ %(inst_id));
result = curs.fetchone()[0]
return result
except Exception,e:
print e
finally:
curs.close()
def get_sessions(conn):
try:
curs=conn.cursor()
curs.execute("select count(*) from v$session");
result = curs.fetchone()[0]
return result
except Exception,e:
        return None
print e
finally:
curs.close()
def get_actives(conn):
try:
curs=conn.cursor()
curs.execute("select count(*) from v$session where STATUS='ACTIVE'");
result = curs.fetchone()[0]
return result
except Exception,e:
        return None
print e
finally:
curs.close()
def get_waits(conn):
try:
curs=conn.cursor()
curs.execute("select count(*) from v$session where event like 'library%' or event like 'cursor%' or event like 'latch%' or event like 'enq%' or event like 'log file%'");
result = curs.fetchone()[0]
return result
except Exception,e:
        return None
print e
finally:
curs.close()
def get_dg_stats(conn):
try:
curs=conn.cursor()
curs.execute("SELECT substr((SUBSTR(VALUE,5)),0,2)*3600 + substr((SUBSTR(VALUE,5)),4,2)*60 + substr((SUBSTR(VALUE,5)),7,2) AS seconds,VALUE FROM v$dataguard_stats a WHERE NAME ='apply lag'");
list = curs.fetchone()
if list:
result = 1
else:
result = 0
return result
except Exception,e:
        return None
print e
finally:
curs.close()
def get_dg_delay(conn):
try:
curs=conn.cursor()
curs.execute("SELECT substr((SUBSTR(VALUE,5)),0,2)*3600 + substr((SUBSTR(VALUE,5)),4,2)*60 + substr((SUBSTR(VALUE,5)),7,2) AS seconds,VALUE FROM v$dataguard_stats a WHERE NAME ='apply lag'");
list = curs.fetchone()
if list:
result = list[0]
else:
result = '---'
return result
except Exception,e:
        return None
print e
finally:
curs.close()
def get_sysdate(conn):
try:
curs=conn.cursor()
curs.execute("select to_char(sysdate, 'yyyymmddhh24miss') from dual");
result = curs.fetchone()[0]
return result
except Exception,e:
        return None
print e
finally:
curs.close()
def get_dg_p_info(conn, dest_id):
try:
curs=conn.cursor()
curs.execute("""select *
from (select dest_id,
thread#,
sequence#+1,
archived,
applied,
current_scn,
to_char(scn_to_timestamp(current_scn), 'yyyy-mm-dd hh24:mi:ss') curr_db_time,
row_number() over(partition by thread# order by sequence# desc) rn
from v$archived_log t, v$database d
where t.dest_id = %s)
where rn = 1 """ %(dest_id));
result = curs.fetchall()
return result
except Exception,e:
return None
print e
finally:
curs.close()
def get_dg_p_info_2(conn, dest_id):
try:
curs=conn.cursor()
curs.execute("""select *
from (select t.dest_id,
transmit_mode,
thread#,
sequence#+1,
archived,
applied,
current_scn,
to_char(scn_to_timestamp(current_scn), 'yyyy-mm-dd hh24:mi:ss') curr_db_time,
row_number() over(partition by thread# order by sequence# desc) rn
from v$archived_log t, v$archive_dest a, v$database d
where t.dest_id = a.dest_id
and t.dest_id = %s)
where rn = 1 """ %(dest_id));
result = curs.fetchall()
return result
except Exception,e:
return None
print e
finally:
curs.close()
def get_log_archived_delay(conn, dest_id, thread_id):
try:
result = 0
curs=conn.cursor()
curs.execute("""select count(1) from v$archived_log where dest_id = %s and thread# = %s and archived= 'NO' group by dest_id """ %(dest_id, thread_id));
list = curs.fetchone()
if list:
result = list[0]
else:
result = 0
return result
except Exception,e:
return None
print e
finally:
curs.close()
def get_log_applied_delay(conn, dest_id, thread_id):
try:
result = 0
curs=conn.cursor()
curs.execute("""select count(1) from v$archived_log where dest_id = %s and thread# = %s and applied= 'NO' group by dest_id """ %(dest_id, thread_id));
list = curs.fetchone()
if list:
result = list[0]
else:
result = 0
return result
except Exception,e:
return None
print e
finally:
curs.close()
def get_redo_per_hour(conn):
try:
curs=conn.cursor()
curs.execute("""select to_char(first_time, 'yyyy-mm-dd hh24')||':00' key_time,
trunc(sum(blocks * block_size) / 1024 / 1024) redo_p_h
from v$archived_log
where to_char(first_time, 'yyyymmddhh24') = to_char(sysdate, 'yyyymmddhh24')
and standby_dest = 'NO'
group by to_char(first_time, 'yyyy-mm-dd hh24') """);
result = curs.fetchone()
return result
except Exception,e:
print e
return None
finally:
curs.close()
def get_db_time(conn, snap_id, inst_id):
try:
curs=conn.cursor()
curs.execute("""select snap_id, end_time, dbtime, elapsed, round(dbtime/elapsed, 2) as rate from (
select n.stat_name as name,
e.snap_id,
to_char(te.end_interval_time,'yyyy-mm-dd hh24:mi:ss') as end_time,
round((case when (e.value - b.value) > 0 then e.value - b.value else e.value end) / 1000 / 1000, 2) as dbtime,
(to_date(to_char(te.end_interval_time,'yyyy-mm-dd hh24:mi:ss'),'yyyy-mm-dd hh24:mi:ss') -
to_date(to_char(tb.end_interval_time,'yyyy-mm-dd hh24:mi:ss'),'yyyy-mm-dd hh24:mi:ss'))*86400 as elapsed
from wrh$_sys_time_model e, wrh$_sys_time_model b, wrh$_stat_name n, wrm$_snapshot tb, wrm$_snapshot te
where e.stat_id = n.stat_id
and b.stat_id = n.stat_id
and b.snap_id = e.snap_id - 1
and e.snap_id = %s
and e.snap_id = te.snap_id and e.instance_number = te.instance_number
and b.snap_id = tb.snap_id and b.instance_number = tb.instance_number
and e.instance_number=b.instance_number
and e.instance_number=%s
and n.stat_name = 'DB time') tmp """ %(snap_id, inst_id));
result = curs.fetchone()
return result
except Exception,e:
print e
return None
finally:
curs.close()
def get_dg_s_ms(conn):
try:
curs=conn.cursor()
curs.execute("""select ms.thread#,
ms.sequence#,
ms.block#,
ms.delay_mins
from v$managed_standby ms
where ms.process in ('MRP0')
and ms.sequence# <> 0 """);
result = curs.fetchone()
return result
except Exception,e:
return None
print e
finally:
curs.close()
def get_dg_s_al(conn, scn):
try:
curs=conn.cursor()
curs.execute(""" select thread#,sequence# from v$archived_log where first_change#<%s and next_change#>=%s """ %(scn,scn));
result = curs.fetchone()
return result
except Exception,e:
return None
print e
finally:
curs.close()
def get_dg_s_rate(conn):
try:
curs=conn.cursor()
curs.execute("""select *
from (select rp.sofar avg_apply_rate
from v$recovery_progress rp
where rp.item = 'Average Apply Rate'
order by start_time desc)
where rownum < 2 """);
result = curs.fetchone()
return result
except Exception,e:
return None
print e
finally:
curs.close()
def get_dg_s_mrp(conn):
try:
curs=conn.cursor()
curs.execute("""select status from gv$session where program like '%(MRP0)' """);
list = curs.fetchone()
if list:
result = 1
else:
result = 0
return result
except Exception,e:
return 0
print e
finally:
curs.close()
def get_standby_redo_count(conn):
try:
curs=conn.cursor()
curs.execute("""select count(1) from v$standby_log """);
result = curs.fetchone()[0]
return result
except Exception,e:
return 0
print e
finally:
curs.close()
def get_time_by_scn(conn, scn):
try:
result=None
curs=conn.cursor()
curs.execute("""select to_char(scn_to_timestamp(%s), 'yyyy-mm-dd hh24:mi:ss') curr_db_time from v$database """ %(scn));
res = curs.fetchone()
if res:
result = res[0]
else:
result = None
return result
except Exception,e:
#print e
return None
finally:
curs.close()
def get_time_from_restorepoint(conn, scn):
try:
result=None
curs=conn.cursor()
curs.execute("""select to_char(time, 'yyyy-mm-dd hh24:mi:ss') curr_db_time from v$restore_point where scn = %s """ %(scn));
res = curs.fetchone()
if res:
result = res[0]
else:
result = None
return result
except Exception,e:
print e
return None
finally:
curs.close()
def get_pri_id_by_server(conn, id):
try:
result=None
curs=conn.cursor()
curs.execute("""select CASE is_switch
WHEN 0 THEN standby_db_id
ELSE primary_db_id
END as sta_id
from db_cfg_oracle_dg
where primary_db_id = %s or standby_db_id = %s """ %(id, id));
res = curs.fetchone()
if res:
result = res[0]
else:
result = None
return result
except Exception,e:
print e
return None
finally:
curs.close()
def get_earliest_fbscn(conn):
try:
curs=conn.cursor()
curs.execute("""select min(scn) from v$restore_point """);
result = curs.fetchone()[0]
return result
except Exception,e:
return None
print e
finally:
curs.close()
def get_earliest_fbtime(conn,flashback_retention):
try:
curs=conn.cursor()
curs.execute("""select to_char(min(time) + 1/48, 'yyyy-mm-dd hh24:mi:ss') mintime from v$restore_point where time > sysdate -%s/24/60 """ %(flashback_retention));
mintime = curs.fetchone()
result = 'null'
if mintime[0]:
result = mintime[0]
return result
except Exception,e:
print e
return None
finally:
curs.close()
def get_last_fbtime(conn):
try:
curs=conn.cursor()
curs.execute("""select to_char(max(time), 'yyyymmddhh24miss') maxtime from v$restore_point """);
lasttime = curs.fetchone()
result = 'null'
if lasttime[0]:
result = lasttime[0]
return result
except Exception,e:
print e
return None
finally:
curs.close()
def get_flashback_space_used(conn):
try:
curs=conn.cursor()
curs.execute("""select sum(percent_space_used) from v$flash_recovery_area_usage """);
fb_space = curs.fetchone()
result = 0
if fb_space:
result = fb_space[0]
if result == '' or result is None:
result = 0
return result
except Exception,e:
print e
return None
finally:
curs.close()
def get_restorepoint(conn, flashback_retention):
try:
curs=conn.cursor()
curs.execute("select name from v$restore_point where time > sysdate -%s/60/24 order by name desc " %(flashback_retention));
list = curs.fetchall()
return list
except Exception,e:
return None
print e
finally:
curs.close()
def get_expire_restore_list(conn, flashback_retention):
try:
curs=conn.cursor()
curs.execute("select name from v$restore_point where time < sysdate - %s/60/24 " %(flashback_retention));
list = curs.fetchall()
return list
except Exception,e:
return None
print e
finally:
curs.close()
def get_tablespace(conn):
try:
curs=conn.cursor()
curs.execute("""select tpsname,status,mgr,max_size,curr_size, max_used
from (SELECT d.tablespace_name tpsname,
d.status status,
d.segment_space_management mgr,
TO_CHAR(NVL(trunc(A.maxbytes / 1024 / 1024), 0), '99999990') max_size,
TO_CHAR(NVL(trunc(a.bytes / 1024 / 1024), 0), '99999990') curr_size,
TO_CHAR(NVL((a.bytes - NVL(f.bytes, 0)) / a.bytes * 100, 0),
'990D00') c_used,
TO_CHAR(NVL((a.bytes - NVL(f.bytes, 0)) / a.maxbytes * 100, 0),
'990D00') max_used
FROM sys.dba_tablespaces d,
(SELECT tablespace_name,
sum(bytes) bytes,
SUM(case autoextensible
when 'NO' then
BYTES
when 'YES' then
MAXBYTES
else
null
end) maxbytes
FROM dba_data_files
GROUP BY tablespace_name) a,
(SELECT tablespace_name,
SUM(bytes) bytes,
MAX(bytes) largest_free
FROM dba_free_space
GROUP BY tablespace_name) f
WHERE d.tablespace_name = a.tablespace_name
AND d.tablespace_name = f.tablespace_name(+))
order by max_used desc """);
list = curs.fetchall()
return list
except Exception,e:
return None
print e
finally:
curs.close()
def get_diskgroup(conn):
try:
curs=conn.cursor()
curs.execute("""select name,
state,
type,
total_mb,
free_mb,
trunc(((total_mb - free_mb) / total_mb) * 100, 2) used_rate
from v$asm_diskgroup """);
list = curs.fetchall()
return list
except Exception,e:
return None
print e
finally:
curs.close()
def get_tables(conn):
try:
curs=conn.cursor()
curs.execute("select owner, owner || '.' || table_name from dba_tables ");
list = curs.fetchall()
return list
except Exception,e:
return None
print e
finally:
curs.close()
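# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): every helper above expects
# an already-open Oracle connection.  Assuming cx_Oracle is the driver in use
# (an assumption -- this module only receives the connection object), a caller
# might look like:
#
#   import cx_Oracle
#   conn = cx_Oracle.connect('user/password@host:1521/service')
#   print get_version(conn)
#   print get_sessions(conn), get_actives(conn), get_waits(conn)
#   conn.close()
# ---------------------------------------------------------------------------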
| {
"content_hash": "8686269d9e5fec7f74861e6728710638",
"timestamp": "",
"source": "github",
"line_count": 717,
"max_line_length": 199,
"avg_line_length": 26.702928870292887,
"alnum_prop": 0.490232946829625,
"repo_name": "JK-Warriors/Heimdallr",
"id": "d6773bd3040831389dce08a7c83360bb63ad7f04",
"size": "19185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/include/wl_oracle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "138793"
},
{
"name": "HTML",
"bytes": "88463"
},
{
"name": "JavaScript",
"bytes": "9581672"
},
{
"name": "PHP",
"bytes": "2937844"
},
{
"name": "Python",
"bytes": "602526"
},
{
"name": "Shell",
"bytes": "14641"
},
{
"name": "TSQL",
"bytes": "140135"
}
],
"symlink_target": ""
} |
from xmlrpclib import ServerProxy, Fault
from os.path import join, abspath, isfile
from SimpleXMLRPCServer import SimpleXMLRPCServer
from urlparse import urlparse
import sys
SimpleXMLRPCServer.allow_reuse_address = 1
MAX_HISTORY_LENGTH = 6
UNHANDLED = 100
ACCESS_DENIED = 200
class UnhandledQuery(Fault):
def __init__(self, message="Couldn't handle the query"):
Fault.__init__(self, UNHANDLED, message)
class AccessDenied(Fault):
def __init__(self, message="Access denied"):
Fault.__init__(self, ACCESS_DENIED, message)
def inside(dir, name):
dir = abspath(dir)
name = abspath(name)
return name.startswith(join(dir, ''))
def getPort(url):
name = urlparse(url)[1]
parts = name.split(':')
return int(parts[-1])
class Node:
def __init__(self, url, dirname, secret):
self.url = url
self.dirname = dirname
self.secret = secret
self.known = set()
def query(self, query, history=[]):
try:
return self._handle(query)
except UnhandledQuery:
history = history + [self.url]
if len(history) >= MAX_HISTORY_LENGTH:
raise
return self._broadcast(query, history)
def hello(self, other):
self.known.add(other)
return 0
def fetch(self, query, secret):
if secret != self.secret:
raise AccessDenied
result = self.query(query)
f = open(join(self.dirname, query), 'w')
f.write(result)
f.close()
return 0
def _start(self):
s = SimpleXMLRPCServer(("", getPort(self.url)), logRequests=False)
s.register_instance(self)
s.serve_forever()
def _handle(self, query):
dir = self.dirname
name = join(dir, query)
if not isfile(name):
raise UnhandledQuery
if not inside(dir, name):
raise AccessDenied
return open(name).read()
    def _broadcast(self, query, history):
for other in self.known.copy():
if other in history:
continue
try:
s = ServerProxy(other)
return s.query(query, history)
except Fault, f:
if f.faultCode == UNHANDLED:
pass
else:
self.known.remove(other)
except:
self.known.remove(other)
raise UnhandledQuery
def main():
url, directory, secret = sys.argv[1:]
n = Node(url, directory, secret)
n._start()
if __name__=='__main__':
main()
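# Usage sketch (not part of the original module): start two nodes and let one
# fetch a file from the other over XML-RPC.  The ports, directories and
# secrets below are made up for illustration.
#
#   $ python server.py http://localhost:4242/ files1/ secret1 &
#   $ python server.py http://localhost:4243/ files2/ secret2 &
#
#   >>> from xmlrpclib import ServerProxy
#   >>> node1 = ServerProxy('http://localhost:4242/')
#   >>> node1.hello('http://localhost:4243/')      # introduce node2 to node1
#   >>> node1.fetch('example.txt', 'secret1')      # query peers, save locally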
| {
"content_hash": "e91846ed765591868f4f9b931cfd9a8f",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 74,
"avg_line_length": 25.50980392156863,
"alnum_prop": 0.568793235972329,
"repo_name": "gavinfish/Awesome-Python",
"id": "a4fc2ff796910dc238a29822353499b7a0f4afb5",
"size": "2602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beginning-python/project/file-share/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "LLVM",
"bytes": "56984"
},
{
"name": "Python",
"bytes": "67143"
}
],
"symlink_target": ""
} |
import pytest
import unittest
from modules.sfp_greensnow import sfp_greensnow
from sflib import SpiderFoot
@pytest.mark.usefixtures
class TestModuleGreensnow(unittest.TestCase):
def test_opts(self):
module = sfp_greensnow()
self.assertEqual(len(module.opts), len(module.optdescs))
def test_setup(self):
sf = SpiderFoot(self.default_options)
module = sfp_greensnow()
module.setup(sf, dict())
def test_watchedEvents_should_return_list(self):
module = sfp_greensnow()
self.assertIsInstance(module.watchedEvents(), list)
def test_producedEvents_should_return_list(self):
module = sfp_greensnow()
self.assertIsInstance(module.producedEvents(), list)
| {
"content_hash": "e85ebc5313b1f4dbfd79d9252fe301ac",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 64,
"avg_line_length": 28.46153846153846,
"alnum_prop": 0.6972972972972973,
"repo_name": "smicallef/spiderfoot",
"id": "5a0bcacf82ba55002a8bd727f40b439324a0009c",
"size": "740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/modules/test_sfp_greensnow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9833"
},
{
"name": "Dockerfile",
"bytes": "2779"
},
{
"name": "JavaScript",
"bytes": "34248"
},
{
"name": "Python",
"bytes": "2845553"
},
{
"name": "RobotFramework",
"bytes": "7584"
},
{
"name": "Shell",
"bytes": "1636"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, with_statement, unicode_literals
import oz.redis_sessions
import oz
import unittest
@oz.test
class CDNCoreTestCase(unittest.TestCase):
def test_password_hash(self):
hash = oz.redis_sessions.password_hash("bar", password_salt="foo")
self.assertEqual(hash, "sha256!c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")
def test_random_hex(self):
hexchars = set("0123456789abcdef")
s1 = oz.redis_sessions.random_hex(10)
self.assertEqual(len(s1), 10)
for c in s1:
self.assertTrue(c in hexchars, "%s not in %s" % (c, hexchars))
s2 = oz.redis_sessions.random_hex(10)
self.assertEqual(len(s2), 10)
for c in s2:
self.assertTrue(c in hexchars, "%s not in %s" % (c, hexchars))
self.assertNotEqual(s1, s2)
| {
"content_hash": "e85a42f4ffe456064af86cda567f0646",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 105,
"avg_line_length": 31.928571428571427,
"alnum_prop": 0.6621923937360179,
"repo_name": "dailymuse/oz",
"id": "f952216e653ae3f86a2a60f0c35f72cb0d4fbcc9",
"size": "894",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/redis_sessions/test_core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9121"
},
{
"name": "Makefile",
"bytes": "239"
},
{
"name": "Python",
"bytes": "102713"
},
{
"name": "Shell",
"bytes": "74"
}
],
"symlink_target": ""
} |
class TripadPipeline(object):
def process_item(self, item, spider):
return item
| {
"content_hash": "59d04b64939244c8dbe162954b04dddc",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 41,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.6847826086956522,
"repo_name": "acehanks/projects",
"id": "c487d9df196e6508d47e666ac4234efd3f76ac3b",
"size": "286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tripadvisor_scrapy/tripad/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1663666"
},
{
"name": "Jupyter Notebook",
"bytes": "747568"
},
{
"name": "Python",
"bytes": "10264"
}
],
"symlink_target": ""
} |
from stringmatching.base import Base
from collections import defaultdict
class LastOcc(Base):
def search(self, pattern, text, all=False):
start = 0
self.m = len(pattern)
self.n = len(text)
self.last_occ = self.__last_occurence(pattern, text)
if all:
results = []
limit = len(text) - len(pattern)
stop = False
while start <= limit and not stop:
result = self.__last_occ(pattern, text, start)
if result is not None:
results.append(result)
start = result + len(pattern)
else:
stop = True
return results
else:
return [ self.__last_occ(pattern, text, start) ]
def __last_occ(self, pat, text, start):
'''
Sliding window text search with last occurrence table
        See p. 41 in "Algorithms on Strings", where the algorithm is called Fast-Search.
Args:
pat (str): pattern to search for
text (str): source in what the pattern should be searched
start (str): position in text where search should be started
'''
j = self.m - 1
while j < self.n:
i = start + j - self.m + 1
if text[i:start+j+1] == pat:
return i
j += self.last_occ[text[j]]
def __last_occurence(self, pat, text):
'''
Returns the last occurrence table
Args:
pat (str): pattern to search for
text (str): source in what the pattern should be searched
'''
        last_occ = defaultdict(lambda: self.m) # saves initialization overhead by returning a default value when a non-existing key is used
for k in range(0, self.m - 1):
last_occ[pat[k]] = self.m - 1 - k
        return last_occ
| {
"content_hash": "4ff9e57a0907cbd78f1e2baa9943339f",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 140,
"avg_line_length": 35.509433962264154,
"alnum_prop": 0.5345377258235919,
"repo_name": "klingtnet/stringologie-ss14",
"id": "9049ef1ca4e0400181e7c4507ace90b3c80486d3",
"size": "1882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stringmatching/last_occ.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29176"
},
{
"name": "Shell",
"bytes": "123"
}
],
"symlink_target": ""
} |
"""The tests for the MQTT switch platform."""
import copy
from unittest.mock import patch
import pytest
from homeassistant.components import switch
from homeassistant.components.mqtt.switch import MQTT_SWITCH_ATTRIBUTES_BLOCKED
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_DEVICE_CLASS,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
)
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_encoding_subscribable_topics,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_publishing_with_custom_encoding,
help_test_reloadable,
help_test_reloadable_late,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_setting_blocked_attribute_via_mqtt_json_message,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message
from tests.components.switch import common
DEFAULT_CONFIG = {
switch.DOMAIN: {"platform": "mqtt", "name": "test", "command_topic": "test-topic"}
}
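# For orientation (not part of the original tests): DEFAULT_CONFIG above is the
# dict form of what, at the time these tests were written, would roughly be the
# following YAML in configuration.yaml:
#
#   switch:
#     - platform: mqtt
#       name: test
#       command_topic: test-topic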
async def test_controlling_state_via_topic(hass, mqtt_mock):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_on": 1,
"payload_off": 0,
"device_class": "switch",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("switch.test")
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_DEVICE_CLASS) == "switch"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", "1")
state = hass.states.get("switch.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "state-topic", "0")
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "state-topic", "None")
state = hass.states.get("switch.test")
assert state.state == STATE_UNKNOWN
async def test_sending_mqtt_commands_and_optimistic(hass, mqtt_mock):
"""Test the sending MQTT commands in optimistic mode."""
fake_state = ha.State("switch.test", "on")
with patch(
"homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state",
return_value=fake_state,
):
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"payload_on": "beer on",
"payload_off": "beer off",
"qos": "2",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("switch.test")
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "switch.test")
mqtt_mock.async_publish.assert_called_once_with(
"command-topic", "beer on", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("switch.test")
assert state.state == STATE_ON
await common.async_turn_off(hass, "switch.test")
mqtt_mock.async_publish.assert_called_once_with(
"command-topic", "beer off", 2, False
)
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async def test_sending_inital_state_and_optimistic(hass, mqtt_mock):
"""Test the initial state in optimistic mode."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("switch.test")
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_controlling_state_via_topic_and_json_message(hass, mqtt_mock):
"""Test the controlling state via topic and JSON message."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_on": "beer on",
"payload_off": "beer off",
"value_template": "{{ value_json.val }}",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("switch.test")
assert state.state == STATE_UNKNOWN
async_fire_mqtt_message(hass, "state-topic", '{"val":"beer on"}')
state = hass.states.get("switch.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "state-topic", '{"val":"beer off"}')
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "state-topic", '{"val": null}')
state = hass.states.get("switch.test")
assert state.state == STATE_UNKNOWN
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, switch.DOMAIN, DEFAULT_CONFIG
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, switch.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
config = {
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_on": 1,
"payload_off": 0,
}
}
await help_test_default_availability_payload(
hass, mqtt_mock, switch.DOMAIN, config, True, "state-topic", "1"
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
config = {
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_on": 1,
"payload_off": 0,
}
}
await help_test_custom_availability_payload(
hass, mqtt_mock, switch.DOMAIN, config, True, "state-topic", "1"
)
async def test_custom_state_payload(hass, mqtt_mock):
"""Test the state payload."""
assert await async_setup_component(
hass,
switch.DOMAIN,
{
switch.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"payload_on": 1,
"payload_off": 0,
"state_on": "HIGH",
"state_off": "LOW",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("switch.test")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", "HIGH")
state = hass.states.get("switch.test")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "state-topic", "LOW")
state = hass.states.get("switch.test")
assert state.state == STATE_OFF
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, switch.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_blocked_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_blocked_attribute_via_mqtt_json_message(
hass, mqtt_mock, switch.DOMAIN, DEFAULT_CONFIG, MQTT_SWITCH_ATTRIBUTES_BLOCKED
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, switch.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, switch.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, switch.DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, switch.DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one switch per unique_id."""
config = {
switch.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"state_topic": "test-topic",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, switch.DOMAIN, config)
async def test_discovery_removal_switch(hass, mqtt_mock, caplog):
"""Test removal of discovered switch."""
data = (
'{ "name": "test",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
await help_test_discovery_removal(hass, mqtt_mock, caplog, switch.DOMAIN, data)
async def test_discovery_update_switch_topic_template(hass, mqtt_mock, caplog):
"""Test update of discovered switch."""
config1 = copy.deepcopy(DEFAULT_CONFIG[switch.DOMAIN])
config2 = copy.deepcopy(DEFAULT_CONFIG[switch.DOMAIN])
config1["name"] = "Beer"
config2["name"] = "Milk"
config1["state_topic"] = "switch/state1"
config2["state_topic"] = "switch/state2"
config1["value_template"] = "{{ value_json.state1.state }}"
config2["value_template"] = "{{ value_json.state2.state }}"
state_data1 = [
([("switch/state1", '{"state1":{"state":"ON"}}')], "on", None),
]
state_data2 = [
([("switch/state2", '{"state2":{"state":"OFF"}}')], "off", None),
([("switch/state2", '{"state2":{"state":"ON"}}')], "on", None),
([("switch/state1", '{"state1":{"state":"OFF"}}')], "on", None),
([("switch/state1", '{"state2":{"state":"OFF"}}')], "on", None),
([("switch/state2", '{"state1":{"state":"OFF"}}')], "on", None),
([("switch/state2", '{"state2":{"state":"OFF"}}')], "off", None),
]
await help_test_discovery_update(
hass,
mqtt_mock,
caplog,
switch.DOMAIN,
config1,
config2,
state_data1=state_data1,
state_data2=state_data2,
)
async def test_discovery_update_switch_template(hass, mqtt_mock, caplog):
"""Test update of discovered switch."""
config1 = copy.deepcopy(DEFAULT_CONFIG[switch.DOMAIN])
config2 = copy.deepcopy(DEFAULT_CONFIG[switch.DOMAIN])
config1["name"] = "Beer"
config2["name"] = "Milk"
config1["state_topic"] = "switch/state1"
config2["state_topic"] = "switch/state1"
config1["value_template"] = "{{ value_json.state1.state }}"
config2["value_template"] = "{{ value_json.state2.state }}"
state_data1 = [
([("switch/state1", '{"state1":{"state":"ON"}}')], "on", None),
]
state_data2 = [
([("switch/state1", '{"state2":{"state":"OFF"}}')], "off", None),
([("switch/state1", '{"state2":{"state":"ON"}}')], "on", None),
([("switch/state1", '{"state1":{"state":"OFF"}}')], "on", None),
([("switch/state1", '{"state2":{"state":"OFF"}}')], "off", None),
]
await help_test_discovery_update(
hass,
mqtt_mock,
caplog,
switch.DOMAIN,
config1,
config2,
state_data1=state_data1,
state_data2=state_data2,
)
async def test_discovery_update_unchanged_switch(hass, mqtt_mock, caplog):
"""Test update of discovered switch."""
data1 = (
'{ "name": "Beer",'
' "device_class": "switch",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
with patch(
"homeassistant.components.mqtt.switch.MqttSwitch.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, switch.DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer" }'
data2 = (
'{ "name": "Milk",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
await help_test_discovery_broken(
hass, mqtt_mock, caplog, switch.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT switch device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, switch.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT switch device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, switch.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, switch.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, switch.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, switch.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, switch.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
await help_test_entity_debug_info_message(
hass, mqtt_mock, switch.DOMAIN, DEFAULT_CONFIG, switch.SERVICE_TURN_ON
)
@pytest.mark.parametrize(
"service,topic,parameters,payload,template",
[
(
switch.SERVICE_TURN_ON,
"command_topic",
None,
"ON",
None,
),
(
switch.SERVICE_TURN_OFF,
"command_topic",
None,
"OFF",
None,
),
],
)
async def test_publishing_with_custom_encoding(
hass,
mqtt_mock,
caplog,
service,
topic,
parameters,
payload,
template,
):
"""Test publishing MQTT payload with different encoding."""
domain = switch.DOMAIN
config = DEFAULT_CONFIG[domain]
await help_test_publishing_with_custom_encoding(
hass,
mqtt_mock,
caplog,
domain,
config,
service,
topic,
parameters,
payload,
template,
)
async def test_reloadable(hass, mqtt_mock, caplog, tmp_path):
"""Test reloading the MQTT platform."""
domain = switch.DOMAIN
config = DEFAULT_CONFIG[domain]
await help_test_reloadable(hass, mqtt_mock, caplog, tmp_path, domain, config)
async def test_reloadable_late(hass, mqtt_client_mock, caplog, tmp_path):
"""Test reloading the MQTT platform with late entry setup."""
domain = switch.DOMAIN
config = DEFAULT_CONFIG[domain]
await help_test_reloadable_late(hass, caplog, tmp_path, domain, config)
@pytest.mark.parametrize(
"topic,value,attribute,attribute_value",
[
("state_topic", "ON", None, "on"),
],
)
async def test_encoding_subscribable_topics(
hass, mqtt_mock, caplog, topic, value, attribute, attribute_value
):
"""Test handling of incoming encoded payload."""
await help_test_encoding_subscribable_topics(
hass,
mqtt_mock,
caplog,
switch.DOMAIN,
DEFAULT_CONFIG[switch.DOMAIN],
topic,
value,
attribute,
attribute_value,
)
| {
"content_hash": "8639b31050cb249face438514590e4c2",
"timestamp": "",
"source": "github",
"line_count": 588,
"max_line_length": 86,
"avg_line_length": 31.102040816326532,
"alnum_prop": 0.6042213473315835,
"repo_name": "rohitranjan1991/home-assistant",
"id": "a458ac03baa14dec9832cfdc218ceeb588612546",
"size": "18288",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/mqtt/test_switch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
} |
"""Libraries for building Stack Overflow next-word prediction models."""
import tensorflow as tf
class TransposableEmbedding(tf.keras.layers.Layer):
"""A Keras layer implementing a transposed projection output layer."""
def __init__(self, embedding_layer: tf.keras.layers.Embedding):
super().__init__()
self.embeddings = embedding_layer.embeddings
# Placing `tf.matmul` under the `call` method is important for backpropagating
# the gradients of `self.embeddings` in graph mode.
def call(self, inputs):
return tf.matmul(inputs, self.embeddings, transpose_b=True)
def create_recurrent_model(vocab_size: int,
embedding_size: int = 96,
num_lstm_layers: int = 1,
lstm_size: int = 670,
                           shared_embedding: bool = False) -> tf.keras.Model:
  """Constructs a recurrent model with an initial embedding layer.
The resulting model embeds sequences of integer tokens (whose values vary
between `0` and `vocab_size-1`) into an `embedding_size`-dimensional space.
It then applies `num_lstm_layers` LSTM layers, each of size `lstm_size`.
Each LSTM is followed by a dense layer mapping the output to `embedding_size`
units. The model then has a final dense layer mapping to `vocab_size` logits
units. Note that this model does not compute any kind of softmax on the final
logits. This should instead be done in the loss function for the purposes of
backpropagation.
Args:
vocab_size: Vocabulary size to use in the initial embedding layer.
embedding_size: The size of the embedding layer.
num_lstm_layers: The number of LSTM layers in the model.
lstm_size: The size of each LSTM layer.
shared_embedding: If set to `True`, the final layer of the model is a dense
layer given by the transposition of the embedding layer. If `False`, the
final dense layer is instead learned separately.
Returns:
An uncompiled `tf.keras.Model`.
"""
if vocab_size < 1:
raise ValueError('vocab_size must be a positive integer.')
if embedding_size < 1:
raise ValueError('embedding_size must be a positive integer.')
if num_lstm_layers < 1:
raise ValueError('num_lstm_layers must be a positive integer.')
if lstm_size < 1:
raise ValueError('lstm_size must be a positive integer.')
inputs = tf.keras.layers.Input(shape=(None,))
input_embedding = tf.keras.layers.Embedding(
input_dim=vocab_size, output_dim=embedding_size, mask_zero=True)
embedded = input_embedding(inputs)
projected = embedded
for _ in range(num_lstm_layers):
layer = tf.keras.layers.LSTM(lstm_size, return_sequences=True)
processed = layer(projected)
projected = tf.keras.layers.Dense(embedding_size)(processed)
if shared_embedding:
transposed_embedding = TransposableEmbedding(input_embedding)
logits = transposed_embedding(projected)
else:
logits = tf.keras.layers.Dense(vocab_size, activation=None)(projected)
return tf.keras.Model(inputs=inputs, outputs=logits)
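# Usage sketch (not part of the original library code): build the model and
# check the logits shape for a toy batch.  The sizes below are made up.
#
#   model = create_recurrent_model(vocab_size=10000, embedding_size=96,
#                                  num_lstm_layers=1, lstm_size=670)
#   tokens = tf.constant([[12, 7, 99, 0, 0]])      # one padded token sequence
#   logits = model(tokens)                         # shape (1, 5, 10000)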
| {
"content_hash": "9003a03ac11ee3863daed7a66cc4c45f",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 80,
"avg_line_length": 41.945205479452056,
"alnum_prop": 0.705421293272371,
"repo_name": "tensorflow/federated",
"id": "acdc9bf0ca9100b18429c5373f03675db8dec0ec",
"size": "3639",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_federated/python/simulation/baselines/stackoverflow/word_prediction_models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "729470"
},
{
"name": "Dockerfile",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "6700736"
},
{
"name": "Shell",
"bytes": "7123"
},
{
"name": "Starlark",
"bytes": "387382"
}
],
"symlink_target": ""
} |
def assert_nearly_equal(left, right, precision=7, message=None):
"""Asserts that left is equal to right up to precision digits"""
condition = round(abs(left - right), precision) == 0
if message is not None:
assert condition, message
else:
assert condition
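# Usage sketch (not part of the original helper):
#
#   assert_nearly_equal(0.1 + 0.2, 0.3)                     # passes
#   assert_nearly_equal(1.0, 1.1, precision=0)              # passes (rounds to 0)
#   assert_nearly_equal(1.0, 1.2, message="too far apart")  # raises AssertionError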
| {
"content_hash": "49befc24ee355af70b56545da15ea2e9",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 68,
"avg_line_length": 41.142857142857146,
"alnum_prop": 0.6701388888888888,
"repo_name": "Benjamin-Marks/mit-tab",
"id": "ecdb7d6b38eecd96c6aa3f11802979d900b808f4",
"size": "289",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mittab/libs/tests/assertion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17627"
},
{
"name": "HTML",
"bytes": "58720"
},
{
"name": "JavaScript",
"bytes": "13545"
},
{
"name": "Makefile",
"bytes": "364"
},
{
"name": "Python",
"bytes": "253153"
}
],
"symlink_target": ""
} |
__version__ = '17.11'
from .base import DAL
from .objects import Field
from .helpers.classes import SQLCustomType
from .helpers.methods import geoPoint, geoLine, geoPolygon
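# Usage sketch (not part of the original package init): the names re-exported
# above are typically used as follows (in-memory SQLite shown purely for
# illustration):
#
#   db = DAL('sqlite:memory')
#   db.define_table('thing', Field('name'))
#   db.thing.insert(name='example')
#   rows = db(db.thing.name == 'example').select()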
| {
"content_hash": "ce0490223e6ecea768ebc590a47d922d",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 58,
"avg_line_length": 29,
"alnum_prop": 0.7816091954022989,
"repo_name": "xiang12835/python_web",
"id": "5964d36488a35f34c1ae2547042b0dbede755785",
"size": "174",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "py2_web2py/web2py/gluon/packages/dal/pydal/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3341"
},
{
"name": "Python",
"bytes": "17420"
}
],
"symlink_target": ""
} |
import unittest
from wsgid.core import parser
from wsgid.core.parser import CommandLineOption, BOOL, INT, STRING, LIST
import wsgid.conf
import sys
import signal
import platform
from mock import patch
from wsgid.commands import *
class ParserTest(unittest.TestCase):
def setUp(self):
wsgid.conf.settings = None
'''
Test if we correctly parse options added by sub-commands
--no-daemon is added by the config command
'''
def test_parse_aditional_options(self):
sys.argv[1:] = ['--no-debug', '--app-path=/tmp']
opts = parser.parse_options()
self.assertTrue(opts.no_debug)
def test_parse_aditional_options_py26(self):
with patch('platform.python_version'):
platform.python_version.return_value = '2.6'
# Call the parser
sys.argv[1:] = ['--no-debug', '--app-path=/tmp']
opts = parser.parse_options()
self.assertTrue(opts.no_debug)
'''
Tests that the default signal os 15 (SIGTERM)
'''
def test_default_signal(self):
sys.argv[1:] = ['--app-path=/tmp']
opts = parser.parse_options()
self.assertEquals(signal.SIGTERM, opts.send_signal)
def test_parse_workers_as_integer(self):
with patch('platform.python_version'):
platform.python_version.return_value = '2.7.1'
sys.argv[1:] = ['--workers=4']
opts = parser.parse_options()
self.assertEquals(4, opts.workers)
def test_parse_default_workers_as_integer(self):
with patch('platform.python_version'):
platform.python_version.return_value = '2.7.1'
sys.argv[1:] = []
opts = parser.parse_options()
self.assertEquals(1, opts.workers)
def test_parse_forced_django_app(self):
sys.argv[1:] = ['--app-path=/tmp', '--django']
opts = parser.parse_options()
self.assertTrue(wsgid.conf.settings.django)
def test_parse_forced_django_defaults_to_false(self):
sys.argv[1:] = ['--app-path=/tmp']
opts = parser.parse_options()
self.assertFalse(wsgid.conf.settings.django)
'''
Ensure we save the parsed options at wsgid.conf.settings
'''
def test_write_conf_settings(self):
sys.argv[1:] = ['--app-path=/tmp', '--debug', '--no-daemon']
self.assertTrue(wsgid.conf.settings is None)
opts = parser.parse_options()
self.assertTrue(wsgid.conf.settings is not None)
self.assertEquals('/tmp', wsgid.conf.settings.app_path)
self.assertTrue(wsgid.conf.settings.debug)
self.assertTrue(wsgid.conf.settings.no_daemon)
def test_no_parse_twice(self):
sys.argv[1:] = ['--app-path=/tmp', '--debug']
opts = parser.parse_options()
self.assertTrue(wsgid.conf.settings is not None)
self.assertEquals('/tmp', wsgid.conf.settings.app_path)
self.assertTrue(wsgid.conf.settings.debug)
sys.argv[1:] = ['--app-path=/tmp/2', '--debug', '--recv=tcp://127.0.0.1:9000']
parser.parse_options()
self.assertEquals(None, wsgid.conf.settings.recv)
self.assertEquals('/tmp', wsgid.conf.settings.app_path)
def test_app_path_defaults_to_pwd(self):
sys.argv[1:] = []
with patch('os.getcwd') as cwd:
cwd.return_value = '/my/path'
opts = parser.parse_options()
self.assertEquals('/my/path', opts.app_path)
class CommandLineOptionTest(unittest.TestCase):
def test_bool_default_true(self):
opt = CommandLineOption(name="debug", type = BOOL, dest = 'debug', default_value = True)
self.assertEquals('--debug', opt.name)
self.assertEquals('store_true', opt.action)
self.assertEquals('debug', opt.dest)
def test_bool_default_false(self):
opt = CommandLineOption(name="no-debug", type = BOOL, dest = 'nodebug', default_value = False)
self.assertEquals('--no-debug', opt.name)
self.assertEquals('store_false', opt.action)
self.assertEquals('nodebug', opt.dest)
def test_default_dest(self):
opt = CommandLineOption(name="no-debug", type = BOOL, default_value = False)
self.assertEquals('--no-debug', opt.name)
self.assertEquals('store_false', opt.action)
self.assertEquals('no_debug', opt.dest)
def test_default_action(self):
opt = CommandLineOption(name="no-debug", default_value = False)
self.assertEquals('store', opt.action)
def test_bool_correct_type(self):
opt = CommandLineOption(name="no-debug", type = BOOL, default_value = False)
self.assertEquals(bool, opt.type)
def test_int_correct_type(self):
opt = CommandLineOption(name="workers", type = INT)
self.assertEquals(int, opt.type)
| {
"content_hash": "b8ecf74ea9ec444f84a6e25bc867d535",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 98,
"avg_line_length": 33.01481481481481,
"alnum_prop": 0.672425398249944,
"repo_name": "daltonmatos/wsgid",
"id": "952f7a7312fd9e83d1a4d81e39bae89eeecb6d80",
"size": "4461",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/unit/parser_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "150087"
},
{
"name": "Shell",
"bytes": "195"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
"""
The tournament application displays the latest active tournament (i.e. not archived or coming).
This page shows:
* 2 tabs: teams and results
* a button to choose another tournament
"""
urlpatterns = [
url( r'^$', views.tournament_list, name='tournament_list' ),
url( r'^tournaments/$', views.tournament_list, name='tournament_list' ),
url( r'^tournaments/(?P<tournament_id>[0-9]+)/$', views.tournament_detail, name = "tournament_detail" ),
#rl( r'^tournaments/(?P<tournament_id>[0-9]+)/match_add/$', views.match_add, name='match_add' ),
# TODO url( r'^tournament/(?P<tournament_id>[0-9]+)/team_add/$', views.team_add_to, name='team_add_to' ),
url( r'^teams/$', views.team_list, name="team_list" ),
#rl( r'^teams/add/$', views.team_add, name='team_add' ),
#url( r'^team/team_add/$', views.team_add, name='team_add' ),
url( r'^teams/(?P<team_id>[0-9]+)/$', views.team_detail, name = "team_detail" ),
url( r'^matchs/$', views.match_list, name='match_list' ),
url( r'^matchs/(?P<match_id>[0-9]+)/$', views.match_detail, name = "match_detail" ),
]
| {
"content_hash": "c3c177e246516bed01c1aef7f17e9c18",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 109,
"avg_line_length": 50.56521739130435,
"alnum_prop": 0.6423043852106621,
"repo_name": "cedlerouge/betwit",
"id": "3941a0f85f64c9b2450a6010075c4cf2b3c0973b",
"size": "1163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tournaments/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "179301"
},
{
"name": "Dockerfile",
"bytes": "140"
},
{
"name": "HTML",
"bytes": "129068"
},
{
"name": "JavaScript",
"bytes": "289518"
},
{
"name": "Python",
"bytes": "98253"
},
{
"name": "SCSS",
"bytes": "65612"
},
{
"name": "Shell",
"bytes": "2004"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import json
from datetime import datetime, timedelta
import six
from django.conf.urls import url, include
from django.core.exceptions import ImproperlyConfigured
from django.http import QueryDict
from django.utils import six
from django.test import TestCase, SimpleTestCase, override_settings
from haystack.query import SearchQuerySet
from rest_framework import serializers
from rest_framework.fields import CharField, IntegerField
from rest_framework.routers import DefaultRouter
from rest_framework.test import APIRequestFactory, APITestCase
from drf_haystack import fields
from drf_haystack.serializers import (
HighlighterMixin, HaystackSerializer,
HaystackSerializerMixin, HaystackFacetSerializer,
HaystackSerializerMeta)
from drf_haystack.viewsets import HaystackViewSet
from drf_haystack.mixins import MoreLikeThisMixin, FacetMixin
from .mixins import WarningTestCaseMixin
from .mockapp.models import MockPerson, MockAllField
from .mockapp.search_indexes import MockPersonIndex, MockPetIndex, MockAllFieldIndex
factory = APIRequestFactory()
# More like this stuff
class SearchPersonMLTSerializer(HaystackSerializer):
more_like_this = serializers.HyperlinkedIdentityField(view_name="search-person-mlt-more-like-this", read_only=True)
class Meta:
index_classes = [MockPersonIndex]
fields = ["firstname", "lastname", "full_name"]
class SearchPersonMLTViewSet(MoreLikeThisMixin, HaystackViewSet):
serializer_class = SearchPersonMLTSerializer
class Meta:
index_models = [MockPerson]
# Faceting stuff
class SearchPersonFSerializer(HaystackSerializer):
class Meta:
index_classes = [MockPersonIndex]
fields = ["firstname", "lastname", "full_name"]
class SearchPersonFacetSerializer(HaystackFacetSerializer):
serialize_objects = True
class Meta:
index_classes = [MockPersonIndex]
fields = ["firstname", "lastname", "created"]
field_options = {
"firstname": {},
"lastname": {},
"created": {
"start_date": datetime.now() - timedelta(days=10 * 365),
"end_date": datetime.now(),
"gap_by": "month",
"gap_amount": 1
}
}
class SearchPersonFacetViewSet(FacetMixin, HaystackViewSet):
serializer_class = SearchPersonFSerializer
facet_serializer_class = SearchPersonFacetSerializer
class Meta:
index_models = [MockPerson]
router = DefaultRouter()
router.register("search-person-mlt", viewset=SearchPersonMLTViewSet, base_name="search-person-mlt")
router.register("search-person-facet", viewset=SearchPersonFacetViewSet, base_name="search-person-facet")
urlpatterns = [
url(r"^", include(router.urls))
]
class HaystackSerializerTestCase(WarningTestCaseMixin, TestCase):
fixtures = ["mockperson", "mockpet"]
def setUp(self):
MockPersonIndex().reindex()
MockPetIndex().reindex()
class Serializer1(HaystackSerializer):
integer_field = serializers.IntegerField()
city = serializers.CharField()
class Meta:
index_classes = [MockPersonIndex]
fields = ["text", "firstname", "lastname", "autocomplete"]
def get_integer_field(self, obj):
return 1
def get_city(self, obj):
return "Declared overriding field"
class Serializer2(HaystackSerializer):
class Meta:
index_classes = [MockPersonIndex]
exclude = ["firstname"]
class Serializer3(HaystackSerializer):
class Meta:
index_classes = [MockPersonIndex]
fields = ["text", "firstname", "lastname", "autocomplete"]
ignore_fields = ["autocomplete"]
class Serializer7(HaystackSerializer):
class Meta:
index_classes = [MockPetIndex]
class ViewSet1(HaystackViewSet):
serializer_class = Serializer1
class Meta:
index_models = [MockPerson]
self.serializer1 = Serializer1
self.serializer2 = Serializer2
self.serializer3 = Serializer3
self.serializer7 = Serializer7
self.view1 = ViewSet1
def tearDown(self):
MockPersonIndex().clear()
def test_serializer_raise_without_meta_class(self):
try:
class Serializer(HaystackSerializer):
pass
self.fail("Did not fail when defining a Serializer without a Meta class")
except ImproperlyConfigured as e:
self.assertEqual(str(e), "%s must implement a Meta class or have the property _abstract" % "Serializer")
def test_serializer_gets_default_instance(self):
serializer = self.serializer1(instance=None)
self.assertIsInstance(serializer.instance, SearchQuerySet,
"Did not get default instance of type SearchQuerySet")
def test_serializer_get_fields(self):
obj = SearchQuerySet().filter(lastname="Foreman")[0]
serializer = self.serializer1(instance=obj)
fields = serializer.get_fields()
self.assertIsInstance(fields, dict)
self.assertIsInstance(fields["integer_field"], IntegerField)
self.assertIsInstance(fields["text"], CharField)
self.assertIsInstance(fields["firstname"], CharField)
self.assertIsInstance(fields["lastname"], CharField)
self.assertIsInstance(fields["autocomplete"], CharField)
def test_serializer_get_fields_with_exclude(self):
obj = SearchQuerySet().filter(lastname="Foreman")[0]
serializer = self.serializer2(instance=obj)
fields = serializer.get_fields()
self.assertIsInstance(fields, dict)
self.assertIsInstance(fields["text"], CharField)
self.assertIsInstance(fields["lastname"], CharField)
self.assertIsInstance(fields["autocomplete"], CharField)
self.assertFalse("firstname" in fields)
def test_serializer_get_fields_with_ignore_fields(self):
obj = SearchQuerySet().filter(lastname="Foreman")[0]
serializer = self.serializer3(instance=obj)
fields = serializer.get_fields()
self.assertIsInstance(fields, dict)
self.assertIsInstance(fields["text"], CharField)
self.assertIsInstance(fields["firstname"], CharField)
self.assertIsInstance(fields["lastname"], CharField)
self.assertFalse("autocomplete" in fields)
def test_serializer_boolean_field(self):
dog = self.serializer7(instance=SearchQuerySet().filter(species="Dog")[0])
iguana = self.serializer7(instance=SearchQuerySet().filter(species="Iguana")[0])
self.assertTrue(dog.data["has_rabies"])
self.assertFalse(iguana.data["has_rabies"])
class HaystackSerializerAllFieldsTestCase(TestCase):
fixtures = ["mockallfield"]
def setUp(self):
MockAllFieldIndex().reindex()
class Serializer1(HaystackSerializer):
class Meta:
index_classes = [MockAllFieldIndex]
fields = ["charfield", "integerfield", "floatfield",
"decimalfield", "boolfield", "datefield",
"datetimefield", "multivaluefield"]
self.serializer1 = Serializer1
def test_serialize_field_is_correct_type(self):
obj = SearchQuerySet().models(MockAllField).latest('datetimefield')
serializer = self.serializer1(instance=obj, many=False)
self.assertIsInstance(serializer.fields['charfield'], fields.HaystackCharField)
self.assertIsInstance(serializer.fields['integerfield'], fields.HaystackIntegerField)
self.assertIsInstance(serializer.fields['floatfield'], fields.HaystackFloatField)
self.assertIsInstance(serializer.fields['decimalfield'], fields.HaystackDecimalField)
self.assertIsInstance(serializer.fields['boolfield'], fields.HaystackBooleanField)
self.assertIsInstance(serializer.fields['datefield'], fields.HaystackDateField)
self.assertIsInstance(serializer.fields['datetimefield'], fields.HaystackDateTimeField)
self.assertIsInstance(serializer.fields['multivaluefield'], fields.HaystackMultiValueField)
class HaystackSerializerMultipleIndexTestCase(WarningTestCaseMixin, TestCase):
fixtures = ["mockperson", "mockpet"]
def setUp(self):
MockPersonIndex().reindex()
MockPetIndex().reindex()
class Serializer1(HaystackSerializer):
"""
Regular multiple index serializer
"""
class Meta:
index_classes = [MockPersonIndex, MockPetIndex]
fields = ["text", "firstname", "lastname", "name", "species", "autocomplete"]
class Serializer2(HaystackSerializer):
"""
Multiple index serializer with declared fields
"""
_MockPersonIndex__hair_color = serializers.CharField()
extra = serializers.IntegerField()
class Meta:
index_classes = [MockPersonIndex, MockPetIndex]
exclude = ["firstname"]
def get__MockPersonIndex__hair_color(self):
return "black"
def get_extra(self):
return 1
class Serializer3(HaystackSerializer):
"""
Multiple index serializer with index aliases
"""
class Meta:
index_classes = [MockPersonIndex, MockPetIndex]
exclude = ["firstname"]
index_aliases = {
'mockapp.MockPersonIndex': 'People'
}
class ViewSet1(HaystackViewSet):
serializer_class = Serializer1
class ViewSet2(HaystackViewSet):
serializer_class = Serializer2
class ViewSet3(HaystackViewSet):
serializer_class = Serializer3
self.serializer1 = Serializer1
self.serializer2 = Serializer2
self.serializer3 = Serializer3
self.view1 = ViewSet1
self.view2 = ViewSet2
self.view3 = ViewSet3
def tearDown(self):
MockPersonIndex().clear()
MockPetIndex().clear()
def test_serializer_multiple_index_data(self):
objs = SearchQuerySet().filter(text="John")
serializer = self.serializer1(instance=objs, many=True)
data = serializer.data
self.assertEqual(len(data), 4)
for result in data:
if "name" in result:
self.assertTrue("species" in result, "Pet results should have 'species' and 'name' fields")
self.assertTrue("firstname" not in result, "Pet results should have 'species' and 'name' fields")
self.assertTrue("lastname" not in result, "Pet results should have 'species' and 'name' fields")
elif "firstname" in result:
self.assertTrue("lastname" in result, "Person results should have 'firstname' and 'lastname' fields")
self.assertTrue("name" not in result, "Person results should have 'firstname' and 'lastname' fields")
self.assertTrue("species" not in result, "Person results should have 'firstname' and 'lastname' fields")
else:
self.fail("Result should contain either Pet or Person fields")
def test_serializer_multiple_index_declared_fields(self):
objs = SearchQuerySet().filter(text="John")
serializer = self.serializer2(instance=objs, many=True)
data = serializer.data
self.assertEqual(len(data), 4)
for result in data:
if "name" in result:
self.assertTrue("extra" in result, "'extra' should be present in Pet results")
self.assertTrue("hair_color" not in result, "'hair_color' should not be present in Pet results")
elif "lastname" in result:
self.assertTrue("extra" in result, "'extra' should be present in Person results")
self.assertTrue("hair_color" in result, "'hair_color' should be present in Person results")
else:
self.fail("Result should contain either Pet or Person fields")
class HaystackSerializerHighlighterMixinTestCase(WarningTestCaseMixin, TestCase):
fixtures = ["mockperson"]
def setUp(self):
MockPersonIndex().reindex()
class Serializer2(HighlighterMixin, HaystackSerializer):
highlighter_html_tag = "div"
highlighter_css_class = "my-fancy-highlighter"
highlighter_field = "description"
class Meta:
index_classes = [MockPersonIndex]
fields = ["firstname", "lastname", "description"]
class Serializer3(Serializer2):
highlighter_class = None
class ViewSet1(HaystackViewSet):
serializer_class = Serializer2
class ViewSet2(HaystackViewSet):
serializer_class = Serializer3
self.view1 = ViewSet1
self.view2 = ViewSet2
def tearDown(self):
MockPersonIndex().clear()
def test_serializer_highlighting(self):
request = factory.get(path="/", data={"firstname": "jeremy"}, content_type="application/json")
response = self.view1.as_view(actions={"get": "list"})(request)
response.render()
for result in json.loads(response.content.decode()):
self.assertTrue("highlighted" in result)
self.assertEqual(
result["highlighted"],
" ".join(('<%(tag)s class="%(css_class)s">Jeremy</%(tag)s>' % {
"tag": self.view1.serializer_class.highlighter_html_tag,
"css_class": self.view1.serializer_class.highlighter_css_class
}, "%s" % "is a nice chap!"))
)
def test_serializer_highlighter_raise_no_highlighter_class(self):
request = factory.get(path="/", data={"firstname": "jeremy"}, content_type="application/json")
try:
self.view2.as_view(actions={"get": "list"})(request)
self.fail("Did not raise ImproperlyConfigured error when called without a serializer_class")
except ImproperlyConfigured as e:
self.assertEqual(
str(e),
"%(cls)s is missing a highlighter_class. Define %(cls)s.highlighter_class, "
"or override %(cls)s.get_highlighter()." % {"cls": self.view2.serializer_class.__name__}
)
@override_settings(ROOT_URLCONF="tests.test_serializers")
class HaystackSerializerMoreLikeThisTestCase(APITestCase):
fixtures = ["mockperson"]
def setUp(self):
MockPersonIndex().reindex()
def tearDown(self):
MockPersonIndex().clear()
def test_serializer_more_like_this_link(self):
response = self.client.get(
path="/search-person-mlt/",
data={"firstname": "odysseus", "lastname": "cooley"},
format="json"
)
self.assertEqual(
response.data,
[{
"lastname": "Cooley",
"full_name": "Odysseus Cooley",
"firstname": "Odysseus",
"more_like_this": "http://testserver/search-person-mlt/18/more-like-this/"
}]
)
@override_settings(ROOT_URLCONF="tests.test_serializers")
class HaystackFacetSerializerTestCase(TestCase):
fixtures = ["mockperson"]
def setUp(self):
MockPersonIndex().reindex()
self.response = self.client.get(
path="/search-person-facet/facets/",
data={},
format="json"
)
def tearDown(self):
MockPersonIndex().clear()
@staticmethod
def build_absolute_uri(location):
"""
Builds an absolute URI using the test server's domain and the specified location.
"""
location = location.lstrip("/")
return "http://testserver/{location}".format(location=location)
@staticmethod
def is_paginated_facet_response(response):
"""
Returns True if the response.data seems like a faceted result.
Only works for responses created with the test client.
"""
return "objects" in response.data and \
all([k in response.data["objects"] for k in ("count", "next", "previous", "results")])
def test_serializer_facet_top_level_structure(self):
for key in ("fields", "dates", "queries"):
self.assertContains(self.response, key, count=1)
def test_serializer_facet_field_result(self):
fields = self.response.data["fields"]
for field in ("firstname", "lastname"):
self.assertTrue(field in fields)
self.assertTrue(isinstance(fields[field], list))
firstname = fields["firstname"][0]
self.assertTrue({"text", "count", "narrow_url"} <= set(firstname))
self.assertEqual(
firstname["narrow_url"],
self.build_absolute_uri("/search-person-facet/facets/?selected_facets=firstname_exact%3A{term}".format(
term=firstname["text"]))
)
lastname = fields["lastname"][0]
self.assertTrue({"text", "count", "narrow_url"} <= set(lastname))
self.assertEqual(
lastname["narrow_url"],
self.build_absolute_uri("/search-person-facet/facets/?selected_facets=lastname_exact%3A{term}".format(
term=lastname["text"]
))
)
def test_serializer_facet_date_result(self):
dates = self.response.data["dates"]
self.assertTrue("created" in dates)
self.assertEqual(len(dates["created"]), 1)
created = dates["created"][0]
self.assertTrue(all([k in created for k in ("text", "count", "narrow_url")]))
self.assertEqual(created["text"], "2015-05-01T00:00:00Z")
self.assertEqual(created["count"], 100)
self.assertEqual(
created["narrow_url"],
self.build_absolute_uri("/search-person-facet/facets/?selected_facets=created_exact%3A2015-05-01+00%3A00%3A00")
)
def test_serializer_facet_queries_result(self):
# Not Implemented
pass
def test_serializer_facet_narrow(self):
response = self.client.get(
path="/search-person-facet/facets/",
data=QueryDict("selected_facets=firstname_exact:John&selected_facets=lastname_exact:McClane"),
format="json"
)
self.assertEqual(response.data["queries"], {})
        self.assertTrue(all(field in response.data["fields"] for field in ("firstname", "lastname")))
self.assertEqual(len(response.data["fields"]["firstname"]), 1)
self.assertEqual(response.data["fields"]["firstname"][0]["text"], "John")
self.assertEqual(response.data["fields"]["firstname"][0]["count"], 1)
self.assertEqual(
response.data["fields"]["firstname"][0]["narrow_url"],
self.build_absolute_uri("/search-person-facet/facets/?selected_facets=firstname_exact%3AJohn"
"&selected_facets=lastname_exact%3AMcClane")
)
self.assertEqual(len(response.data["fields"]["lastname"]), 1)
self.assertEqual(response.data["fields"]["lastname"][0]["text"], "McClane")
self.assertEqual(response.data["fields"]["lastname"][0]["count"], 1)
self.assertEqual(
response.data["fields"]["lastname"][0]["narrow_url"],
self.build_absolute_uri("/search-person-facet/facets/?selected_facets=firstname_exact%3AJohn"
"&selected_facets=lastname_exact%3AMcClane")
)
self.assertTrue("created" in response.data["dates"])
self.assertEqual(len(response.data["dates"]), 1)
self.assertEqual(response.data["dates"]["created"][0]["text"], "2015-05-01T00:00:00Z")
self.assertEqual(response.data["dates"]["created"][0]["count"], 1)
self.assertEqual(
response.data["dates"]["created"][0]["narrow_url"],
self.build_absolute_uri("/search-person-facet/facets/?selected_facets=created_exact%3A2015-05-01+00%3A00%3A00"
"&selected_facets=firstname_exact%3AJohn&selected_facets=lastname_exact%3AMcClane"
)
)
def test_serializer_raise_without_meta_class(self):
try:
class FacetSerializer(HaystackFacetSerializer):
pass
self.fail("Did not fail when defining a Serializer without a Meta class")
except ImproperlyConfigured as e:
self.assertEqual(str(e), "%s must implement a Meta class or have the property _abstract" % "FacetSerializer")
class HaystackSerializerMixinTestCase(WarningTestCaseMixin, TestCase):
fixtures = ["mockperson"]
def setUp(self):
MockPersonIndex().reindex()
class MockPersonSerializer(serializers.ModelSerializer):
class Meta:
model = MockPerson
fields = ('id', 'firstname', 'lastname', 'created', 'updated')
read_only_fields = ('created', 'updated')
class Serializer1(HaystackSerializerMixin, MockPersonSerializer):
class Meta(MockPersonSerializer.Meta):
search_fields = ['text', ]
class Viewset1(HaystackViewSet):
serializer_class = Serializer1
self.serializer1 = Serializer1
self.viewset1 = Viewset1
def tearDown(self):
MockPersonIndex().clear()
def test_serializer_mixin(self):
objs = SearchQuerySet().filter(text="Foreman")
serializer = self.serializer1(instance=objs, many=True)
self.assertEqual(
json.loads(json.dumps(serializer.data)),
[{
"id": 1,
"firstname": "Abel",
"lastname": "Foreman",
"created": "2015-05-19T10:48:08.686000Z",
"updated": "2016-04-24T16:02:59.378000Z"
}]
)
class HaystackMultiSerializerTestCase(WarningTestCaseMixin, TestCase):
fixtures = ["mockperson", "mockpet"]
def setUp(self):
MockPersonIndex().reindex()
MockPetIndex().reindex()
class MockPersonSerializer(HaystackSerializer):
class Meta:
index_classes = [MockPersonIndex]
fields = ('text', 'firstname', 'lastname', 'description')
class MockPetSerializer(HaystackSerializer):
class Meta:
index_classes = [MockPetIndex]
exclude = ('description', 'autocomplete')
class Serializer1(HaystackSerializer):
class Meta:
serializers = {
MockPersonIndex: MockPersonSerializer,
MockPetIndex: MockPetSerializer
}
self.serializer1 = Serializer1
def tearDown(self):
MockPersonIndex().clear()
MockPetIndex().clear()
def test_multi_serializer(self):
objs = SearchQuerySet().filter(text="Zane")
serializer = self.serializer1(instance=objs, many=True)
self.assertEqual(
json.loads(json.dumps(serializer.data)),
[{
"has_rabies": True,
"text": "Zane",
"name": "Zane",
"species": "Dog"
},
{
"text": "Zane Griffith\n",
"firstname": "Zane",
"lastname": "Griffith",
"description": "Zane is a nice chap!"
}]
)
class TestHaystackSerializerMeta(SimpleTestCase):
def test_abstract_not_inherited(self):
class Base(six.with_metaclass(HaystackSerializerMeta, serializers.Serializer)):
_abstract = True
def create_subclass():
class Sub(HaystackSerializer):
pass
self.assertRaises(ImproperlyConfigured, create_subclass)
class TestMeta(SimpleTestCase):
def test_inheritance(self):
"""
Tests that Meta fields are correctly overriden by subclasses.
"""
class Serializer(HaystackSerializer):
class Meta:
fields = ('overriden_fields',)
self.assertEqual(Serializer.Meta.fields, ('overriden_fields',))
def test_default_attrs(self):
class Serializer(HaystackSerializer):
class Meta:
fields = ('overriden_fields',)
self.assertEqual(Serializer.Meta.exclude, tuple())
def test_raises_if_fields_and_exclude_defined(self):
def create_subclass():
class Serializer(HaystackSerializer):
class Meta:
fields = ('include_field',)
exclude = ('exclude_field',)
return Serializer
self.assertRaises(ImproperlyConfigured, create_subclass)
| {
"content_hash": "26b5750739ad14e501a11d5c052d99dc",
"timestamp": "",
"source": "github",
"line_count": 679,
"max_line_length": 123,
"avg_line_length": 36.83652430044182,
"alnum_prop": 0.6193826963057733,
"repo_name": "jannon/drf-haystack",
"id": "5cdbdea72213297e67bf5a4b37357ff5efe1afb4",
"size": "25098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "536"
},
{
"name": "Python",
"bytes": "161158"
}
],
"symlink_target": ""
} |
from urllib import urlencode, unquote
from urlparse import parse_qs
import re, sys, traceback
from twisted.internet import defer
from twisted.internet.defer import inlineCallbacks
from twisted.internet.error import ConnectionRefusedError
from twisted.web import http
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from vumi.transports.base import Transport
from vumi.utils import http_request_full, normalize_msisdn
from vumi import log
class MobivateHttpTransport(Transport):
    transport_type = 'sms'
def mkres(self, cls, publish_func, path_key):
resource = cls(self.config, publish_func)
self._resources.append(resource)
return (resource, "%s/%s" % (self.config['receive_path'], path_key))
@inlineCallbacks
def setup_transport(self):
log.msg("Setup Mobivate Transport %s" % self.config)
super(MobivateHttpTransport, self).setup_transport()
self._resources = []
resources = [
self.mkres(MobivateReceiveSMSResource,
self.publish_message,
"SMSfromMobiles"),
self.mkres(MobivateReceiveReceipt,
self.publish_delivery_report,
"DeliveryReciept")
]
self.web_resource = yield self.start_web_resources(
resources, self.config['receive_port'])
def teardown_transport(self):
log.msg("Stop Mobivate Transport")
if hasattr(self, 'web_resource'):
return self.web_resource.stopListening()
@inlineCallbacks
def handle_outbound_message(self, message):
log.msg("Outbound message %s" % repr(message))
try:
params = {
'USER_NAME': self.config['user_name'],
'PASSWORD': self.config['password'],
'ORIGINATOR': message['from_addr'],
'MESSAGE_TEXT': message['content'],
'RECIPIENT': message['to_addr'],
'REFERENCE': message['message_id']
}
encoded_params = urlencode(params)
log.msg('Hitting %s with %s' % (self.config['url'], encoded_params))
response = yield http_request_full(
"%s?%s" % (self.config['url'], encoded_params),
headers={'User-Agent': ['Vumi Mobivate Transport'],
'Content-Type': ['application/json;charset=UTF-8'], },
method='GET')
if response.code == http.OK:
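                # The transport treats the plain-text reply as line-oriented: the first
                # line is read as a status code ('0' or '1' meaning accepted) and an
                # optional second line as a human-readable message for error reports.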
response_content = response.delivered_body.split("\n")
response_status = response_content[0]
response_msg = response_content[1] if len(response_content) > 1 else ''
if (response_status in ['0', '1']):
yield self.publish_ack(
user_message_id=message['message_id'],
sent_message_id=message['message_id'])
else:
reason = "SERVICE ERROR %s - %s" % (response_status, response_msg)
log.error(reason)
yield self.publish_nack(message['message_id'], reason)
else:
reason = "HTTP ERROR %s - %s" % (response.code, response.delivered_body)
log.error(reason)
yield self.publish_nack(message['message_id'], reason)
except Exception as ex:
exc_type, exc_value, exc_traceback = sys.exc_info()
log.error(
"TRANSPORT ERROR: %r" %
traceback.format_exception(exc_type, exc_value, exc_traceback))
reason = "TRANSPORT ERROR %s" % (ex.message)
yield self.publish_nack(message['message_id'], reason)
class MobivateReceiveSMSResource(Resource):
isLeaf = True
def __init__(self, config, publish_func):
log.msg("Init ReceiveSMSResource %s" % (config))
self.config = config
self.publish_func = publish_func
self.transport_name = self.config['transport_name']
@inlineCallbacks
def do_render(self, request):
log.msg('got hit with %s' % request.args)
request.setResponseCode(http.OK)
request.setHeader('Content-Type', 'text/plain')
try:
yield self.publish_func(
transport_name=self.transport_name,
transport_type='sms',
to_addr=request.args['RECIPIENT'][0],
from_addr=request.args['ORIGINATOR'][0],
content=request.args['MESSAGE_TEXT'][0],
transport_metadata={})
request.write('0')
except Exception, e:
request.setResponseCode(http.INTERNAL_SERVER_ERROR)
log.msg("Error processing the request: %s" % (request,))
request.finish()
def render(self, request):
self.do_render(request)
return NOT_DONE_YET
class MobivateReceiveReceipt(Resource):
isLeaf = True
def __init__(self, config, publish_func):
log.msg("Init ReceiveSMSResource %s" % (config))
self.config = config
self.publish_func = publish_func
self.transport_name = self.config['transport_name']
@inlineCallbacks
def do_render(self, request):
log.msg('got hit with %s' % request.args)
request.setResponseCode(http.OK)
request.setHeader('Content-Type', 'text/plain')
try:
yield self.publish_func(
user_message_id=request.args['REFERENCE'][0],
delivery_status='delivered')
request.write('0')
except Exception, e:
request.setResponseCode(http.INTERNAL_SERVER_ERROR)
log.msg("Error processing the request: %s" % (request,))
request.finish()
def render(self, request):
self.do_render(request)
return NOT_DONE_YET
| {
"content_hash": "a37d82ce7a50f0294404b2d34719f807",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 91,
"avg_line_length": 39.01973684210526,
"alnum_prop": 0.5759568369583544,
"repo_name": "texttochange/vusion-backend",
"id": "695c7101d8f9a7ce5a03c861f15a8e5a466e2ebd",
"size": "5931",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "transports/mobivate_malawi/mobivate_http.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1510"
},
{
"name": "Python",
"bytes": "1204678"
},
{
"name": "Shell",
"bytes": "798"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .. import base
# Module API
def extract_source(record):
source = {
'id': 'gsk',
'name': 'GlaxoSmithKline',
'type': 'register',
'source_url': 'http://www.gsk.com',
'terms_and_conditions_url': 'http://www.gsk.com/en-gb/terms-of-use',
}
return source
def extract_trial(record):
# Get identifiers
identifiers = base.helpers.clean_identifiers({
'nct': record['clinicaltrials_gov_identifier'],
'gsk': record['study_id'],
})
# Get public title
public_title = base.helpers.get_optimal_title(
record['study_title'],
record['official_study_title'],
record['study_id'],
)
# Get status and recruitment status
statuses = {
None: [None, None],
'Active, not recruiting': ['ongoing', 'not_recruiting'],
'Active not recruiting': ['ongoing', 'not_recruiting'],
'Completed': ['complete', 'not_recruiting'],
'Not yet recruiting': ['ongoing', 'not_recruiting'],
'Recruiting': ['ongoing', 'recruiting'],
'Suspended': ['suspended', 'not_recruiting'],
'Terminated': ['terminated', 'not_recruiting'],
'Withdrawn': ['withdrawn', 'not_recruiting'],
}
status, recruitment_status = statuses[record.get('study_recruitment_status')]
# Get gender
gender = None
if record['gender']:
gender = record['gender'].lower()
# Get has_published_results
has_published_results = False
if record['protocol_id']:
has_published_results = True
# Get study_phase
study_phase = base.normalizers.get_normalized_phase(record['phase'])
trial = {
'identifiers': identifiers,
'registration_date': record['first_received'],
'public_title': public_title,
'brief_summary': record['brief_summary'],
'scientific_title': record['official_study_title'],
'description': record['detailed_description'],
'status': status,
'recruitment_status': recruitment_status,
'eligibility_criteria': {
'criteria': record['eligibility_criteria'],
},
'target_sample_size': record['enrollment'],
'first_enrollment_date': record['study_start_date'],
'study_type': record['study_type'],
'study_design': record['study_design'],
'study_phase': study_phase,
'primary_outcomes': record['primary_outcomes'],
'secondary_outcomes': record['secondary_outcomes'],
'gender': gender,
'has_published_results': has_published_results,
'last_verification_date': record['record_verification_date'],
}
return trial
def extract_conditions(record):
conditions = []
for element in record['conditions'] or []:
conditions.append({
'name': element,
})
return conditions
def extract_interventions(record):
interventions = []
return interventions
def extract_locations(record):
locations = []
return locations
def extract_organisations(record):
organisations = []
return organisations
def extract_persons(record):
persons = []
return persons
def extract_documents(record):
documents = []
results_url = record.get('results_url')
if results_url:
document = {
'name': 'Results',
'source_url': results_url,
}
documents.append(document)
return documents
def extract_document_category(record):
return base.config.DOCUMENT_CATEGORIES['clinical_study_report']
| {
"content_hash": "a1adc021606aa54876bd2821b43774bb",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 81,
"avg_line_length": 28.053030303030305,
"alnum_prop": 0.6138266270591413,
"repo_name": "arthurSena/processors",
"id": "906fc6e7ff63a9d4ce2ec3c3d02d993443312f38",
"size": "3727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "processors/gsk/extractors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "414"
},
{
"name": "Python",
"bytes": "253590"
}
],
"symlink_target": ""
} |
__author__ = 'root'
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
# Examples:
# url(r'^telco_billing/', include('telco_billing.urls'), name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^home/$', 'wedding.views.index' , name='index-page'),
)
| {
"content_hash": "15807c0062b2435cc6a0be3e242f51ee",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 74,
"avg_line_length": 21.857142857142858,
"alnum_prop": 0.6111111111111112,
"repo_name": "harshittrivedi78/hotel_listing",
"id": "aa5782865c821b9c7389c23565b5330d2bcefb9b",
"size": "306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myproject/wedding/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "959022"
},
{
"name": "HTML",
"bytes": "1584548"
},
{
"name": "JavaScript",
"bytes": "4087475"
},
{
"name": "PHP",
"bytes": "1742"
},
{
"name": "Python",
"bytes": "12432"
}
],
"symlink_target": ""
} |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.WGL import _types as _cs
# End users want this...
from OpenGL.raw.WGL._types import *
from OpenGL.raw.WGL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'WGL_NV_DX_interop2'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.WGL,'WGL_NV_DX_interop2',error_checker=_errors._error_checker)
| {
"content_hash": "9ea100a6ec9658292c0e5f6717675603",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 113,
"avg_line_length": 33.13333333333333,
"alnum_prop": 0.7464788732394366,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "7f602b3bf435d8ce878cc374884d763acaf31335",
"size": "497",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/raw/WGL/NV/DX_interop2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
import cv2
import numpy as np
import tensorflow as tf
import os
import argparse
from tensorpack import *
from tensorpack.utils import viz
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope, under_name_scope
from tensorpack.tfutils import optimizer, summary, gradproc
from tensorpack.dataflow import dataset
from GAN import GANTrainer, GANModelDesc
"""
To train:
./InfoGAN-mnist.py
To visualize:
./InfoGAN-mnist.py --sample --load path/to/model
A pretrained model is at http://models.tensorpack.com/GAN/
"""
BATCH = 128
# latent space is cat(10) x uni(2) x noise(NOISE_DIM)
NUM_CLASS = 10
NUM_UNIFORM = 2
DIST_PARAM_DIM = NUM_CLASS + NUM_UNIFORM
NOISE_DIM = 62
# prior: the assumed distribution of the latent factors in the dataset
DIST_PRIOR_PARAM = [1.] * NUM_CLASS + [0.] * NUM_UNIFORM
def shapeless_placeholder(x, axis, name):
"""
Make the static shape of a tensor less specific.
If you want to feed to a tensor, the shape of the feed value must match
the tensor's static shape. This function creates a placeholder which
defaults to x if not fed, but has a less specific static shape than x.
See also `tensorflow#5680 <https://github.com/tensorflow/tensorflow/issues/5680>`_.
Args:
x: a tensor
axis(int or list of ints): these axes of ``x.get_shape()`` will become
None in the output.
name(str): name of the output tensor
Returns:
a tensor equal to x, but shape information is partially cleared.
"""
shp = x.get_shape().as_list()
if not isinstance(axis, list):
axis = [axis]
for a in axis:
if shp[a] is None:
raise ValueError("Axis {} of shape {} is already unknown!".format(a, shp))
shp[a] = None
x = tf.placeholder_with_default(x, shape=shp, name=name)
return x
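# A minimal usage sketch for shapeless_placeholder (illustrative only; assumes TF 1.x graph mode):
#   x = tf.zeros([BATCH, NOISE_DIM])                # static shape (128, 62)
#   z = shapeless_placeholder(x, 0, name='z_demo')  # static shape (None, 62)
#   At session run time, z evaluates to x unless a value is fed for the 'z_demo' placeholder.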
def get_distributions(vec_cat, vec_uniform):
cat = tf.distributions.Categorical(logits=vec_cat, validate_args=True, name='cat')
uni = tf.distributions.Normal(vec_uniform, scale=1., validate_args=True, allow_nan_stats=False, name='uni_a')
return cat, uni
def entropy_from_samples(samples, vec):
"""
Estimate H(x|s) ~= -E_{x \sim P(x|s)}[\log Q(x|s)], where x are samples, and Q is parameterized by vec.
"""
samples_cat = tf.argmax(samples[:, :NUM_CLASS], axis=1, output_type=tf.int32)
samples_uniform = samples[:, NUM_CLASS:]
cat, uniform = get_distributions(vec[:, :NUM_CLASS], vec[:, NUM_CLASS:])
def neg_logprob(dist, sample, name):
nll = -dist.log_prob(sample)
# average over batch
return tf.reduce_sum(tf.reduce_mean(nll, axis=0), name=name)
entropies = [neg_logprob(cat, samples_cat, 'nll_cat'),
neg_logprob(uniform, samples_uniform, 'nll_uniform')]
return entropies
@under_name_scope()
def sample_prior(batch_size):
cat, _ = get_distributions(DIST_PRIOR_PARAM[:NUM_CLASS], DIST_PRIOR_PARAM[NUM_CLASS:])
sample_cat = tf.one_hot(cat.sample(batch_size), NUM_CLASS)
"""
OpenAI official code actually models the "uniform" latent code as
a Gaussian distribution, but obtain the samples from a uniform distribution.
"""
sample_uni = tf.random_uniform([batch_size, NUM_UNIFORM], -1, 1)
samples = tf.concat([sample_cat, sample_uni], axis=1)
return samples
class Model(GANModelDesc):
def inputs(self):
return [tf.placeholder(tf.float32, (None, 28, 28), 'input')]
def generator(self, z):
l = FullyConnected('fc0', z, 1024, activation=BNReLU)
l = FullyConnected('fc1', l, 128 * 7 * 7, activation=BNReLU)
l = tf.reshape(l, [-1, 7, 7, 128])
l = Conv2DTranspose('deconv1', l, 64, 4, 2, activation=BNReLU)
l = Conv2DTranspose('deconv2', l, 1, 4, 2, activation=tf.identity)
l = tf.sigmoid(l, name='gen')
return l
@auto_reuse_variable_scope
def discriminator(self, imgs):
with argscope(Conv2D, kernel_size=4, strides=2):
l = (LinearWrap(imgs)
.Conv2D('conv0', 64)
.tf.nn.leaky_relu()
.Conv2D('conv1', 128)
.BatchNorm('bn1')
.tf.nn.leaky_relu()
.FullyConnected('fc1', 1024)
.BatchNorm('bn2')
.tf.nn.leaky_relu()())
logits = FullyConnected('fct', l, 1)
encoder = (LinearWrap(l)
.FullyConnected('fce1', 128)
.BatchNorm('bne')
.tf.nn.leaky_relu()
.FullyConnected('fce-out', DIST_PARAM_DIM)())
return logits, encoder
def build_graph(self, real_sample):
real_sample = tf.expand_dims(real_sample, -1)
# sample the latent code:
zc = shapeless_placeholder(sample_prior(BATCH), 0, name='z_code')
z_noise = shapeless_placeholder(
tf.random_uniform([BATCH, NOISE_DIM], -1, 1), 0, name='z_noise')
z = tf.concat([zc, z_noise], 1, name='z')
with argscope([Conv2D, Conv2DTranspose, FullyConnected],
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)):
with tf.variable_scope('gen'):
fake_sample = self.generator(z)
fake_sample_viz = tf.cast((fake_sample) * 255.0, tf.uint8, name='viz')
tf.summary.image('gen', fake_sample_viz, max_outputs=30)
# may need to investigate how bn stats should be updated across two discrim
with tf.variable_scope('discrim'):
real_pred, _ = self.discriminator(real_sample)
fake_pred, dist_param = self.discriminator(fake_sample)
"""
Mutual information between x (i.e. zc in this case) and some
information s (the generated samples in this case):
I(x;s) = H(x) - H(x|s)
= H(x) + E[\log P(x|s)]
The distribution from which zc is sampled, in this case, is set to a fixed prior already.
So the first term is a constant.
For the second term, we can maximize its variational lower bound:
E_{x \sim P(x|s)}[\log Q(x|s)]
where Q(x|s) is a proposal distribution to approximate P(x|s).
Here, Q(x|s) is assumed to be a distribution which shares the form
of P, and whose parameters are predicted by the discriminator network.
"""
with tf.name_scope("mutual_information"):
with tf.name_scope('prior_entropy'):
cat, uni = get_distributions(DIST_PRIOR_PARAM[:NUM_CLASS], DIST_PRIOR_PARAM[NUM_CLASS:])
ents = [cat.entropy(name='cat_entropy'), tf.reduce_sum(uni.entropy(), name='uni_entropy')]
entropy = tf.add_n(ents, name='total_entropy')
# Note that the entropy of prior is a constant. The paper mentioned it but didn't use it.
with tf.name_scope('conditional_entropy'):
cond_ents = entropy_from_samples(zc, dist_param)
cond_entropy = tf.add_n(cond_ents, name="total_entropy")
MI = tf.subtract(entropy, cond_entropy, name='mutual_information')
summary.add_moving_summary(entropy, cond_entropy, MI, *cond_ents)
# default GAN objective
self.build_losses(real_pred, fake_pred)
# subtract mutual information for latent factors (we want to maximize them)
self.g_loss = tf.subtract(self.g_loss, MI, name='total_g_loss')
self.d_loss = tf.subtract(self.d_loss, MI, name='total_d_loss')
summary.add_moving_summary(self.g_loss, self.d_loss)
# distinguish between variables of generator and discriminator updates
self.collect_variables()
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=2e-4, dtype=tf.float32, trainable=False)
opt = tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-6)
# generator learns 5 times faster
return optimizer.apply_grad_processors(
opt, [gradproc.ScaleGradient(('gen/.*', 5))])
def get_data():
ds = ConcatData([dataset.Mnist('train'), dataset.Mnist('test')])
ds = BatchData(ds, BATCH)
ds = MapData(ds, lambda dp: [dp[0]]) # only use the image
return ds
def sample(model_path):
pred = OfflinePredictor(PredictConfig(
session_init=get_model_loader(model_path),
model=Model(),
input_names=['z_code', 'z_noise'],
output_names=['gen/viz']))
# sample all one-hot encodings (10 times)
z_cat = np.tile(np.eye(10), [10, 1])
    # sample continuous variables from -2 to +2 as mentioned in the paper
z_uni = np.linspace(-2.0, 2.0, num=100)
z_uni = z_uni[:, None]
IMG_SIZE = 400
while True:
# only categorical turned on
z_noise = np.random.uniform(-1, 1, (100, NOISE_DIM))
zc = np.concatenate((z_cat, z_uni * 0, z_uni * 0), axis=1)
o = pred(zc, z_noise)[0]
viz1 = viz.stack_patches(o, nr_row=10, nr_col=10)
viz1 = cv2.resize(viz1, (IMG_SIZE, IMG_SIZE))
        # show effect of first continuous variable with fixed noise
zc = np.concatenate((z_cat, z_uni, z_uni * 0), axis=1)
o = pred(zc, z_noise * 0)[0]
viz2 = viz.stack_patches(o, nr_row=10, nr_col=10)
viz2 = cv2.resize(viz2, (IMG_SIZE, IMG_SIZE))
        # show effect of second continuous variable with fixed noise
zc = np.concatenate((z_cat, z_uni * 0, z_uni), axis=1)
o = pred(zc, z_noise * 0)[0]
viz3 = viz.stack_patches(o, nr_row=10, nr_col=10)
viz3 = cv2.resize(viz3, (IMG_SIZE, IMG_SIZE))
canvas = viz.stack_patches(
[viz1, viz2, viz3],
nr_row=1, nr_col=3, border=5, bgcolor=(255, 0, 0))
viz.interactive_imshow(canvas)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
parser.add_argument('--sample', action='store_true', help='visualize the space of the 10 latent codes')
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.sample:
BATCH = 100
sample(args.load)
else:
logger.auto_set_dir()
GANTrainer(QueueInput(get_data()),
Model()).train_with_defaults(
callbacks=[ModelSaver(keep_checkpoint_every_n_hours=0.1)],
steps_per_epoch=500,
max_epoch=100,
session_init=SaverRestore(args.load) if args.load else None
)
| {
"content_hash": "99d9a30054056bc1219af669b8183fb9",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 113,
"avg_line_length": 38.68,
"alnum_prop": 0.6075961267274608,
"repo_name": "eyaler/tensorpack",
"id": "19be093c97c3a4e410007d162f97b56ca3ac6f56",
"size": "10728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/GAN/InfoGAN-mnist.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "742109"
},
{
"name": "Shell",
"bytes": "1775"
}
],
"symlink_target": ""
} |
__author__ = 'kohlmannj'
from copy import copy, deepcopy
import os
import Ity
import DocuscopeDictionary
from DocuscopeCSVDictionary import DocuscopeCSVDictionary
from Ity.Tokenizers import Tokenizer
from Ity.Taggers import Tagger
import time
class DocuscopeTagger(Tagger):
"""
DocuscopeTagger uses an implementation of the Docuscope rule-matching
    algorithm to apply rules ("lats") from the Docuscope dictionary (by Kaufer
and Ishizaki of Carnegie Mellon University). The dictionary maps rule names
to one or more "phrases", which themselves are one or more words ("we") or
"word classes" ("!ROYALWE"). These rules may also include punctuation
characters. The algorithm prioritizes the longest rules, so it applies the
rule for which there appears the longest contiguous subset of matching
words, given a starting token from a text. If the Docuscope dictionary
does not contain an applicable long rule, it provides additional "short"
rules that apply for single words (or punctuation characters, in theory).
This Tagger excludes whitespace and newline characters, but does so in a
way that such tokens are simply passed. There is the potential for
erroneous long rule applications in cases where a long rule may be matched
across a newline token, for example. Most of the time, the structure of
the Docuscope dictionary's rules and the structure of the document itself
should prevent this from happening often. (That is, a long rule matching
"who goes there" could not be applied to "who goes.\n\nThere" because the
period ending the sentence prevents the rule from being applied.)
The long rule application algorithm is based on the original one written by
Michael Gleicher in his DocuscopeJr module.
DocuscopeTagger may be instantiated with an alternative `dictionary_path`,
which refers to either a folder containing Docuscope-style plain text files
with rule and word class specifications, or a CSV file specifying rule and
word class specifications. If `None` is provided, DocuscopeTagger defaults
to the "stock" Docuscope dictionary, which is not publicly available at
this time.
"""
def __init__(
self,
debug=False,
label="",
excluded_token_types=(
Tokenizer.TYPES["WHITESPACE"],
Tokenizer.TYPES["NEWLINE"]
),
untagged_rule_name=None,
unrecognized_rule_name=None,
excluded_rule_name=None,
return_untagged_tags=False,
return_unrecognized_tags=False,
return_excluded_tags=False,
return_included_tags=False,
allow_overlapping_tags=False,
dictionary_path=None,
blacklist=[],
return_tag_maps=False,
):
super(DocuscopeTagger, self).__init__(
debug=debug,
label=label,
excluded_token_types=excluded_token_types,
untagged_rule_name=untagged_rule_name,
unrecognized_rule_name=unrecognized_rule_name,
excluded_rule_name=excluded_rule_name,
return_untagged_tags=return_untagged_tags,
return_unrecognized_tags=return_unrecognized_tags,
return_excluded_tags=return_excluded_tags,
return_included_tags=return_included_tags,
blacklist=blacklist,
return_tag_maps=return_tag_maps
)
# Set blacklist
self.blacklist = blacklist
self.return_tag_maps = return_tag_maps
# This is a weird setting
self.allow_overlapping_tags = allow_overlapping_tags
# Allow DocuscopeTagger to be initialized with a different path to the Docuscope dictionary.
if dictionary_path is not None and os.path.exists(dictionary_path):
self.dictionary_path = dictionary_path
# Swizzle the dictionary filename into this instance's label.
self._label += "." + os.path.basename(dictionary_path)
if self.return_excluded_tags:
self._label += "." + "return_excluded_tags"
if self.allow_overlapping_tags:
self._label += "." + "allow_overlapping_tags"
elif dictionary_path is not None and os.path.exists(os.path.join(Ity.dictionaries_root, 'Docuscope', dictionary_path)):
self.dictionary_path = os.path.join(Ity.dictionaries_root, 'Docuscope', dictionary_path)
self._label += '.' + dictionary_path
# If the given dictionary path is invalid, use the following default value.
else:
# lf.write("swapped to default at 102"+ '\n')
self.dictionary_path = os.path.join(Ity.dictionaries_root, "Docuscope/default")
# Swizzle ".default" into this instance's label.
self._label += ".default"
# Is this dictionary a folder?
if os.path.isdir(self.dictionary_path):
# Cool, use DocuscopeDictionary.getDict to load that dictionary.
self._ds_dict = DocuscopeDictionary.getDict(self.dictionary_path)
# Is the dictionary a file with the extension ".csv"?
elif os.path.isfile(self.dictionary_path) and os.path.splitext(self.dictionary_path)[1] == ".csv":
# Load the Dictionary with a TopicModelDictionary.
self._ds_dict = DocuscopeCSVDictionary(rules_filename=self.dictionary_path)
self._ds_dict._load_rules()
# lf.close()
def _get_ds_words_for_token(self, token, case_sensitive=False):
# Get all the str representations of this token.
token_strs = token[Tokenizer.INDEXES["STRS"]]
# Try to find a matching Docuscope token while we still have
# token_strs to try with.
ds_words = []
for token_str in token_strs:
if not case_sensitive:
token_str = token_str.lower()
# UnicodeWarning previously happened here when this was a try / KeyError block
if token_str in self._ds_dict.words:
ds_words = self._ds_dict.words[token_str]
return ds_words
def _get_ds_words_for_token_index(self, token_index, case_sensitive=False):
try:
token = self.tokens[token_index]
if token[0][0] in self.blacklist:
return []
return self._get_ds_words_for_token(token, case_sensitive)
except IndexError:
return []
def _get_long_rule_tag(self):
rule = copy(Tagger.empty_rule)
tag = deepcopy(Tagger.empty_tag)
# Is this token's type one that is excluded?
if self.tokens[self.token_index][Tokenizer.INDEXES["TYPE"]] in self.excluded_token_types:
# Early return, then.
return None, None
# Is there a next token?
next_token_index = self._get_nth_next_included_token_index()
if next_token_index is None:
# Nope, no next token, so we can't look for long rules.
return None, None
# Oh good, there's a next token. Go find the longest rule, then.
# This algorithm below is based on Mike Gleicher's DocuscopeJr tagger.
best_ds_rule = None
best_ds_lat = None
best_ds_rule_len = 0
for token_ds_word in self._get_ds_words_for_token_index(self.token_index):
try:
rule_dict = self._ds_dict.rules[token_ds_word]
for next_token_ds_word in self._get_ds_words_for_token_index(next_token_index):
try: # for the rd[nw]
for ds_lat, ds_rule in rule_dict[next_token_ds_word]:
# check to see if the rule applies
ds_rule_len = len(ds_rule)
if ds_rule_len > best_ds_rule_len and self._long_rule_applies_at_token_index(ds_rule):
# keep the "best" rule
best_ds_rule = ds_rule
best_ds_lat = ds_lat
best_ds_rule_len = ds_rule_len
except KeyError:
pass
except KeyError:
pass
if best_ds_rule is not None and best_ds_rule_len > 0:
# Update the rule structure.
rule["name"] = best_ds_lat
rule["full_name"] = best_ds_lat
# Update the tag structure.
last_token_index = self._get_nth_next_included_token_index(n=best_ds_rule_len - 1)
tag.update(
rules=[
(rule["name"], best_ds_rule)
],
index_start=self.token_index,
index_end=last_token_index,
pos_start=self.tokens[self.token_index][Tokenizer.INDEXES["POS"]],
pos_end=self.tokens[last_token_index][Tokenizer.INDEXES["POS"]],
len=tag["index_end"] - tag["index_start"] + 1,
token_end_len=self.tokens[last_token_index][Tokenizer.INDEXES["LENGTH"]],
num_included_tokens=best_ds_rule_len
)
# Okay, do we have a valid tag and tag to return? (That's the best rule).
if self._is_valid_rule(rule) and self._is_valid_tag(tag):
# Return the best rule's rule and tag.
return rule, tag
else:
# No long rule applies.
return None, None
def _long_rule_applies_at_token_index(self, rule):
try:
# Get the next token index so that the first reassignment to
# next_token_index in the loop references the 3rd token in the rule.
next_token_index = self._get_nth_next_included_token_index()
for i in range(2, len(rule)):
next_token_index = self._get_nth_next_included_token_index(starting_token_index=next_token_index)
if next_token_index is None or not (rule[i] in self._get_ds_words_for_token_index(next_token_index)):
return False
# Made it out of the loop? Then the rule applies!
return next_token_index
except IndexError:
return False
def _get_short_rule_tag(self):
rule = copy(Tagger.empty_rule)
# Some data for the current token.
token = self.tokens[self.token_index]
token_ds_words = self._get_ds_words_for_token(token)
# Update some information in tag right away for this one-token tag.
tag = deepcopy(Tagger.empty_tag)
tag.update(
index_start=self.token_index,
index_end=self.token_index,
pos_start=token[Tokenizer.INDEXES["POS"]],
pos_end=token[Tokenizer.INDEXES["POS"]],
len=1,
num_included_tokens=1,
token_end_len=token[Tokenizer.INDEXES["LENGTH"]]
)
# For words and punctuation...
matching_ds_word = None
if token[0][0] in self.blacklist:
rule["name"] = "!BLACKLISTED"
elif token[Tokenizer.INDEXES["TYPE"]] not in self.excluded_token_types:
# Try to find a short rule for one of this token's ds_words.
for ds_word in token_ds_words:
try:
# Note: we'll set rule["full_name"] later.
rule["name"] = self._ds_dict.shortRules[ds_word]
matching_ds_word = ds_word
break
except KeyError:
continue
# Handle untagged tokens (words and punctuation that
# exist in the Docuscope dictionary's words dict but do not have
# an applicable rule).
if rule["name"] is None:
for ds_word in token_ds_words:
if ds_word in self._ds_dict.words:
rule["name"] = self.untagged_rule_name
break
# Still don't have a rule?
# Handle !UNRECOGNIZED tokens---tokens that do not exist in the dictionary.
if rule["name"] is None:
rule["name"] = self.unrecognized_rule_name
# For excluded token types...uh, they're excluded.
else:
rule["name"] = self.excluded_rule_name
# For all cases, we should have a rule "name" by now.
# Update the rule's full_name value and append a rule tuple to the
# tag's "rules" list.
if "name" in rule and type(rule["name"]) is str:
rule["full_name"] = rule["name"]
rule_tuple = (rule["full_name"], matching_ds_word)
tag["rules"].append(rule_tuple)
# self._get_tag() will validate the returned rule and tag.
return rule, tag
def _get_tag(self):
# Try finding a long rule.
rule, tag = self._get_long_rule_tag()
# If the long rule and tag are invalid (i.e. we got None and None), try finding a short rule.
if not self._is_valid_rule(rule) and not self._is_valid_tag(tag):
# Try finding a short rule (which could be the "untagged",
# "no rule", or "excluded" rules). This method *should* never
# return None, None (but technically it can).
rule, tag = self._get_short_rule_tag()
# We should absolutely have a valid rule and tag at this point.
if not self._is_valid_rule(rule) or not self._is_valid_tag(tag):
raise ValueError("Unexpected None, None return value/s from\
self._get_short_rule_tag(). Can't tag token '%s' at index %u." % (
self.tokens[self.token_index],
self.token_index
))
# Add the rule to self.rules (if we're supposed to) and add the tag to
# self.tags.
if self._should_return_rule(rule):
# Is this the first time we've seen this rule?
if rule["full_name"] not in self.rules:
rule["num_tags"] = 1
rule["num_included_tokens"] = tag["num_included_tokens"]
self.rules[rule["name"]] = rule
# We've seen this rule already, but update its num_tags count.
else:
self.rules[rule["name"]]["num_tags"] += 1
self.rules[rule["name"]]["num_included_tokens"] += tag["num_included_tokens"]
# Append the tag to self.tags.
if self.return_tag_maps:
self.tags.append(tag)
# Debug: print the tokens that have been tagged.
if self.debug:
tag_token_strs = []
for token in self.tokens[tag["index_start"]:(tag["index_end"] + 1)]:
tag_token_strs.append(token[Tokenizer.INDEXES["STRS"]][-1])
print ">>> BEST RULE: %s for \"%s\"" % (
rule["name"],
str(tag_token_strs)
)
# Compute the new token index.
# If "overlapping tags" are allowed, start at the token following
# the **first** token in the tag we just finished making.
if self.allow_overlapping_tags:
self.token_index = tag["index_start"] + 1
# Otherwise, start at the token following the **last** token in the
# tag we just finished making.
else:
self.token_index = tag["index_end"] + 1
def tag(self, tokens):
# Several helper methods need access to the tokens.
self.tokens = tokens
self.token_index = 0
# Loop through the tokens and tag them.
while self.token_index < len(self.tokens) and self.token_index is not None:
if self.debug:
print "\nPassing self.tokens[%u] = %s" % (self.token_index, str(self.tokens[self.token_index]))
self._get_tag()
# All done, so let's do some cleanup.
rules = self.rules
tags = self.tags
# Clear this instance's tokens, rules, and tags.
# (This is an attempt to free up memory a bit earlier.)
self.tokens = []
self.rules = {}
self.tags = []
# Return the goods.
if self.return_tag_maps:
return rules, tags
else:
return rules
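# Hedged usage sketch (not part of the original file): tag() consumes the token
# list produced by the matching Ity Tokenizer and returns the accumulated rules
# (plus per-span tag maps when return_tag_maps is True). The tagger instance and
# token list are assumed to be constructed elsewhere.
def _example_tag_run(tagger, tokens):
    # Dispatch on the same flag tag() itself checks before returning.
    if tagger.return_tag_maps:
        rules, tags = tagger.tag(tokens)
        return rules, tags
    return tagger.tag(tokens)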
| {
"content_hash": "1ae3886e9892091b6c93a2b5d922c99c",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 127,
"avg_line_length": 48.01775147928994,
"alnum_prop": 0.5863832409118915,
"repo_name": "uwgraphics/Ubiqu-Ity",
"id": "57e3e19b90d67971a1ceb3790e1ca6d341a4243d",
"size": "16245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Ity/Taggers/DocuscopeTagger/DocuscopeTagger.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "604180"
},
{
"name": "HTML",
"bytes": "750751"
},
{
"name": "JavaScript",
"bytes": "381542"
},
{
"name": "Makefile",
"bytes": "10123"
},
{
"name": "Python",
"bytes": "348678"
},
{
"name": "Ruby",
"bytes": "1797"
}
],
"symlink_target": ""
} |
import sys
import pyrfa
p = pyrfa.Pyrfa()
p.createConfigDb("./pyrfa.cfg")
p.setDebugMode(False)
p.acquireSession("Session3")
p.createOMMConsumer()
p.login()
p.directoryRequest()
p.dictionaryRequest()
p.historyRequest("tANZ.AX")
count = 0
while not p.isHistoryRefreshComplete():
for u in p.dispatchEventQueue():
if count == 1:
print(u['SERVICE'] + " - " + u['RIC'])
print("-----------------------")
for k,v in u.items():
sys.stdout.write(k+',')
print("")
for k,v in u.items():
sys.stdout.write(str(v)+',')
elif count > 1:
for k,v in u.items():
sys.stdout.write(str(v)+',')
count += 1
print("")
print("\n\n########## total history records: %s ###################\n\n" % (count - 1)) | {
"content_hash": "174119241fc59d152df8b7e48a938437",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 87,
"avg_line_length": 26.25,
"alnum_prop": 0.5,
"repo_name": "devcartel/pyrfa",
"id": "04df1992a66ddbca846956ad53f9a1dbba7cae76",
"size": "1258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/history.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1884"
},
{
"name": "C++",
"bytes": "940595"
},
{
"name": "Python",
"bytes": "2071"
}
],
"symlink_target": ""
} |
from boto.pyami.config import Config, BotoConfigLocations
from boto.storage_uri import BucketStorageUri, FileStorageUri
import boto.plugin
import os, re, sys
import logging
import logging.config
import urlparse
from boto.exception import InvalidUriError
__version__ = '2.4.1'
Version = __version__ # for backward compatibility
UserAgent = 'Boto/%s (%s)' % (__version__, sys.platform)
config = Config()
def init_logging():
for file in BotoConfigLocations:
try:
logging.config.fileConfig(os.path.expanduser(file))
except:
pass
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('boto')
log.addHandler(NullHandler())
init_logging()
# convenience function to set logging to a particular file
def set_file_logger(name, filepath, level=logging.INFO, format_string=None):
global log
if not format_string:
format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
fh = logging.FileHandler(filepath)
fh.setLevel(level)
formatter = logging.Formatter(format_string)
fh.setFormatter(formatter)
logger.addHandler(fh)
log = logger
def set_stream_logger(name, level=logging.DEBUG, format_string=None):
global log
if not format_string:
format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
fh = logging.StreamHandler()
fh.setLevel(level)
formatter = logging.Formatter(format_string)
fh.setFormatter(formatter)
logger.addHandler(fh)
log = logger
def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sqs.connection.SQSConnection`
:return: A connection to Amazon's SQS
"""
from boto.sqs.connection import SQSConnection
return SQSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to Amazon's S3
"""
from boto.s3.connection import S3Connection
return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
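# Hedged usage sketch (not part of the original module): every connect_* helper in
# this file follows the same credential-passing pattern; shown once here for S3
# with placeholder credentials and an invented bucket name.
def _example_connect_s3():
    conn = connect_s3('AKIDEXAMPLE', 'example-secret-key')
    # lookup() returns the Bucket if it exists and is accessible, otherwise None.
    return conn.lookup('example-bucket')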
def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs):
"""
@type gs_access_key_id: string
@param gs_access_key_id: Your Google Cloud Storage Access Key ID
@type gs_secret_access_key: string
@param gs_secret_access_key: Your Google Cloud Storage Secret Access Key
@rtype: L{GSConnection<boto.gs.connection.GSConnection>}
@return: A connection to Google's Storage service
"""
from boto.gs.connection import GSConnection
return GSConnection(gs_access_key_id, gs_secret_access_key, **kwargs)
def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Amazon's EC2
"""
from boto.ec2.connection import EC2Connection
return EC2Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_elb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.elb.ELBConnection`
:return: A connection to Amazon's Load Balancing Service
"""
from boto.ec2.elb import ELBConnection
return ELBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.autoscale.AutoScaleConnection`
:return: A connection to Amazon's Auto Scaling Service
"""
from boto.ec2.autoscale import AutoScaleConnection
return AutoScaleConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.cloudwatch.CloudWatchConnection`
:return: A connection to Amazon's EC2 Monitoring service
"""
from boto.ec2.cloudwatch import CloudWatchConnection
return CloudWatchConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sdb.connection.SDBConnection`
:return: A connection to Amazon's SDB
"""
from boto.sdb.connection import SDBConnection
return SDBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.fps.connection.FPSConnection`
:return: A connection to FPS
"""
from boto.fps.connection import FPSConnection
return FPSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.mturk.connection.MTurkConnection`
:return: A connection to MTurk
"""
from boto.mturk.connection import MTurkConnection
return MTurkConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.fps.connection.FPSConnection`
:return: A connection to FPS
"""
from boto.cloudfront import CloudFrontConnection
return CloudFrontConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.vpc.VPCConnection`
:return: A connection to VPC
"""
from boto.vpc import VPCConnection
return VPCConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.rds.RDSConnection`
:return: A connection to RDS
"""
from boto.rds import RDSConnection
return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.emr.EmrConnection`
:return: A connection to Elastic mapreduce
"""
from boto.emr import EmrConnection
return EmrConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_sns(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sns.SNSConnection`
:return: A connection to Amazon's SNS
"""
from boto.sns import SNSConnection
return SNSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_iam(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.iam.IAMConnection`
:return: A connection to Amazon's IAM
"""
from boto.iam import IAMConnection
return IAMConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_route53(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.dns.Route53Connection`
:return: A connection to Amazon's Route53 DNS Service
"""
from boto.route53 import Route53Connection
return Route53Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_cloudformation(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.cloudformation.CloudFormationConnection`
:return: A connection to Amazon's CloudFormation Service
"""
from boto.cloudformation import CloudFormationConnection
return CloudFormationConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None,
port=8773, path='/services/Eucalyptus', is_secure=False,
**kwargs):
"""
Connect to a Eucalyptus service.
:type host: string
:param host: the host name or ip address of the Eucalyptus server
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Eucalyptus server
"""
from boto.ec2 import EC2Connection
from boto.ec2.regioninfo import RegionInfo
# Check for values in boto config, if not supplied as args
if not aws_access_key_id:
aws_access_key_id = config.get('Credentials',
'euca_access_key_id',
None)
if not aws_secret_access_key:
aws_secret_access_key = config.get('Credentials',
'euca_secret_access_key',
None)
if not host:
host = config.get('Boto', 'eucalyptus_host', None)
reg = RegionInfo(name='eucalyptus', endpoint=host)
return EC2Connection(aws_access_key_id, aws_secret_access_key,
region=reg, port=port, path=path,
is_secure=is_secure, **kwargs)
def connect_ec2_endpoint(url, aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
Connect to an EC2 Api endpoint. Additional arguments are passed
through to connect_ec2.
:type url: string
:param url: A url for the ec2 api endpoint to connect to
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Eucalyptus server
"""
from boto.ec2.regioninfo import RegionInfo
purl = urlparse.urlparse(url)
kwargs['port'] = purl.port
kwargs['host'] = purl.hostname
kwargs['path'] = purl.path
if not 'is_secure' in kwargs:
kwargs['is_secure'] = (purl.scheme == "https")
kwargs['region'] = RegionInfo(name = purl.hostname,
endpoint = purl.hostname)
kwargs['aws_access_key_id']=aws_access_key_id
kwargs['aws_secret_access_key']=aws_secret_access_key
return(connect_ec2(**kwargs))
def connect_walrus(host=None, aws_access_key_id=None, aws_secret_access_key=None,
port=8773, path='/services/Walrus', is_secure=False,
**kwargs):
"""
Connect to a Walrus service.
:type host: string
:param host: the host name or ip address of the Walrus server
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to Walrus
"""
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
# Check for values in boto config, if not supplied as args
if not aws_access_key_id:
aws_access_key_id = config.get('Credentials',
'euca_access_key_id',
None)
if not aws_secret_access_key:
aws_secret_access_key = config.get('Credentials',
'euca_secret_access_key',
None)
if not host:
host = config.get('Boto', 'walrus_host', None)
return S3Connection(aws_access_key_id, aws_secret_access_key,
host=host, port=port, path=path,
calling_format=OrdinaryCallingFormat(),
is_secure=is_secure, **kwargs)
def connect_ses(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ses.SESConnection`
:return: A connection to Amazon's SES
"""
from boto.ses import SESConnection
return SESConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_sts(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sts.STSConnection`
:return: A connection to Amazon's STS
"""
from boto.sts import STSConnection
return STSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_ia(ia_access_key_id=None, ia_secret_access_key=None,
is_secure=False, **kwargs):
"""
Connect to the Internet Archive via their S3-like API.
:type ia_access_key_id: string
:param ia_access_key_id: Your IA Access Key ID. This will also look in your
boto config file for an entry in the Credentials
section called "ia_access_key_id"
:type ia_secret_access_key: string
:param ia_secret_access_key: Your IA Secret Access Key. This will also
look in your boto config file for an entry
in the Credentials section called
"ia_secret_access_key"
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to the Internet Archive
"""
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
access_key = config.get('Credentials', 'ia_access_key_id',
ia_access_key_id)
secret_key = config.get('Credentials', 'ia_secret_access_key',
ia_secret_access_key)
return S3Connection(access_key, secret_key,
host='s3.us.archive.org',
calling_format=OrdinaryCallingFormat(),
is_secure=is_secure, **kwargs)
def connect_dynamodb(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.dynamodb.layer2.Layer2`
:return: A connection to the Layer2 interface for DynamoDB.
"""
from boto.dynamodb.layer2 import Layer2
return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_swf(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.swf.layer1.Layer1`
:return: A connection to the Layer1 interface for SWF.
"""
from boto.swf.layer1 import Layer1
return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_cloudsearch(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.autoscale.CloudSearchConnection`
:return: A connection to Amazon's CloudSearch service
"""
from boto.cloudsearch.layer2 import Layer2
return Layer2(aws_access_key_id, aws_secret_access_key,
**kwargs)
def check_extensions(module_name, module_path):
"""
This function checks for extensions to boto modules. It should be called in the
__init__.py file of all boto modules. See:
http://code.google.com/p/boto/wiki/ExtendModules
for details.
"""
option_name = '%s_extend' % module_name
version = config.get('Boto', option_name, None)
if version:
dirname = module_path[0]
path = os.path.join(dirname, version)
if os.path.isdir(path):
log.info('extending module %s with: %s' % (module_name, path))
module_path.insert(0, path)
_aws_cache = {}
def _get_aws_conn(service):
global _aws_cache
conn = _aws_cache.get(service)
if not conn:
meth = getattr(sys.modules[__name__], 'connect_' + service)
conn = meth()
_aws_cache[service] = conn
return conn
def lookup(service, name):
global _aws_cache
conn = _get_aws_conn(service)
obj = _aws_cache.get('.'.join((service, name)), None)
if not obj:
obj = conn.lookup(name)
_aws_cache['.'.join((service, name))] = obj
return obj
def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
bucket_storage_uri_class=BucketStorageUri,
suppress_consec_slashes=True):
"""
Instantiate a StorageUri from a URI string.
:type uri_str: string
:param uri_str: URI naming bucket + optional object.
:type default_scheme: string
:param default_scheme: default scheme for scheme-less URIs.
:type debug: int
:param debug: debug level to pass in to boto connection (range 0..2).
:type validate: bool
:param validate: whether to check for bucket name validity.
:type bucket_storage_uri_class: BucketStorageUri interface.
:param bucket_storage_uri_class: Allows mocking for unit tests.
:param suppress_consec_slashes: If provided, controls whether
consecutive slashes will be suppressed in key paths.
We allow validate to be disabled to allow caller
to implement bucket-level wildcarding (outside the boto library;
see gsutil).
:rtype: :class:`boto.StorageUri` subclass
:return: StorageUri subclass for given URI.
``uri_str`` must be one of the following formats:
* gs://bucket/name
* s3://bucket/name
* gs://bucket
* s3://bucket
* filename
The last example uses the default scheme ('file', unless overridden)
"""
# Manually parse URI components instead of using urlparse.urlparse because
# what we're calling URIs don't really fit the standard syntax for URIs
# (the latter includes an optional host/net location part).
end_scheme_idx = uri_str.find('://')
if end_scheme_idx == -1:
# Check for common error: user specifies gs:bucket instead
# of gs://bucket. Some URI parsers allow this, but it can cause
# confusion for callers, so we don't.
if uri_str.find(':') != -1:
raise InvalidUriError('"%s" contains ":" instead of "://"' % uri_str)
scheme = default_scheme.lower()
path = uri_str
else:
scheme = uri_str[0:end_scheme_idx].lower()
path = uri_str[end_scheme_idx + 3:]
if scheme not in ['file', 's3', 'gs']:
raise InvalidUriError('Unrecognized scheme "%s"' % scheme)
if scheme == 'file':
# For file URIs we have no bucket name, and use the complete path
# (minus 'file://') as the object name.
is_stream = False
if path == '-':
is_stream = True
return FileStorageUri(path, debug, is_stream)
else:
path_parts = path.split('/', 1)
bucket_name = path_parts[0]
if (validate and bucket_name and
# Disallow buckets violating charset or not [3..255] chars total.
(not re.match('^[a-z0-9][a-z0-9\._-]{1,253}[a-z0-9]$', bucket_name)
# Disallow buckets with individual DNS labels longer than 63.
or re.search('[-_a-z0-9]{64}', bucket_name))):
raise InvalidUriError('Invalid bucket name in URI "%s"' % uri_str)
# If enabled, ensure the bucket name is valid, to avoid possibly
# confusing other parts of the code. (For example if we didn't
# catch bucket names containing ':', when a user tried to connect to
# the server with that name they might get a confusing error about
# non-integer port numbers.)
object_name = ''
if len(path_parts) > 1:
object_name = path_parts[1]
return bucket_storage_uri_class(
scheme, bucket_name, object_name, debug,
suppress_consec_slashes=suppress_consec_slashes)
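# Hedged usage sketch (not part of the original module): the accepted URI formats
# are listed in the storage_uri() docstring above. The bucket and key names are
# invented, and validate=False skips the bucket-name check performed above.
def _example_storage_uri():
    uri = storage_uri('s3://example-bucket/reports/2012.csv', validate=False)
    # The returned BucketStorageUri keeps the parsed scheme, bucket and object name.
    return uri.scheme, uri.bucket_name, uri.object_name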
def storage_uri_for_key(key):
"""Returns a StorageUri for the given key.
:type key: :class:`boto.s3.key.Key` or subclass
:param key: URI naming bucket + optional object.
"""
if not isinstance(key, boto.s3.key.Key):
raise InvalidUriError('Requested key (%s) is not a subclass of '
'boto.s3.key.Key' % str(type(key)))
prov_name = key.bucket.connection.provider.get_provider_name()
uri_str = '%s://%s/%s' % (prov_name, key.bucket.name, key.name)
return storage_uri(uri_str)
boto.plugin.load_plugins(config)
| {
"content_hash": "17c72be48301dc4b04d1167715f19c3b",
"timestamp": "",
"source": "github",
"line_count": 665,
"max_line_length": 89,
"avg_line_length": 36.69624060150376,
"alnum_prop": 0.656271769864361,
"repo_name": "darcyliu/storyboard",
"id": "9ae6ccaf7c0425adc4d08412c1aed99c33ce4f3d",
"size": "25683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boto/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "57467"
},
{
"name": "JavaScript",
"bytes": "483502"
},
{
"name": "Python",
"bytes": "4223732"
},
{
"name": "Shell",
"bytes": "751"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(
name='fureon',
version='0.0',
description='Crowd enabled music streamer and library',
author='Andy Tran',
author_email='[email protected]',
url='',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'tornado',
'sqlalchemy',
'sqlalchemy_utils',
'mutagen',
'redis',
'passlib',
'psycopg2',
'itsdangerous',
],
entry_points={
'console_scripts': [
'fureon-start = fureon.app:main',
]
}
)
| {
"content_hash": "29448ab1c126f9f8087a4f7b96c70484",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 59,
"avg_line_length": 21.857142857142858,
"alnum_prop": 0.5490196078431373,
"repo_name": "ATRAN2/fureon",
"id": "f579f2ee7eda43c28e9eba36b7c34aad0502c901",
"size": "638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "77288"
}
],
"symlink_target": ""
} |
"""CouchMail.py imports your mail from an IMAP server into a CouchDB database
License: Apache 2.0 - http://opensource.org/licenses/Apache-2.0
"""
import calendar
from base64 import b64encode
from dateutil.parser import parse
from couchdb import ResourceConflict
def headers(msg):
mail = {}
for header, value in msg._message.items():
header = header.lower()
if header in mail:
if isinstance(mail[header], str):
mail[header] = [mail[header], value]
else:
mail[header].append(value)
else:
mail[header] = value
return mail
def parts(attachments):
parts = {}
for (filename, data, content_type) in attachments:
parts[filename] = {'content_type': content_type,
'data': b64encode(data)}
return parts
def truly_unique_id(msg):
# TODO: check `date` header existence?
if msg.message_id:
unique_id = msg.message_id.strip()
else:
dt = parse(msg.date)
unique_id = calendar.timegm(dt.timetuple())
return unique_id
def archive_msg(couch, msg):
dt = parse(msg.date)
hdrs = headers(msg)
doc_id = truly_unique_id(msg)
base_doc = {'_id': doc_id,
'headers': hdrs,
'date': [dt.year, dt.month, dt.day, dt.hour, dt.minute,
dt.second],
'to': msg.to,
'from': msg.from_addr,
'sender': msg.sender,
'cc': msg.cc,
'deliveredto': msg.delivered_to,
'references': msg.references,
'subject': msg.title,
'message': msg.body}
# clean out top-level keys with empty values
doc = dict((k,v) for k, v in base_doc.iteritems() if v)
# Add the raw message content for "auditing"
doc['_attachments'] = parts(msg.attachments)
doc['_attachments']['raw.eml'] = {
'content_type': 'message/rfc822',
'data': b64encode(msg.raw)}
if doc_id in couch:
doc['_rev'] = couch[doc_id]['_rev']
try:
couch.save(doc)
if '_rev' in doc:
print doc_id + ' updated.'
else:
print doc_id + ' stored.'
except ResourceConflict:
print doc_id + ' could not be updated.'
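# Hedged usage sketch (not part of the original module): archive_msg() expects a
# couchdb.Database and a parsed message object exposing the attributes used above
# (message_id, date, to, from_addr, attachments, raw, ...). The mail-fetching side
# and the database name are assumptions.
def _example_archive(messages):
    import couchdb
    couch = couchdb.Server('http://localhost:5984/')['couchmail']
    for msg in messages:
        archive_msg(couch, msg)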
| {
"content_hash": "af69f95c263abcce6b89f0eb6c9f2dc0",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 77,
"avg_line_length": 28.78205128205128,
"alnum_prop": 0.5661469933184855,
"repo_name": "BigBlueHat/couchmail.py",
"id": "dd0d480f3c5234f9d1c01b8d89e4878a7877789c",
"size": "2246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "couchmail/couchmail.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "20700"
},
{
"name": "HTML",
"bytes": "7440"
},
{
"name": "JavaScript",
"bytes": "130314"
},
{
"name": "Python",
"bytes": "9071"
},
{
"name": "Shell",
"bytes": "32"
}
],
"symlink_target": ""
} |
from __pyversor__.sta import *
| {
"content_hash": "60362885849bcab46267873b28fb982c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 30,
"avg_line_length": 31,
"alnum_prop": 0.6774193548387096,
"repo_name": "tingelst/pyversor",
"id": "e5444bc6936eea4335785329df9bfcc4ce02b3b0",
"size": "31",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyversor/sta/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "C++",
"bytes": "299962"
},
{
"name": "CMake",
"bytes": "808"
},
{
"name": "Python",
"bytes": "27114"
},
{
"name": "Shell",
"bytes": "71"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import itertools
from collections import OrderedDict
import nnabla as nn
import numpy as np
class NnpNetwork(object):
'''A graph object which is read from an nnp file.
An instance of NnpNetwork is usually created by an NnpLoader instance.
See an example usage described in :obj:`NnpLoader`.
Attributes:
variables (dict): A dict of all variables in a created graph
with a variable name as a key, and a nnabla.Variable as a value.
inputs (dict): All input variables.
outputs (dict): All output variables.
'''
def __init__(self, proto_network, batch_size, callback):
proto_network = proto_network.expand_loop_control()
self.proto_network = proto_network.promote(callback)
self.proto_network(batch_size=batch_size)
for k, v in itertools.chain(
self.proto_network.variables.items(), self.proto_network.parameters.items()):
if v.variable_instance is not None:
v.variable_instance.name = k
self._inputs = {
i: self.proto_network.variables[i].variable_instance
for i in self.proto_network.inputs
}
self._outputs = {
i: self.proto_network.variables[i].variable_instance
for i in self.proto_network.outputs
}
self._variables = {
k: v.variable_instance
for k, v in itertools.chain(
self.proto_network.variables.items(), self.proto_network.parameters.items())
}
# publish network's parameters to current parameter scope
# like original implementation.
with nn.parameter_scope('', nn.get_current_parameter_scope()):
for k, v in self.proto_network.parameters.items():
nn.parameter.set_parameter(k, v.variable_instance)
@property
def inputs(self):
return self._inputs
@property
def outputs(self):
return self._outputs
@property
def variables(self):
return self._variables
class NnpLoader(object):
'''An NNP file loader.
Args:
filepath : file-like object or filepath.
extension: if filepath is a file-like object, extension must be one of ".nnp", ".nntxt", or ".prototxt".
Example:
.. code-block:: python
from nnabla.utils.nnp_graph import NnpLoader
# Read a .nnp file.
nnp = NnpLoader('/path/to/nnp.nnp')
# Assume a graph `graph_a` is in the nnp file.
net = nnp.get_network(network_name, batch_size=1)
# `x` is an input of the graph.
x = net.inputs['x']
# 'y' is an outputs of the graph.
y = net.outputs['y']
# Set random data as input and perform forward prop.
x.d = np.random.randn(*x.shape)
y.forward(clear_buffer=True)
print('output:', y.d)
'''
def __init__(self, filepath, scope=None, extension=".nntxt"):
# OrderedDict maintains loaded parameters from nnp files.
# The loaded parameters will be copied to the current
# scope when get_network is called.
self._params = scope if scope else OrderedDict()
self.g = nn.graph_def.load(
filepath, parameter_scope=self._params, rng=np.random.RandomState(1223), extension=extension)
self.network_dict = {
name: pn for name, pn in self.g.networks.items()
}
def get_network_names(self):
'''Returns network names available.
'''
return list(self.network_dict.keys())
def get_network(self, name, batch_size=None, callback=None):
'''Create a variable graph given network by name
Returns: NnpNetwork
'''
return NnpNetwork(self.network_dict[name], batch_size, callback=callback)
class NnpNetworkPass(object):
def _no_verbose(self, *a, **kw):
pass
def _verbose(self, *a, **kw):
print(*a, **kw)
def __init__(self, verbose=0):
self._variable_callbacks = {}
self._function_callbacks_by_name = {}
self._function_callbacks_by_type = {}
self._passes_by_name = {}
self._passes_by_type = {}
self._fix_parameters = False
self._use_up_to_variables = set()
self.verbose = self._no_verbose
self.verbose2 = self._no_verbose
if verbose:
self.verbose = self._verbose
if verbose > 1:
self.verbose2 = self._verbose
def on_function_pass_by_name(self, name):
def _on_function_pass_by_name(callback):
def _callback(f, variables, param_scope):
return callback(f, variables, param_scope)
self._passes_by_name[name] = _callback
return _callback
return _on_function_pass_by_name
def on_function_pass_by_type(self, name):
def _on_function_pass_by_type(callback):
def _callback(f, variables, param_scope):
return callback(f, variables, param_scope)
self._passes_by_name[name] = _callback
return _callback
return _on_function_pass_by_type
def on_generate_variable(self, name):
def _on_generate_variable(callback):
def _callback(v):
return callback(v)
self._variable_callbacks[name] = _callback
return _callback
return _on_generate_variable
def on_generate_function_by_name(self, name):
def _on_generate_function_by_name(callback):
def _callback(v):
return callback(v)
self._function_callbacks_by_name[name] = _callback
return _callback
return _on_generate_function_by_name
def on_generate_function_by_type(self, name):
def _on_generate_function_by_type(callback):
def _callback(v):
return callback(v)
self._function_callbacks_by_type[name] = _callback
return _callback
return _on_generate_function_by_type
def drop_function(self, *names):
def callback(f, variables, param_scope):
self.verbose('Pass: Deleting {}.'.format(f.name))
f.disable()
for name in names:
self.on_function_pass_by_name(name)(callback)
def fix_parameters(self):
self._fix_parameters = True
def use_up_to(self, *names):
self._use_up_to_variables.update(set(names))
def remove_and_rewire(self, name, i=0, o=0):
@self.on_function_pass_by_name(name)
def on_dr(f, variables, param_scope):
fi = f.inputs[i]
fo = f.outputs[o]
self.verbose('Removing {} and rewire input={} and output={}.'.format(
f.name, fi.name, fo.name))
fo.rewire_on(fi)
# Use input name
fo.proto.name = fi.name
def set_variable(self, name, input_var):
@self.on_generate_variable(name)
def on_input_x(v):
self.verbose('Replace {} by {}.'.format(name, input_var))
v.proto.shape.dim[:] = input_var.shape
v.variable = input_var
input_var.name = v.name
return v
def force_average_pooling_global(self, name, by_type=False):
dec = self.on_generate_function_by_name
if by_type:
dec = self.on_generate_function_by_type
@dec(name)
def on_avgpool(f):
pool_shape = f.inputs[0].proto.shape.dim[2:]
self.verbose('Change strides of {} to {}.'.format(
f.name, pool_shape))
p = f.proto.average_pooling_param
p.kernel.dim[:] = pool_shape
p.stride.dim[:] = pool_shape
return f
def check_average_pooling_global(self, name, by_type=False):
dec = self.on_generate_function_by_name
if by_type:
dec = self.on_generate_function_by_type
@dec(name)
def on_avgpool_check(f):
pool_shape = f.inputs[0].proto.shape.dim[2:]
p = f.proto.average_pooling_param
if p.kernel.dim[:] != pool_shape or p.stride.dim[:] != pool_shape:
raise ValueError(
'Stride configuration of average pooling is not for global pooling.'
' Given Image shape is {}, whereas pooling window size is {} and its stride is {}.'
' Consider using force_global_pooling=True'.format(
pool_shape, p.kernel.dim[:], p.stride.dim[:]))
return f
def set_batch_normalization_batch_stat_all(self, batch_stat):
@self.on_generate_function_by_type('BatchNormalization')
def on_bn(f):
self.verbose('Setting batch_stat={} at {}.'.format(
batch_stat, f.name))
p = f.proto.batch_normalization_param
p.batch_stat = batch_stat
return f
def _apply_function_pass_by_name(self, f, variables, param_scope):
if f.name not in self._passes_by_name:
return f
return self._passes_by_name[f.name](f, variables, param_scope)
def _apply_function_pass_by_type(self, f, variables, param_scope):
if f.proto.type not in self._passes_by_type:
return f
return self._passes_by_type[f.proto.type](f, variables, param_scope)
def _apply_generate_variable(self, v):
if v.name in self._variable_callbacks:
v = self._variable_callbacks[v.name](v)
if self._fix_parameters:
v.need_grad = False
return v
def _apply_generate_function_by_name(self, f):
if f.name not in self._function_callbacks_by_name:
return f
return self._function_callbacks_by_name[f.name](f)
def _apply_generate_function_by_type(self, f):
if f.proto.type not in self._function_callbacks_by_type:
return f
return self._function_callbacks_by_type[f.proto.type](f)
def _apply_use_up_to(self, variables):
for v in variables:
if v.name in self._use_up_to_variables:
self.verbose('Stopping at {}.'.format(v.name))
v.stop = True
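# Hedged usage sketch (not part of the original module): configuring an
# NnpNetworkPass before handing it to NnpLoader.get_network() as `callback`.
# The .nnp path, the function name 'RandomCrop' and the variable name 'y' are
# invented for illustration.
def _example_network_pass(batch_size=1):
    callback = NnpNetworkPass(verbose=1)
    callback.drop_function('RandomCrop')                      # delete a node by name
    callback.set_batch_normalization_batch_stat_all(False)    # inference-mode BN
    callback.fix_parameters()                                 # freeze parameters
    callback.use_up_to('y')                                   # truncate the graph at 'y'
    nnp = NnpLoader('/path/to/model.nnp')
    name = nnp.get_network_names()[0]
    return nnp.get_network(name, batch_size=batch_size, callback=callback)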
| {
"content_hash": "55c5ad77acec682f52808eb66298a206",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 105,
"avg_line_length": 35.02739726027397,
"alnum_prop": 0.581834180680485,
"repo_name": "sony/nnabla",
"id": "da4ceb81634c515f0b6f34ff728775200e4714ef",
"size": "10866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/src/nnabla/utils/nnp_graph.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "25938"
},
{
"name": "C++",
"bytes": "2590231"
},
{
"name": "CMake",
"bytes": "35358"
},
{
"name": "Cython",
"bytes": "180959"
},
{
"name": "Dockerfile",
"bytes": "5431"
},
{
"name": "Jupyter Notebook",
"bytes": "540006"
},
{
"name": "Makefile",
"bytes": "24294"
},
{
"name": "Python",
"bytes": "5311538"
},
{
"name": "Shell",
"bytes": "4750"
}
],
"symlink_target": ""
} |
import sqlite3
db_name = 'HTMLContent.db'
table_name = 'webPageContent'
def connectionAndTableDecorator(function):
def wrapper(*args, **kwargs):
connection = sqlite3.connect(db_name)
cursor = connection.cursor()
table_create_query = "CREATE TABLE IF NOT EXISTS {} (ID INTEGER PRIMARY KEY, Content TEXT NOT NULL, Name TEXT);".format(table_name)
cursor.execute(table_create_query)
return_value = function(cursor, *args, **kwargs)
connection.commit()
connection.close()
return return_value
return wrapper
@connectionAndTableDecorator
def queryAll(cursor):
query = 'SELECT * FROM {};'.format(table_name)
query_results = cursor.execute(query)
results_list = convertSQLResultsToList(query_results)
return results_list
def convertSQLResultsToList(query_results):
results_list = []
for item in query_results:
results_list.append(item)
return results_list
@connectionAndTableDecorator
def queryContent(cursor, row_ID):
query = 'SELECT Content FROM {} WHERE ID = {};'.format(table_name, row_ID)
query_results = cursor.execute(query)
content = convertSQLResultsToList(query_results)[0][0]
return content
@connectionAndTableDecorator
def insertItem(cursor, content, name):
insert = 'INSERT INTO {} (Content, Name) VALUES (?,?);'.format(table_name)
values = (content, name)
cursor.execute(insert, values)
def connectionOnlyDecorator(function, *args, **kwargs):
def wrapper(*args, **kwargs):
connection = sqlite3.connect(db_name)
cursor = connection.cursor()
function(cursor, *args, **kwargs)
connection.commit()
connection.close()
return wrapper
@connectionOnlyDecorator
def dropTable(cursor, table_name):
drop_table_query = "DROP TABLE IF EXISTS {};".format(table_name)
cursor.execute(drop_table_query)
if __name__ == '__main__':
#dropTable(table_name)
#insertItem('Here is some conent', 'Item number 1')
#insertItem('This is the content column', 'Item number 2')
#insertItem('This record has None in the name', None)
#query_results = queryAll()
#print(type(query_results))
#print(query_results)
content = queryContent(1)
print(type(content))
#print(content)
| {
"content_hash": "599d9fd40762f6a999b6f0330f8f0cfa",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 139,
"avg_line_length": 27.61904761904762,
"alnum_prop": 0.6724137931034483,
"repo_name": "danielharada/HtmlFileGenerator",
"id": "817b53595aff2dc9312cb600e032fd785db4df28",
"size": "2320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqliteAccessors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13077"
}
],
"symlink_target": ""
} |
import pandas as pd
import os
import numpy as np
from collections import namedtuple
task_status_tuple = namedtuple('task_status_tuple', ['todo', 'doing', 'done'])
task_status = task_status_tuple('todo', 'doing', 'done')
class project:
_tasklist = pd.DataFrame(columns=['workpackage', 'task', 'status', 'size'])
_wp = pd.DataFrame(columns=['workpackage','order'])
_name = 'empty'
_status = 1
_priority = 1
_AOI = 'General'
def __init__(self, name='empty', priority=1):
self._name = name
self._priority = priority
def _load_from_series(self, directory, projectseries):
self._load(directory, projectseries.AOI, projectseries.name, projectseries.status, projectseries.priority)
def _load(self, directory, AOI, name, status, priority):
self._status = status
self._priority = priority
self._name = name
self._AOI = AOI
self._tasklist = pd.read_csv(os.path.join(directory, name, 'tasklist.csv'), index_col=0)
self._wp = pd.read_csv(os.path.join(directory, name, 'workpackages.csv'), index_col=0)
def _save(self, directory):
project_dir, success = self.prepare_directory(directory)
if success:
self._tasklist.to_csv(os.path.join(project_dir, 'tasklist.csv'))
self._wp.to_csv(os.path.join(project_dir,'workpackages.csv'))
def prepare_directory(self, directory):
success = False
project_dir = ''
if not os.path.isdir(directory):
print('Directory \'{}\' does not exist'.format(directory))
print('Save failed')
else:
project_dir = os.path.join(directory, self._name)
if not os.path.isdir(project_dir):
os.makedirs(project_dir)
success = True
return project_dir, success
def add_workpackage(self,workpackage):
self.insert_workpackage(workpackage, np.max(np.append(self._wp['order'].values, [0]))+1)
def insert_workpackage(self, workpackage, position):
if self.wp_exists(workpackage):
print('workpackage \'{}\' alread exists'.format(workpackage))
else:
self.shift_positions(position)
self._wp = self._wp.append({'workpackage': workpackage, 'order': position}, ignore_index = True)
self.sort_workpackages()
def sort_workpackages(self):
if self._wp.order.size > 1:
if self._wp.order.values[-2] > self._wp.order.values[-1:]:
self._wp = self._wp.sort_values(by='order')
def shift_positions(self, position):
self._wp.loc[self._wp.order >= position, 'order'] += 1
def repos_workpackage(self, workpackage, position):
self._wp = self._wp.drop(self._wp[self._wp.workpackage == workpackage].index)
self.insert_workpackage(workpackage, position)
def remove_workpackage(self, workpackage):
self._wp = self._wp.drop(self._wp[self._wp.workpackage == workpackage].index)
self._tasklist = self._tasklist.drop(self._tasklist[self._tasklist.workpackage == workpackage].index)
def add_task(self, workpackage, task, size = 1, status=task_status.todo):
if self.task_exists(task, workpackage):
print('Task \'{}\' already exists in workpackage \'{}\'.'.format(task, workpackage))
elif self.wp_exists(workpackage):
self._tasklist = self._tasklist.append({'workpackage': workpackage, 'task': task, 'status': status, 'size': size}, ignore_index = True)
else:
print('add workpackage \'{}\' first'.format(workpackage))
def wp_exists(self, workpackage):
return workpackage in self._wp.workpackage.values
def task_exists(self, task, workpackage):
return task in self._tasklist[self._tasklist.workpackage == workpackage].task.values
def remove_task(self, workpackage, task):
index = self.get_task_index(workpackage, task)
if index.size==1:
self._tasklist = self._tasklist.drop(index)
elif index.size ==0:
print('Task \'{}\' does not exist in workpackage \'{}\'.'.format(task, workpackage))
else:
raise RuntimeError('ERROR in remove_task: task exists twice in same workpackage')
def set_task_status(self, workpackage, task, status):
index = self.get_task_index(workpackage, task)
self._tasklist.loc[index, 'status'] = status
def get_tasklist(self):
return self._tasklist
def get_workpackages(self):
return self._wp
def name(self):
return self._name
def get_task_index(self, workpackage, task):
return self._tasklist[(self._tasklist.workpackage == workpackage) & (self._tasklist.task == task)].index
def print(self):
print('--------------------')
print('name: ' + self._name)
print('--------------------')
print('workpackages:')
print(self._wp)
print('--------------------')
print('tasks:')
print(self._tasklist)
print('--------------------')
print('status = {}'.format(self._status))
print('--------------------')
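# Hedged usage sketch (not part of the original module): exercising the public API
# defined above. The project, workpackage and task names are invented.
def _example_project():
    p = project('website-redesign', priority=2)
    p.add_workpackage('design')
    p.add_task('design', 'wireframes', size=3)
    p.set_task_status('design', 'wireframes', task_status.doing)
    p.print()
    return p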
| {
"content_hash": "4c295ef605b9a2f1a4c5a705893fed76",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 147,
"avg_line_length": 40.09090909090909,
"alnum_prop": 0.58994708994709,
"repo_name": "Koet-273/Projectmanagement",
"id": "922db5cd703ef679336530ddfc94f9e5abc1677c",
"size": "5292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12332"
}
],
"symlink_target": ""
} |
from msrest.paging import Paged
class VirtualNetworkPeeringPaged(Paged):
"""
A paging container for iterating over a list of :class:`VirtualNetworkPeering <azure.mgmt.network.v2017_08_01.models.VirtualNetworkPeering>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[VirtualNetworkPeering]'}
}
def __init__(self, *args, **kwargs):
super(VirtualNetworkPeeringPaged, self).__init__(*args, **kwargs)
| {
"content_hash": "f24c816a30f453918a012eb5d6eb761b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 151,
"avg_line_length": 32.6875,
"alnum_prop": 0.6462715105162524,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "a5f6cf1d46a903a74e1a3a50005f022cbe0a9fb0",
"size": "997",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/virtual_network_peering_paged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from staff.models import Department, Position
class PositionInline(admin.TabularInline):
model = Position
extra = 1
class DepartmentAdmin(admin.ModelAdmin):
fields = ['name', 'email', 'chair']
inlines = [PositionInline]
list_display = ('name', 'email')
ordering = ['name']
admin.site.register(Department, DepartmentAdmin) | {
"content_hash": "9c332c00520bb56cc4df4c14328db675",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 48,
"avg_line_length": 27.785714285714285,
"alnum_prop": 0.7017994858611826,
"repo_name": "lynchnf/maneki-neko-web",
"id": "950aff3210f93755ac8846df919dfe8d66c37877",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "staff/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17591"
},
{
"name": "HTML",
"bytes": "36953"
},
{
"name": "JavaScript",
"bytes": "331952"
},
{
"name": "Python",
"bytes": "55173"
}
],
"symlink_target": ""
} |
from sqlalchemy import Column, event, ForeignKey, Integer, String
from rdr_service.model.base import Base, model_insert_listener, model_update_listener
from rdr_service.model.consent_file import ConsentFile
from rdr_service.model.participant import Participant
from rdr_service.model.utils import UTCDateTime
class HealthProConsentFile(Base):
__tablename__ = 'hpro_consent_files'
id = Column("id", Integer, primary_key=True, autoincrement=True, nullable=False)
created = Column(UTCDateTime)
modified = Column(UTCDateTime)
participant_id = Column(Integer, ForeignKey(Participant.participantId))
consent_file_id = Column(Integer, ForeignKey(ConsentFile.id))
file_upload_time = Column(UTCDateTime, nullable=True)
file_path = Column(String(250), nullable=True)
event.listen(HealthProConsentFile, "before_insert", model_insert_listener)
event.listen(HealthProConsentFile, "before_update", model_update_listener)
| {
"content_hash": "bbf4948e0bd2f113da65ae4072d811d6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 85,
"avg_line_length": 42.95454545454545,
"alnum_prop": 0.7777777777777778,
"repo_name": "all-of-us/raw-data-repository",
"id": "6ea8439b9ef20f3ee2a9507866d8502fef282976",
"size": "945",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/model/hpro_consent_files.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
from subprocess import check_output
import re
import json
from os.path import exists, join, normpath
from licenses_vivaldi_texts import onlineLicenses
try:
f = open(normpath("gen/vivaldi/vivapp/module_list"), mode="r", encoding="utf-8")
maindeps = f.read()
f.close()
except:
maindeps = ""
modules = {}
for m in re.findall(r"(.*node_modules[/\\]((@[^/\\]+[/\\])?[^@][^/\\\n]+))\n", maindeps):
moduledir = m[0]
modulename = m[1]
if (moduledir in modules
or (modulename == "chrome") # ours
or (modulename == "wo-stringencoding") # ours
or modulename == "vivaldi-color" # ours
or (modulename == "url") # ignore for now, LICENSE file is on master, https://github.com/defunctzombie/node-url
or (modulename == "ripemd160") # ignore for now, LICENSE file is on master, https://github.com/cryptocoinjs/ripemd160
or (modulename == "indexof") # trivial
or (modulename == "binary-search") # CC0-1.0, no need to put in credits file
):
continue
entry = {
"name": modulename,
"License File": "", # Can't be None due to string conversion below
}
# get license file (in order of preference)
for l in ["LICENSE-MIT", "LICENSE-MIT.TXT", "LICENSE.MIT", "LICENSE.BSD",
"LICENSE.APACHE2", "LICENSE", "LICENSE.txt", "LICENSE.md", "License",
"license.txt", "License.md", "LICENSE.mkd", "UNLICENSE"]:
file_name = join(moduledir, l)
if exists(file_name):
entry["License File"] = file_name
f = open(file_name, mode="r", encoding="utf-8")
entry["license"] = f.read()
f.close()
break
# get one word license type from package.json
f = open(join(moduledir, "package.json"), mode="r", encoding="utf-8")
pjson = json.loads(f.read())
f.close()
preferred = None
if "license" in pjson:
preferred = "license"
elif "licence" in pjson: # typo in react-list
preferred = "licence"
elif "licenses" in pjson:
preferred = "licenses"
if preferred:
if type(pjson[preferred]) is list:
entry["licensetype"] = pjson[preferred][0]["type"]
entry["licenseurl"] = pjson[preferred][0]["url"]
elif type(pjson[preferred]) is dict:
entry["licensetype"] = pjson[preferred]["type"]
entry["licenseurl"] = pjson[preferred]["url"]
else:
entry["licensetype"] = pjson[preferred]
if "licensetype" in entry and entry["licensetype"] not in [
"(BSD-3-Clause AND Apache-2.0)",
"(MIT AND Zlib)",
"Apache 2.0",
"Apache License, Version 2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"BSD",
"0BSD",
"CC0-1.0",
"Creative Commons Attribution 2.5 License",
"ISC",
"MIT Licensed. http://www.opensource.org/licenses/mit-license.php",
"MIT",
"(MPL-2.0 OR Apache-2.0)",
"MPL",
"Public Domain",
"WTFPL"
]:
print("ERROR: " + moduledir + " uses a license that hasn't been reviewed for Vivaldi: " + entry["licensetype"])
exit(1)
if not "license" in entry and "licenseurl" in entry:
if entry["licenseurl"] in onlineLicenses:
entry["license"] = onlineLicenses[entry["licenseurl"]]
else:
print("ERROR: " + modulename + " provides URL " + entry["licenseurl"] + " as a license but it hasn't been copied to licenses_vivaldi_texts.py")
exit(1)
if not "license" in entry and "licensetype" in entry:
entry["license"] = entry["licensetype"]
if not "license" in entry:
print("ERROR: License statement missing for module " + moduledir + ". Add it to the list of exceptions in licenses_vivaldi.py if it's not a third party module.")
exit(1)
if "homepage" in pjson:
entry["url"] = pjson["homepage"]
else:
entry["url"] = ("https://www.npmjs.org/package/"+entry["name"])
if "licensetype" in entry and entry["licensetype"] == "MPL":
entry["license_unescaped"] = "Source code is available for download at <a href='http://registry.npmjs.org/"+modulename+"/-/"+modulename+"-"+pjson["version"]+".tgz'>http://registry.npmjs.org/"+modulename+"/-/"+modulename+"-"+pjson["version"]+".tgz<a/>. No source code files were modified for use in Vivaldi."
else:
entry["license_unescaped"] = ""
for e in entry:
try:
entry[e] = entry[e]
except:
pass
modules[moduledir] = entry
ADDITIONAL_PATHS = (
join('..', 'third_party', '_winsparkle_lib'),
join('..', 'third_party', 'sparkle_lib'),
join('..', 'platform_media'),
join('..', 'scripts', 'licenses'),
join('..', 'vivapp', 'src', 'browserjs'),
join('..', 'vivapp', 'src', 'components', 'image-inspector'),
join('..', 'vivapp', 'src', 'util'),
)
SPECIAL_CASES = {
join('..', 'platform_media'): {
"Name": "Opera",
"URL": "http://www.opera.com/",
"License": "BSD",
"License File": "/../platform_media/OPERA_LICENSE.txt",
},
join('..', 'third_party', '_winsparkle_lib'): {
"Name": "WinSparkle",
"URL": "http://winsparkle.org/",
"License": "MIT",
"License File": "/../third_party/_winsparkle_lib/COPYING",
},
join('..', 'thirdparty', 'macsparkle'): {
"Name": "Sparkle",
"URL": "http://sparkle-project.org/",
"License": "MIT",
"License File": "/../thirdparty/macsparkle/LICENSE",
},
join('..', 'vivapp', 'src', 'browserjs'): {
"Name": "boss-select",
"URL": "https://github.com/grks/boss-select#readme",
"License": "MIT",
"License File": "/../vivapp/src/browserjs/boss-select-license.txt",
},
join('..', 'vivapp', 'src', 'components', 'image-inspector'): {
"Name": "Exif.js",
"URL": "https://github.com/exif-js/exif-js",
"License": "MIT",
"License File": "/../vivapp/src/components/image-inspector/exif_js_license.txt",
},
join('..', 'scripts', 'licenses'): {
"Name": "Profile avatar illustrations",
"URL": "https://www.flaticon.com/",
"License": "attribution required",
"License File": "/../scripts/licenses/avatar_profiles_license.txt",
},
join('..', 'vivapp', 'src', 'util'): {
"Name": "Boyer-Moore-Horspool Search",
"URL": "https://github.com/Chocobo1/bmhs",
"License": "BSD-3-Clause",
"License File": "/../vivapp/src/util/bmhs_license.txt",
},
}
def GetEntries(entry_template, EvaluateTemplate):
entries = []
for module, license in modules.items():
entries.append({
'name': license['name'],
'content': EvaluateTemplate(entry_template, license),
'license_file': license['License File'],
})
return entries
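# Hedged usage sketch (not part of the original script): GetEntries() is normally
# driven by Chromium's about_credits template machinery; the template string and
# the minimal EvaluateTemplate stand-in below are assumptions for illustration.
def _example_get_entries():
    def evaluate(template, env):
        # Substitute {{placeholder}} tokens with the corresponding license fields.
        for key, value in env.items():
            template = template.replace('{{%s}}' % key, str(value))
        return template
    return GetEntries('<h3>{{name}}</h3><pre>{{license}}</pre>', evaluate)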
| {
"content_hash": "3bd3a39ac1f3aa5b86173ba6bc0f7604",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 311,
"avg_line_length": 35.766304347826086,
"alnum_prop": 0.5970217292204832,
"repo_name": "ric2b/Vivaldi-browser",
"id": "e28c58f23f121297478db4c31915abeb973e649f",
"size": "6581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/licenses/licenses_vivaldi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
# Imports needed by this function; `log` is assumed to be a logger-like helper
# provided elsewhere in the module.
import os
import stat
from subprocess import Popen, PIPE
from urllib.parse import urlparse
def get_recently_modified_keys(db_connection, no_of_days = -4):
o = urlparse(db_connection)
db_scheme = o.scheme
db_host = o.hostname
db_name = o.path.lstrip('/')
db_user = o.username
db_password = o.password
# NOTE: SQL expects a positive number for interval value...
if no_of_days < 0:
no_of_days = no_of_days * -1
sql_src = f'''SELECT eprintid AS eprint_id FROM eprint WHERE CONCAT_WS('-', LPAD(lastmod_year, 4, '0'), LPAD(lastmod_month, 2, '0'), LPAD(lastmod_day, 2, '0')) >= DATE_SUB(NOW(), INTERVAL {no_of_days} DAY) ORDER BY eprintid DESC'''
#log.print(f'DEBUG SQL: {sql_src}')
log.print('Updating my.cnf')
with open('my.cnf', 'w') as f:
f.write('[client]\n')
f.write(f'host = {db_host}\n')
f.write(f'user = {db_user}\n')
f.write(f'password = {db_password}\n')
os.chmod('my.cnf', stat.S_IWUSR | stat.S_IRUSR)
# Verify that SQL file exists
# Build up command and execute it
cmd = [ "mysql" ]
if os.path.exists('my.cnf'):
my_cnf = os.path.abspath('my.cnf')
cmd.append(f'--defaults-file={my_cnf}')
else:
if db_host != '':
cmd.append('--host')
cmd.append(db_host)
if db_user != '':
cmd.append('--user')
cmd.append(db_user)
if db_password != '':
cmd.append(f'--password={db_password}')
cmd.append('--default-character-set=utf8')
cmd.append(f'--execute={sql_src}')
cmd.append('--batch')
if db_name:
cmd.append(db_name)
else:
printf("Missing repository id db name")
return []
# NOTE: Now assemble and run the MySQL command to read in the file.
eprint_ids = []
with Popen(cmd, stdout = PIPE, encoding = 'utf-8') as proc:
src = proc.stdout.read()
for line in src.split("\n"):
if line.isdigit():
eprint_ids.append(line.strip(" \n\r"))
return eprint_ids
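# Illustrative usage sketch: the function expects a URL-style connection
# string that urlparse() can split into host, user, password and database
# name. The URL below is a placeholder, and actually running the call needs
# the `mysql` client plus a reachable EPrints database, so it is left
# commented out.
# db_connection = 'mysql://eprints_user:secret@localhost/eprints_repo'
# recent_eprint_ids = get_recently_modified_keys(db_connection, no_of_days=-7)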
| {
"content_hash": "b8be3c9534d9dbf2937780277c00d727",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 235,
"avg_line_length": 35.81818181818182,
"alnum_prop": 0.5614213197969543,
"repo_name": "caltechlibrary/epgo",
"id": "28a55d61baad46b1fec1b7c1bf7ed6bc1f74b883",
"size": "2095",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "eprinttools/mysql_access.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8820"
},
{
"name": "Go",
"bytes": "65250"
},
{
"name": "HTML",
"bytes": "5343"
},
{
"name": "Makefile",
"bytes": "3662"
},
{
"name": "PHP",
"bytes": "1299"
},
{
"name": "Shell",
"bytes": "6978"
}
],
"symlink_target": ""
} |
import base64
import calendar
import datetime
import re
import unicodedata
import warnings
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, quote,
quote_plus, scheme_chars, unquote, unquote_plus,
urlencode as original_urlencode, uses_params,
)
from django.core.exceptions import TooManyFieldsSent
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.functional import keep_lazy_text
# based on RFC 7232, Appendix C
ETAG_MATCH = re.compile(r'''
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
''', re.X)
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
FIELDS_MATCH = re.compile('[&;]')
@keep_lazy_text
def urlquote(url, safe='/'):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote() function.
(was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlquote() is deprecated in favor of '
'urllib.parse.quote().',
RemovedInDjango40Warning, stacklevel=2,
)
return quote(url, safe)
@keep_lazy_text
def urlquote_plus(url, safe=''):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote_plus()
function. (was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlquote_plus() is deprecated in favor of '
'urllib.parse.quote_plus().',
RemovedInDjango40Warning, stacklevel=2,
)
return quote_plus(url, safe)
@keep_lazy_text
def urlunquote(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote() function.
(was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlunquote() is deprecated in favor of '
'urllib.parse.unquote().',
RemovedInDjango40Warning, stacklevel=2,
)
return unquote(quoted_url)
@keep_lazy_text
def urlunquote_plus(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote_plus()
function. (was used for unicode handling on Python 2)
"""
warnings.warn(
'django.utils.http.urlunquote_plus() is deprecated in favor of '
'urllib.parse.unquote_plus().',
RemovedInDjango40Warning, stacklevel=2,
)
return unquote_plus(quoted_url)
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
query_params = []
for key, value in query:
if value is None:
raise TypeError(
"Cannot encode None for key '%s' in a query string. Did you "
"mean to pass an empty string or omit the value?" % key
)
elif not doseq or isinstance(value, (str, bytes)):
query_val = value
else:
try:
itr = iter(value)
except TypeError:
query_val = value
else:
# Consume generators and iterators, when doseq=True, to
# work around https://bugs.python.org/issue31706.
query_val = []
for item in itr:
if item is None:
raise TypeError(
"Cannot encode None for key '%s' in a query "
"string. Did you mean to pass an empty string or "
"omit the value?" % key
)
elif not isinstance(item, bytes):
item = str(item)
query_val.append(item)
query_params.append((key, query_val))
return original_urlencode(query_params, doseq)
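# Illustrative checks (sample data is arbitrary): with doseq=True each item of
# a sequence value becomes its own key/value pair, and MultiValueDict values
# are expanded via .lists().
assert urlencode({'tag': ['a', 'b']}, doseq=True) == 'tag=a&tag=b'
assert urlencode(MultiValueDict({'a': ['1', '2']}), doseq=True) == 'a=1&a=2'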
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
# email.utils.parsedate() does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
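# Illustrative round trip (1234 is an arbitrary value): 1234 == 34 * 36 + 10,
# i.e. digits 'y' (34) and 'a' (10) in base 36.
assert int_to_base36(1234) == 'ya'
assert base36_to_int('ya') == 1234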
def urlsafe_base64_encode(s):
"""
Encode a bytestring to a base64 string for use in URLs. Strip any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=').decode('ascii')
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = s.encode()
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
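# Illustrative round trip (b'hello' is an arbitrary payload): the encoder
# strips the trailing '=' padding and the decoder restores enough padding to
# satisfy base64.
assert urlsafe_base64_encode(b'hello') == 'aGVsbG8'
assert urlsafe_base64_decode('aGVsbG8') == b'hello'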
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == '*':
return ['*']
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
return [match.group(1) for match in etag_matches if match]
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
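# Illustrative checks (hosts are made up): a leading dot matches the domain
# itself and any subdomain; anything else must match exactly.
assert is_same_domain('foo.example.com', '.example.com')
assert is_same_domain('example.com', '.example.com')
assert not is_same_domain('evil.com', 'example.com')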
def is_safe_url(url, allowed_hosts, require_https=False):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
elif isinstance(allowed_hosts, str):
allowed_hosts = {allowed_hosts}
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return (_is_safe_url(url, allowed_hosts, require_https=require_https) and
_is_safe_url(url.replace('\\', '/'), allowed_hosts, require_https=require_https))
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _is_safe_url(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
# In that URL, example.com is not the hostname but, a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = 'http'
valid_schemes = ['https'] if require_https else ['http', 'https']
return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
(not scheme or scheme in valid_schemes))
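# Illustrative checks for is_safe_url() defined above (hosts and paths are
# made up): relative paths and allowed hosts pass, foreign hosts do not.
assert is_safe_url('/next/', allowed_hosts={'example.com'})
assert is_safe_url('https://example.com/page', allowed_hosts={'example.com'})
assert not is_safe_url('https://evil.com/', allowed_hosts={'example.com'})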
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
errors='replace', fields_limit=None):
"""
Return a list of key/value tuples parsed from query string.
Copied from urlparse with an additional "fields_limit" argument.
Copyright (C) 2013 Python Software Foundation (see LICENSE.python).
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
fields_limit: maximum number of fields parsed or an exception
is raised. None means no limit and is the default.
"""
if fields_limit:
pairs = FIELDS_MATCH.split(qs, fields_limit)
if len(pairs) > fields_limit:
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
else:
pairs = FIELDS_MATCH.split(qs)
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if nv[1] or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
r.append((name, value))
return r
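# Illustrative check (query string is made up): '+' decodes to a space and
# repeated keys are preserved; exceeding fields_limit would raise
# TooManyFieldsSent instead.
assert limited_parse_qsl('a=1&a=2&b=hello+world', fields_limit=10) == [
    ('a', '1'), ('a', '2'), ('b', 'hello world')]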
def escape_leading_slashes(url):
"""
If redirecting to an absolute path (two leading slashes), a slash must be
escaped to prevent browsers from handling the path as schemaless and
redirecting to another host.
"""
if url.startswith('//'):
url = '/%2F{}'.format(url[2:])
return url
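# Illustrative checks (paths are made up): only a double leading slash gets
# escaped.
assert escape_leading_slashes('//evil.com/path') == '/%2Fevil.com/path'
assert escape_leading_slashes('/ok/') == '/ok/'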
| {
"content_hash": "4497e3d58ae8b59304aeff17c36da2cd",
"timestamp": "",
"source": "github",
"line_count": 460,
"max_line_length": 93,
"avg_line_length": 34.708695652173915,
"alnum_prop": 0.6156833270700238,
"repo_name": "schinckel/django",
"id": "050375832cf8640ea25a2dd5d70d6033cff2734e",
"size": "15966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/utils/http.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85024"
},
{
"name": "HTML",
"bytes": "224566"
},
{
"name": "JavaScript",
"bytes": "251536"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "13234142"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |