repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 distinct values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 distinct values)
---|---|---|---|---|---|
r39132/airflow | tests/operators/test_hive_to_mysql.py | 4 | 4630 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from mock import patch, PropertyMock
from airflow.operators.hive_to_mysql import HiveToMySqlTransfer
from airflow.utils.operator_helpers import context_to_airflow_vars
class TestHiveToMySqlTransfer(unittest.TestCase):
def setUp(self):
self.kwargs = dict(
sql='sql',
mysql_table='table',
hiveserver2_conn_id='hiveserver2_default',
mysql_conn_id='mysql_default',
task_id='test_hive_to_mysql',
dag=None
)
@patch('airflow.operators.hive_to_mysql.MySqlHook')
@patch('airflow.operators.hive_to_mysql.HiveServer2Hook')
def test_execute(self, mock_hive_hook, mock_mysql_hook):
HiveToMySqlTransfer(**self.kwargs).execute(context={})
mock_hive_hook.assert_called_once_with(hiveserver2_conn_id=self.kwargs['hiveserver2_conn_id'])
mock_hive_hook.return_value.get_records.assert_called_once_with('sql', hive_conf={})
mock_mysql_hook.assert_called_once_with(mysql_conn_id=self.kwargs['mysql_conn_id'])
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table=self.kwargs['mysql_table'],
rows=mock_hive_hook.return_value.get_records.return_value
)
@patch('airflow.operators.hive_to_mysql.MySqlHook')
@patch('airflow.operators.hive_to_mysql.HiveServer2Hook')
def test_execute_mysql_preoperator(self, mock_hive_hook, mock_mysql_hook):
self.kwargs.update(dict(mysql_preoperator='preoperator'))
HiveToMySqlTransfer(**self.kwargs).execute(context={})
mock_mysql_hook.return_value.run.assert_called_once_with(self.kwargs['mysql_preoperator'])
@patch('airflow.operators.hive_to_mysql.MySqlHook')
@patch('airflow.operators.hive_to_mysql.HiveServer2Hook')
def test_execute_with_mysql_postoperator(self, mock_hive_hook, mock_mysql_hook):
self.kwargs.update(dict(mysql_postoperator='postoperator'))
HiveToMySqlTransfer(**self.kwargs).execute(context={})
mock_mysql_hook.return_value.run.assert_called_once_with(self.kwargs['mysql_postoperator'])
@patch('airflow.operators.hive_to_mysql.MySqlHook')
@patch('airflow.operators.hive_to_mysql.NamedTemporaryFile')
@patch('airflow.operators.hive_to_mysql.HiveServer2Hook')
def test_execute_bulk_load(self, mock_hive_hook, mock_tmp_file, mock_mysql_hook):
type(mock_tmp_file).name = PropertyMock(return_value='tmp_file')
context = {}
self.kwargs.update(dict(bulk_load=True))
HiveToMySqlTransfer(**self.kwargs).execute(context=context)
mock_tmp_file.assert_called_once_with()
mock_hive_hook.return_value.to_csv.assert_called_once_with(
self.kwargs['sql'],
mock_tmp_file.return_value.name,
delimiter='\t',
lineterminator='\n',
output_header=False,
hive_conf=context_to_airflow_vars(context)
)
mock_mysql_hook.return_value.bulk_load.assert_called_once_with(
table=self.kwargs['mysql_table'],
tmp_file=mock_tmp_file.return_value.name
)
mock_tmp_file.return_value.close.assert_called_once_with()
@patch('airflow.operators.hive_to_mysql.MySqlHook')
@patch('airflow.operators.hive_to_mysql.HiveServer2Hook')
def test_execute_with_hive_conf(self, mock_hive_hook, mock_mysql_hook):
context = {}
self.kwargs.update(dict(hive_conf={'mapreduce.job.queuename': 'fake_queue'}))
HiveToMySqlTransfer(**self.kwargs).execute(context=context)
hive_conf = context_to_airflow_vars(context)
hive_conf.update(self.kwargs['hive_conf'])
mock_hive_hook.return_value.get_records.assert_called_once_with(
self.kwargs['sql'],
hive_conf=hive_conf
)
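# Illustrative sketch (not part of the original tests): the operator exercised
# above is normally declared inside a DAG with the same parameters used in
# setUp(); the DAG object and SQL below are assumptions for the example only.
#
#     transfer = HiveToMySqlTransfer(
#         task_id='hive_to_mysql_example',
#         sql='SELECT * FROM hive_table',      # assumed query
#         mysql_table='mysql_table',
#         hiveserver2_conn_id='hiveserver2_default',
#         mysql_conn_id='mysql_default',
#         dag=dag,                             # assumed: a DAG defined elsewhere
#     )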
| apache-2.0 |
Passtechsoft/TPEAlpGen | blender/release/scripts/addons_contrib/oscurart_tools/oscurart_objects.py | 1 | 16403 | import bpy
import math
import sys
import os
import stat
import bmesh
import time
import random
from bpy_extras.object_utils import world_to_camera_view
##------------------------ SEARCH AND SELECT ------------------------
## SET ENVIRONMENT VARIABLE
bpy.types.Scene.SearchAndSelectOt = bpy.props.StringProperty(default="Object name initials")
class SearchAndSelectOt(bpy.types.Operator):
bl_idname = "object.search_and_select_osc"
bl_label = "Search And Select"
bl_options = {"REGISTER", "UNDO"}
start = bpy.props.BoolProperty(name="Start With", default=True)
count = bpy.props.BoolProperty(name="Contain", default=True)
end = bpy.props.BoolProperty(name="End", default=True)
def execute(self, context):
for objeto in bpy.context.scene.objects:
variableNombre = bpy.context.scene.SearchAndSelectOt
if self.start:
if objeto.name.startswith(variableNombre):
objeto.select = True
if self.count:
if objeto.name.count(variableNombre):
objeto.select = True
if self.end:
if objeto.name.endswith(variableNombre):
objeto.select = True
return {'FINISHED'}
##-------------------------RENAME OBJECTS----------------------------------
## CREATE VARIABLE
bpy.types.Scene.RenameObjectOt = bpy.props.StringProperty(default="Type here")
class renameObjectsOt (bpy.types.Operator):
bl_idname = "object.rename_objects_osc"
bl_label = "Rename Objects"
bl_options = {"REGISTER", "UNDO"}
def execute(self,context):
listaObj = bpy.context.selected_objects[:]
for objeto in listaObj:
objeto.name = bpy.context.scene.RenameObjectOt
return {'FINISHED'}
##---------------------------REMOVE MODIFIERS AND APPLY MODIFIERS------------------
class oscRemModifiers (bpy.types.Operator):
bl_idname = "object.modifiers_remove_osc"
bl_label = "Remove modifiers"
bl_options = {"REGISTER", "UNDO"}
def execute(self,context):
for objeto in bpy.context.selected_objects:
for modificador in objeto.modifiers:
print(modificador.type)
bpy.context.scene.objects.active=objeto
bpy.ops.object.modifier_remove(modifier=modificador.name)
return {'FINISHED'}
class oscApplyModifiers (bpy.types.Operator):
bl_idname = "object.modifiers_apply_osc"
bl_label = "Apply modifiers"
bl_options = {"REGISTER", "UNDO"}
def execute(self,context):
for objeto in bpy.context.selected_objects:
bpy.ops.object.select_all(action='DESELECT')
bpy.context.scene.objects.active=objeto
objeto.select = True
if objeto.data.users >= 2:
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=False, texture=False, animation=False)
for modificador in objeto.modifiers:
try:
bpy.ops.object.modifier_apply(apply_as="DATA", modifier=modificador.name)
except:
bpy.ops.object.modifier_remove(modifier=modificador.name)
print("* Modifier %s skipping apply" % (modificador.name))
return {'FINISHED'}
## ------------------------------------ RELINK OBJECTS--------------------------------------
def relinkObjects (self):
LISTSCENE=[]
for SCENE in bpy.data.scenes[:]:
if bpy.selection_osc[-1] in SCENE.objects[:]:
LISTSCENE.append(SCENE)
OBJECTS = bpy.selection_osc[:-1]
ACTOBJ = bpy.selection_osc[-1]
OBJSEL = bpy.selection_osc[:]
LISTSCENE.remove(bpy.context.scene)
bpy.ops.object.select_all(action='DESELECT')
for OBJETO in OBJECTS:
if OBJETO.users != len(bpy.data.scenes):
print(OBJETO.name)
OBJETO.select = True
for SCENE in LISTSCENE:
bpy.ops.object.make_links_scene(scene=SCENE.name)
bpy.context.scene.objects.active=ACTOBJ
for OBJ in OBJSEL:
OBJ.select=True
class OscRelinkObjectsBetween (bpy.types.Operator):
bl_idname = "object.relink_objects_between_scenes"
bl_label = "Relink Objects Between Scenes"
bl_options = {"REGISTER", "UNDO"}
def execute (self, context):
relinkObjects(self)
return {'FINISHED'}
## ------------------------------------ COPY GROUPS AND LAYERS--------------------------------------
def CopyObjectGroupsAndLayers (self):
OBSEL=bpy.selection_osc[:]
GLOBALLAYERS=list(OBSEL[-1].layers[:])
ACTSCENE=bpy.context.scene
GROUPS=OBSEL[-1].users_group
ACTOBJ=OBSEL[-1]
for OBJECT in OBSEL[:-1]:
for scene in bpy.data.scenes[:]:
# IF THE ACTIVE OBJECT IS IN THE SCENE
if ACTOBJ in scene.objects[:] and OBJECT in scene.objects[:]:
scene.object_bases[OBJECT.name].layers[:] = scene.object_bases[ACTOBJ.name].layers[:]
elif ACTOBJ not in scene.objects[:] and OBJECT in scene.objects[:]:
scene.object_bases[OBJECT.name].layers[:] = list(GLOBALLAYERS)
# REMOVE FROM ALL GROUPS
for GROUP in bpy.data.groups[:]:
if GROUP in OBJECT.users_group[:]:
GROUP.objects.unlink(OBJECT)
# ADD THE OBJECT TO THE GROUPS
for GROUP in GROUPS:
GROUP.objects.link(OBJECT)
bpy.context.window.screen.scene = ACTSCENE
bpy.context.scene.objects.active=ACTOBJ
class OscCopyObjectGAL (bpy.types.Operator):
bl_idname = "object.copy_objects_groups_layers"
bl_label = "Copy Groups And Layers"
bl_options = {"REGISTER", "UNDO"}
def execute (self, context):
CopyObjectGroupsAndLayers (self)
return {'FINISHED'}
## ------------------------------------ SELECTION --------------------------------------
bpy.selection_osc=[]
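# bpy.selection_osc keeps the current selection in the order it was made:
# select_osc() below appends newly selected objects and removes deselected
# ones, so index -1 is always the most recently selected (active) object.
# relinkObjects() and CopyObjectGroupsAndLayers() above rely on that ordering.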
def select_osc():
if bpy.context.mode == "OBJECT":
obj = bpy.context.object
sel = len(bpy.context.selected_objects)
if sel == 0:
bpy.selection_osc=[]
else:
if sel == 1:
bpy.selection_osc=[]
bpy.selection_osc.append(obj)
elif sel > len(bpy.selection_osc):
for sobj in bpy.context.selected_objects:
if (sobj in bpy.selection_osc) == False:
bpy.selection_osc.append(sobj)
elif sel < len(bpy.selection_osc):
for it in bpy.selection_osc:
if (it in bpy.context.selected_objects) == False:
bpy.selection_osc.remove(it)
class OscSelection(bpy.types.Header):
bl_label = "Selection Osc"
bl_space_type = "VIEW_3D"
def __init__(self):
select_osc()
def draw(self, context):
"""
layout = self.layout
row = layout.row()
row.label("Sels: "+str(len(bpy.selection_osc)))
"""
##=============== DISTRIBUTE ======================
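# ObjectDistributeOscurart spaces the selected objects evenly between the first
# and the last object in selection order: it divides the location difference of
# those two objects by (selection count - 1) and offsets each object by one
# chunk along every axis enabled through X/Y/Z.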
def ObjectDistributeOscurart (self, X, Y, Z):
if len(bpy.selection_osc[:]) > 1:
# VARIABLES
dif = bpy.selection_osc[-1].location-bpy.selection_osc[0].location
chunkglobal = dif/(len(bpy.selection_osc[:])-1)
chunkx = 0
chunky = 0
chunkz = 0
deltafst = bpy.selection_osc[0].location
# ARRANGE
for OBJECT in bpy.selection_osc[:]:
if X: OBJECT.location.x=deltafst[0]+chunkx
if Y: OBJECT.location[1]=deltafst[1]+chunky
if Z: OBJECT.location.z=deltafst[2]+chunkz
chunkx+=chunkglobal[0]
chunky+=chunkglobal[1]
chunkz+=chunkglobal[2]
else:
self.report({'ERROR'}, "Select at least two objects!")
class DialogDistributeOsc(bpy.types.Operator):
bl_idname = "object.distribute_osc"
bl_label = "Distribute Objects"
Boolx = bpy.props.BoolProperty(name="X")
Booly = bpy.props.BoolProperty(name="Y")
Boolz = bpy.props.BoolProperty(name="Z")
def execute(self, context):
ObjectDistributeOscurart(self, self.Boolx,self.Booly,self.Boolz)
return {'FINISHED'}
def invoke(self, context, event):
self.Boolx = True
self.Booly = True
self.Boolz = True
return context.window_manager.invoke_props_dialog(self)
## ======================== SET LAYERS TO OTHER SCENES =====================================
def DefSetLayersToOtherScenes():
actsc = bpy.context.screen.scene
for object in bpy.context.selected_objects[:]:
bpy.context.screen.scene = actsc
lyrs = object.layers[:]
for scene in bpy.data.scenes[:]:
if object in scene.objects[:]:
bpy.context.screen.scene = scene
object.layers = lyrs
else:
print ("* %s is not in %s" % (object.name, scene.name))
bpy.context.screen.scene = actsc
class SetLayersToOtherScenes (bpy.types.Operator):
bl_idname = "object.set_layers_to_other_scenes"
bl_label = "Copy actual Layers to Other Scenes"
bl_options = {"REGISTER", "UNDO"}
def execute (self, context):
DefSetLayersToOtherScenes()
return {'FINISHED'}
## ======================== RENDER OBJECTS IN CAMERA =====================================
def DefRenderOnlyInCamera():
# create groups
if "INCAMERA" not in bpy.data.groups:
bpy.data.groups.new("INCAMERA")
if "NOTINCAMERA" not in bpy.data.groups:
bpy.data.groups.new("NOTINCAMERA")
# clean out the groups
for ob in bpy.data.objects:
if ob.name in bpy.data.groups["INCAMERA"].objects:
bpy.data.groups["INCAMERA"].objects.unlink(ob)
if ob.name in bpy.data.groups["NOTINCAMERA"].objects:
bpy.data.groups["NOTINCAMERA"].objects.unlink(ob)
# sort objects into groups
for ob in bpy.data.objects:
obs = False
if ob.type == "MESH":
tm = ob.to_mesh(bpy.context.scene, True, "RENDER")
for vert in tm.vertices:
cam = world_to_camera_view(bpy.context.scene,bpy.context.scene.camera,vert.co+ob.location)
if cam[0] >= -0 and cam[0] <= 1 and cam[1] >= 0 and cam[1] <= 1:
obs = True
del(tm)
else:
obs = True
if obs:
bpy.data.groups["INCAMERA"].objects.link(ob)
else:
bpy.data.groups["NOTINCAMERA"].objects.link(ob)
class RenderOnlyInCamera (bpy.types.Operator):
bl_idname = "group.group_in_out_camera"
bl_label = "Make a group for objects in outer camera"
bl_options = {"REGISTER", "UNDO"}
def execute (self, context):
DefRenderOnlyInCamera()
return {'FINISHED'}
##------------------------ DUPLICATE OBJECTS SYMMETRY ------------------------
def duplicateSymmetrical (self, disconect):
for objeto in bpy.context.selected_objects:
OBSEL = objeto
bpy.ops.object.select_all(action='DESELECT')
objeto.select = 1
bpy.context.scene.objects.active = objeto
bpy.ops.object.duplicate(linked=1)
OBDUP=bpy.context.active_object
print(OBDUP)
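# Driver layout on the duplicate, in the order the drivers are added below:
# indices 0-2 drive location (X negated, Y/Z copied), 3-5 drive scale
# (X negated, Y/Z copied) and 6-8 drive rotation (X copied, Y/Z negated),
# so the duplicate mirrors the original across the YZ plane.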
OBDUP.driver_add("location")
OBDUP.animation_data.drivers[0].driver.expression = "-var"
OBDUP.animation_data.drivers[0].driver.variables.new()
OBDUP.animation_data.drivers[0].driver.variables[0].type = "TRANSFORMS"
OBDUP.animation_data.drivers[0].driver.variables[0].targets[0].id = objeto
OBDUP.animation_data.drivers[0].driver.variables[0].targets[0].transform_type = 'LOC_X'
OBDUP.animation_data.drivers[1].driver.expression = "var"
OBDUP.animation_data.drivers[1].driver.variables.new()
OBDUP.animation_data.drivers[1].driver.variables[0].type = "TRANSFORMS"
OBDUP.animation_data.drivers[1].driver.variables[0].targets[0].id = objeto
OBDUP.animation_data.drivers[1].driver.variables[0].targets[0].transform_type = 'LOC_Y'
OBDUP.animation_data.drivers[2].driver.expression = "var"
OBDUP.animation_data.drivers[2].driver.variables.new()
OBDUP.animation_data.drivers[2].driver.variables[0].type = "TRANSFORMS"
OBDUP.animation_data.drivers[2].driver.variables[0].targets[0].id = objeto
OBDUP.animation_data.drivers[2].driver.variables[0].targets[0].transform_type = 'LOC_Z'
OBDUP.driver_add("scale")
OBDUP.animation_data.drivers[3].driver.expression = "-var"
OBDUP.animation_data.drivers[3].driver.variables.new()
OBDUP.animation_data.drivers[3].driver.variables[0].type = "TRANSFORMS"
OBDUP.animation_data.drivers[3].driver.variables[0].targets[0].id = objeto
OBDUP.animation_data.drivers[3].driver.variables[0].targets[0].transform_type = 'SCALE_X'
OBDUP.animation_data.drivers[4].driver.expression = "var"
OBDUP.animation_data.drivers[4].driver.variables.new()
OBDUP.animation_data.drivers[4].driver.variables[0].type = "TRANSFORMS"
OBDUP.animation_data.drivers[4].driver.variables[0].targets[0].id = objeto
OBDUP.animation_data.drivers[4].driver.variables[0].targets[0].transform_type = 'SCALE_Y'
OBDUP.animation_data.drivers[5].driver.expression = "var"
OBDUP.animation_data.drivers[5].driver.variables.new()
OBDUP.animation_data.drivers[5].driver.variables[0].type = "TRANSFORMS"
OBDUP.animation_data.drivers[5].driver.variables[0].targets[0].id = objeto
OBDUP.animation_data.drivers[5].driver.variables[0].targets[0].transform_type = 'SCALE_Z'
OBDUP.driver_add("rotation_euler")
OBDUP.animation_data.drivers[6].driver.expression = "var"
OBDUP.animation_data.drivers[6].driver.variables.new()
OBDUP.animation_data.drivers[6].driver.variables[0].type = "TRANSFORMS"
OBDUP.animation_data.drivers[6].driver.variables[0].targets[0].id = objeto
OBDUP.animation_data.drivers[6].driver.variables[0].targets[0].transform_type = 'ROT_X'
OBDUP.animation_data.drivers[7].driver.expression = "-var"
OBDUP.animation_data.drivers[7].driver.variables.new()
OBDUP.animation_data.drivers[7].driver.variables[0].type = "TRANSFORMS"
OBDUP.animation_data.drivers[7].driver.variables[0].targets[0].id = objeto
OBDUP.animation_data.drivers[7].driver.variables[0].targets[0].transform_type = 'ROT_Y'
OBDUP.animation_data.drivers[8].driver.expression = "-var"
OBDUP.animation_data.drivers[8].driver.variables.new()
OBDUP.animation_data.drivers[8].driver.variables[0].type = "TRANSFORMS"
OBDUP.animation_data.drivers[8].driver.variables[0].targets[0].id = objeto
OBDUP.animation_data.drivers[8].driver.variables[0].targets[0].transform_type = 'ROT_Z'
if disconect != True:
bpy.ops.object.make_single_user(obdata=True, object=True)
bpy.context.active_object.driver_remove("location")
bpy.context.active_object.driver_remove("rotation_euler")
bpy.context.active_object.driver_remove("scale")
class oscDuplicateSymmetricalOp (bpy.types.Operator):
bl_idname = "object.duplicate_object_symmetry_osc"
bl_label = "Oscurart Duplicate Symmetrical"
bl_options = {"REGISTER", "UNDO"}
desconecta = bpy.props.BoolProperty(name="Keep Connection", default=True)
def execute(self,context):
duplicateSymmetrical(self, self.desconecta)
return {'FINISHED'}
##------------------------ OBJECTS TO GROUPS ------------------------
def DefObjectToGroups():
scgr = bpy.data.groups.new("%s_MSH" % (os.path.basename(bpy.data.filepath).replace(".blend","")))
for ob in bpy.data.objects:
if ob.type == "MESH":
gr = bpy.data.groups.new(ob.name)
gr.objects.link(ob)
scgr.objects.link(ob)
class ObjectsToGroups (bpy.types.Operator):
bl_idname = "object.objects_to_groups"
bl_label = "Objects to Groups"
bl_options = {"REGISTER", "UNDO"}
def execute (self, context):
DefObjectToGroups()
return {'FINISHED'}
| gpl-3.0 |
kinow-io/kinow-python-sdk | kinow_client/apis/countries_api.py | 1 | 4956 | # coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.41
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class CountriesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_countries(self, **kwargs):
"""
Get country list
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_countries(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:param bool bypass_pagination:
:return: Countries
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_countries_with_http_info(**kwargs)
else:
(data) = self.get_countries_with_http_info(**kwargs)
return data
def get_countries_with_http_info(self, **kwargs):
"""
Get country list
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_countries_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:param bool bypass_pagination:
:return: Countries
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'per_page', 'bypass_pagination']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_countries" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/countries'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
if 'bypass_pagination' in params:
query_params['bypass_pagination'] = params['bypass_pagination']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Countries',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
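# Illustrative usage sketch (not part of the generated client); assumes an
# ApiClient configured elsewhere with valid ApiClientId/ApiClientSecret
# credentials:
#
#     >>> api = CountriesApi()
#     >>> countries = api.get_countries(page=1, per_page=10)
#     >>> thread = api.get_countries(callback=lambda response: print(response))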
| apache-2.0 |
dredgar/ss-info-fetcher | lib/lxml/cssselect.py | 57 | 3366 | """CSS Selectors based on XPath.
This module supports selecting XML/HTML tags based on CSS selectors.
See the `CSSSelector` class for details.
This is a thin wrapper around cssselect 0.7 or later.
"""
from __future__ import absolute_import
from . import etree
try:
import cssselect as external_cssselect
except ImportError:
raise ImportError(
'cssselect does not seem to be installed. '
'See http://packages.python.org/cssselect/')
SelectorSyntaxError = external_cssselect.SelectorSyntaxError
ExpressionError = external_cssselect.ExpressionError
SelectorError = external_cssselect.SelectorError
__all__ = ['SelectorSyntaxError', 'ExpressionError', 'SelectorError',
'CSSSelector']
class LxmlTranslator(external_cssselect.GenericTranslator):
"""
A custom CSS selector to XPath translator with lxml-specific extensions.
"""
def xpath_contains_function(self, xpath, function):
# Defined there, removed in later drafts:
# http://www.w3.org/TR/2001/CR-css3-selectors-20011113/#content-selectors
if function.argument_types() not in (['STRING'], ['IDENT']):
raise ExpressionError(
"Expected a single string or ident for :contains(), got %r"
% function.arguments)
value = function.arguments[0].value
return xpath.add_condition(
'contains(__lxml_internal_css:lower-case(string(.)), %s)'
% self.xpath_literal(value.lower()))
class LxmlHTMLTranslator(LxmlTranslator, external_cssselect.HTMLTranslator):
"""
lxml extensions + HTML support.
"""
def _make_lower_case(context, s):
return s.lower()
ns = etree.FunctionNamespace('http://codespeak.net/lxml/css/')
ns.prefix = '__lxml_internal_css'
ns['lower-case'] = _make_lower_case
class CSSSelector(etree.XPath):
"""A CSS selector.
Usage::
>>> from lxml import etree, cssselect
>>> select = cssselect.CSSSelector("a tag > child")
>>> root = etree.XML("<a><b><c/><tag><child>TEXT</child></tag></b></a>")
>>> [ el.tag for el in select(root) ]
['child']
To use CSS namespaces, you need to pass a prefix-to-namespace
mapping as ``namespaces`` keyword argument::
>>> rdfns = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
>>> select_ns = cssselect.CSSSelector('root > rdf|Description',
... namespaces={'rdf': rdfns})
>>> rdf = etree.XML((
... '<root xmlns:rdf="%s">'
... '<rdf:Description>blah</rdf:Description>'
... '</root>') % rdfns)
>>> [(el.tag, el.text) for el in select_ns(rdf)]
[('{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Description', 'blah')]
"""
def __init__(self, css, namespaces=None, translator='xml'):
if translator == 'xml':
translator = LxmlTranslator()
elif translator == 'html':
translator = LxmlHTMLTranslator()
elif translator == 'xhtml':
translator = LxmlHTMLTranslator(xhtml=True)
path = translator.css_to_xpath(css)
etree.XPath.__init__(self, path, namespaces=namespaces)
self.css = css
def __repr__(self):
return '<%s %s for %r>' % (
self.__class__.__name__,
hex(abs(id(self)))[2:],
self.css)
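# Minimal sketch (not part of the original module) of the lxml-specific
# ``:contains()`` pseudo-class implemented by LxmlTranslator above; it matches
# case-insensitively against the element's full text content:
#
#     >>> from lxml import etree
#     >>> from lxml.cssselect import CSSSelector
#     >>> select = CSSSelector('p:contains("hello")')
#     >>> root = etree.XML('<div><p>Hello World</p><p>bye</p></div>')
#     >>> [el.text for el in select(root)]
#     ['Hello World']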
| gpl-3.0 |
DemokratieInBewegung/abstimmungstool | voty/initproc/migrations/0036_resistance.py | 1 | 1335 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2019-03-20 17:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('initproc', '0035_new_topic_fields'),
]
operations = [
migrations.CreateModel(
name='Resistance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('changed_at', models.DateTimeField(auto_now=True)),
('value', models.IntegerField()),
('reason', models.CharField(blank=True, max_length=100)),
('contribution', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='resistances', to='initproc.Initiative')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='resistance',
unique_together=set([('user', 'contribution')]),
),
]
| agpl-3.0 |
khalim19/gimp-plugin-export-layers | export_layers/tests/test_exportlayers.py | 1 | 4934 | # -*- coding: utf-8 -*-
#
# This file is part of Export Layers.
#
# Copyright (C) 2013-2019 khalim19 <[email protected]>
#
# Export Layers is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Export Layers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Export Layers. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import mock
import unittest
from gimp import pdb
import gimpenums
from export_layers import pygimplib as pg
from export_layers import builtin_procedures
from export_layers.pygimplib.tests import stubs_gimp
from .. import exportlayers
from .. import operations
from .. import settings_plugin
class TestLayerExporterInitialOperations(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.image = pdb.gimp_image_new(1, 1, gimpenums.RGB)
@classmethod
def tearDownClass(cls):
pdb.gimp_image_delete(cls.image)
def test_add_procedure_added_procedure_is_first_in_execution_list(self):
settings = settings_plugin.create_settings()
settings["special/image"].set_value(self.image)
settings["main/file_extension"].set_value("xcf")
layer_exporter = exportlayers.LayerExporter(
settings["special/run_mode"].value,
settings["special/image"].value,
settings["main"])
operations.add(
settings["main/procedures"],
builtin_procedures.BUILTIN_PROCEDURES["insert_background_layers"])
layer_exporter.add_procedure(
pg.utils.empty_func, [operations.DEFAULT_PROCEDURES_GROUP])
layer_exporter.export(processing_groups=[])
added_operation_items = layer_exporter.operation_executor.list_operations(
group=operations.DEFAULT_PROCEDURES_GROUP)
# Includes built-in procedures added by default
self.assertEqual(len(added_operation_items), 4)
initial_executor = added_operation_items[1]
self.assertIsInstance(initial_executor, pg.operations.OperationExecutor)
operations_in_initial_executor = initial_executor.list_operations(
group=operations.DEFAULT_PROCEDURES_GROUP)
self.assertEqual(len(operations_in_initial_executor), 1)
self.assertEqual(operations_in_initial_executor[0], (pg.utils.empty_func, (), {}))
class TestAddOperationFromSettings(unittest.TestCase):
def setUp(self):
self.executor = pg.operations.OperationExecutor()
self.procedures = operations.create("procedures")
self.procedure_stub = stubs_gimp.PdbProcedureStub(
name="file-png-save",
type_=gimpenums.PLUGIN,
params=(
(gimpenums.PDB_INT32, "run-mode", "The run mode"),
(gimpenums.PDB_INT32ARRAY, "save-options", "Save options"),
(gimpenums.PDB_STRING, "filename", "Filename to save the image in")),
return_vals=None,
blurb="Saves files in PNG file format")
def test_add_operation_from_settings(self):
procedure = operations.add(
self.procedures, builtin_procedures.BUILTIN_PROCEDURES["insert_background_layers"])
exportlayers.add_operation_from_settings(procedure, self.executor)
added_operation_items = self.executor.list_operations(
group=operations.DEFAULT_PROCEDURES_GROUP)
self.assertEqual(len(added_operation_items), 1)
self.assertEqual(added_operation_items[0][1], ("background",))
self.assertEqual(added_operation_items[0][2], {})
def test_add_pdb_proc_as_operation_without_run_mode(self):
self.procedure_stub.params = self.procedure_stub.params[1:]
self._test_add_pdb_proc_as_operation(self.procedure_stub, ((), ""), {})
def test_add_pdb_proc_as_operation_with_run_mode(self):
self._test_add_pdb_proc_as_operation(
self.procedure_stub, ((), ""), {"run_mode": gimpenums.RUN_NONINTERACTIVE})
def _test_add_pdb_proc_as_operation(self, pdb_procedure, expected_args, expected_kwargs):
procedure = operations.add(self.procedures, pdb_procedure)
with mock.patch("export_layers.exportlayers.pdb") as pdb_mock:
pdb_mock.__getitem__.return_value = pdb_procedure
exportlayers.add_operation_from_settings(procedure, self.executor)
added_operation_items = self.executor.list_operations(
group=operations.DEFAULT_PROCEDURES_GROUP)
self.assertEqual(len(added_operation_items), 1)
self.assertEqual(added_operation_items[0][1], expected_args)
self.assertDictEqual(added_operation_items[0][2], expected_kwargs)
| gpl-3.0 |
bkj/ernest | enrich/compute-ownership.py | 2 | 2228 | #!/usr/bin/env python2.7
import argparse
from modules.compute_ownership_graph import COMPUTE_OWNERSHIP
from modules.compute_symbology import TO_SYMBOLOGY
from modules.add_sic_descs import ADD_SIC_DESCRIPTION
from modules.enrich_terminal_nodes import ENRICH_TERMINAL_NODES
from generic.generic_meta_enrich import GENERIC_META_ENRICH
from generic.logger import LOGGER
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='grab_new_filings')
parser.add_argument('--log-file',
type=str,
dest='log_file',
action='store',
required=True)
parser.add_argument('--index',
type=str,
dest='index',
action="store")
parser.add_argument('--date',
type=str,
dest='date',
action="store")
parser.add_argument('--from-scratch',
dest='from_scratch',
action="store_true")
parser.add_argument('--last-week',
dest='last_week',
action="store_true")
parser.add_argument('--expected',
type=str,
dest='expected',
action="store")
parser.add_argument("--config-path",
type=str,
action='store',
default='../config.json')
parser.add_argument('--most-recent',
dest='most_recent',
action="store_true")
args = parser.parse_args()
logger = LOGGER('compute_ownership', args.log_file).create_parent()
cog = COMPUTE_OWNERSHIP(args)
ts = TO_SYMBOLOGY(args, 'compute_ownership')
asd = ADD_SIC_DESCRIPTION(args, 'compute_ownership')
etn = ENRICH_TERMINAL_NODES(args, 'compute_ownership')
gme = GENERIC_META_ENRICH(args, 'compute_ownership')
doc_count = cog.main()
gme.main(doc_count, 'ernest_ownership_cat')
ts.update_symbology('ownership')
asd.main('symbology')
asd.main('ownership')
etn.main('issuer')
etn.main('owner')
| apache-2.0 |
noroutine/ansible | lib/ansible/modules/cloud/openstack/os_recordset.py | 25 | 7951 | #!/usr/bin/python
# Copyright (c) 2016 Hewlett-Packard Enterprise
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_recordset
short_description: Manage OpenStack DNS recordsets
extends_documentation_fragment: openstack
version_added: "2.2"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
description:
- Manage OpenStack DNS recordsets. Recordsets can be created, deleted or
updated. Only the I(records), I(description), and I(ttl) values
can be updated.
options:
zone:
description:
- Zone managing the recordset
required: true
name:
description:
- Name of the recordset
required: true
recordset_type:
description:
- Recordset type
required: true
records:
description:
- List of recordset definitions
required: true
description:
description:
- Description of the recordset
required: false
default: None
ttl:
description:
- TTL (Time To Live) value in seconds
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a recordset named "www.example.net."
- os_recordset:
cloud: mycloud
state: present
zone: example.net.
name: www
recordset_type: primary
records: ['10.1.1.1']
description: test recordset
ttl: 3600
# Update the TTL on existing "www.example.net." recordset
- os_recordset:
cloud: mycloud
state: present
zone: example.net.
name: www
ttl: 7200
# Delete recorset named "www.example.net."
- os_recordset:
cloud: mycloud
state: absent
zone: example.net.
name: www
'''
RETURN = '''
recordset:
description: Dictionary describing the recordset.
returned: On success when I(state) is 'present'.
type: complex
contains:
id:
description: Unique recordset ID
type: string
sample: "c1c530a3-3619-46f3-b0f6-236927b2618c"
name:
description: Recordset name
type: string
sample: "www.example.net."
zone_id:
description: Zone id
type: string
sample: 9508e177-41d8-434e-962c-6fe6ca880af7
type:
description: Recordset type
type: string
sample: "A"
description:
description: Recordset description
type: string
sample: "Test description"
ttl:
description: Zone TTL value
type: int
sample: 3600
records:
description: Recordset records
type: list
sample: ['10.0.0.1']
'''
from distutils.version import StrictVersion
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs
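# _system_state_change() reports whether Ansible has to act: for state=present
# it returns True when the recordset is missing or its records, description or
# ttl differ from the requested values; for state=absent it returns True while
# the recordset still exists. Both check mode and the update path rely on it.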
def _system_state_change(state, records, description, ttl, zone, recordset):
if state == 'present':
if recordset is None:
return True
if records is not None and recordset.records != records:
return True
if description is not None and recordset.description != description:
return True
if ttl is not None and recordset.ttl != ttl:
return True
if state == 'absent' and recordset:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
zone=dict(required=True),
name=dict(required=True),
recordset_type=dict(required=False),
records=dict(required=False, type='list'),
description=dict(required=False, default=None),
ttl=dict(required=False, default=None, type='int'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
required_if=[
('state', 'present',
['recordset_type', 'records'])],
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if StrictVersion(shade.__version__) <= StrictVersion('1.8.0'):
module.fail_json(msg="To utilize this module, the installed version of "
"the shade library MUST be >1.8.0")
zone = module.params.get('zone')
name = module.params.get('name')
state = module.params.get('state')
try:
cloud = shade.openstack_cloud(**module.params)
recordset_type = module.params.get('recordset_type')
recordset_filter = {'type': recordset_type}
recordsets = cloud.search_recordsets(zone, name_or_id=name + '.' + zone, filters=recordset_filter)
if len(recordsets) == 1:
recordset = recordsets[0]
try:
recordset_id = recordset['id']
except KeyError as e:
module.fail_json(msg=str(e))
else:
# recordsets is filtered by type and should never return more than one result
recordset = None
if state == 'present':
records = module.params.get('records')
description = module.params.get('description')
ttl = module.params.get('ttl')
if module.check_mode:
module.exit_json(changed=_system_state_change(state,
records, description,
ttl, zone,
recordset))
if recordset is None:
recordset = cloud.create_recordset(
zone=zone, name=name, recordset_type=recordset_type,
records=records, description=description, ttl=ttl)
changed = True
else:
if records is None:
records = []
pre_update_recordset = recordset
changed = _system_state_change(state, records,
description, ttl,
zone, pre_update_recordset)
if changed:
zone = cloud.update_recordset(
zone, recordset_id,
records=records,
description=description,
ttl=ttl)
module.exit_json(changed=changed, recordset=recordset)
elif state == 'absent':
if module.check_mode:
module.exit_json(changed=_system_state_change(state,
None, None,
None,
None, recordset))
if recordset is None:
changed = False
else:
cloud.delete_recordset(zone, recordset_id)
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
bhansa/fireball | pyvenv/Lib/encodings/cp862.py | 272 | 33370 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP862.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp862',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
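# Usage sketch (not part of the generated module): the maps below make cp862
# behave like any other charmap codec, e.g. byte 0x80 maps to HEBREW LETTER
# ALEF (U+05D0):
#
#     >>> '\u05d0'.encode('cp862')
#     b'\x80'
#     >>> b'\x80'.decode('cp862') == '\u05d0'
#     True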
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x05d0, # HEBREW LETTER ALEF
0x0081: 0x05d1, # HEBREW LETTER BET
0x0082: 0x05d2, # HEBREW LETTER GIMEL
0x0083: 0x05d3, # HEBREW LETTER DALET
0x0084: 0x05d4, # HEBREW LETTER HE
0x0085: 0x05d5, # HEBREW LETTER VAV
0x0086: 0x05d6, # HEBREW LETTER ZAYIN
0x0087: 0x05d7, # HEBREW LETTER HET
0x0088: 0x05d8, # HEBREW LETTER TET
0x0089: 0x05d9, # HEBREW LETTER YOD
0x008a: 0x05da, # HEBREW LETTER FINAL KAF
0x008b: 0x05db, # HEBREW LETTER KAF
0x008c: 0x05dc, # HEBREW LETTER LAMED
0x008d: 0x05dd, # HEBREW LETTER FINAL MEM
0x008e: 0x05de, # HEBREW LETTER MEM
0x008f: 0x05df, # HEBREW LETTER FINAL NUN
0x0090: 0x05e0, # HEBREW LETTER NUN
0x0091: 0x05e1, # HEBREW LETTER SAMEKH
0x0092: 0x05e2, # HEBREW LETTER AYIN
0x0093: 0x05e3, # HEBREW LETTER FINAL PE
0x0094: 0x05e4, # HEBREW LETTER PE
0x0095: 0x05e5, # HEBREW LETTER FINAL TSADI
0x0096: 0x05e6, # HEBREW LETTER TSADI
0x0097: 0x05e7, # HEBREW LETTER QOF
0x0098: 0x05e8, # HEBREW LETTER RESH
0x0099: 0x05e9, # HEBREW LETTER SHIN
0x009a: 0x05ea, # HEBREW LETTER TAV
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00a5, # YEN SIGN
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\u05d0' # 0x0080 -> HEBREW LETTER ALEF
'\u05d1' # 0x0081 -> HEBREW LETTER BET
'\u05d2' # 0x0082 -> HEBREW LETTER GIMEL
'\u05d3' # 0x0083 -> HEBREW LETTER DALET
'\u05d4' # 0x0084 -> HEBREW LETTER HE
'\u05d5' # 0x0085 -> HEBREW LETTER VAV
'\u05d6' # 0x0086 -> HEBREW LETTER ZAYIN
'\u05d7' # 0x0087 -> HEBREW LETTER HET
'\u05d8' # 0x0088 -> HEBREW LETTER TET
'\u05d9' # 0x0089 -> HEBREW LETTER YOD
'\u05da' # 0x008a -> HEBREW LETTER FINAL KAF
'\u05db' # 0x008b -> HEBREW LETTER KAF
'\u05dc' # 0x008c -> HEBREW LETTER LAMED
'\u05dd' # 0x008d -> HEBREW LETTER FINAL MEM
'\u05de' # 0x008e -> HEBREW LETTER MEM
'\u05df' # 0x008f -> HEBREW LETTER FINAL NUN
'\u05e0' # 0x0090 -> HEBREW LETTER NUN
'\u05e1' # 0x0091 -> HEBREW LETTER SAMEKH
'\u05e2' # 0x0092 -> HEBREW LETTER AYIN
'\u05e3' # 0x0093 -> HEBREW LETTER FINAL PE
'\u05e4' # 0x0094 -> HEBREW LETTER PE
'\u05e5' # 0x0095 -> HEBREW LETTER FINAL TSADI
'\u05e6' # 0x0096 -> HEBREW LETTER TSADI
'\u05e7' # 0x0097 -> HEBREW LETTER QOF
'\u05e8' # 0x0098 -> HEBREW LETTER RESH
'\u05e9' # 0x0099 -> HEBREW LETTER SHIN
'\u05ea' # 0x009a -> HEBREW LETTER TAV
'\xa2' # 0x009b -> CENT SIGN
'\xa3' # 0x009c -> POUND SIGN
'\xa5' # 0x009d -> YEN SIGN
'\u20a7' # 0x009e -> PESETA SIGN
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
'\u2310' # 0x00a9 -> REVERSED NOT SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a5: 0x009d, # YEN SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f7: 0x00f6, # DIVISION SIGN
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x05d0: 0x0080, # HEBREW LETTER ALEF
0x05d1: 0x0081, # HEBREW LETTER BET
0x05d2: 0x0082, # HEBREW LETTER GIMEL
0x05d3: 0x0083, # HEBREW LETTER DALET
0x05d4: 0x0084, # HEBREW LETTER HE
0x05d5: 0x0085, # HEBREW LETTER VAV
0x05d6: 0x0086, # HEBREW LETTER ZAYIN
0x05d7: 0x0087, # HEBREW LETTER HET
0x05d8: 0x0088, # HEBREW LETTER TET
0x05d9: 0x0089, # HEBREW LETTER YOD
0x05da: 0x008a, # HEBREW LETTER FINAL KAF
0x05db: 0x008b, # HEBREW LETTER KAF
0x05dc: 0x008c, # HEBREW LETTER LAMED
0x05dd: 0x008d, # HEBREW LETTER FINAL MEM
0x05de: 0x008e, # HEBREW LETTER MEM
0x05df: 0x008f, # HEBREW LETTER FINAL NUN
0x05e0: 0x0090, # HEBREW LETTER NUN
0x05e1: 0x0091, # HEBREW LETTER SAMEKH
0x05e2: 0x0092, # HEBREW LETTER AYIN
0x05e3: 0x0093, # HEBREW LETTER FINAL PE
0x05e4: 0x0094, # HEBREW LETTER PE
0x05e5: 0x0095, # HEBREW LETTER FINAL TSADI
0x05e6: 0x0096, # HEBREW LETTER TSADI
0x05e7: 0x0097, # HEBREW LETTER QOF
0x05e8: 0x0098, # HEBREW LETTER RESH
0x05e9: 0x0099, # HEBREW LETTER SHIN
0x05ea: 0x009a, # HEBREW LETTER TAV
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| gpl-3.0 |
hradec/gaffer | python/GafferDispatchUITest/NodeUITest.py | 4 | 2059 | ##########################################################################
#
# Copyright (c) 2021, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import GafferDispatch
import GafferDispatchUI
import GafferUITest
class NodeUITest( GafferUITest.TestCase ) :
def testLifetimes( self ) :
self.assertNodeUIsHaveExpectedLifetime( GafferDispatch )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
cjqian/incubator-airflow | airflow/operators/sensors.py | 1 | 26221 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from future import standard_library
from airflow.utils.log.logging_mixin import LoggingMixin
standard_library.install_aliases()
from builtins import str
from past.builtins import basestring
from datetime import datetime
from urllib.parse import urlparse
from time import sleep
import re
import sys
from airflow import settings
from airflow.exceptions import AirflowException, AirflowSensorTimeout, AirflowSkipException
from airflow.models import BaseOperator, TaskInstance
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.hdfs_hook import HDFSHook
from airflow.hooks.http_hook import HttpHook
from airflow.utils.state import State
from airflow.utils.db import provide_session
from airflow.utils.decorators import apply_defaults
class BaseSensorOperator(BaseOperator):
'''
    Sensor operators are derived from this class and inherit these attributes.
    Sensor operators keep executing at a time interval and succeed when
    a criterion is met and fail if and when they time out.
:param soft_fail: Set to true to mark the task as SKIPPED on failure
:type soft_fail: bool
:param poke_interval: Time in seconds that the job should wait in
        between each try
:type poke_interval: int
:param timeout: Time, in seconds before the task times out and fails.
:type timeout: int
'''
ui_color = '#e6f1f2'
@apply_defaults
def __init__(
self,
poke_interval=60,
timeout=60*60*24*7,
soft_fail=False,
*args, **kwargs):
super(BaseSensorOperator, self).__init__(*args, **kwargs)
self.poke_interval = poke_interval
self.soft_fail = soft_fail
self.timeout = timeout
def poke(self, context):
'''
Function that the sensors defined while deriving this class should
override.
'''
raise AirflowException('Override me.')
def execute(self, context):
started_at = datetime.utcnow()
while not self.poke(context):
if (datetime.utcnow() - started_at).total_seconds() > self.timeout:
if self.soft_fail:
raise AirflowSkipException('Snap. Time is OUT.')
else:
raise AirflowSensorTimeout('Snap. Time is OUT.')
sleep(self.poke_interval)
self.log.info("Success criteria met. Exiting.")
class SqlSensor(BaseSensorOperator):
"""
    Runs a SQL statement until a criterion is met. It will keep trying while
    the SQL returns no rows, or while the first cell is in (0, '0', '').
:param conn_id: The connection to run the sensor against
:type conn_id: string
:param sql: The sql to run. To pass, it needs to return at least one cell
that contains a non-zero / empty string value.
"""
template_fields = ('sql',)
template_ext = ('.hql', '.sql',)
ui_color = '#7c7287'
@apply_defaults
def __init__(self, conn_id, sql, *args, **kwargs):
self.sql = sql
self.conn_id = conn_id
super(SqlSensor, self).__init__(*args, **kwargs)
def poke(self, context):
hook = BaseHook.get_connection(self.conn_id).get_hook()
self.log.info('Poking: %s', self.sql)
records = hook.get_records(self.sql)
if not records:
return False
else:
if str(records[0][0]) in ('0', '',):
return False
else:
return True
class MetastorePartitionSensor(SqlSensor):
"""
    An alternative to the HivePartitionSensor that talks directly to the
    MySQL db. This was created as a result of observing suboptimal
queries generated by the Metastore thrift service when hitting
subpartitioned tables. The Thrift service's queries were written in a
way that wouldn't leverage the indexes.
:param schema: the schema
:type schema: str
:param table: the table
:type table: str
:param partition_name: the partition name, as defined in the PARTITIONS
table of the Metastore. Order of the fields does matter.
Examples: ``ds=2016-01-01`` or
``ds=2016-01-01/sub=foo`` for a sub partitioned table
:type partition_name: str
:param mysql_conn_id: a reference to the MySQL conn_id for the metastore
:type mysql_conn_id: str
"""
template_fields = ('partition_name', 'table', 'schema')
ui_color = '#8da7be'
@apply_defaults
def __init__(
self, table, partition_name, schema="default",
mysql_conn_id="metastore_mysql",
*args, **kwargs):
self.partition_name = partition_name
self.table = table
self.schema = schema
self.first_poke = True
self.conn_id = mysql_conn_id
# TODO(aoen): We shouldn't be using SqlSensor here but MetastorePartitionSensor.
# The problem is the way apply_defaults works isn't compatible with inheritance.
# The inheritance model needs to be reworked in order to support overriding args/
# kwargs with arguments here, then 'conn_id' and 'sql' can be passed into the
# constructor below and apply_defaults will no longer throw an exception.
super(SqlSensor, self).__init__(*args, **kwargs)
def poke(self, context):
if self.first_poke:
self.first_poke = False
if '.' in self.table:
self.schema, self.table = self.table.split('.')
self.sql = """
SELECT 'X'
FROM PARTITIONS A0
LEFT OUTER JOIN TBLS B0 ON A0.TBL_ID = B0.TBL_ID
LEFT OUTER JOIN DBS C0 ON B0.DB_ID = C0.DB_ID
WHERE
B0.TBL_NAME = '{self.table}' AND
C0.NAME = '{self.schema}' AND
A0.PART_NAME = '{self.partition_name}';
""".format(self=self)
return super(MetastorePartitionSensor, self).poke(context)
class ExternalTaskSensor(BaseSensorOperator):
"""
Waits for a task to complete in a different DAG
:param external_dag_id: The dag_id that contains the task you want to
wait for
:type external_dag_id: string
:param external_task_id: The task_id that contains the task you want to
wait for
:type external_task_id: string
:param allowed_states: list of allowed states, default is ``['success']``
:type allowed_states: list
:param execution_delta: time difference with the previous execution to
look at, the default is the same execution_date as the current task.
For yesterday, use [positive!] datetime.timedelta(days=1). Either
execution_delta or execution_date_fn can be passed to
ExternalTaskSensor, but not both.
:type execution_delta: datetime.timedelta
:param execution_date_fn: function that receives the current execution date
and returns the desired execution dates to query. Either execution_delta
or execution_date_fn can be passed to ExternalTaskSensor, but not both.
:type execution_date_fn: callable
"""
ui_color = '#19647e'
@apply_defaults
def __init__(
self,
external_dag_id,
external_task_id,
allowed_states=None,
execution_delta=None,
execution_date_fn=None,
*args, **kwargs):
super(ExternalTaskSensor, self).__init__(*args, **kwargs)
self.allowed_states = allowed_states or [State.SUCCESS]
if execution_delta is not None and execution_date_fn is not None:
raise ValueError(
                'Only one of `execution_delta` or `execution_date_fn` may '
                'be provided to ExternalTaskSensor; not both.')
self.execution_delta = execution_delta
self.execution_date_fn = execution_date_fn
self.external_dag_id = external_dag_id
self.external_task_id = external_task_id
@provide_session
def poke(self, context, session=None):
if self.execution_delta:
dttm = context['execution_date'] - self.execution_delta
elif self.execution_date_fn:
dttm = self.execution_date_fn(context['execution_date'])
else:
dttm = context['execution_date']
dttm_filter = dttm if isinstance(dttm, list) else [dttm]
serialized_dttm_filter = ','.join(
[datetime.isoformat() for datetime in dttm_filter])
self.log.info(
'Poking for '
'{self.external_dag_id}.'
'{self.external_task_id} on '
'{} ... '.format(serialized_dttm_filter, **locals()))
TI = TaskInstance
count = session.query(TI).filter(
TI.dag_id == self.external_dag_id,
TI.task_id == self.external_task_id,
TI.state.in_(self.allowed_states),
TI.execution_date.in_(dttm_filter),
).count()
session.commit()
return count == len(dttm_filter)
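# Hedged usage sketch (comment only; the dag/task ids and surrounding DAG are
# hypothetical): wait for yesterday's run of a task in another DAG.
#
#     wait_upstream = ExternalTaskSensor(
#         task_id='wait_upstream',
#         external_dag_id='upstream_dag',
#         external_task_id='final_task',
#         execution_delta=timedelta(days=1),  # from datetime import timedelta
#         dag=dag)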
class NamedHivePartitionSensor(BaseSensorOperator):
"""
Waits for a set of partitions to show up in Hive.
:param partition_names: List of fully qualified names of the
partitions to wait for. A fully qualified name is of the
form ``schema.table/pk1=pv1/pk2=pv2``, for example,
default.users/ds=2016-01-01. This is passed as is to the metastore
Thrift client ``get_partitions_by_name`` method. Note that
you cannot use logical or comparison operators as in
HivePartitionSensor.
:type partition_names: list of strings
:param metastore_conn_id: reference to the metastore thrift service
connection id
:type metastore_conn_id: str
"""
template_fields = ('partition_names', )
ui_color = '#8d99ae'
@apply_defaults
def __init__(
self,
partition_names,
metastore_conn_id='metastore_default',
poke_interval=60 * 3,
*args,
**kwargs):
super(NamedHivePartitionSensor, self).__init__(
poke_interval=poke_interval, *args, **kwargs)
if isinstance(partition_names, basestring):
raise TypeError('partition_names must be an array of strings')
self.metastore_conn_id = metastore_conn_id
self.partition_names = partition_names
self.next_poke_idx = 0
@classmethod
def parse_partition_name(self, partition):
try:
schema, table_partition = partition.split('.', 1)
table, partition = table_partition.split('/', 1)
return schema, table, partition
except ValueError as e:
raise ValueError('Could not parse ' + partition)
def poke(self, context):
if not hasattr(self, 'hook'):
from airflow.hooks.hive_hooks import HiveMetastoreHook
self.hook = HiveMetastoreHook(
metastore_conn_id=self.metastore_conn_id)
def poke_partition(partition):
schema, table, partition = self.parse_partition_name(partition)
self.log.info(
'Poking for {schema}.{table}/{partition}'.format(**locals())
)
return self.hook.check_for_named_partition(
schema, table, partition)
while self.next_poke_idx < len(self.partition_names):
if poke_partition(self.partition_names[self.next_poke_idx]):
self.next_poke_idx += 1
else:
return False
return True
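# Hedged usage sketch (comment only; names are hypothetical): partition names
# follow the fully qualified form described in the docstring above.
#
#     wait_partitions = NamedHivePartitionSensor(
#         task_id='wait_partitions',
#         partition_names=['default.users/ds={{ ds }}',
#                          'default.events/ds={{ ds }}/region=eu'],
#         dag=dag)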
class HivePartitionSensor(BaseSensorOperator):
"""
Waits for a partition to show up in Hive.
Note: Because ``partition`` supports general logical operators, it
can be inefficient. Consider using NamedHivePartitionSensor instead if
you don't need the full flexibility of HivePartitionSensor.
:param table: The name of the table to wait for, supports the dot
notation (my_database.my_table)
:type table: string
:param partition: The partition clause to wait for. This is passed as
is to the metastore Thrift client ``get_partitions_by_filter`` method,
and apparently supports SQL like notation as in ``ds='2015-01-01'
AND type='value'`` and comparison operators as in ``"ds>=2015-01-01"``
:type partition: string
:param metastore_conn_id: reference to the metastore thrift service
connection id
:type metastore_conn_id: str
"""
template_fields = ('schema', 'table', 'partition',)
ui_color = '#C5CAE9'
@apply_defaults
def __init__(
self,
table, partition="ds='{{ ds }}'",
metastore_conn_id='metastore_default',
schema='default',
poke_interval=60*3,
*args, **kwargs):
super(HivePartitionSensor, self).__init__(
poke_interval=poke_interval, *args, **kwargs)
if not partition:
partition = "ds='{{ ds }}'"
self.metastore_conn_id = metastore_conn_id
self.table = table
self.partition = partition
self.schema = schema
def poke(self, context):
if '.' in self.table:
self.schema, self.table = self.table.split('.')
self.log.info(
'Poking for table {self.schema}.{self.table}, '
'partition {self.partition}'.format(**locals()))
if not hasattr(self, 'hook'):
from airflow.hooks.hive_hooks import HiveMetastoreHook
self.hook = HiveMetastoreHook(
metastore_conn_id=self.metastore_conn_id)
return self.hook.check_for_partition(
self.schema, self.table, self.partition)
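# Hedged usage sketch (comment only; table and filter are hypothetical): the
# partition clause may use comparison and logical operators, unlike the named
# variant above.
#
#     wait_hive = HivePartitionSensor(
#         task_id='wait_hive',
#         table='my_database.my_table',
#         partition="ds>='{{ ds }}' AND type='value'",
#         dag=dag)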
class HdfsSensor(BaseSensorOperator):
"""
Waits for a file or folder to land in HDFS
"""
template_fields = ('filepath',)
ui_color = settings.WEB_COLORS['LIGHTBLUE']
@apply_defaults
def __init__(
self,
filepath,
hdfs_conn_id='hdfs_default',
ignored_ext=['_COPYING_'],
ignore_copying=True,
file_size=None,
hook=HDFSHook,
*args, **kwargs):
super(HdfsSensor, self).__init__(*args, **kwargs)
self.filepath = filepath
self.hdfs_conn_id = hdfs_conn_id
self.file_size = file_size
self.ignored_ext = ignored_ext
self.ignore_copying = ignore_copying
self.hook = hook
@staticmethod
def filter_for_filesize(result, size=None):
"""
        Will test the filepath result and check whether its size is at least self.filesize
:param result: a list of dicts returned by Snakebite ls
:param size: the file size in MB a file should be at least to trigger True
:return: (bool) depending on the matching criteria
"""
if size:
log = LoggingMixin().log
log.debug('Filtering for file size >= %s in files: %s', size, map(lambda x: x['path'], result))
size *= settings.MEGABYTE
result = [x for x in result if x['length'] >= size]
log.debug('HdfsSensor.poke: after size filter result is %s', result)
return result
@staticmethod
def filter_for_ignored_ext(result, ignored_ext, ignore_copying):
"""
        Will filter the result, if instructed to do so, to remove entries matching the ignored criteria
:param result: (list) of dicts returned by Snakebite ls
:param ignored_ext: (list) of ignored extensions
        :param ignore_copying: (bool) whether files still being copied should be ignored
:return: (list) of dicts which were not removed
"""
if ignore_copying:
log = LoggingMixin().log
regex_builder = "^.*\.(%s$)$" % '$|'.join(ignored_ext)
ignored_extentions_regex = re.compile(regex_builder)
log.debug(
'Filtering result for ignored extensions: %s in files %s',
ignored_extentions_regex.pattern, map(lambda x: x['path'], result)
)
result = [x for x in result if not ignored_extentions_regex.match(x['path'])]
log.debug('HdfsSensor.poke: after ext filter result is %s', result)
return result
def poke(self, context):
sb = self.hook(self.hdfs_conn_id).get_conn()
self.log.info('Poking for file {self.filepath}'.format(**locals()))
try:
            # IMO it's not right here, as there is no raise of any kind.
# if the filepath is let's say '/data/mydirectory', it's correct but if it is '/data/mydirectory/*',
# it's not correct as the directory exists and sb does not raise any error
# here is a quick fix
result = [f for f in sb.ls([self.filepath], include_toplevel=False)]
self.log.debug('HdfsSensor.poke: result is %s', result)
result = self.filter_for_ignored_ext(result, self.ignored_ext, self.ignore_copying)
result = self.filter_for_filesize(result, self.file_size)
return bool(result)
except:
e = sys.exc_info()
self.log.debug("Caught an exception !: %s", str(e))
return False
class WebHdfsSensor(BaseSensorOperator):
"""
Waits for a file or folder to land in HDFS
"""
template_fields = ('filepath',)
@apply_defaults
def __init__(
self,
filepath,
webhdfs_conn_id='webhdfs_default',
*args, **kwargs):
super(WebHdfsSensor, self).__init__(*args, **kwargs)
self.filepath = filepath
self.webhdfs_conn_id = webhdfs_conn_id
def poke(self, context):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(self.webhdfs_conn_id)
self.log.info('Poking for file {self.filepath}'.format(**locals()))
return c.check_for_path(hdfs_path=self.filepath)
class S3KeySensor(BaseSensorOperator):
"""
    Waits for a key (a file-like instance on S3) to be present in an S3 bucket.
    S3 being a key/value store, it does not support folders. The path is just
    a key to a resource.
:param bucket_key: The key being waited on. Supports full s3:// style url
or relative path from root level.
:type bucket_key: str
:param bucket_name: Name of the S3 bucket
:type bucket_name: str
:param wildcard_match: whether the bucket_key should be interpreted as a
Unix wildcard pattern
:type wildcard_match: bool
:param aws_conn_id: a reference to the s3 connection
:type aws_conn_id: str
"""
template_fields = ('bucket_key', 'bucket_name')
@apply_defaults
def __init__(
self, bucket_key,
bucket_name=None,
wildcard_match=False,
aws_conn_id='aws_default',
*args, **kwargs):
super(S3KeySensor, self).__init__(*args, **kwargs)
# Parse
if bucket_name is None:
parsed_url = urlparse(bucket_key)
if parsed_url.netloc == '':
raise AirflowException('Please provide a bucket_name')
else:
bucket_name = parsed_url.netloc
if parsed_url.path[0] == '/':
bucket_key = parsed_url.path[1:]
else:
bucket_key = parsed_url.path
self.bucket_name = bucket_name
self.bucket_key = bucket_key
self.wildcard_match = wildcard_match
self.aws_conn_id = aws_conn_id
def poke(self, context):
from airflow.hooks.S3_hook import S3Hook
hook = S3Hook(aws_conn_id=self.aws_conn_id)
full_url = "s3://" + self.bucket_name + "/" + self.bucket_key
self.log.info('Poking for key : {full_url}'.format(**locals()))
if self.wildcard_match:
return hook.check_for_wildcard_key(self.bucket_key,
self.bucket_name)
else:
return hook.check_for_key(self.bucket_key, self.bucket_name)
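# Hedged usage sketch (comment only; bucket and keys are hypothetical): the
# constructor accepts either a full s3:// url or a bucket_name plus a relative
# key, as parsed in __init__ above.
#
#     S3KeySensor(task_id='wait_key',
#                 bucket_key='s3://my-bucket/data/file.csv', dag=dag)
#     S3KeySensor(task_id='wait_glob', bucket_name='my-bucket',
#                 bucket_key='data/*.csv', wildcard_match=True, dag=dag)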
class S3PrefixSensor(BaseSensorOperator):
"""
Waits for a prefix to exist. A prefix is the first part of a key,
thus enabling checking of constructs similar to glob airfl* or
    SQL LIKE 'airfl%'. There is the possibility to specify a delimiter to
    indicate the hierarchy of keys, meaning that the match will stop at that
delimiter. Current code accepts sane delimiters, i.e. characters that
are NOT special characters in the Python regex engine.
:param bucket_name: Name of the S3 bucket
:type bucket_name: str
:param prefix: The prefix being waited on. Relative path from bucket root level.
:type prefix: str
:param delimiter: The delimiter intended to show hierarchy.
Defaults to '/'.
:type delimiter: str
"""
template_fields = ('prefix', 'bucket_name')
@apply_defaults
def __init__(
self, bucket_name,
prefix, delimiter='/',
aws_conn_id='aws_default',
*args, **kwargs):
super(S3PrefixSensor, self).__init__(*args, **kwargs)
# Parse
self.bucket_name = bucket_name
self.prefix = prefix
self.delimiter = delimiter
self.full_url = "s3://" + bucket_name + '/' + prefix
self.aws_conn_id = aws_conn_id
def poke(self, context):
self.log.info('Poking for prefix : {self.prefix}\n'
'in bucket s3://{self.bucket_name}'.format(**locals()))
from airflow.hooks.S3_hook import S3Hook
hook = S3Hook(aws_conn_id=self.aws_conn_id)
return hook.check_for_prefix(
prefix=self.prefix,
delimiter=self.delimiter,
bucket_name=self.bucket_name)
class TimeSensor(BaseSensorOperator):
"""
Waits until the specified time of the day.
:param target_time: time after which the job succeeds
:type target_time: datetime.time
"""
template_fields = tuple()
@apply_defaults
def __init__(self, target_time, *args, **kwargs):
super(TimeSensor, self).__init__(*args, **kwargs)
self.target_time = target_time
def poke(self, context):
self.log.info('Checking if the time (%s) has come', self.target_time)
return datetime.utcnow().time() > self.target_time
class TimeDeltaSensor(BaseSensorOperator):
"""
Waits for a timedelta after the task's execution_date + schedule_interval.
In Airflow, the daily task stamped with ``execution_date``
2016-01-01 can only start running on 2016-01-02. The timedelta here
represents the time after the execution period has closed.
:param delta: time length to wait after execution_date before succeeding
:type delta: datetime.timedelta
"""
template_fields = tuple()
@apply_defaults
def __init__(self, delta, *args, **kwargs):
super(TimeDeltaSensor, self).__init__(*args, **kwargs)
self.delta = delta
def poke(self, context):
dag = context['dag']
target_dttm = dag.following_schedule(context['execution_date'])
target_dttm += self.delta
self.log.info('Checking if the time (%s) has come', target_dttm)
return datetime.utcnow() > target_dttm
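# Worked example of the docstring above (comment only, dates illustrative):
# for a @daily DAG run stamped 2016-01-01 and delta=timedelta(hours=2), the
# following schedule is 2016-01-02 00:00, so the sensor succeeds once the
# clock passes 2016-01-02 02:00.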
class HttpSensor(BaseSensorOperator):
"""
    Executes an HTTP GET request and returns False on failure:
        404 Not Found or the response_check function returned False
:param http_conn_id: The connection to run the sensor against
:type http_conn_id: string
:param method: The HTTP request method to use
:type method: string
:param endpoint: The relative part of the full url
:type endpoint: string
:param request_params: The parameters to be added to the GET url
:type request_params: a dictionary of string key/value pairs
:param headers: The HTTP headers to be added to the GET request
:type headers: a dictionary of string key/value pairs
:param response_check: A check against the 'requests' response object.
Returns True for 'pass' and False otherwise.
:type response_check: A lambda or defined function.
:param extra_options: Extra options for the 'requests' library, see the
'requests' documentation (options to modify timeout, ssl, etc.)
:type extra_options: A dictionary of options, where key is string and value
depends on the option that's being modified.
"""
template_fields = ('endpoint', 'request_params')
@apply_defaults
def __init__(self,
endpoint,
http_conn_id='http_default',
method='GET',
request_params=None,
headers=None,
response_check=None,
extra_options=None, *args, **kwargs):
super(HttpSensor, self).__init__(*args, **kwargs)
self.endpoint = endpoint
self.http_conn_id = http_conn_id
self.request_params = request_params or {}
self.headers = headers or {}
self.extra_options = extra_options or {}
self.response_check = response_check
self.hook = HttpHook(
method=method,
http_conn_id=http_conn_id)
def poke(self, context):
self.log.info('Poking: %s', self.endpoint)
try:
response = self.hook.run(self.endpoint,
data=self.request_params,
headers=self.headers,
extra_options=self.extra_options)
if self.response_check:
# run content check on response
return self.response_check(response)
except AirflowException as ae:
if str(ae).startswith("404"):
return False
raise ae
return True
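# Hedged usage sketch (comment only; endpoint and check are hypothetical):
# response_check receives the `requests` response object and must return a
# bool.
#
#     HttpSensor(task_id='wait_api',
#                http_conn_id='http_default',
#                endpoint='api/v1/status',
#                response_check=lambda response: 'ready' in response.text,
#                dag=dag)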
| apache-2.0 |
chenzw95/Discord-Selfbot | cogs/imagedump.py | 4 | 23084 | import datetime
import asyncio
import re
import sys
import subprocess
import json
import time
import os
from datetime import datetime
from discord.ext import commands
from cogs.utils.checks import cmd_prefix_len
'''Cog for mass-downloading images from Discord channels'''
class Imagedump:
def __init__(self, bot):
self.bot = bot
def check_images(self, message, images, type_of_items):
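        # Yields the urls of image/gif/webm items found in the message's
        # attachments, embeds and raw content, skipping urls already present
        # in `images` and keeping only the extensions listed in
        # `type_of_items`.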
if message.attachments:
yield from (item.url for item in message.attachments if item.url != '' and item.url not in images
for i in type_of_items if item.url.lower().endswith(i.strip()))
if message.embeds:
for embed in message.embeds:
data = embed.to_dict()
try:
url = data['image']['url']
except KeyError:
try:
url = data['thumbnail']['url']
except KeyError:
continue
if (url.lower().endswith(('.jpg', '.jpeg', '.png', '.gif', '.gifv', '.webm'))
or data['type'] in {'jpg', 'jpeg', 'png', 'gif', 'gifv', 'webm', 'image'}) and url not in images:
for i in type_of_items:
if url.lower().endswith(i.strip()):
yield url
urls = []
try:
urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', message.content)
except:
pass
if urls is not []:
yield from (url for url in urls
if url.lower().endswith(('.jpg', '.jpeg', '.png', '.gif', '.gifv', '.webm')) and url not in images
for i in type_of_items if url.lower().endswith(i.strip()))
@commands.group(pass_context=True)
async def imagedump(self, ctx):
"""Mass downloads images from a channel. [p]help imagedump for info.
----Simple----
[p]imagedump <n> - checks the last <n> messages in this chat and downloads all the images/gifs/webms found.
----More options----
Example: I want a new wallpaper. I'll check the last 5000 messages in this channel and download 100 items with type .png that fit on my 16:9 monitor with dimensions 1920x1080. This is what I would do:
[p]imagedump 5000 | items=100 | type=png | ratio=16:9 | dim=1920x1080
----
General Syntax (only include the options you want):
[p]imagedump <n> | items=<m> | before=YYYY-MM-DD | after=YYYY-MM-DD | dim=WidthxHeight | ratio=Width:Height | type=<type_of_item> | channel=<id> | user=<id> - add any one or more of these to the command to furthur specify your requirements to find items.
- items=<m> - when checking the last <n> messages, only download <m> items max.
- before=YYYY-MM-DD - check <n> messages before this date. Ex: before=2017-02-16
- after=YYYY-MM-DD - check <n> messages after this date.
- dim=WidthxHeight - only download items with these dimensions. Ex: dim=1920x1080 Optionally, do dim>=WidthxHeight for images greater than or equal and dim<=WidthxHeight for less than or equal to these dimensions.
- ratio=Width:Height - only download items with these ratios. Ex: ratio=16:9
- type=<type_of_item> - only download these types of files. Ex: type=png or type=gif, webm All options: jpg, png, gif (includes gifv), webm.
- channel=<id> - download from a different channel (can be a from a different server). Enable developer mode, right click on the channel name, and hit copy id to get the id. Ex: channel=299293492645986307
- user=<id> - download only items posted by this user. Enable developer mode, right click on user, copy id to get their id. Ex: user=124910128582361092
"""
if ctx.invoked_subcommand is None:
pre = cmd_prefix_len()
error = 'Invalid syntax. ``>imagedump <n>`` where n is the number of messages to search in this channel. Ex: ``>imagedump 100``\n``>imagedump dir path/to/directory`` if you want to change where images are saved.'
if ctx.message.content[9 + pre:].strip():
finished_dls = os.listdir('cogs/utils/')
finished = []
for i in finished_dls:
if i.startswith('finished'):
finished.append(i)
for i in finished:
os.remove('cogs/utils/{}'.format(i))
if ctx.message.content[pre + 10] == 's':
silent = True
msg = ctx.message.content[11 + pre:].strip()
else:
silent = False
msg = ctx.message.content[9 + pre:].strip()
before = after = limit_images = user = None
type_of_items = ['jpg', 'jpeg', 'png', 'gif', 'gifv', 'webm']
x = y = dimx = dimy = 'None'
fixed = 'no'
before_msg = after_msg = limit_images_msg = type_of_items_msg = dimensions_msg = ratio_msg = channel_msg = user_msg = ''
simple = True
channel = ctx.message.channel
if ' | ' not in msg:
if msg.isdigit():
limit = int(msg) + 1
else:
return await ctx.send(self.bot.bot_prefix + error)
else:
simple = False
msg = msg.split(' | ')
if msg[0].strip().isdigit():
limit = int(msg[0].strip()) + 1
else:
return await ctx.send(self.bot.bot_prefix + error)
for i in msg:
if i.strip().lower().startswith('items='):
limit_images = i.strip()[6:].strip()
if limit_images.isdigit():
limit_images_msg = 'Up to {} items. '.format(limit_images)
limit_images = int(limit_images)
else:
return await ctx.send(self.bot.bot_prefix + 'Invalid Syntax. ``items=`` should be the number of images. Ex: ``>imagedump 500 | items=10``')
if i.strip().lower().startswith('dim='):
dimensions = i.strip()[4:].strip()
if 'x' not in dimensions:
return await ctx.send(self.bot.bot_prefix + 'Invalid Syntax. ``dim=`` should be the dimensions of the image in the form WidthxHeight. Ex: ``>imagedump 500 | dim=1920x1080``')
x, y = dimensions.split('x')
if not x.strip().isdigit() or not y.strip().isdigit():
return await ctx.send(self.bot.bot_prefix + 'Invalid Syntax. ``dim=`` should be the dimensions of the image in the form WidthxHeight. Ex: ``>imagedump 500 | dim=1920x1080``')
else:
x, y = x.strip(), y.strip()
fixed = 'yes'
dimensions_msg = 'Dimensions: {}. '.format(dimensions)
if i.strip().lower().startswith('dim>='):
dimensions = i.strip()[5:].strip()
if 'x' not in dimensions:
return await ctx.send(self.bot.bot_prefix + 'Invalid Syntax. ``dim>=`` should be the dimensions of the image in the form WidthxHeight. Ex: ``>imagedump 500 | dim>=1920x1080``')
x, y = dimensions.split('x')
if not x.strip().isdigit() or not y.strip().isdigit():
return await ctx.send(self.bot.bot_prefix + 'Invalid Syntax. ``dim>=`` should be the dimensions of the image in the form WidthxHeight. Ex: ``>imagedump 500 | dim>=1920x1080``')
else:
x, y = x.strip(), y.strip()
fixed = 'more'
dimensions_msg = 'Dimensions: {} or larger. '.format(dimensions)
if i.strip().lower().startswith('dim<='):
dimensions = i.strip()[5:].strip()
if 'x' not in dimensions:
return await ctx.send(self.bot.bot_prefix + 'Invalid Syntax. ``dim<=`` should be the dimensions of the image in the form WidthxHeight. Ex: ``>imagedump 500 | dim<=1920x1080``')
x, y = dimensions.split('x')
if not x.strip().isdigit() or not y.strip().isdigit():
return await ctx.send(self.bot.bot_prefix + 'Invalid Syntax. ``dim<=`` should be the dimensions of the image in the form WidthxHeight. Ex: ``>imagedump 500 | dim<=1920x1080``')
else:
x, y = x.strip(), y.strip()
fixed = 'less'
dimensions_msg = 'Dimensions: {} or smaller. '.format(dimensions)
if i.strip().lower().startswith('ratio='):
ratio = i.strip()[6:].strip()
if ':' not in ratio:
return await ctx.send(self.bot.bot_prefix + 'Invalid Syntax. ``ratio=`` should be the ratio of the image in the form w:h. Ex: ``>imagedump 500 | ratio=16:9``')
dimx, dimy = ratio.split(':')
if not dimx.strip().isdigit() or not dimy.strip().isdigit():
return await ctx.send(self.bot.bot_prefix + 'Invalid Syntax. ``ratio=`` should be the ratio of the image in the form w:h. Ex: ``>imagedump 500 | ratio=16:9``')
else:
dimx, dimy = dimx.strip(), dimy.strip()
ratio_msg = 'Ratio: {}.'.format(ratio)
if i.strip().lower().startswith('before='):
try:
date = i.strip()[7:].strip()
before = datetime.strptime(date, '%Y-%m-%d')
before_msg = 'Before: {} '.format(date)
except:
return await ctx.send(self.bot.bot_prefix + 'Invalid Syntax. ``before=`` should be a date in the format YYYY-MM-DD. Ex: ``>imagedump 500 | before=2017-02-15``')
if i.strip().lower().startswith('after='):
try:
date = i.strip()[6:].strip()
after = datetime.strptime(date, '%Y-%m-%d')
after_msg = 'After: {} '.format(date)
except:
return await ctx.send(self.bot.bot_prefix + 'Invalid Syntax. ``after=`` should be a date in the format YYYY-MM-DD. Ex: ``>imagedump 500 | after=2017-02-15``')
if i.strip().lower().startswith('type='):
type = i.strip()[5:].strip()
if ',' in type:
type_of_items = type.split(',')
else:
type_of_items = [type]
for i in type_of_items:
if 'png' in i or 'jpg' in i or 'gif' in i or 'webm' in i:
pass
else:
                                    return await ctx.send(self.bot.bot_prefix + 'Invalid Syntax. ``type=`` should be the type(s) of items to download. Ex: ``>imagedump 500 | type=png`` or ``>imagedump 500 | type=png, gif``')
if 'jpg' in type_of_items or '.jpg' in type_of_items:
type_of_items.append('.jpeg')
type_of_items_msg = 'Types: {} '.format(type)
if i.strip().lower().startswith('channel='):
channel = i.strip()[8:].strip()
channel = self.bot.get_channel(int(channel))
if not channel:
return await ctx.send(self.bot.bot_prefix + 'Channel not found. Are you using the right syntax? ``channel=`` should be the channel id. '
'Ex: ``>imagedump 500 | channel=299431230984683520``')
limit -= 1
channel_msg = 'Channel: {} '.format(channel.name)
if i.strip().lower().startswith('user='):
user_id = i.strip()[5:].strip()
for j in self.bot.guilds:
user = j.get_member(int(user_id))
if user:
break
if not user:
return await ctx.send(self.bot.bot_prefix + 'User not found. Are you using the right syntax? ``user=`` should be the user\'s id. '
'Ex: ``>imagedump 500 | user=124910128582361092``')
user_msg = 'User: {}'.format(user.name)
await ctx.message.delete()
with open('settings/optional_config.json', 'r+') as fp:
opt = json.load(fp)
if 'image_dump_delay' not in opt:
opt['image_dump_delay'] = "0"
fp.seek(0)
fp.truncate()
json.dump(opt, fp, indent=4)
if 'image_dump_location' not in opt:
path = ''
else:
path = opt['image_dump_location']
if not os.path.exists('{}image_dump'.format(path)):
os.makedirs('{}image_dump'.format(path))
try:
new_dump = time.strftime("%Y-%m-%dT%H_%M_%S_") + channel.name + '_' + channel.guild.name
except:
new_dump = time.strftime("%Y-%m-%dT%H_%M_%S_")
new_dump = "".join([x if x.isalnum() else "_" for x in new_dump])
new_dump.replace('/', '_')
os.makedirs('{}image_dump/{}'.format(path, new_dump))
if not silent:
which_channel = 'in this channel...'
if ctx.message.channel != channel:
which_channel = 'in channel ``{}``'.format(channel.name)
if not simple:
params = 'Parameters: ``{}{}{}{}{}{}{}{}``'.format(limit_images_msg, before_msg, after_msg, dimensions_msg, ratio_msg, type_of_items_msg, channel_msg, user_msg)
else:
params = ''
await ctx.send(self.bot.bot_prefix + 'Downloading all images/gifs/webms from the last {} messages {}\nSaving to ``image_dump/{}`` Check console for progress.\n{}'.format(str(limit-1), which_channel, new_dump, params))
start = time.time()
images = []
if limit > 100000:
print('Fetching last %s messages (this may take a few minutes)...' % str(limit - 1))
else:
print('Fetching last %s messages...' % str(limit-1))
async for message in channel.history(limit=limit, before=before, after=after):
if message.author == user or not user:
for url in self.check_images(message, images, type_of_items):
if url:
images.append(url)
if len(images) == limit_images:
break
with open('cogs/utils/urls{}.txt'.format(new_dump), 'w') as fp:
for url in images:
fp.write(url + '\n')
args = [sys.executable, 'cogs/utils/image_dump.py', path, new_dump, opt['image_dump_delay'], x, y, dimx, dimy, fixed]
p = subprocess.Popen(args)
self.bot.imagedumps.append(p)
while p.poll() is None:
await asyncio.sleep(1)
if os.path.exists('cogs/utils/paused{}.txt'.format(new_dump)):
return
try:
with open('cogs/utils/finished{}.txt'.format(new_dump), 'r') as fp:
stop = float(fp.readline())
total = fp.readline()
failures = fp.readline()
size = fp.readline()
except:
return print('Something went wrong when saving items and the download was stopped. Error posted above.')
try:
os.remove('cogs/utils/finished{}.txt'.format(new_dump))
except:
pass
if int(failures) != 0:
if not silent:
await ctx.send(self.bot.bot_prefix + 'Done! ``{}`` items downloaded. ``{}`` However, ``{}`` items failed to download. Check your console for more info on which ones were missed. '
'Finished in: ``{} seconds.``'.format(str(total), size, str(failures), str(round(stop - start, 2))))
else:
print('{} items failed to download. See above for missed links. '
'Finished in: {} seconds.'.format(str(failures), str(round(stop - start, 2))))
else:
if not silent:
await ctx.send(self.bot.bot_prefix + 'Done! ``{}`` items downloaded. ``{}`` Finished in: ``{} seconds.``'.format(str(total), size, str(round(stop-start, 2))))
else:
print('Finished in: {} seconds'.format(str(round(stop-start, 2))))
else:
await ctx.send(self.bot.bot_prefix + 'Invalid syntax. ``>imagedump <n>`` where n is the number of messages to search in this channel. '
'Ex: ``>imagedump 100``\n``>imagedump dir path/to/directory`` if you want to change where images are saved.')
@imagedump.command(pass_context=True)
async def dir(self, ctx, *, msg: str = None):
"""Set directory to save to. Ex: [p]imagedump dir C:/Users/Bill/Desktop"""
if msg:
msg = msg.strip() if msg.strip().endswith('/') else msg.strip() + '/'
if os.path.exists(msg):
if not os.path.exists('{}image_dump'.format(msg)):
os.makedirs('{}image_dump'.format(msg))
with open('settings/optional_config.json', 'r+') as fp:
opt = json.load(fp)
opt['image_dump_location'] = msg
fp.seek(0)
fp.truncate()
json.dump(opt, fp, indent=4)
await ctx.send(self.bot.bot_prefix + 'Successfully set path. Images will be saved to: ``{}image_dump/``'.format(msg))
else:
await ctx.send(self.bot.bot_prefix + 'Invalid path. Try again. Example: ``>imagedump dir C:/Users/Bill/Desktop``')
else:
with open('settings/optional_config.json', 'r') as fp:
opt = json.load(fp)
if 'image_dump_location' not in opt:
path = os.path.abspath("settings")[:-8] + 'image_dump'
else:
path = opt['image_dump_location'] + 'image_dump'
await ctx.send(self.bot.bot_prefix + 'Current imagedump download location: ``{}``'.format(path.replace('\\', '/')))
@imagedump.command(pass_context=True, aliases=['stop'])
async def cancel(self, ctx):
"""Cancel ongoing imagedump downloads."""
for i in self.bot.imagedumps:
i.kill()
self.bot.imagedumps = []
if os.path.exists('pause.txt'):
os.remove('pause.txt')
paused_dls = os.listdir('cogs/utils/')
for i in paused_dls:
if i.startswith('paused') or i.startswith('urls'):
os.remove('cogs/utils/{}'.format(i))
await ctx.send(self.bot.bot_prefix + 'Cancelled all imagedump processes currently running.')
print('\nImagedump forcibily cancelled.')
@imagedump.command(pass_context=True)
async def pause(self, ctx):
"""Pause ongoing imagedump downloads."""
for i in self.bot.imagedumps:
if i.poll() is not None:
pass
else:
open('pause.txt', 'a').close()
self.bot.imagedumps = []
return await ctx.send(self.bot.bot_prefix + 'Paused download. ``>imagedump resume`` to resume. Imagedumps can be resumed even after a restart.')
return await ctx.send(self.bot.bot_prefix + 'No imagedumps processes are running currently.')
@imagedump.command(pass_context=True, aliases=['unpause'])
async def resume(self, ctx):
"""Resume paused imagedump downloads. (you can resume even after restart)"""
if os.path.exists('pause.txt'):
os.remove('pause.txt')
paused_dls = os.listdir('cogs/utils/')
proc = []
for i in paused_dls:
if i.startswith('paused'):
proc.append(i)
for i in proc:
with open('cogs/utils/{}'.format(i), 'r') as fp:
fp.readline()
path = fp.readline().strip()
new_dump = fp.readline().strip()
delay = fp.readline().strip()
x = fp.readline().strip()
y = fp.readline().strip()
dimx = fp.readline().strip()
dimy = fp.readline().strip()
fixed = fp.readline().strip()
os.remove('cogs/utils/{}'.format(i))
args = [sys.executable, 'cogs/utils/image_dump.py', path, new_dump, delay, x, y, dimx, dimy, fixed]
print('\nResuming...')
p = subprocess.Popen(args)
self.bot.imagedumps.append(p)
await ctx.send(self.bot.bot_prefix + 'Resumed imagedump. Check console for progress.')
else:
await ctx.send(self.bot.bot_prefix + 'No imagedump processes are paused.')
def setup(bot):
bot.add_cog(Imagedump(bot))
| gpl-3.0 |
3DGenomes/tadbit | _pytadbit/utils/hmm.py | 1 | 6952 | from numpy import log, pi as pi_num, exp
import sys
def best_path(probs, pi, T):
"""
Viterbi algorithm with backpointers
"""
n = len(T)
m = len(probs[0])
log_V = [[0. for _ in xrange(m)] for _ in xrange(n)]
backpt = [[0 for _ in xrange(m)] for _ in xrange(n)]
states = [0 for _ in xrange(m)]
log_pi = [float('-inf') if pi[i] < 0. else log(pi[i]) for i in xrange(len(pi))]
log_T = [float('-inf') if T [i] < 0. else log(T[i]) for i in xrange(len(T))]
log_probs = [[float('-inf') if probs[i][j] < 0. else log(probs[i][j])
for j in xrange(m)]
for i in xrange(n)]
for i in xrange(n):
log_V[i][0] = log_probs[i][0] + log_pi[i]
for k in xrange(1, m):
for stat in xrange(n):
log_V[stat][k] = float('-inf')
for prev in xrange(n):
# original state prob times transition prob
prob = log_V[prev][k-1] + log_T[prev][stat]
if prob > log_V[stat][k]:
log_V[stat][k] = prob
backpt[stat][k-1] = prev
log_V[stat][k] += log_probs[stat][k]
# get the likelihood of the most probable path
prob = log_V[0][-1]
for i in xrange(1, n):
if log_V[i][-1] > prob:
prob = log_V[i][-1]
states[-1] = i
# Follow the backtrack: get the path which maximize the path prob.
for i in xrange(m - 2, -1, -1):
states[i] = backpt[states[i + 1]][i]
return states, prob
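# Argument convention used throughout this module: `T` is the n x n state
# transition matrix, `pi` the length-n vector of initial state probabilities
# and `probs[i][k]` the emission probability of observation k under state i
# (as produced by gaussian_prob below).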
def baum_welch_optimization(xh, T, E, new_pi, new_T, corrector,
new_E, etas, gammas):
"""
implementation of the baum-welch algorithm
"""
n = len(T)
m = len(gammas[0])
for i in xrange(n):
for j in xrange(n):
new_pi[i] += etas[i][j][0]
for i in xrange(n):
for j in xrange(n):
new_T[i][j] += sum(etas[i][j][k] for k in xrange(m - 1))
for k in xrange(m):
for i in xrange(n):
gik = gammas[i][k]
corrector[i] += gik
new_E[i][0] += gik * xh[k]
new_E[i][1] += gik * (xh[k] - E[i][0])**2
def update_parameters(corrector, pi, new_pi, T, new_T, E, new_E):
"""
final round of the baum-welch
"""
### update initial probabilities
n = len(T)
total = 0.
delta = 0.
for i in xrange(n):
total += new_pi[i]
for i in xrange(n):
new_pi[i] /= total
delta = max(delta, abs(new_pi[i] - pi[i]))
pi[i] = new_pi[i]
### update transitions
for i in xrange(n):
total = 0.
for j in xrange(n):
total += new_T[i][j]
for j in xrange(n):
new_T[i][j] /= total
delta = max(delta, abs(new_T[i][j] - T[i][j]))
T[i][j] = new_T[i][j]
### update emissions
for i in xrange(n):
# update the means
if corrector[i] > 0.:
new_E[i][0] /= corrector[i]
new_E[i][1] /= corrector[i]
# corrector[i] = 1.
delta = max(delta, abs(new_E[i][0] - E[i][0]))
E[i][0] = new_E[i][0]
# update the stdevs
delta = max(delta, abs(new_E[i][1] - E[i][1]))
E[i][1] = new_E[i][1]
return delta
def train(pi, T, E, observations, verbose=False, threshold=1e-6, n_iter=1000):
delta = float('inf')
for it in xrange(n_iter):
# reset for new iteration
new_pi = [0. for _ in pi]
new_T = [[0. for _ in i] for i in T]
new_E = [[0. for _ in i] for i in E]
corrector = [0. for _ in xrange(len(T))]
for h in xrange(len(observations)):
probs = gaussian_prob(observations[h], E)
alphas, scalars = get_alpha(probs, pi, T)
betas = get_beta(probs, T, scalars)
etas = get_eta(probs, T, alphas, betas)
gammas = get_gamma(T, alphas, betas)
baum_welch_optimization(observations[h], T, E, new_pi, new_T, corrector,
new_E, etas, gammas)
delta = update_parameters(corrector, pi, new_pi, T, new_T, E, new_E)
if verbose:
print ("\rTraining: %03i/%04i (diff: %.8f)") % (it, n_iter, delta),
sys.stdout.flush()
if delta <= threshold:
break
if verbose:
print "\n"
def get_eta(probs, T, alphas, betas):
"""
for Baum-Welch: probability of being in states i and j at times t and t+1
"""
n = len(T)
m = len(probs[0])
etas = [[[0. for _ in xrange(m - 1)] for _ in xrange(n)] for _ in xrange(n)]
for k in xrange(m - 1):
tot = 0.
for i in xrange(n):
for j in xrange(n):
etas[i][j][k] += alphas[i][k] * T[i][j] * probs[j][k+1] * betas[j][k+1]
tot += etas[i][j][k]
for i in xrange(n):
for j in xrange(n):
etas[i][j][k] /= tot
return etas
def get_gamma(T, alphas, betas):
"""
for Baum-Welch: probability of being in state i at time t
"""
n = len(T)
m = len(alphas[0])
return [[alphas[i][k] * betas[i][k] for k in xrange(m)] for i in xrange(n)]
def gaussian_prob(x, E):
"""
of x to follow the gaussian with given E
https://en.wikipedia.org/wiki/Normal_distribution
"""
probs = []
for d, (mu, sd) in enumerate(E):
pi2sd = (2. * pi_num * sd)**-0.5
inv2sd = 1. / (2. * sd)
probs.append([])
for obs in x:
p = pi2sd * exp(-(obs - mu)**2 * inv2sd)
probs[d].append(p)
return probs
def get_alpha(probs, pi, T):
"""
computes alphas using forward algorithm
"""
n = len(T)
m = len(probs[0])
alphas = [[0. for _ in xrange(m)] for _ in xrange(n)]
scalars = [0. for _ in xrange(m)]
# initialize alpha for each state
for i in xrange(n):
alphas[i][0] = pi[i] * probs[i][0]
scalars[0] += alphas[i][0]
for i in xrange(n):
alphas[i][0] /= scalars[0]
for k in xrange(1, m):
for i in xrange(n):
for j in xrange(n):
# all transition probabilities to become "i" times previous alpha
alphas[i][k] += alphas[j][k-1] * T[j][i]
# times probablity to belong to this states
alphas[i][k] *= probs[i][k]
scalars[k] += alphas[i][k]
for i in xrange(n):
alphas[i][k] /= scalars[k]
return alphas, scalars
def get_beta(probs, T, scalars):
"""
computes betas using backward algorithm
"""
n = len(T)
m = len(probs[0])
# intialize beta at 1.0
betas = [[0. for _ in xrange(m - 1)] + [1.0] for _ in xrange(n)]
for k in xrange(-2, -m - 1, -1):
for i in xrange(n):
for j in xrange(n):
betas[i][k] += betas[j][k + 1] * probs[j][k + 1] * T[i][j]
betas[i][k] /= scalars[k + 1]
return betas
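# Hedged usage sketch (not part of TADbit): fit a two-state Gaussian HMM on a
# toy observation sequence, then decode it with Viterbi.  All numbers below
# are made up for illustration.
if __name__ == '__main__':
    toy_pi = [0.5, 0.5]                          # initial state probabilities
    toy_T = [[0.9, 0.1],
             [0.1, 0.9]]                         # transition matrix
    toy_E = [[0., 1.], [3., 1.]]                 # per-state [mean, variance]
    toy_obs = [[0.1, -0.2, 0.3, 2.9, 3.2, 3.1, 0.2, -0.1]]
    train(toy_pi, toy_T, toy_E, toy_obs, verbose=False, n_iter=50)
    toy_states, toy_logprob = best_path(gaussian_prob(toy_obs[0], toy_E),
                                        toy_pi, toy_T)
    print toy_states, toy_logprob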
| gpl-3.0 |
LoHChina/nova | nova/tests/unit/conductor/tasks/test_live_migrate.py | 11 | 21241 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mox3 import mox
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.conductor.tasks import live_migrate
from nova import exception
from nova import objects
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import test
from nova.tests.unit import fake_instance
from nova import utils
class LiveMigrationTaskTestCase(test.NoDBTestCase):
def setUp(self):
super(LiveMigrationTaskTestCase, self).setUp()
self.context = "context"
self.instance_host = "host"
self.instance_uuid = "uuid"
self.instance_image = "image_ref"
db_instance = fake_instance.fake_db_instance(
host=self.instance_host,
uuid=self.instance_uuid,
power_state=power_state.RUNNING,
memory_mb=512,
image_ref=self.instance_image)
self.instance = objects.Instance._from_db_object(
self.context, objects.Instance(), db_instance)
self.instance.system_metadata = {'image_hw_disk_bus': 'scsi'}
self.destination = "destination"
self.block_migration = "bm"
self.disk_over_commit = "doc"
self.migration = objects.Migration()
self._generate_task()
def _generate_task(self):
self.task = live_migrate.LiveMigrationTask(self.context,
self.instance, self.destination, self.block_migration,
self.disk_over_commit, self.migration, compute_rpcapi.ComputeAPI(),
servicegroup.API(), scheduler_client.SchedulerClient())
def test_execute_with_destination(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task, '_check_requested_destination')
self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
self.task._check_host_is_up(self.instance_host)
self.task._check_requested_destination()
self.task.compute_rpcapi.live_migration(self.context,
host=self.instance_host,
instance=self.instance,
dest=self.destination,
block_migration=self.block_migration,
migration=self.migration,
migrate_data=None).AndReturn("bob")
self.mox.ReplayAll()
self.assertEqual("bob", self.task.execute())
def test_execute_without_destination(self):
self.destination = None
self._generate_task()
self.assertIsNone(self.task.destination)
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task, '_find_destination')
self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
self.task._check_host_is_up(self.instance_host)
self.task._find_destination().AndReturn("found_host")
self.task.compute_rpcapi.live_migration(self.context,
host=self.instance_host,
instance=self.instance,
dest="found_host",
block_migration=self.block_migration,
migration=self.migration,
migrate_data=None).AndReturn("bob")
self.mox.ReplayAll()
with mock.patch.object(self.migration, 'save') as mock_save:
self.assertEqual("bob", self.task.execute())
self.assertTrue(mock_save.called)
self.assertEqual('found_host', self.migration.dest_compute)
def test_check_instance_is_active_passes_when_paused(self):
self.task.instance['power_state'] = power_state.PAUSED
self.task._check_instance_is_active()
def test_check_instance_is_active_fails_when_shutdown(self):
self.task.instance['power_state'] = power_state.SHUTDOWN
self.assertRaises(exception.InstanceInvalidState,
self.task._check_instance_is_active)
def test_check_instance_host_is_up(self):
self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host')
self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
objects.Service.get_by_compute_host(self.context,
"host").AndReturn("service")
self.task.servicegroup_api.service_is_up("service").AndReturn(True)
self.mox.ReplayAll()
self.task._check_host_is_up("host")
def test_check_instance_host_is_up_fails_if_not_up(self):
self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host')
self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
objects.Service.get_by_compute_host(self.context,
"host").AndReturn("service")
self.task.servicegroup_api.service_is_up("service").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_host_is_up, "host")
def test_check_instance_host_is_up_fails_if_not_found(self):
self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host')
objects.Service.get_by_compute_host(
self.context, "host").AndRaise(exception.NotFound)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_host_is_up, "host")
def test_check_requested_destination(self):
self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host')
self.mox.StubOutWithMock(self.task, '_get_compute_info')
self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
self.mox.StubOutWithMock(self.task.compute_rpcapi,
'check_can_live_migrate_destination')
objects.Service.get_by_compute_host(
self.context, self.destination).AndReturn("service")
self.task.servicegroup_api.service_is_up("service").AndReturn(True)
hypervisor_details = {
"hypervisor_type": "a",
"hypervisor_version": 6.1,
"free_ram_mb": 513
}
self.task._get_compute_info(self.destination)\
.AndReturn(hypervisor_details)
self.task._get_compute_info(self.instance_host)\
.AndReturn(hypervisor_details)
self.task._get_compute_info(self.destination)\
.AndReturn(hypervisor_details)
self.task.compute_rpcapi.check_can_live_migrate_destination(
self.context, self.instance, self.destination,
self.block_migration, self.disk_over_commit).AndReturn(
"migrate_data")
self.mox.ReplayAll()
self.task._check_requested_destination()
self.assertEqual("migrate_data", self.task.migrate_data)
def test_check_requested_destination_fails_with_same_dest(self):
self.task.destination = "same"
self.task.source = "same"
self.assertRaises(exception.UnableToMigrateToSelf,
self.task._check_requested_destination)
def test_check_requested_destination_fails_when_destination_is_up(self):
self.mox.StubOutWithMock(objects.Service, 'get_by_compute_host')
objects.Service.get_by_compute_host(
self.context, self.destination).AndRaise(exception.NotFound)
self.mox.ReplayAll()
self.assertRaises(exception.ComputeServiceUnavailable,
self.task._check_requested_destination)
def test_check_requested_destination_fails_with_not_enough_memory(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(objects.ComputeNode,
'get_first_node_by_host_for_old_compat')
self.task._check_host_is_up(self.destination)
objects.ComputeNode.get_first_node_by_host_for_old_compat(self.context,
self.destination).AndReturn({"free_ram_mb": 511})
self.mox.ReplayAll()
self.assertRaises(exception.MigrationPreCheckError,
self.task._check_requested_destination)
def test_check_requested_destination_fails_with_hypervisor_diff(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task,
'_check_destination_has_enough_memory')
self.mox.StubOutWithMock(self.task, '_get_compute_info')
self.task._check_host_is_up(self.destination)
self.task._check_destination_has_enough_memory()
self.task._get_compute_info(self.instance_host).AndReturn({
"hypervisor_type": "b"
})
self.task._get_compute_info(self.destination).AndReturn({
"hypervisor_type": "a"
})
self.mox.ReplayAll()
self.assertRaises(exception.InvalidHypervisorType,
self.task._check_requested_destination)
def test_check_requested_destination_fails_with_hypervisor_too_old(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
self.mox.StubOutWithMock(self.task,
'_check_destination_has_enough_memory')
self.mox.StubOutWithMock(self.task, '_get_compute_info')
self.task._check_host_is_up(self.destination)
self.task._check_destination_has_enough_memory()
self.task._get_compute_info(self.instance_host).AndReturn({
"hypervisor_type": "a",
"hypervisor_version": 7
})
self.task._get_compute_info(self.destination).AndReturn({
"hypervisor_type": "a",
"hypervisor_version": 6
})
self.mox.ReplayAll()
self.assertRaises(exception.DestinationHypervisorTooOld,
self.task._check_requested_destination)
def test_find_destination_works(self):
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
self.mox.ReplayAll()
self.assertEqual("host1", self.task._find_destination())
def test_find_destination_no_image_works(self):
self.instance['image_ref'] = ''
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
scheduler_utils.build_request_spec(
self.context,
{'properties': {'hw_disk_bus': 'scsi'}},
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
self.mox.ReplayAll()
self.assertEqual("host1", self.task._find_destination())
def _test_find_destination_retry_hypervisor_raises(self, error):
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(error)
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host2'}])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
self.mox.ReplayAll()
self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_with_old_hypervisor(self):
self._test_find_destination_retry_hypervisor_raises(
exception.DestinationHypervisorTooOld)
def test_find_destination_retry_with_invalid_hypervisor_type(self):
self._test_find_destination_retry_hypervisor_raises(
exception.InvalidHypervisorType)
def test_find_destination_retry_with_invalid_livem_checks(self):
self.flags(migrate_max_retries=1)
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")\
.AndRaise(exception.Invalid)
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host2'}])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
self.mox.ReplayAll()
self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_with_failed_migration_pre_checks(self):
self.flags(migrate_max_retries=1)
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")\
.AndRaise(exception.MigrationPreCheckError("reason"))
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host2'}])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
self.mox.ReplayAll()
self.assertEqual("host2", self.task._find_destination())
def test_find_destination_retry_exceeds_max(self):
self.flags(migrate_max_retries=0)
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(exception.DestinationHypervisorTooOld)
self.mox.ReplayAll()
self.assertRaises(exception.MaxRetriesExceeded,
self.task._find_destination)
def test_find_destination_when_runs_out_of_hosts(self):
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({})
scheduler_utils.setup_instance_group(
self.context, {}, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(
exception.NoValidHost(reason=""))
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost, self.task._find_destination)
| apache-2.0 |
stevenmizuno/QGIS | python/plugins/processing/algs/qgis/TextToFloat.py | 4 | 3145 | # -*- coding: utf-8 -*-
"""
***************************************************************************
TextToFloat.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsField,
QgsProcessing,
QgsProcessingParameterField)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
class TextToFloat(QgisFeatureBasedAlgorithm):
FIELD = 'FIELD'
def group(self):
return self.tr('Vector table')
def groupId(self):
return 'vectortable'
def __init__(self):
super().__init__()
self.field_name = None
self.field_idx = -1
def initParameters(self, config=None):
self.addParameter(QgsProcessingParameterField(self.FIELD,
self.tr('Text attribute to convert to float'),
parentLayerParameterName='INPUT',
type=QgsProcessingParameterField.String
))
def name(self):
return 'texttofloat'
def displayName(self):
return self.tr('Text to float')
def outputName(self):
return self.tr('Float from text')
def inputLayerTypes(self):
return [QgsProcessing.TypeVector]
def outputFields(self, inputFields):
self.field_idx = inputFields.lookupField(self.field_name)
if self.field_idx >= 0:
inputFields[self.field_idx] = QgsField(self.field_name, QVariant.Double, '', 24, 15)
return inputFields
def prepareAlgorithm(self, parameters, context, feedback):
self.field_name = self.parameterAsString(parameters, self.FIELD, context)
return True
def processFeature(self, feature, context, feedback):
value = feature[self.field_idx]
try:
if '%' in value:
feature[self.field_idx] = float(value.replace('%', '')) / 100.0
else:
feature[self.field_idx] = float(value)
except:
feature[self.field_idx] = None
return [feature]
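# Illustrative sketch only (not part of the upstream algorithm): the
# per-feature conversion in processFeature can be exercised on plain strings
# with a hypothetical standalone helper like this one.
def _text_to_float_example(value):
    """Convert '42', '3.5' or '12%' the same way processFeature does."""
    try:
        if '%' in value:
            return float(value.replace('%', '')) / 100.0
        return float(value)
    except (TypeError, ValueError):
        return None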
| gpl-2.0 |
AppleDash/BuhIRC | modules/urltitle.py | 2 | 2187 | # This file is part of BuhIRC.
#
# BuhIRC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BuhIRC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BuhIRC. If not, see <http://www.gnu.org/licenses/>.
import re
import logging
import threading
from bs4 import BeautifulSoup
from modules import Module
class TitleModule(Module):
name = "URLTitle"
description = "Automatically gets titles of URLs posted in channels."
def module_init(self, bot):
self.hook_numeric("PRIVMSG", self.on_privmsg)
def on_privmsg(self, bot, ln):
sender = ln.hostmask.nick
message = ln.params[-1]
reply_to = sender
if ln.params[0][0] == "#":
reply_to = ln.params[0]
match = re.match(".*(http(s)?://[^ ]+).*", message)
if match:
url = match.group(1)
t = TitleFetchThread(url, lambda resp: bot.privmsg(reply_to, resp), self)
t.start()
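# Illustrative sketch only (not part of the original module): the URL
# extraction in on_privmsg can be tried on a plain string with a hypothetical
# helper like this, e.g. _extract_url_example("see https://example.org please")
# returns "https://example.org".
def _extract_url_example(message):
    match = re.match(".*(http(s)?://[^ ]+).*", message)
    return match.group(1) if match else None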
class TitleFetchThread(threading.Thread):
def __init__(self, url, reply_func, module):
super(TitleFetchThread, self).__init__()
self.url = url
self.reply_func = reply_func
self.module = module
def run(self):
try:
res = self.module.bot.http_get(self.url, stream=True, timeout=5.0)
data = next(res.iter_content(4096))
except Exception as e:
logging.error("urltitle: Error fetching title for URL '%s': %s" % (self.url, str(e)))
return
soup = BeautifulSoup(data, "lxml")
if hasattr(soup, "title") and soup.title is not None:
safe_title = soup.title.text.strip().replace("\r", "").replace("\n", "")[:128]
self.reply_func("[ %s ] - %s" % (safe_title, self.url)) | gpl-3.0 |
hosseinmh/Django_learning | djmod/.venv/lib/python3.5/site-packages/pip/_vendor/webencodings/mklabels.py | 512 | 1305 | """
webencodings.mklabels
~~~~~~~~~~~~~~~~~~~~~
Regenerate the webencodings.labels module.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
import json
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
def assert_lower(string):
assert string == string.lower()
return string
def generate(url):
parts = ['''\
"""
webencodings.labels
~~~~~~~~~~~~~~~~~~~
Map encoding labels to their name.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
# XXX Do not edit!
# This file is automatically generated by mklabels.py
LABELS = {
''']
labels = [
(repr(assert_lower(label)).lstrip('u'),
repr(encoding['name']).lstrip('u'))
for category in json.loads(urlopen(url).read().decode('ascii'))
for encoding in category['encodings']
for label in encoding['labels']]
max_len = max(len(label) for label, name in labels)
parts.extend(
' %s:%s %s,\n' % (label, ' ' * (max_len - len(label)), name)
for label, name in labels)
parts.append('}')
return ''.join(parts)
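# Illustrative note (not part of the original script): every generated entry in
# LABELS maps a lowercase label to its canonical encoding name, so a line of
# the produced module looks roughly like
#     'unicode-1-1-utf-8': 'utf-8',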
if __name__ == '__main__':
print(generate('http://encoding.spec.whatwg.org/encodings.json'))
| mit |
gonzafirewall/kivy | kivy/tools/stub-gl-debug.py | 75 | 13278 | from __future__ import print_function
a = '''cdef void glActiveTexture (cgl.GLenum texture)
cdef void glAttachShader (cgl.GLuint program, cgl.GLuint shader)
cdef void glBindAttribLocation (cgl.GLuint program, cgl.GLuint index, cgl.GLchar* name)
cdef void glBindBuffer (cgl.GLenum target, cgl.GLuint buffer)
cdef void glBindFramebuffer (cgl.GLenum target, cgl.GLuint framebuffer)
cdef void glBindRenderbuffer (cgl.GLenum target, cgl.GLuint renderbuffer)
cdef void glBindTexture (cgl.GLenum target, cgl.GLuint texture)
cdef void glBlendColor (cgl.GLclampf red, cgl.GLclampf green, cgl.GLclampf blue, cgl.GLclampf alpha)
cdef void glBlendEquation (cgl.GLenum mode)
cdef void glBlendEquationSeparate (cgl.GLenum modeRGB, cgl.GLenum modeAlpha)
cdef void glBlendFunc (cgl.GLenum sfactor, cgl.GLenum dfactor)
cdef void glBlendFuncSeparate (cgl.GLenum srcRGB, cgl.GLenum dstRGB, cgl.GLenum srcAlpha, cgl.GLenum dstAlpha)
cdef void glBufferData (cgl.GLenum target, cgl.GLsizeiptr size, cgl.GLvoid* data, cgl.GLenum usage)
cdef void glBufferSubData (cgl.GLenum target, cgl.GLintptr offset, cgl.GLsizeiptr size, cgl.GLvoid* data)
cdef cgl.GLenum glCheckFramebufferStatus (cgl.GLenum target)
cdef void glClear (cgl.GLbitfield mask)
cdef void glClearColor (cgl.GLclampf red, cgl.GLclampf green, cgl.GLclampf blue, cgl.GLclampf alpha)
cdef void glClearDepthf (cgl.GLclampf depth)
cdef void glClearStencil (cgl.GLint s)
cdef void glColorMask (cgl.GLboolean red, cgl.GLboolean green, cgl.GLboolean blue, cgl.GLboolean alpha)
cdef void glCompileShader (cgl.GLuint shader)
cdef void glCompressedTexImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLenum internalformat, cgl.GLsizei width, cgl.GLsizei height, cgl.GLint border, cgl.GLsizei imageSize, cgl.GLvoid* data)
cdef void glCompressedTexSubImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLint xoffset, cgl.GLint yoffset, cgl.GLsizei width, cgl.GLsizei height, cgl.GLenum format, cgl.GLsizei imageSize, cgl.GLvoid* data)
cdef void glCopyTexImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLenum internalformat, cgl.GLint x, cgl.GLint y, cgl.GLsizei width, cgl.GLsizei height, cgl.GLint border)
cdef void glCopyTexSubImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLint xoffset, cgl.GLint yoffset, cgl.GLint x, cgl.GLint y, cgl.GLsizei width, cgl.GLsizei height)
cdef cgl.GLuint glCreateProgram ()
cdef cgl.GLuint glCreateShader (cgl.GLenum type)
cdef void glCullFace (cgl.GLenum mode)
cdef void glDeleteBuffers (cgl.GLsizei n, cgl.GLuint* buffers)
cdef void glDeleteFramebuffers (cgl.GLsizei n, cgl.GLuint* framebuffers)
cdef void glDeleteProgram (cgl.GLuint program)
cdef void glDeleteRenderbuffers (cgl.GLsizei n, cgl.GLuint* renderbuffers)
cdef void glDeleteShader (cgl.GLuint shader)
cdef void glDeleteTextures (cgl.GLsizei n, cgl.GLuint* textures)
cdef void glDepthFunc (cgl.GLenum func)
cdef void glDepthMask (cgl.GLboolean flag)
cdef void glDepthRangef (cgl.GLclampf zNear, cgl.GLclampf zFar)
cdef void glDetachShader (cgl.GLuint program, cgl.GLuint shader)
cdef void glDisable (cgl.GLenum cap)
cdef void glDisableVertexAttribArray (cgl.GLuint index)
cdef void glDrawArrays (cgl.GLenum mode, cgl.GLint first, cgl.GLsizei count)
cdef void glDrawElements (cgl.GLenum mode, cgl.GLsizei count, cgl.GLenum type, cgl.GLvoid* indices)
cdef void glEnable (cgl.GLenum cap)
cdef void glEnableVertexAttribArray (cgl.GLuint index)
cdef void glFinish ()
cdef void glFlush ()
cdef void glFramebufferRenderbuffer (cgl.GLenum target, cgl.GLenum attachment, cgl.GLenum renderbuffertarget, cgl.GLuint renderbuffer)
cdef void glFramebufferTexture2D (cgl.GLenum target, cgl.GLenum attachment, cgl.GLenum textarget, cgl.GLuint texture, cgl.GLint level)
cdef void glFrontFace (cgl.GLenum mode)
cdef void glGenBuffers (cgl.GLsizei n, cgl.GLuint* buffers)
cdef void glGenerateMipmap (cgl.GLenum target)
cdef void glGenFramebuffers (cgl.GLsizei n, cgl.GLuint* framebuffers)
cdef void glGenRenderbuffers (cgl.GLsizei n, cgl.GLuint* renderbuffers)
cdef void glGenTextures (cgl.GLsizei n, cgl.GLuint* textures)
cdef void glGetActiveAttrib (cgl.GLuint program, cgl.GLuint index, cgl.GLsizei bufsize, cgl.GLsizei* length, cgl.GLint* size, cgl.GLenum* type, cgl.GLchar* name)
cdef void glGetActiveUniform (cgl.GLuint program, cgl.GLuint index, cgl.GLsizei bufsize, cgl.GLsizei* length, cgl.GLint* size, cgl.GLenum* type, cgl.GLchar* name)
cdef void glGetAttachedShaders (cgl.GLuint program, cgl.GLsizei maxcount, cgl.GLsizei* count, cgl.GLuint* shaders)
cdef int glGetAttribLocation (cgl.GLuint program, cgl.GLchar* name)
cdef void glGetBooleanv (cgl.GLenum pname, cgl.GLboolean* params)
cdef void glGetBufferParameteriv (cgl.GLenum target, cgl.GLenum pname, cgl.GLint* params)
cdef cgl.GLenum glGetError ()
cdef void glGetFloatv (cgl.GLenum pname, cgl.GLfloat* params)
cdef void glGetFramebufferAttachmentParameteriv (cgl.GLenum target, cgl.GLenum attachment, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetIntegerv (cgl.GLenum pname, cgl.GLint* params)
cdef void glGetProgramiv (cgl.GLuint program, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetProgramInfoLog (cgl.GLuint program, cgl.GLsizei bufsize, cgl.GLsizei* length, cgl.GLchar* infolog)
cdef void glGetRenderbufferParameteriv (cgl.GLenum target, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetShaderiv (cgl.GLuint shader, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetShaderInfoLog (cgl.GLuint shader, cgl.GLsizei bufsize, cgl.GLsizei* length, cgl.GLchar* infolog)
#cdef void glGetShaderPrecisionFormat (cgl.GLenum shadertype, cgl.GLenum precisiontype, cgl.GLint* range, cgl.GLint* precision)
cdef void glGetShaderSource (cgl.GLuint shader, cgl.GLsizei bufsize, cgl.GLsizei* length, cgl.GLchar* source)
cdef cgl.GLubyte* glGetString (cgl.GLenum name)
cdef void glGetTexParameterfv (cgl.GLenum target, cgl.GLenum pname, cgl.GLfloat* params)
cdef void glGetTexParameteriv (cgl.GLenum target, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetUniformfv (cgl.GLuint program, cgl.GLint location, cgl.GLfloat* params)
cdef void glGetUniformiv (cgl.GLuint program, cgl.GLint location, cgl.GLint* params)
cdef int glGetUniformLocation (cgl.GLuint program, cgl.GLchar* name)
cdef void glGetVertexAttribfv (cgl.GLuint index, cgl.GLenum pname, cgl.GLfloat* params)
cdef void glGetVertexAttribiv (cgl.GLuint index, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetVertexAttribPointerv (cgl.GLuint index, cgl.GLenum pname, cgl.GLvoid** pointer)
cdef void glHint (cgl.GLenum target, cgl.GLenum mode)
cdef cgl.GLboolean glIsBuffer (cgl.GLuint buffer)
cdef cgl.GLboolean glIsEnabled (cgl.GLenum cap)
cdef cgl.GLboolean glIsFramebuffer (cgl.GLuint framebuffer)
cdef cgl.GLboolean glIsProgram (cgl.GLuint program)
cdef cgl.GLboolean glIsRenderbuffer (cgl.GLuint renderbuffer)
cdef cgl.GLboolean glIsShader (cgl.GLuint shader)
cdef cgl.GLboolean glIsTexture (cgl.GLuint texture)
cdef void glLineWidth (cgl.GLfloat width)
cdef void glLinkProgram (cgl.GLuint program)
cdef void glPixelStorei (cgl.GLenum pname, cgl.GLint param)
cdef void glPolygonOffset (cgl.GLfloat factor, cgl.GLfloat units)
cdef void glReadPixels (cgl.GLint x, cgl.GLint y, cgl.GLsizei width, cgl.GLsizei height, cgl.GLenum format, cgl.GLenum type, cgl.GLvoid* pixels)
#cdef void glReleaseShaderCompiler ()
cdef void glRenderbufferStorage (cgl.GLenum target, cgl.GLenum internalformat, cgl.GLsizei width, cgl.GLsizei height)
cdef void glSampleCoverage (cgl.GLclampf value, cgl.GLboolean invert)
cdef void glScissor (cgl.GLint x, cgl.GLint y, cgl.GLsizei width, cgl.GLsizei height)
#cdef void glShaderBinary (cgl.GLsizei n, cgl.GLuint* shaders, cgl.GLenum binaryformat, cgl.GLvoid* binary, cgl.GLsizei length)
cdef void glShaderSource (cgl.GLuint shader, cgl.GLsizei count, cgl.GLchar** string, cgl.GLint* length)
cdef void glStencilFunc (cgl.GLenum func, cgl.GLint ref, cgl.GLuint mask)
cdef void glStencilFuncSeparate (cgl.GLenum face, cgl.GLenum func, cgl.GLint ref, cgl.GLuint mask)
cdef void glStencilMask (cgl.GLuint mask)
cdef void glStencilMaskSeparate (cgl.GLenum face, cgl.GLuint mask)
cdef void glStencilOp (cgl.GLenum fail, cgl.GLenum zfail, cgl.GLenum zpass)
cdef void glStencilOpSeparate (cgl.GLenum face, cgl.GLenum fail, cgl.GLenum zfail, cgl.GLenum zpass)
cdef void glTexImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLint internalformat, cgl.GLsizei width, cgl.GLsizei height, cgl.GLint border, cgl.GLenum format, cgl.GLenum type, cgl.GLvoid* pixels)
cdef void glTexParameterf (cgl.GLenum target, cgl.GLenum pname, cgl.GLfloat param)
cdef void glTexParameterfv (cgl.GLenum target, cgl.GLenum pname, cgl.GLfloat* params)
cdef void glTexParameteri (cgl.GLenum target, cgl.GLenum pname, cgl.GLint param)
cdef void glTexParameteriv (cgl.GLenum target, cgl.GLenum pname, cgl.GLint* params)
cdef void glTexSubImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLint xoffset, cgl.GLint yoffset, cgl.GLsizei width, cgl.GLsizei height, cgl.GLenum format, cgl.GLenum type, cgl.GLvoid* pixels)
cdef void glUniform1f (cgl.GLint location, cgl.GLfloat x)
cdef void glUniform1fv (cgl.GLint location, cgl.GLsizei count, cgl.GLfloat* v)
cdef void glUniform1i (cgl.GLint location, cgl.GLint x)
cdef void glUniform1iv (cgl.GLint location, cgl.GLsizei count, cgl.GLint* v)
cdef void glUniform2f (cgl.GLint location, cgl.GLfloat x, cgl.GLfloat y)
cdef void glUniform2fv (cgl.GLint location, cgl.GLsizei count, cgl.GLfloat* v)
cdef void glUniform2i (cgl.GLint location, cgl.GLint x, cgl.GLint y)
cdef void glUniform2iv (cgl.GLint location, cgl.GLsizei count, cgl.GLint* v)
cdef void glUniform3f (cgl.GLint location, cgl.GLfloat x, cgl.GLfloat y, cgl.GLfloat z)
cdef void glUniform3fv (cgl.GLint location, cgl.GLsizei count, cgl.GLfloat* v)
cdef void glUniform3i (cgl.GLint location, cgl.GLint x, cgl.GLint y, cgl.GLint z)
cdef void glUniform3iv (cgl.GLint location, cgl.GLsizei count, cgl.GLint* v)
cdef void glUniform4f (cgl.GLint location, cgl.GLfloat x, cgl.GLfloat y, cgl.GLfloat z, cgl.GLfloat w)
cdef void glUniform4fv (cgl.GLint location, cgl.GLsizei count, cgl.GLfloat* v)
cdef void glUniform4i (cgl.GLint location, cgl.GLint x, cgl.GLint y, cgl.GLint z, cgl.GLint w)
cdef void glUniform4iv (cgl.GLint location, cgl.GLsizei count, cgl.GLint* v)
cdef void glUniformMatrix2fv (cgl.GLint location, cgl.GLsizei count, cgl.GLboolean transpose, cgl.GLfloat* value)
cdef void glUniformMatrix3fv (cgl.GLint location, cgl.GLsizei count, cgl.GLboolean transpose, cgl.GLfloat* value)
cdef void glUniformMatrix4fv (cgl.GLint location, cgl.GLsizei count, cgl.GLboolean transpose, cgl.GLfloat* value)
cdef void glUseProgram (cgl.GLuint program)
cdef void glValidateProgram (cgl.GLuint program)
cdef void glVertexAttrib1f (cgl.GLuint indx, cgl.GLfloat x)
cdef void glVertexAttrib1fv (cgl.GLuint indx, cgl.GLfloat* values)
cdef void glVertexAttrib2f (cgl.GLuint indx, cgl.GLfloat x, cgl.GLfloat y)
cdef void glVertexAttrib2fv (cgl.GLuint indx, cgl.GLfloat* values)
cdef void glVertexAttrib3f (cgl.GLuint indx, cgl.GLfloat x, cgl.GLfloat y, cgl.GLfloat z)
cdef void glVertexAttrib3fv (cgl.GLuint indx, cgl.GLfloat* values)
cdef void glVertexAttrib4f (cgl.GLuint indx, cgl.GLfloat x, cgl.GLfloat y, cgl.GLfloat z, cgl.GLfloat w)
cdef void glVertexAttrib4fv (cgl.GLuint indx, cgl.GLfloat* values)
cdef void glVertexAttribPointer (cgl.GLuint indx, cgl.GLint size, cgl.GLenum type, cgl.GLboolean normalized, cgl.GLsizei stride, cgl.GLvoid* ptr)
cdef void glViewport (cgl.GLint x, cgl.GLint y, cgl.GLsizei width, cgl.GLsizei height)'''
def replace(s):
item = s.split(' ')
rettype = item[1]
item = item[2:]
for x in item:
x = x.strip()
if not x or x.startswith('GL'):
continue
if x.startswith('(GL'):
yield '('
continue
if x.startswith('gl'):
prefix = ''
if rettype != 'void':
prefix = 'return '
yield '%scgl.%s' % (prefix, x)
continue
yield x
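# Illustrative note (not part of the original script): for the stripped
# declaration 'cdef void glClear (GLbitfield mask)' the replace() generator
# above yields 'cgl.glClear', '(' and 'mask)', so the joined wrapper body
# becomes roughly 'cgl.glClear ( mask)'.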
print('''
# This file was automatically generated with kivy/tools/stub-gl-debug.py
cimport c_opengl as cgl
''')
lines = a.splitlines()
for x in lines:
if x.startswith('#'):
# There are some functions that either do not exist or break on OSX.
# Just skip those.
print('# Skipping generation of: "%s"' % x)
continue
x = x.replace('cgl.', '')
y = ' '.join(replace(x))
print('%s with gil:' % x)
s = x.split()
print(' print "GL %s(' % s[2], end=' ')
pointer = 0
for arg in s[3:]:
arg = arg.strip()
arg = arg.replace(',', '').replace(')', '')
if 'GL' in arg or arg == '(':
pointer = arg.count('*')
continue
pointer = '*' * pointer
if pointer:
print('%s%s=", repr(hex(<long> %s)), ",' % (arg, pointer, arg), end=' ')
else:
print('%s = ", %s, ",' % (arg, arg), end=' ')
pointer = 0
print(')"')
print(' %s' % y)
print(' ret = glGetError()')
print(' if ret: print("ERR {} / {}".format(ret, ret))')
| mit |
habeanf/Open-Knesset | mks/admin.py | 7 | 3764 | from django.contrib import admin
from django.contrib.contenttypes import generic
from django.db.models import Q
from models import Member, Membership, MemberAltname
from models import CoalitionMembership, Correlation, Party, \
Award, AwardType, Knesset
from links.models import Link
from video.models import Video
from persons.models import Person
class MembershipInline(admin.TabularInline):
model = Membership
extra = 1
class MemberLinksInline(generic.GenericTabularInline):
model = Link
ct_fk_field = 'object_pk'
extra = 1
class MemberAltnameInline(admin.TabularInline):
model = MemberAltname
extra = 1
class MemberPersonInline(admin.StackedInline):
model = Person
ct_fk_field = "mk"
extra = 0
max_num = 0
fields = ['calendar_url']
can_delete = False
class MemberRelatedVideosInline(generic.GenericTabularInline):
model = Video
ct_fk_field = 'object_pk'
can_delete = False
fields = ['title', 'description', 'embed_link', 'group', 'sticky', 'hide']
ordering = ['group', '-sticky', '-published']
readonly_fields = ['title', 'description', 'embed_link', 'group']
extra = 0
def queryset(self, request):
qs = super(MemberRelatedVideosInline, self).queryset(request)
qs = qs.filter(Q(hide=False) | Q(hide=None))
return qs
class CoalitionMembershipAdmin(admin.ModelAdmin):
list_display = ('party', 'start_date', 'end_date')
admin.site.register(CoalitionMembership, CoalitionMembershipAdmin)
class PartyAdmin(admin.ModelAdmin):
ordering = ('name',)
list_display = ('name', 'knesset', 'start_date', 'end_date',
'is_coalition', 'number_of_members',
'number_of_seats')
list_filter = ('knesset', )
inlines = (MembershipInline,)
admin.site.register(Party, PartyAdmin)
class MemberAdmin(admin.ModelAdmin):
ordering = ('name',)
# fields = ('name','start_date','end_date')
list_display = ('name', 'gender', 'PartiesString', 'current_party',
'is_current', 'current_position')
list_editable = ('is_current', 'current_position')
search_fields = ['name']
inlines = (MembershipInline, MemberLinksInline, MemberAltnameInline, MemberPersonInline,
MemberRelatedVideosInline)
list_filter = ('current_party__knesset', 'gender')
# A template for a very customized change view:
change_form_template = 'admin/simple/change_form_with_extra.html'
def change_view(self, request, object_id, extra_context=None):
m = Member.objects.get(id=object_id)
my_context = {
'extra': {
'hi_corr': m.CorrelationListToString(m.HighestCorrelations()),
'low_corr': m.CorrelationListToString(m.LowestCorrelations()),
}
}
return super(MemberAdmin, self).change_view(request, object_id,
extra_context=my_context)
def queryset(self, request):
return super(MemberAdmin, self).queryset(
request).select_related('current_party')
admin.site.register(Member, MemberAdmin)
class CorrelationAdmin(admin.ModelAdmin):
ordering = ('-normalized_score',)
admin.site.register(Correlation, CorrelationAdmin)
class MembershipAdmin(admin.ModelAdmin):
ordering = ('member__name',)
admin.site.register(Membership, MembershipAdmin)
class AwardTypeAdmin(admin.ModelAdmin):
pass
admin.site.register(AwardType, AwardTypeAdmin)
class AwardAdmin(admin.ModelAdmin):
list_display = ('member', 'award_type', 'date_given')
raw_id_fields = ('member',)
admin.site.register(Award, AwardAdmin)
class KnessetAdmin(admin.ModelAdmin):
pass
admin.site.register(Knesset, KnessetAdmin)
| bsd-3-clause |
hadronproject/HadronWeb | apps/frontend/migrations/0003_auto__add_developer__add_frontendimage__add_field_page_published__add_.py | 1 | 8978 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Developer'
db.create_table('frontend_developer', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('lastname', self.gf('django.db.models.fields.CharField')(max_length=128)),
('alias', self.gf('django.db.models.fields.CharField')(max_length=128)),
('occupation', self.gf('django.db.models.fields.CharField')(max_length=512)),
('roles', self.gf('django.db.models.fields.CharField')(max_length=512)),
('website', self.gf('django.db.models.fields.URLField')(max_length=200, null=True)),
('interests', self.gf('django.db.models.fields.CharField')(max_length=512)),
('published', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
))
db.send_create_signal('frontend', ['Developer'])
# Adding model 'FrontendImage'
db.create_table('frontend_frontendimage', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('developer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='images', to=orm['frontend.Developer'])),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
))
db.send_create_signal('frontend', ['FrontendImage'])
# Adding field 'Page.published'
db.add_column('frontend_page', 'published',
self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True),
keep_default=False)
# Adding field 'News.published'
db.add_column('frontend_news', 'published',
self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting model 'Developer'
db.delete_table('frontend_developer')
# Deleting model 'FrontendImage'
db.delete_table('frontend_frontendimage')
# Deleting field 'Page.published'
db.delete_column('frontend_page', 'published')
# Deleting field 'News.published'
db.delete_column('frontend_news', 'published')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'frontend.developer': {
'Meta': {'ordering': "['-id']", 'object_name': 'Developer'},
'alias': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interests': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'lastname': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'published': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'roles': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'frontend.frontendimage': {
'Meta': {'object_name': 'FrontendImage'},
'developer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['frontend.Developer']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
},
'frontend.news': {
'Meta': {'ordering': "['-id']", 'object_name': 'News'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'entry': ('django.db.models.fields.TextField', [], {}),
'entry_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'frontend.page': {
'Meta': {'ordering': "['-id']", 'object_name': 'Page'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'entry': ('django.db.models.fields.TextField', [], {}),
'entry_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['frontend'] | gpl-3.0 |
maxammann/mpv | waftools/checks/generic.py | 14 | 5938 | import os
import inflector
from waflib.ConfigSet import ConfigSet
from waflib import Utils
__all__ = [
"check_pkg_config", "check_cc", "check_statement", "check_libs",
"check_headers", "compose_checks", "check_true", "any_version",
"load_fragment", "check_stub", "check_ctx_vars", "check_program"]
any_version = None
def even(n):
return n % 2 == 0
def __define_options__(dependency_identifier):
return inflector.define_dict(dependency_identifier)
def __merge_options__(dependency_identifier, *args):
options_accu = inflector.storage_dict(dependency_identifier)
options_accu['mandatory'] = False
[options_accu.update(arg) for arg in args if arg]
return options_accu
def _filter_cc_arguments(ctx, opts):
if ctx.env.DEST_OS != Utils.unversioned_sys_platform():
# cross compiling, remove execute=True if present
if opts.get('execute'):
opts['execute'] = False
return opts
def check_program(name, var):
def fn(ctx, dependency_identifier):
return ctx.find_program(name, var=var, mandatory=False)
return fn
def check_libs(libs, function):
libs = [None] + libs
def fn(ctx, dependency_identifier):
for lib in libs:
kwargs = lib and {'lib': lib} or {}
if function(ctx, dependency_identifier, **kwargs):
return True
return False
return fn
def check_statement(header, statement, **kw_ext):
def fn(ctx, dependency_identifier, **kw):
headers = header
if not isinstance(headers, list):
headers = [header]
hs = "\n".join(["#include <{0}>".format(h) for h in headers])
fragment = ("{0}\n"
"int main(int argc, char **argv)\n"
"{{ {1}; return 0; }}").format(hs, statement)
opts = __merge_options__(dependency_identifier,
{'fragment':fragment},
__define_options__(dependency_identifier),
kw_ext, kw)
return ctx.check_cc(**_filter_cc_arguments(ctx, opts))
return fn
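# Illustrative note (not part of the original file): for a call such as
# check_statement('math.h', 'sqrt(2.0)') the fragment handed to check_cc would
# look roughly like
#   #include <math.h>
#   int main(int argc, char **argv)
#   { sqrt(2.0); return 0; }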
def check_cc(**kw_ext):
def fn(ctx, dependency_identifier, **kw):
options = __merge_options__(dependency_identifier,
__define_options__(dependency_identifier),
kw_ext, kw)
return ctx.check_cc(**_filter_cc_arguments(ctx, options))
return fn
def check_pkg_config(*args, **kw_ext):
def fn(ctx, dependency_identifier, **kw):
argsl = list(args)
packages = args[::2]
verchecks = args[1::2]
sargs = []
for i in range(0, len(packages)):
if i < len(verchecks):
sargs.append(packages[i] + ' ' + verchecks[i])
else:
sargs.append(packages[i])
pkgc_args = ["--libs", "--cflags"]
if ctx.dependency_satisfied('static-build'):
pkgc_args += ["--static"]
defaults = {
'path': ctx.env.PKG_CONFIG,
'package': " ".join(packages),
'args': sargs + pkgc_args }
opts = __merge_options__(dependency_identifier, defaults, kw_ext, kw)
# Warning! Megahack incoming: when parsing flags in `parse_flags` waf
# uses append_unique. This appends the flags only if they aren't
# already present in the list. This causes breakage if one checks for
# multiple pkg-config packages in a single call as stuff like -lm is
# added only at its first occurrence.
original_append_unique = ConfigSet.append_unique
ConfigSet.append_unique = ConfigSet.append_value
result = bool(ctx.check_cfg(**opts))
ConfigSet.append_unique = original_append_unique
defkey = inflector.define_key(dependency_identifier)
if result:
ctx.define(defkey, 1)
else:
ctx.add_optional_message(dependency_identifier,
"'{0}' not found".format(" ".join(sargs)))
ctx.undefine(defkey)
return result
return fn
def check_headers(*headers, **kw_ext):
def undef_others(ctx, headers, found):
not_found_hs = set(headers) - set([found])
for not_found_h in not_found_hs:
ctx.undefine(inflector.define_key(not_found_h))
def fn(ctx, dependency_identifier):
for header in headers:
defaults = {'header_name': header, 'features': 'c cprogram'}
options = __merge_options__(dependency_identifier, defaults, kw_ext)
if ctx.check(**options):
undef_others(ctx, headers, header)
ctx.define(inflector.define_key(dependency_identifier), 1)
return True
undef_others(ctx, headers, None)
return False
return fn
def check_true(ctx, dependency_identifier):
ctx.define(inflector.define_key(dependency_identifier), 1)
return True
def check_ctx_vars(*variables):
def fn(ctx, dependency_identifier):
missing = []
for variable in variables:
if variable not in ctx.env:
missing.append(variable)
if any(missing):
ctx.add_optional_message(dependency_identifier,
'missing {0}'.format(', '.join(missing)))
return False
else:
return True
return fn
def check_stub(ctx, dependency_identifier):
ctx.undefine(inflector.define_key(dependency_identifier))
return False
def compose_checks(*checks):
def fn(ctx, dependency_identifier):
return all([check(ctx, dependency_identifier) for check in checks])
return fn
def load_fragment(fragment):
file_path = os.path.join(os.path.dirname(__file__), '..', 'fragments',
fragment)
fp = open(file_path,"r")
fragment_code = fp.read()
fp.close()
return fragment_code
| gpl-2.0 |
bitifirefly/edx-platform | lms/djangoapps/lti_provider/users.py | 80 | 5166 | """
LTI user management functionality. This module reconciles the two identities
that an individual has in the campus LMS platform and on edX.
"""
import string
import random
import uuid
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.db import IntegrityError
from lti_provider.models import LtiUser
from student.models import UserProfile
def authenticate_lti_user(request, lti_user_id, lti_consumer):
"""
Determine whether the user specified by the LTI launch has an existing
account. If not, create a new Django User model and associate it with an
LtiUser object.
If the currently logged-in user does not match the user specified by the LTI
launch, log out the old user and log in the LTI identity.
"""
try:
lti_user = LtiUser.objects.get(
lti_user_id=lti_user_id,
lti_consumer=lti_consumer
)
except LtiUser.DoesNotExist:
# This is the first time that the user has been here. Create an account.
lti_user = create_lti_user(lti_user_id, lti_consumer)
if not (request.user.is_authenticated() and
request.user == lti_user.edx_user):
# The user is not authenticated, or is logged in as somebody else.
# Switch them to the LTI user
switch_user(request, lti_user, lti_consumer)
def create_lti_user(lti_user_id, lti_consumer):
"""
Generate a new user on the edX platform with a random username and password,
and associate that account with the LTI identity.
"""
edx_password = str(uuid.uuid4())
created = False
while not created:
try:
edx_user_id = generate_random_edx_username()
edx_email = "{}@{}".format(edx_user_id, settings.LTI_USER_EMAIL_DOMAIN)
edx_user = User.objects.create_user(
username=edx_user_id,
password=edx_password,
email=edx_email,
)
# A profile is required if PREVENT_CONCURRENT_LOGINS flag is set.
# TODO: We could populate user information from the LTI launch here,
# but it's not necessary for our current uses.
edx_user_profile = UserProfile(user=edx_user)
edx_user_profile.save()
created = True
except IntegrityError:
# The random edx_user_id wasn't unique. Since 'created' is still
# False, we will retry with a different random ID.
pass
lti_user = LtiUser(
lti_consumer=lti_consumer,
lti_user_id=lti_user_id,
edx_user=edx_user
)
lti_user.save()
return lti_user
def switch_user(request, lti_user, lti_consumer):
"""
Log out the current user, and log in using the edX identity associated with
the LTI ID.
"""
edx_user = authenticate(
username=lti_user.edx_user.username,
lti_user_id=lti_user.lti_user_id,
lti_consumer=lti_consumer
)
if not edx_user:
# This shouldn't happen, since we've created edX accounts for any LTI
# users by this point, but just in case we can return a 403.
raise PermissionDenied()
login(request, edx_user)
def generate_random_edx_username():
"""
Create a valid random edX user ID. An ID is at most 30 characters long, and
can contain upper and lowercase letters and numbers.
:return:
"""
allowable_chars = string.ascii_letters + string.digits
username = ''
for _index in range(30):
username = username + random.SystemRandom().choice(allowable_chars)
return username
class LtiBackend(object):
"""
A Django authentication backend that authenticates users via LTI. This
backend will only return a User object if it is associated with an LTI
identity (i.e. the user was created by the create_lti_user method above).
"""
def authenticate(self, username=None, lti_user_id=None, lti_consumer=None):
"""
Try to authenticate a user. This method will return a Django user object
if a user with the corresponding username exists in the database, and
if a record that links that user with an LTI user_id field exists in
the LtiUser collection.
If such a user is not found, the method returns None (in line with the
authentication backend specification).
"""
try:
edx_user = User.objects.get(username=username)
except User.DoesNotExist:
return None
try:
LtiUser.objects.get(
edx_user_id=edx_user.id,
lti_user_id=lti_user_id,
lti_consumer=lti_consumer
)
except LtiUser.DoesNotExist:
return None
return edx_user
def get_user(self, user_id):
"""
Return the User object for a user that has already been authenticated by
this backend.
"""
try:
return User.objects.get(id=user_id)
except User.DoesNotExist:
return None
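# Illustrative note (not part of the original module): for LtiBackend to be
# consulted during login it would normally be listed in the project's
# AUTHENTICATION_BACKENDS setting; the dotted path below is an assumption
# based on this file's location, not a value taken from the edX settings.
#
#   AUTHENTICATION_BACKENDS = (
#       'lti_provider.users.LtiBackend',
#       'django.contrib.auth.backends.ModelBackend',
#   )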
| agpl-3.0 |
zaycev/metaphor-search | sear/sear/lexicon.py | 1 | 2198 | # coding: utf-8
# Author: Vladimir M. Zaytsev <[email protected]>
import os
import leveldb
import logging
class DictLexicon(object):
TERM_FREQ_SEP = chr(255)
LEX_DIR_NAME = "dict.lexicon.ldb"
def __init__(self, root_dir):
self.term_dict = dict()
self.root_dir = root_dir
self.lexicon_root = os.path.join(self.root_dir, self.LEX_DIR_NAME)
self.ldb = leveldb.LevelDB(self.lexicon_root)
def add_term(self, term):
term_and_freq = self.term_dict.get(term)
if term_and_freq is not None:
term_and_freq[1] += 1
else:
new_term_id = len(self.term_dict)
self.term_dict[term] = [new_term_id, 1]
def count_terms(self, terms):
for term in terms:
self.add_term(term)
def get_id(self, term):
term_and_freq = self.term_dict.get(term)
if term_and_freq is not None:
return term_and_freq[0]
return -1
def dump(self):
w_batch = leveldb.WriteBatch()
total_wrote = 0
for term, term_and_freq in self.term_dict.iteritems():
w_batch.Put(term, str(term_and_freq[0]) + self.TERM_FREQ_SEP + str(term_and_freq[1]))
total_wrote += 1
self.ldb.Write(w_batch)
logging.info("DictLexicon: wrote %d term to ldb" % total_wrote)
def load(self):
lexicon_root = os.path.join(self.root_dir, self.LEX_DIR_NAME)
if not os.path.exists(lexicon_root):
os.mkdir(lexicon_root)
logging.warn("Lexicon file %s does not exist. Skip." % lexicon_root)
if len(self) > 0:
raise Exception("Non empty lexicon does not support reading")
total_read = 0
for term, term_and_freq in self.ldb.RangeIter():
term_id, term_freq = term_and_freq.split(self.TERM_FREQ_SEP)
term_id = int(term_id)
term_freq = int(term_freq)
self.term_dict[term] = [term_id, term_freq]
total_read += 1
logging.info("DictLexicon: read %d term from ldb" % total_read)
def __len__(self):
return self.term_dict.__len__()
def __contains__(self, term):
return self.term_dict.__contains__(term) | mit |
bitify/raspi | i2c-sensors/bitify/python/sensors/hmc5883l.py | 3 | 4436 | import math
import bitify.python.utils.i2cutils as I2CUtils
class HMC5883L(object):
'''
Simple HMC5883L implementation
'''
TWO_PI = 2 * math.pi
CONF_REG_A = 0
CONF_REG_B = 1
MODE_REG = 2
DATA_START_BLOCK = 3
DATA_XOUT_H = 0
DATA_XOUT_L = 1
DATA_ZOUT_H = 2
DATA_ZOUT_L = 3
DATA_YOUT_H = 4
DATA_YOUT_L = 5
SAMPLE_RATE = { 0 : 0.75, 1 : 1.5, 2 : 3, 3 : 7.5, 4 : 15, 5 : 30, 6 : 75, 7 :-1 }
SAMPLE_MODE = { 0 : "CONTINUOUS", 1 : "SINGLE", 2 : "IDLE", 3 : "IDLE" }
GAIN_SCALE = {
0 : [ 0.88, 1370, 0.73 ],
1 : [ 1.30, 1090, 0.92 ],
2 : [ 1.90, 820, 1.22 ],
3 : [ 2.50, 660, 1.52 ],
4 : [ 4.00, 440, 2.27 ],
5 : [ 4.70, 390, 2.56 ],
6 : [ 5.60, 330, 3.03 ],
7 : [ 8.10, 230, 4.35 ]
}
def __init__(self, bus, address, name, samples=3, rate=4, gain=1, sampling_mode=0, x_offset=0, y_offset=0, z_offset=0):
self.bus = bus
self.address = address
self.name = name
self.samples = samples
self.gain = gain
self.sampling_mode = sampling_mode
self.x_offset = x_offset
self.y_offset = y_offset
self.z_offset = z_offset
# Set the number of samples
conf_a = (samples << 5) + (rate << 2)
I2CUtils.i2c_write_byte(self.bus, self.address, HMC5883L.CONF_REG_A, conf_a)
# Set the gain
conf_b = gain << 5
I2CUtils.i2c_write_byte(self.bus, self.address, HMC5883L.CONF_REG_B, conf_b)
# Set the operation mode
I2CUtils.i2c_write_byte(self.bus, self.address, HMC5883L.MODE_REG, self.sampling_mode)
self.raw_data = [0, 0, 0, 0, 0, 0]
# Now read all the values as the first read after a gain change returns the old value
self.read_raw_data()
def read_raw_data(self):
'''
Read the raw data from the sensor, scale it appropriately and store for later use
'''
self.raw_data = I2CUtils.i2c_read_block(self.bus, self.address, HMC5883L.DATA_START_BLOCK, 6)
self.raw_x = I2CUtils.twos_compliment(self.raw_data[HMC5883L.DATA_XOUT_H], self.raw_data[HMC5883L.DATA_XOUT_L]) - self.x_offset
self.raw_y = I2CUtils.twos_compliment(self.raw_data[HMC5883L.DATA_YOUT_H], self.raw_data[HMC5883L.DATA_YOUT_L]) - self.y_offset
self.raw_z = I2CUtils.twos_compliment(self.raw_data[HMC5883L.DATA_ZOUT_H], self.raw_data[HMC5883L.DATA_ZOUT_L]) - self.z_offset
self.scaled_x = self.raw_x * HMC5883L.GAIN_SCALE[self.gain][2]
self.scaled_y = self.raw_y * HMC5883L.GAIN_SCALE[self.gain][2]
self.scaled_z = self.raw_z * HMC5883L.GAIN_SCALE[self.gain][2]
def read_bearing(self):
'''
Read a bearing from the sensor assuming the sensor is level
'''
self.read_raw_data()
bearing = math.atan2(self.read_scaled_y(), self.read_scaled_x())
if bearing < 0:
return bearing + (HMC5883L.TWO_PI)
else:
return bearing
def read_compensated_bearing(self, pitch, roll):
'''
Calculate a bearing taking into account the current pitch and roll of the device, supplied as parameters
'''
self.read_raw_data()
cos_pitch = (math.cos(pitch))
sin_pitch = (math.sin(pitch))
cos_roll = (math.cos(roll))
sin_roll = (math.sin(roll))
Xh = (self.scaled_x * cos_roll) + (self.scaled_z * sin_roll)
Yh = (self.scaled_x * sin_pitch * sin_roll) + (self.scaled_y * cos_pitch) - (self.scaled_z * sin_pitch * cos_roll)
bearing = math.atan2(Yh, Xh)
if bearing < 0:
return bearing + (HMC5883L.TWO_PI)
else:
return bearing
def set_offsets(self, x_offset, y_offset, z_offset):
self.x_offset = x_offset
self.y_offset = y_offset
self.z_offset = z_offset
def read_raw_x(self):
return self.raw_x
def read_raw_y(self):
return self.raw_y
def read_raw_z(self):
return self.raw_z
def read_scaled_x(self):
return self.scaled_x
def read_scaled_y(self):
return self.scaled_y
def read_scaled_z(self):
return self.scaled_z
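# Illustrative usage sketch (added; not part of the original driver). Assumes the
# sensor is wired to I2C bus 1 at the usual 0x1e address and the board is level,
# so pitch and roll are passed as zero:
#
#   import math, time, smbus
#   bus = smbus.SMBus(1)
#   compass = HMC5883L(bus, 0x1e, "compass")
#   while True:
#       bearing = compass.read_compensated_bearing(0.0, 0.0)
#       print "heading: %.1f deg" % math.degrees(bearing)
#       time.sleep(1)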
| apache-2.0 |
IllusionRom-deprecated/android_platform_external_chromium_org_tools_grit | grit/gather/regexp.py | 62 | 3380 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''A baseclass for simple gatherers based on regular expressions.
'''
import re
from grit.gather import skeleton_gatherer
class RegexpGatherer(skeleton_gatherer.SkeletonGatherer):
'''Common functionality of gatherers based on parsing using a single
regular expression.
'''
DescriptionMapping_ = {
'CAPTION' : 'This is a caption for a dialog',
'CHECKBOX' : 'This is a label for a checkbox',
'CONTROL': 'This is the text on a control',
'CTEXT': 'This is a label for a control',
'DEFPUSHBUTTON': 'This is a button definition',
'GROUPBOX': 'This is a label for a grouping',
'ICON': 'This is a label for an icon',
'LTEXT': 'This is the text for a label',
'PUSHBUTTON': 'This is the text for a button',
}
# Contextualization elements. Used for adding additional information
# to the message bundle description string from RC files.
def AddDescriptionElement(self, string):
if self.DescriptionMapping_.has_key(string):
description = self.DescriptionMapping_[string]
else:
description = string
if self.single_message_:
self.single_message_.SetDescription(description)
else:
if (self.translatable_chunk_):
message = self.skeleton_[len(self.skeleton_) - 1].GetMessage()
message.SetDescription(description)
def _RegExpParse(self, regexp, text_to_parse):
'''An implementation of Parse() that can be used for resource sections that
can be parsed using a single multi-line regular expression.
All translateables must be in named groups that have names starting with
'text'. All textual IDs must be in named groups that have names starting
with 'id'. All type definitions that can be included in the description
field for contextualization purposes should have a name that starts with
'type'.
Args:
regexp: re.compile('...', re.MULTILINE)
text_to_parse:
'''
chunk_start = 0
for match in regexp.finditer(text_to_parse):
groups = match.groupdict()
keys = groups.keys()
keys.sort()
self.translatable_chunk_ = False
for group in keys:
if group.startswith('id') and groups[group]:
self._AddTextualId(groups[group])
elif group.startswith('text') and groups[group]:
self._AddNontranslateableChunk(
text_to_parse[chunk_start : match.start(group)])
chunk_start = match.end(group) # Next chunk will start after the match
self._AddTranslateableChunk(groups[group])
elif group.startswith('type') and groups[group]:
# Add the description to the skeleton_ list. This works because
# we are using a sorted set of keys, and because we assume that the
# group name used for descriptions (type) will come after the "text"
# group in alphabetical order. We also assume that there cannot be
# more than one description per regular expression match.
self.AddDescriptionElement(groups[group])
self._AddNontranslateableChunk(text_to_parse[chunk_start:])
if self.single_message_:
self.skeleton_.append(self.uberclique.MakeClique(self.single_message_))
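# Illustrative sketch (added; not a grit gatherer). A regular expression of the
# shape _RegExpParse() expects, using the naming convention described above:
# groups starting with 'id' carry textual IDs, groups starting with 'text' carry
# translateables, and 'type' (alphabetically after 'text') carries the keyword
# used for the description.
#
#   _EXAMPLE_REGEXP = re.compile(
#       r'^\s*(?P<type1>LTEXT)\s+"(?P<text1>[^"]*)"\s*,\s*(?P<id1>[A-Z0-9_]+)',
#       re.MULTILINE)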
| bsd-2-clause |
eehello/shadowsocks | utils/autoban.py | 1033 | 2156 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import argparse
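# Illustrative invocation (added sketch; the log path is an assumption, see the
# README for the supported usage):
#
#   tail -F /var/log/shadowsocks.log | python autoban.py --count 5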
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='See README')
parser.add_argument('-c', '--count', default=3, type=int,
help='how many failures from a single IP before it is '
'considered an attack')
config = parser.parse_args()
ips = {}
banned = set()
for line in sys.stdin:
if 'can not parse header when' in line:
ip = line.split()[-1].split(':')[0]
if ip not in ips:
ips[ip] = 1
print(ip)
sys.stdout.flush()
else:
ips[ip] += 1
if ip not in banned and ips[ip] >= config.count:
banned.add(ip)
cmd = 'iptables -A INPUT -s %s -j DROP' % ip
print(cmd, file=sys.stderr)
sys.stderr.flush()
os.system(cmd)
| apache-2.0 |
KibiCoin/kibicoin | qa/rpc-tests/forknotify.py | 161 | 2179 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test -alertnotify
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
class ForkNotifyTest(BitcoinTestFramework):
alert_filename = None # Set by setup_network
def setup_network(self):
self.nodes = []
self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
with open(self.alert_filename, 'w') as f:
pass # Just open then close to create zero-length file
self.nodes.append(start_node(0, self.options.tmpdir,
["-blockversion=2", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]))
# Node1 mines block.version=211 blocks
self.nodes.append(start_node(1, self.options.tmpdir,
["-blockversion=211"]))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
# Mine 51 up-version blocks
self.nodes[1].setgenerate(True, 51)
self.sync_all()
# -alertnotify should trigger on the 51st,
# but mine and sync another to give
# -alertnotify time to write
self.nodes[1].setgenerate(True, 1)
self.sync_all()
with open(self.alert_filename, 'r') as f:
alert_text = f.read()
if len(alert_text) == 0:
raise AssertionError("-alertnotify did not warn of up-version blocks")
# Mine more up-version blocks, should not get more alerts:
self.nodes[1].setgenerate(True, 1)
self.sync_all()
self.nodes[1].setgenerate(True, 1)
self.sync_all()
with open(self.alert_filename, 'r') as f:
alert_text2 = f.read()
if alert_text != alert_text2:
raise AssertionError("-alertnotify excessive warning of up-version blocks")
if __name__ == '__main__':
ForkNotifyTest().main()
| mit |
YellowGlue/musicbox | netease_musicbox/menu.py | 1 | 14582 | #!/usr/bin/env python
#encoding: UTF-8
'''
NetEase Cloud Music (网易云音乐) Menu
'''
import curses
import locale
import sys
import os
import json
import time
import webbrowser
from .api import NetEase
from .player import Player
from .ui import Ui
home = os.path.expanduser("~")
if os.path.isdir(home + '/netease-musicbox') is False:
os.mkdir(home+'/netease-musicbox')
locale.setlocale(locale.LC_ALL, "")
code = locale.getpreferredencoding()
# carousel x in [left, right]
carousel = lambda left, right, x: left if (x>right) else (right if x<left else x)
shortcut = [
['j', 'Down ', '下移'],
['k', 'Up ', '上移'],
['h', 'Back ', '后退'],
['l', 'Forward ', '前进'],
['u', 'Prev page ', '上一页'],
['d', 'Next page ', '下一页'],
['f', 'Search ', '快速搜索'],
['[', 'Prev song ', '上一曲'],
[']', 'Next song ', '下一曲'],
[' ', 'Play/Pause ', '播放/暂停'],
['m', 'Menu ', '主菜单'],
['p', 'Present ', '当前播放列表'],
['a', 'Add ', '添加曲目到打碟'],
['A', 'Add page ', '添加本页到打碟'],
['z', 'DJ list ', '打碟列表'],
['s', 'Star ', '添加到收藏'],
['c', 'Collection ', '收藏列表'],
['r', 'Remove ', '删除当前条目'],
['R', 'Remove page', '删除本页所有条目'],
['q', 'Quit ', '退出']
]
CONTROL_B = 2
CONTROL_F = 6
class Menu:
def __init__(self):
reload(sys)
sys.setdefaultencoding('UTF-8')
self.datatype = 'main'
self.title = '网易云音乐'
self.datalist = ['排行榜', '艺术家', '新碟上架', '精选歌单', '我的歌单', 'DJ节目', '打碟', '收藏', '搜索', '帮助']
self.offset = 0
self.index = 0
self.presentsongs = []
self.player = Player()
self.ui = Ui()
self.netease = NetEase()
self.screen = curses.initscr()
self.screen.keypad(1)
self.step = 10
self.stack = []
self.djstack = []
self.userid = None
self.username = None
try:
sfile = file(home + "/netease-musicbox/flavor.json",'r')
data = json.loads(sfile.read())
self.collection = data['collection']
self.account = data['account']
sfile.close()
except:
self.collection = []
self.account = {}
def start(self):
self.ui.build_menu(self.datatype, self.title, self.datalist, self.offset, self.index, self.step)
self.stack.append([self.datatype, self.title, self.datalist, self.offset, self.index])
while True:
datatype = self.datatype
title = self.title
datalist = self.datalist
offset = self.offset
idx = index = self.index
step = self.step
stack = self.stack
djstack = self.djstack
key = self.screen.getch()
self.ui.screen.refresh()
# quit
if key == ord('q'):
break
# move up
elif key == ord('k'):
self.index = carousel(offset, min( len(datalist), offset + step) - 1, idx-1 )
# move down
elif key == ord('j'):
self.index = carousel(offset, min( len(datalist), offset + step) - 1, idx+1 )
# page up
elif key in (ord('u'), CONTROL_B):
if offset == 0:
continue
self.offset -= step
self.index = self.offset
# page down
elif key in (ord('d'), CONTROL_F):
if offset + step >= len( datalist ):
continue
self.offset += step
self.index = self.offset
# forward
elif key == ord('l') or key == 10:
if self.datatype == 'songs' or self.datatype == 'djchannels' or self.datatype == 'help':
continue
self.ui.build_loading()
self.dispatch_enter(idx)
self.index = 0
self.offset = 0
# go back
elif key == ord('h'):
# if not main menu
if len(self.stack) == 1:
continue
up = stack.pop()
self.datatype = up[0]
self.title = up[1]
self.datalist = up[2]
self.offset = up[3]
self.index = up[4]
# search
elif key == ord('f'):
self.search()
# play next track
elif key == ord(']'):
if len(self.presentsongs) == 0:
continue
self.player.next()
time.sleep(0.1)
# play previous track
elif key == ord('['):
if len(self.presentsongs) == 0:
continue
self.player.prev()
time.sleep(0.1)
# play / pause
elif key == ord(' '):
if datatype == 'songs' and len(datalist) != 0:
self.presentsongs = ['songs', title, datalist, offset, index]
elif datatype == 'djchannels' and len(datalist) != 0:
self.presentsongs = ['djchannels', title, datalist, offset, index]
self.player.play(datatype, datalist, idx)
time.sleep(0.1)
# load the current playlist
elif key == ord('p'):
if len(self.presentsongs) == 0:
continue
self.stack.append( [datatype, title, datalist, offset, index] )
self.datatype = self.presentsongs[0]
self.title = self.presentsongs[1]
self.datalist = self.presentsongs[2]
self.offset = self.presentsongs[3]
self.index = self.presentsongs[4]
# add track to the DJ list
elif key == ord('a'):
if datatype == 'songs' and len(datalist) != 0:
self.djstack.append( datalist[idx] )
elif datatype == 'artists':
pass
# add the current page to the DJ list
elif key == ord('A'):
if datatype == 'songs' and len(datalist) != 0:
self.djstack.extend( datalist[offset:offset+step] )
# load the DJ list
elif key == ord('z'):
self.stack.append( [datatype, title, datalist, offset, index] )
self.datatype = 'songs'
self.title = '网易云音乐 > 打碟'
self.datalist = self.djstack
self.offset = 0
self.index = 0
# add to favourite songs
elif key == ord('s'):
if (datatype == 'songs' or datatype == 'djchannels') and len(datalist) != 0:
self.collection.append( datalist[idx] )
# load favourite songs
elif key == ord('c'):
self.stack.append( [datatype, title, datalist, offset, index] )
self.datatype = 'songs'
self.title = '网易云音乐 > 收藏'
self.datalist = self.collection
self.offset = 0
self.index = 0
# remove the item from the current list
elif key == ord('r'):
if datatype != 'main' and datatype != 'help' and len(datalist) != 0:
self.datalist.pop(idx)
self.index = carousel(offset, min( len(datalist), offset + step) - 1, idx )
# remove everything on the current page
elif key == ord('R'):
if datatype != 'main' and datatype != 'help' and len(datalist) != 0:
for i in range(0, step):
try:
self.datalist.pop(offset)
except:
pass
self.index = self.offset = max(0, offset-step)
# return to the main menu
elif key == ord('m'):
if datatype != 'main':
self.stack.append( [datatype, title, datalist, offset, index] )
self.datatype = self.stack[0][0]
self.title = self.stack[0][1]
self.datalist = self.stack[0][2]
self.offset = 0
self.index = 0
# go to github
elif key == ord('g'):
if datatype == 'help':
webbrowser.open_new_tab('https://github.com/vellow/NetEase-MusicBox')
self.ui.build_menu(self.datatype, self.title, self.datalist, self.offset, self.index, self.step)
self.player.stop()
sfile = file(home + "/netease-musicbox/flavor.json", 'w')
data = {
'account': self.account,
'collection': self.collection
}
sfile.write(json.dumps(data))
sfile.close()
curses.endwin()
def dispatch_enter(self, idx):
# The end of stack
if len(self.datalist) == 0:
return
netease = self.netease
datatype = self.datatype
title = self.title
datalist = self.datalist
offset = self.offset
index = self.index
self.stack.append( [datatype, title, datalist, offset, index])
if datatype == 'main':
self.choice_channel(idx)
# top songs by this artist
elif datatype == 'artists':
artist_id = datalist[idx]['artist_id']
songs = netease.artists(artist_id)
self.datatype = 'songs'
self.datalist = netease.dig_info(songs, 'songs')
self.title += ' > ' + datalist[idx]['artists_name']
# songs on this album
elif datatype == 'albums':
album_id = datalist[idx]['album_id']
songs = netease.album(album_id)
self.datatype = 'songs'
self.datalist = netease.dig_info(songs, 'songs')
self.title += ' > ' + datalist[idx]['albums_name']
# songs in this playlist
elif datatype == 'playlists':
playlist_id = datalist[idx]['playlist_id']
songs = netease.playlist_detail(playlist_id)
self.datatype = 'songs'
self.datalist = netease.dig_info(songs, 'songs')
self.title += ' > ' + datalist[idx]['playlists_name']
def choice_channel(self, idx):
# top chart
netease = self.netease
if idx == 0:
songs = netease.top_songlist()
self.datalist = netease.dig_info(songs, 'songs')
self.title += ' > 排行榜'
self.datatype = 'songs'
# artists
elif idx == 1:
artists = netease.top_artists()
self.datalist = netease.dig_info(artists, 'artists')
self.title += ' > 艺术家'
self.datatype = 'artists'
# new album releases
elif idx == 2:
albums = netease.new_albums()
self.datalist = netease.dig_info(albums, 'albums')
self.title += ' > 新碟上架'
self.datatype = 'albums'
# featured playlists
elif idx == 3:
playlists = netease.top_playlists()
self.datalist = netease.dig_info(playlists, 'playlists')
self.title += ' > 精选歌单'
self.datatype = 'playlists'
# my playlists / login
elif idx == 4:
if self.login() == -1:
return
# fetch the user's playlists after login
myplaylist = netease.user_playlist( self.userid )
self.datalist = netease.dig_info(myplaylist, 'playlists')
self.datatype = 'playlists'
self.title += ' > ' + self.username + ' 的歌单'
# DJ programmes
elif idx == 5:
self.datatype = 'djchannels'
self.title += ' > DJ节目'
self.datalist = netease.djchannels()
# DJ list
elif idx == 6:
self.datatype = 'songs'
self.title += ' > 打碟'
self.datalist = self.djstack
# favourites
elif idx == 7:
self.datatype = 'songs'
self.title += ' > 收藏'
self.datalist = self.collection
# search
elif idx == 8:
self.search()
# help
elif idx == 9:
self.datatype = 'help'
self.title += ' > 帮助'
self.datalist = shortcut
self.offset = 0
self.index = 0
def login(self):
# not logged in yet
if self.userid is None:
# log in with the locally stored account
if self.account:
user_info = self.netease.login( self.account[0], self.account[1], self.account[2])
# no account stored locally, or the stored account is invalid: prompt the user to enter one
if self.account == {} or user_info['code'] != 200:
data = self.ui.build_login()
# login cancelled
if data == -1:
return -1
user_info = data[0]
self.account = data[1]
try:
self.username = user_info['profile']['nickname']
except:
self.username = user_info['account']['userName']
self.userid = user_info['account']['id']
def search(self):
ui = self.ui
x = ui.build_search_menu()
# if do search, push current info into stack
if x in range(ord('1'), ord('5')):
self.stack.append( [self.datatype, self.title, self.datalist, self.offset, self.index ])
self.index = 0
self.offset = 0
if x == ord('1'):
self.datatype = 'songs'
self.datalist = ui.build_search('songs')
self.title = '歌曲搜索列表'
elif x == ord('2'):
self.datatype = 'artists'
self.datalist = ui.build_search('artists')
self.title = '艺术家搜索列表'
elif x == ord('3'):
self.datatype = 'albums'
self.datalist = ui.build_search('albums')
self.title = '专辑搜索列表'
elif x == ord('4'):
self.datatype = 'playlists'
self.datalist = ui.build_search('playlists')
self.title = '精选歌单搜索列表'
| mit |
domob1812/huntercore | test/functional/example_test.py | 8 | 9213 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""An example functional test
The module-level docstring should include a high-level description of
what the test is doing. It's the first thing people see when they open
the file and should give the reader information about *what* the test
is testing and *how* it's being tested
"""
# Imports should be in PEP8 ordering (std library first, then third party
# libraries then local imports).
from collections import defaultdict
# Avoid wildcard * imports if possible
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.mininode import (
CInv,
P2PInterface,
mininode_lock,
msg_block,
msg_getdata,
network_thread_join,
network_thread_start,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
wait_until,
)
# P2PInterface is a class containing callbacks to be executed when a P2P
# message is received from the node-under-test. Subclass P2PInterface and
# override the on_*() methods if you need custom behaviour.
class BaseNode(P2PInterface):
def __init__(self):
"""Initialize the P2PInterface
Used to initialize custom properties for the Node that aren't
included by default in the base class. Be aware that the P2PInterface
base class already stores a counter for each P2P message type and the
last received message of each type, which should be sufficient for the
needs of most tests.
Call super().__init__() first for standard initialization and then
initialize custom properties."""
super().__init__()
# Stores a dictionary of all blocks received
self.block_receive_map = defaultdict(int)
def on_block(self, message):
"""Override the standard on_block callback
Store the hash of a received block in the dictionary."""
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
def on_inv(self, message):
"""Override the standard on_inv callback"""
pass
def custom_function():
"""Do some custom behaviour
If this function is more generally useful for other tests, consider
moving it to a module in test_framework."""
# self.log.info("running custom_function") # Oops! Can't run self.log outside the BitcoinTestFramework
pass
class ExampleTest(BitcoinTestFramework):
# Each functional test is a subclass of the BitcoinTestFramework class.
# Override the set_test_params(), add_options(), setup_chain(), setup_network()
# and setup_nodes() methods to customize the test setup as required.
def set_test_params(self):
"""Override test parameters for your individual test.
This method must be overridden and num_nodes must be explicitly set."""
self.setup_clean_chain = True
self.num_nodes = 3
# Use self.extra_args to change command-line arguments for the nodes
self.extra_args = [[], ["-logips"], []]
# self.log.info("I've finished set_test_params") # Oops! Can't run self.log before run_test()
# Use add_options() to add specific command-line options for your test.
# In practice this is not used very much, since the tests are mostly written
# to be run in automated environments without command-line options.
# def add_options()
# pass
# Use setup_chain() to customize the node data directories. In practice
# this is not used very much since the default behaviour is almost always
# fine
# def setup_chain():
# pass
def setup_network(self):
"""Setup the test network topology
Often you won't need to override this, since the standard network topology
(linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests.
If you do override this method, remember to start the nodes, assign
them to self.nodes, connect them and then sync."""
self.setup_nodes()
# In this test, we're not connecting node2 to node0 or node1. Calls to
# sync_all() should not include node2, since we're not expecting it to
# sync.
connect_nodes(self.nodes[0], 1)
self.sync_all([self.nodes[0:2]])
# Use setup_nodes() to customize the node start behaviour (for example if
# you don't want to start all nodes at the start of the test).
# def setup_nodes():
# pass
def custom_method(self):
"""Do some custom behaviour for this test
Define it in a method here because you're going to use it repeatedly.
If you think it's useful in general, consider moving it to the base
BitcoinTestFramework class so other tests can use it."""
self.log.info("Running custom_method")
def run_test(self):
"""Main test logic"""
# Create P2P connections to two of the nodes
self.nodes[0].add_p2p_connection(BaseNode())
# Start up network handling in another thread. This needs to be called
# after the P2P connections have been created.
network_thread_start()
# wait_for_verack ensures that the P2P connection is fully up.
self.nodes[0].p2p.wait_for_verack()
# Generating a block on one of the nodes will get us out of IBD
blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
self.sync_all([self.nodes[0:2]])
# Notice above how we called an RPC by calling a method with the same
# name on the node object. Notice also how we used a keyword argument
# to specify a named RPC argument. Neither of those are defined on the
# node object. Instead there's some __getattr__() magic going on under
# the covers to dispatch unrecognised attribute calls to the RPC
# interface.
# Logs are nice. Do plenty of them. They can be used in place of comments for
# breaking the test into sub-sections.
self.log.info("Starting test!")
self.log.info("Calling a custom function")
custom_function()
self.log.info("Calling a custom method")
self.custom_method()
self.log.info("Create some blocks")
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
height = 1
for i in range(10):
# Use the mininode and blocktools functionality to manually build a block
# Calling the generate() rpc is easier, but this allows us to exactly
# control the blocks and transactions.
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
block_message = msg_block(block)
# Send message is used to send a P2P message to the node over our P2PInterface
self.nodes[0].p2p.send_message(block_message)
self.tip = block.sha256
blocks.append(self.tip)
self.block_time += 1
height += 1
self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
self.nodes[1].waitforblockheight(11)
self.log.info("Connect node2 and node1")
connect_nodes(self.nodes[1], 2)
self.log.info("Add P2P connection to node2")
# We can't add additional P2P connections once the network thread has started. Disconnect the connection
# to node0, wait for the network thread to terminate, then connect to node2. This is specific to
# the current implementation of the network thread and may be improved in future.
self.nodes[0].disconnect_p2ps()
network_thread_join()
self.nodes[2].add_p2p_connection(BaseNode())
network_thread_start()
self.nodes[2].p2p.wait_for_verack()
self.log.info("Wait for node2 reach current tip. Test that it has propagated all the blocks to us")
getdata_request = msg_getdata()
for block in blocks:
getdata_request.inv.append(CInv(2, block))
self.nodes[2].p2p.send_message(getdata_request)
# wait_until() will loop until a predicate condition is met. Use it to test properties of the
# P2PInterface objects.
wait_until(lambda: sorted(blocks) == sorted(list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5, lock=mininode_lock)
self.log.info("Check that each block was received only once")
# The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
# messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
# and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
with mininode_lock:
for block in self.nodes[2].p2p.block_receive_map.values():
assert_equal(block, 1)
if __name__ == '__main__':
ExampleTest().main()
| mit |
Distrotech/reportlab | docs/userguide/ch2a_fonts.py | 14 | 19923 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/docs/userguide/ch2a_fonts.py
from tools.docco.rl_doc_utils import *
from reportlab.lib.codecharts import SingleByteEncodingChart
from reportlab.platypus import Image
import reportlab
heading1("Fonts and encodings")
disc("""
This chapter covers fonts, encodings and Asian language capabilities.
If you are purely concerned with generating PDFs for Western
European languages, you can just read the "Unicode is the default" section
below and skip the rest on a first reading.
We expect this section to grow considerably over time. We
hope that Open Source will enable us to give better support for
more of the world's languages than other tools, and we welcome
feedback and help in this area.
""")
heading2("Unicode and UTF8 are the default input encodings")
disc("""
Starting with reportlab Version 2.0 (May 2006), all text input you
provide to our APIs should be in UTF8 or as Python Unicode objects.
This applies to arguments to canvas.drawString and related APIs,
table cell content, drawing object parameters, and paragraph source
text.
""")
disc("""
We considered making the input encoding configurable or even locale-dependent,
but decided that "explicit is better than implicit".""")
disc("""
This simplifies many things we used to do previously regarding greek
letters, symbols and so on. To display any character, find out its
unicode code point, and make sure the font you are using is able
to display it.""")
disc("""
If you are adapting a ReportLab 1.x application, or reading data from
another source which contains single-byte data (e.g. latin-1 or WinAnsi),
you need to do a conversion into Unicode. The Python codecs package now
includes converters for all the common encodings, including Asian ones.
""")
disc("""
If your data is not encoded as UTF8, you will get a UnicodeDecodeError as
soon as you feed in a non-ASCII character. For example, this snippet below is
attempting to read in and print a series of names, including one with a French
accent: ^Marc-Andr\u00e9 Lemburg^. The standard error is quite helpful and tells you
what character it doesn't like:
""")
eg("""
>>> from reportlab.pdfgen.canvas import Canvas
>>> c = Canvas('temp.pdf')
>>> y = 700
>>> for line in file('latin_python_gurus.txt','r'):
... c.drawString(100, y, line.strip())
...
Traceback (most recent call last):
...
UnicodeDecodeError: 'utf8' codec can't decode bytes in position 9-11: invalid data
-->\u00e9 L<--emburg
>>>
""")
disc("""
The simplest fix is just to convert your data to unicode, saying which encoding
it comes from, like this:""")
eg("""
>>> for line in file('latin_input.txt','r'):
... uniLine = unicode(line, 'latin-1')
... c.drawString(100, y, uniLine.strip())
>>>
>>> c.save()
""")
heading2("Automatic output font substitution")
disc("""
There are still a number of places in the code, including the rl_config
defaultEncoding parameter, and arguments passed to various Font constructors,
which refer to encodings. These were useful in the past when people needed to
use glyphs in the Symbol and ZapfDingbats fonts which are supported by PDF
viewing devices.
By default the standard fonts (Helvetica, Courier, Times Roman)
will offer the glyphs available in Latin-1. However, if our engine detects
a character not in the font, it will attempt to switch to Symbol or ZapfDingbats to
display these. For example, if you include the Unicode character for a pair of
right-facing scissors, \\u2702, in a call to ^drawString^, you should see them (there is
an example in ^test_pdfgen_general.py/pdf^). It is not
necessary to switch fonts in your code.
""")
heading2("Using non-standard Type 1 fonts")
disc("""
As discussed in the previous chapter, every copy of Acrobat Reader
comes with 14 standard fonts built in. Therefore, the ReportLab
PDF Library only needs to refer to these by name. If you want
to use other fonts, they must be available to your code and
will be embedded in the PDF document.""")
disc("""
You can use the mechanism described below to include arbitrary
fonts in your documents. We have an open source
font named <i>DarkGardenMK</i> which we may
use for testing and/or documenting purposes (and which you may
use as well). It comes bundled with the ReportLab distribution in the
directory $reportlab/fonts$.
""")
disc("""
Right now font-embedding relies on font description files in the Adobe
AFM ('Adobe Font Metrics') and PFB ('Printer Font Binary') format. The
former is an ASCII file and contains information about the characters
('glyphs') in the font such as height, width, bounding box info and
other 'metrics', while the latter is a binary file that describes the
shapes of the font. The $reportlab/fonts$ directory contains the files
$'DarkGardenMK.afm'$ and $'DarkGardenMK.pfb'$ that are used as an example
font.
""")
disc("""
In the following example locate the folder containing the test font and
register it for future use with the $pdfmetrics$ module,
after which we can use it like any other standard font.
""")
eg("""
import os
import reportlab
folder = os.path.dirname(reportlab.__file__) + os.sep + 'fonts'
afmFile = os.path.join(folder, 'DarkGardenMK.afm')
pfbFile = os.path.join(folder, 'DarkGardenMK.pfb')
from reportlab.pdfbase import pdfmetrics
justFace = pdfmetrics.EmbeddedType1Face(afmFile, pfbFile)
faceName = 'DarkGardenMK' # pulled from AFM file
pdfmetrics.registerTypeFace(justFace)
justFont = pdfmetrics.Font('DarkGardenMK',
faceName,
'WinAnsiEncoding')
pdfmetrics.registerFont(justFont)
canvas.setFont('DarkGardenMK', 32)
canvas.drawString(10, 150, 'This should be in')
canvas.drawString(10, 100, 'DarkGardenMK')
""")
disc("""
Note that the argument "WinAnsiEncoding" has nothing to do with the input;
it's to say which set of characters within the font file will be active
and available.
""")
illust(examples.customfont1, "Using a very non-standard font")
disc("""
The font's facename comes from the AFM file's $FontName$ field.
In the example above we knew the name in advance, but quite
often the names of font description files are pretty cryptic
and then you might want to retrieve the name from an AFM file
automatically.
When lacking a more sophisticated method you can use some
code as simple as this:
""")
eg("""
class FontNameNotFoundError(Exception):
pass
def findFontName(path):
"Extract a font name from an AFM file."
f = open(path)
found = 0
while not found:
line = f.readline()[:-1]
if not found and line[:16] == 'StartCharMetrics':
raise FontNameNotFoundError, path
if line[:8] == 'FontName':
fontName = line[9:]
found = 1
return fontName
""")
disc("""
In the <i>DarkGardenMK</i> example we explicitly specified
the place of the font description files to be loaded.
In general, you'll prefer to store your fonts in some canonical
locations and make the embedding mechanism aware of them.
Using the same configuration mechanism we've already seen at the
beginning of this section we can indicate a default search path
for Type-1 fonts.
""")
disc("""
Unfortunately, there is no reliable standard yet for such
locations (not even on the same platform) and, hence, you might
have to edit the file $reportlab/rl_config.py$ to modify the
value of the $T1SearchPath$ identifier to contain additional
directories. Our own recommendation is to use the ^reportlab/fonts^
folder in development; and to have any needed fonts as packaged parts of
your application in any kind of controlled server deployment. This insulates
you from fonts being installed and uninstalled by other software or system
administrator.
""")
heading3("Warnings about missing glyphs")
disc("""If you specify an encoding, it is generally assumed that
the font designer has provided all the needed glyphs. However,
this is not always true. In the case of our example font,
the letters of the alphabet are present, but many symbols and
accents are missing. The default behaviour is for the font to
print a 'notdef' character - typically a blob, dot or space -
when passed a character it cannot draw. However, you can ask
the library to warn you instead; the code below (executed
before loading a font) will cause warnings to be generated
for any glyphs not in the font when you register it.""")
eg("""
import reportlab.rl_config
reportlab.rl_config.warnOnMissingFontGlyphs = 0
""")
heading2("Standard Single-Byte Font Encodings")
disc("""
This section shows you the glyphs available in the common encodings.
""")
disc("""The code chart below shows the characters in the $WinAnsiEncoding$.
This is the standard encoding on Windows and many Unix systems in America
and Western Europe. It is also known as Code Page 1252, and is practically
identical to ISO-Latin-1 (it contains one or two extra characters). This
is the default encoding used by the Reportlab PDF Library. It was generated from
a standard routine in $reportlab/lib$, $codecharts.py$,
which can be used to display the contents of fonts. The index numbers
along the edges are in hex.""")
cht1 = SingleByteEncodingChart(encodingName='WinAnsiEncoding',charsPerRow=32, boxSize=12)
illust(lambda canv: cht1.drawOn(canv, 0, 0), "WinAnsi Encoding", cht1.width, cht1.height)
disc("""The code chart below shows the characters in the $MacRomanEncoding$.
As it sounds, this is the standard encoding on Macintosh computers in
America and Western Europe. As usual with non-unicode encodings, the first
128 code points (top 4 rows in this case) are the ASCII standard and agree
with the WinAnsi code chart above; but the bottom 4 rows differ.""")
cht2 = SingleByteEncodingChart(encodingName='MacRomanEncoding',charsPerRow=32, boxSize=12)
illust(lambda canv: cht2.drawOn(canv, 0, 0), "MacRoman Encoding", cht2.width, cht2.height)
disc("""These two encodings are available for the standard fonts (Helvetica,
Times-Roman and Courier and their variants) and will be available for most
commercial fonts including those from Adobe. However, some fonts contain non-
text glyphs and the concept does not really apply. For example, ZapfDingbats
and Symbol can each be treated as having their own encoding.""")
cht3 = SingleByteEncodingChart(faceName='ZapfDingbats',encodingName='ZapfDingbatsEncoding',charsPerRow=32, boxSize=12)
illust(lambda canv: cht3.drawOn(canv, 0, 0), "ZapfDingbats and its one and only encoding", cht3.width, cht3.height)
cht4 = SingleByteEncodingChart(faceName='Symbol',encodingName='SymbolEncoding',charsPerRow=32, boxSize=12)
illust(lambda canv: cht4.drawOn(canv, 0, 0), "Symbol and its one and only encoding", cht4.width, cht4.height)
CPage(5)
heading2("TrueType Font Support")
disc("""
Marius Gedminas ([email protected]) with the help of Viktorija Zaksiene ([email protected])
have contributed support for embedded TrueType fonts. TrueType fonts work in Unicode/UTF8
and are not limited to 256 characters.""")
CPage(3)
disc("""We use <b>$reportlab.pdfbase.ttfonts.TTFont$</b> to create a true type
font object and register using <b>$reportlab.pdfbase.pdfmetrics.registerFont$</b>.
In pdfgen drawing directly to the canvas we can do""")
eg("""
# we know some glyphs are missing, suppress warnings
import reportlab.rl_config
reportlab.rl_config.warnOnMissingFontGlyphs = 0
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
pdfmetrics.registerFont(TTFont('Vera', 'Vera.ttf'))
pdfmetrics.registerFont(TTFont('VeraBd', 'VeraBd.ttf'))
pdfmetrics.registerFont(TTFont('VeraIt', 'VeraIt.ttf'))
pdfmetrics.registerFont(TTFont('VeraBI', 'VeraBI.ttf'))
canvas.setFont('Vera', 32)
canvas.drawString(10, 150, "Some text encoded in UTF-8")
canvas.drawString(10, 100, "In the Vera TT Font!")
""")
illust(examples.ttffont1, "Using a the Vera TrueType Font")
disc("""In the above example the true type font object is created using""")
eg("""
TTFont(name,filename)
""")
disc("""so that the ReportLab internal name is given by the first argument and the second argument
is a string(or file like object) denoting the font's TTF file. In Marius' original patch the filename
was supposed to be exactly correct, but we have modified things so that if the filename is relative
then a search for the corresponding file is done in the current directory and then in directories
specified by $reportlab.rl_config.TTFSearchpath$!""")
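# Added sketch (assumption: the runtime attribute is spelled TTFSearchPath and
# may be appended to before fonts are registered):
eg("""
import reportlab.rl_config
reportlab.rl_config.TTFSearchPath.append('/path/to/your/truetype/fonts')
pdfmetrics.registerFont(TTFont('Vera', 'Vera.ttf'))  # now found via the search path
""")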
from reportlab.lib.styles import ParagraphStyle
from reportlab.pdfbase.pdfmetrics import registerFontFamily
registerFontFamily('Vera',normal='Vera',bold='VeraBd',italic='VeraIt',boldItalic='VeraBI')
disc("""Before using the TT Fonts in Platypus we should add a mapping from the family name to the
individual font names that describe the behaviour under the $<b>$ and $<i>$ attributes.""")
eg("""
from reportlab.pdfbase.pdfmetrics import registerFontFamily
registerFontFamily('Vera',normal='Vera',bold='VeraBd',italic='VeraIt',boldItalic='VeraBI')
""")
disc("""If we only have a Vera regular font, no bold or italic then we must map all to the
same internal fontname. ^<b>^ and ^<i>^ tags may now be used safely, but
have no effect.
After registering and mapping
the Vera font as above we can use paragraph text like""")
parabox2("""<font name="Times-Roman" size="14">This is in Times-Roman</font>
<font name="Vera" color="magenta" size="14">and this is in magenta <b>Vera!</b></font>""","Using TTF fonts in paragraphs")
heading2("Asian Font Support")
disc("""The Reportlab PDF Library aims to expose full support for Asian fonts.
PDF is the first really portable solution for Asian text handling. There are
two main approaches for this: Adobe's Asian Language Packs, or TrueType fonts.
""")
heading3("Asian Language Packs")
disc("""
This approach offers the best performance since nothing needs embedding in the PDF file;
as with the standard fonts, everything is on the reader.""")
disc("""
Adobe makes available add-ons for each main language. In Adobe Reader 6.0 and 7.0, you
will be prompted to download and install these as soon as you try to open a document
using them. In earlier versions, you would see an error message on opening an Asian document
and had to know what to do.
""")
disc("""
Japanese, Traditional Chinese (Taiwan/Hong Kong), Simplified Chinese (mainland China)
and Korean are all supported and our software knows about the following fonts:
""")
bullet("""
$chs$ = Chinese Simplified (mainland): '$STSong-Light$'
""")
bullet("""
$cht$ = Chinese Traditional (Taiwan): '$MSung-Light$', '$MHei-Medium$'
""")
bullet("""
$kor$ = Korean: '$HYSMyeongJoStd-Medium$','$HYGothic-Medium$'
""")
bullet("""
$jpn$ = Japanese: '$HeiseiMin-W3$', '$HeiseiKakuGo-W5$'
""")
disc("""Since many users will not have the font packs installed, we have included
a rather grainy ^bitmap^ of some Japanese characters. We will discuss below what is needed to
generate them.""")
# include a bitmap of some Asian text
I=os.path.join(os.path.dirname(reportlab.__file__),'docs','images','jpnchars.jpg')
try:
getStory().append(Image(I))
except:
disc("""An image should have appeared here.""")
disc("""Prior to Version 2.0, you had to specify one of many native encodings
when registering a CID Font. In version 2.0 you should use the new UnicodeCIDFont
class.""")
eg("""
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.cidfonts import UnicodeCIDFont
pdfmetrics.registerFont(UnicodeCIDFont('HeiseiMin-W3'))
canvas.setFont('HeiseiMin-W3', 16)
# the two unicode characters below are "Tokyo"
msg = u'\u6771\u4EAC : Unicode font, unicode input'
canvas.drawString(100, 675, msg)
""")
#had to double-escape the slashes above to get escapes into the PDF
disc("""The old coding style with explicit encodings should still work, but is now
only relevant if you need to construct vertical text. We aim to add more readable options
for horizontal and vertical text to the UnicodeCIDFont constructor in future.
The following four test scripts generate samples in the corresponding languages:""")
eg("""tests/test_multibyte_jpn.py
tests/test_multibyte_kor.py
tests/test_multibyte_chs.py
tests/test_multibyte_cht.py""")
## put back in when we have vertical text...
##disc("""The illustration below shows part of the first page
##of the Japanese output sample. It shows both horizontal and vertical
##writing, and illustrates the ability to mix variable-width Latin
##characters in Asian sentences. The choice of horizontal and vertical
##writing is determined by the encoding, which ends in 'H' or 'V'.
##Whether an encoding uses fixed-width or variable-width versions
##of Latin characters also depends on the encoding used; see the definitions
##below.""")
##
##Illustration(image("../images/jpn.gif", width=531*0.50,
##height=435*0.50), 'Output from test_multibyte_jpn.py')
##
##caption("""
##Output from test_multibyte_jpn.py
##""")
disc("""In previous versions of the ReportLab PDF Library, we had to make
use of Adobe's CMap files (located near Acrobat Reader if the Asian Language
packs were installed). Now that we only have one encoding to deal with, the
character width data is embedded in the package, and CMap files are not needed
for generation. The CMap search path in ^rl_config.py^ is now deprecated
and has no effect if you restrict yourself to UnicodeCIDFont.
""")
heading3("TrueType fonts with Asian characters")
disc("""
This is the easy way to do it. No special handling at all is needed to
work with Asian TrueType fonts. Windows users who have installed, for example,
Japanese as an option in Control Panel, will have a font "msmincho.ttf" which
can be used. However, be aware that it takes time to parse the fonts, and that
quite large subsets may need to be embedded in your PDFs. We can also now parse
files ending in .ttc, which are a slight variation of .ttf.
""")
heading3("To Do")
disc("""We expect to be developing this area of the package for some time.accept2dyear
Here is an outline of the main priorities. We welcome help!""")
bullet("""
Ensure that we have accurate character metrics for all encodings in horizontal and
vertical writing.""")
bullet("""
Add options to ^UnicodeCIDFont^ to allow vertical and proportional variants where the font permits it.""")
bullet("""
Improve the word wrapping code in paragraphs and allow vertical writing.""")
CPage(5)
heading2("RenderPM tests")
disc("""This may also be the best place to mention the test function of $reportlab/graphics/renderPM.py$,
which can be considered the canonical place for tests which exercise renderPM (the "PixMap Renderer",
as opposed to renderPDF, renderPS or renderSVG).""")
disc("""If you run this from the command line, you should see lots of output like the following.""")
eg("""C:\\code\\reportlab\\graphics>renderPM.py
wrote pmout\\renderPM0.gif
wrote pmout\\renderPM0.tif
wrote pmout\\renderPM0.png
wrote pmout\\renderPM0.jpg
wrote pmout\\renderPM0.pct
...
wrote pmout\\renderPM12.gif
wrote pmout\\renderPM12.tif
wrote pmout\\renderPM12.png
wrote pmout\\renderPM12.jpg
wrote pmout\\renderPM12.pct
wrote pmout\\index.html""")
disc("""This runs a number of tests progressing from a "Hello World" test, through various tests of
Lines; text strings in a number of sizes, fonts, colours and alignments; the basic shapes; translated
and rotated groups; scaled coordinates; rotated strings; nested groups; anchoring and non-standard fonts.""")
disc("""It creates a subdirectory called $pmout$, writes the image files into it, and writes an
$index.html$ page which makes it easy to refer to all the results.""")
disc("""The font-related tests which you may wish to look at are test #11 ('Text strings in a non-standard font')
and test #12 ('Test Various Fonts').""")
##### FILL THEM IN
| bsd-3-clause |
EdLogan18/logan-repository | plugin.video.joao/genesisresolvers.py | 266 | 64370 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib,urllib2,urlparse,re,os,sys,xbmc,xbmcgui,xbmcaddon,xbmcvfs
try:
import CommonFunctions as common
except:
import commonfunctionsdummy as common
try:
import json
except:
import simplejson as json
class get(object):
def __init__(self, url):
self.result = self.worker(url)
def worker(self, url):
try:
pz = premiumize().resolve(url)
if not pz == None: return pz
rd = realdebrid().resolve(url)
if not rd == None: return rd
if url.startswith('rtmp'):
if len(re.compile('\s*timeout=(\d*)').findall(url)) == 0: url += ' timeout=10'
return url
u = urlparse.urlparse(url).netloc
u = u.replace('www.', '').replace('embed.', '')
u = u.lower()
import sys, inspect
r = inspect.getmembers(sys.modules[__name__], inspect.isclass)
r = [i for i in r if hasattr(i[1], 'info') and u in eval(i[0])().info()['netloc']][0][0]
r = eval(r)().resolve(url)
if r == None: return r
elif type(r) == list: return r
elif not r.startswith('http'): return r
try: h = dict(urlparse.parse_qsl(r.rsplit('|', 1)[1]))
except: h = dict('')
h.update({'Referer': url, 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20100101 Firefox/34.0'})
r = '%s|%s' % (r.split('|')[0], urllib.urlencode(h))
return r
except:
return url
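# Illustrative usage sketch (added; the URL below is hypothetical):
#
#   resolved = get('http://allmyvideos.net/abcd1234').result
#   # 'resolved' is a playable URL (possibly with '|'-separated request headers
#   # appended), or the original URL if no resolver matched or resolving failed.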
class getUrl(object):
def __init__(self, url, close=True, proxy=None, post=None, headers=None, mobile=False, referer=None, cookie=None, output='', timeout='10'):
handlers = []
if not proxy == None:
handlers += [urllib2.ProxyHandler({'http':'%s' % (proxy)}), urllib2.HTTPHandler]
opener = urllib2.build_opener(*handlers)
opener = urllib2.install_opener(opener)
if output == 'cookie' or not close == True:
import cookielib
cookies = cookielib.LWPCookieJar()
handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
opener = urllib2.build_opener(*handlers)
opener = urllib2.install_opener(opener)
try:
if sys.version_info < (2, 7, 9): raise Exception()
import ssl; ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
handlers += [urllib2.HTTPSHandler(context=ssl_context)]
opener = urllib2.build_opener(*handlers)
opener = urllib2.install_opener(opener)
except:
pass
try: headers.update(headers)
except: headers = {}
if 'User-Agent' in headers:
pass
elif not mobile == True:
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20100101 Firefox/34.0'
else:
headers['User-Agent'] = 'Apple-iPhone/701.341'
if 'referer' in headers:
pass
elif referer == None:
headers['referer'] = url
else:
headers['referer'] = referer
if not 'Accept-Language' in headers:
headers['Accept-Language'] = 'en-US'
if 'cookie' in headers:
pass
elif not cookie == None:
headers['cookie'] = cookie
request = urllib2.Request(url, data=post, headers=headers)
response = urllib2.urlopen(request, timeout=int(timeout))
if output == 'cookie':
result = []
for c in cookies: result.append('%s=%s' % (c.name, c.value))
result = "; ".join(result)
elif output == 'geturl':
result = response.geturl()
else:
result = response.read()
if close == True:
response.close()
self.result = result
class captcha:
def worker(self, data):
self.captcha = {}
self.solvemedia(data)
if not self.type == None: return self.captcha
self.recaptcha(data)
if not self.type == None: return self.captcha
self.capimage(data)
if not self.type == None: return self.captcha
self.numeric(data)
if not self.type == None: return self.captcha
def solvemedia(self, data):
try:
url = common.parseDOM(data, "iframe", ret="src")
url = [i for i in url if 'api.solvemedia.com' in i]
if len(url) > 0: self.type = 'solvemedia'
else: self.type = None ; return
result = getUrl(url[0], referer='').result
response = common.parseDOM(result, "iframe", ret="src")
response += common.parseDOM(result, "img", ret="src")
response = [i for i in response if '/papi/media' in i][0]
response = 'http://api.solvemedia.com' + response
response = self.keyboard(response)
post = {}
f = common.parseDOM(result, "form", attrs = { "action": "verify.noscript" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'adcopy_response': response})
getUrl('http://api.solvemedia.com/papi/verify.noscript', post=urllib.urlencode(post)).result
self.captcha.update({'adcopy_challenge': post['adcopy_challenge'], 'adcopy_response': 'manual_challenge'})
except:
pass
def recaptcha(self, data):
try:
url = []
if data.startswith('http://www.google.com'): url += [data]
url += common.parseDOM(data, "script", ret="src", attrs = { "type": "text/javascript" })
url = [i for i in url if 'http://www.google.com' in i]
if len(url) > 0: self.type = 'recaptcha'
else: self.type = None ; return
result = getUrl(url[0]).result
challenge = re.compile("challenge\s+:\s+'(.+?)'").findall(result)[0]
response = 'http://www.google.com/recaptcha/api/image?c=' + challenge
response = self.keyboard(response)
self.captcha.update({'recaptcha_challenge_field': challenge, 'recaptcha_challenge': challenge, 'recaptcha_response_field': response, 'recaptcha_response': response})
except:
pass
def capimage(self, data):
try:
url = common.parseDOM(data, "img", ret="src")
url = [i for i in url if 'captcha' in i]
if len(url) > 0: self.type = 'capimage'
else: self.type = None ; return
response = self.keyboard(url[0])
self.captcha.update({'code': response})
except:
pass
def numeric(self, data):
try:
url = re.compile("left:(\d+)px;padding-top:\d+px;'>&#(.+?);<").findall(data)
if len(url) > 0: self.type = 'numeric'
else: self.type = None ; return
result = sorted(url[0], key=lambda ltr: int(ltr[0]))
response = ''.join(str(int(num[1])-48) for num in result)
self.captcha.update({'code': response})
except:
pass
def keyboard(self, response):
try:
dataPath = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo("profile"))
i = os.path.join(dataPath.decode("utf-8"),'img')
f = xbmcvfs.File(i, 'w')
f.write(getUrl(response).result)
f.close()
f = xbmcgui.ControlImage(450,5,375,115, i)
d = xbmcgui.WindowDialog()
d.addControl(f)
xbmcvfs.delete(i)
d.show()
xbmc.sleep(3000)
t = 'Type the letters in the image'
c = common.getUserInput(t, '')
d.close()
return c
except:
return
class regex:
def worker(self, data):
try:
data = str(data).replace('\r','').replace('\n','').replace('\t','')
url = re.compile('(.+?)<regex>').findall(data)[0]
regex = re.compile('<regex>(.+?)</regex>').findall(data)
except:
return
for x in regex:
try:
name = re.compile('<name>(.+?)</name>').findall(x)[0]
expres = re.compile('<expres>(.+?)</expres>').findall(x)[0]
referer = re.compile('<referer>(.+?)</referer>').findall(x)[0]
referer = urllib.unquote_plus(referer)
referer = common.replaceHTMLCodes(referer)
referer = referer.encode('utf-8')
page = re.compile('<page>(.+?)</page>').findall(x)[0]
page = urllib.unquote_plus(page)
page = common.replaceHTMLCodes(page)
page = page.encode('utf-8')
result = getUrl(page, referer=referer).result
result = str(result).replace('\r','').replace('\n','').replace('\t','')
result = str(result).replace('\/','/')
r = re.compile(expres).findall(result)[0]
url = url.replace('$doregex[%s]' % name, r)
except:
pass
url = common.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
class unwise:
def worker(self, str_eval):
page_value=""
try:
ss="w,i,s,e=("+str_eval+')'
exec (ss)
page_value=self.__unwise(w,i,s,e)
except: return
return page_value
def __unwise(self, w, i, s, e):
lIll = 0;
ll1I = 0;
Il1l = 0;
ll1l = [];
l1lI = [];
while True:
if (lIll < 5):
l1lI.append(w[lIll])
elif (lIll < len(w)):
ll1l.append(w[lIll]);
lIll+=1;
if (ll1I < 5):
l1lI.append(i[ll1I])
elif (ll1I < len(i)):
ll1l.append(i[ll1I])
ll1I+=1;
if (Il1l < 5):
l1lI.append(s[Il1l])
elif (Il1l < len(s)):
ll1l.append(s[Il1l]);
Il1l+=1;
if (len(w) + len(i) + len(s) + len(e) == len(ll1l) + len(l1lI) + len(e)):
break;
lI1l = ''.join(ll1l)
I1lI = ''.join(l1lI)
ll1I = 0;
l1ll = [];
for lIll in range(0,len(ll1l),2):
ll11 = -1;
if ( ord(I1lI[ll1I]) % 2):
ll11 = 1;
l1ll.append(chr( int(lI1l[lIll: lIll+2], 36) - ll11));
ll1I+=1;
if (ll1I >= len(l1lI)):
ll1I = 0;
ret=''.join(l1ll)
if 'eval(function(w,i,s,e)' in ret:
ret=re.compile('eval\(function\(w,i,s,e\).*}\((.*?)\)').findall(ret)[0]
return self.worker(ret)
else:
return ret
class js:
def worker(self, script):
aSplit = script.split(";',")
p = str(aSplit[0])
aSplit = aSplit[1].split(",")
a = int(aSplit[0])
c = int(aSplit[1])
k = aSplit[2].split(".")[0].replace("'", '').split('|')
e = ''
d = ''
sUnpacked = str(self.__unpack(p, a, c, k, e, d))
sUnpacked = sUnpacked.replace('\\', '')
url = self.__parse(sUnpacked)
return url
def __unpack(self, p, a, c, k, e, d):
while (c > 1):
c = c -1
if (k[c]):
p = re.sub('\\b' + str(self.__itoa(c, a)) +'\\b', k[c], p)
return p
def __itoa(self, num, radix):
result = ""
while num > 0:
result = "0123456789abcdefghijklmnopqrstuvwxyz"[num % radix] + result
num /= radix
return result
def __parse(self, sUnpacked):
url = re.compile("'file' *, *'(.+?)'").findall(sUnpacked)
url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(sUnpacked)
url += re.compile("playlist=(.+?)&").findall(sUnpacked)
url += common.parseDOM(sUnpacked, "embed", ret="src")
url = [i for i in url if not i.endswith('.srt')]
url = 'http://' + url[-1].split('://', 1)[-1]
return url
class premiumize:
def __init__(self):
self.user = xbmcaddon.Addon().getSetting("premiumize_user")
self.password = xbmcaddon.Addon().getSetting("premiumize_password")
def info(self):
return {
'netloc': ['bitshare.com', 'filefactory.com', 'k2s.cc', 'oboom.com', 'rapidgator.net', 'uploaded.net'],
'host': ['Bitshare', 'Filefactory', 'K2S', 'Oboom', 'Rapidgator', 'Uploaded'],
'quality': 'High',
'captcha': False,
'a/c': True
}
def status(self):
if (self.user == '' or self.password == ''): return False
else: return True
def hosts(self):
try:
if self.status() == False: raise Exception()
url = 'http://api.premiumize.me/pm-api/v1.php?method=hosterlist¶ms[login]=%s¶ms[pass]=%s' % (self.user, self.password)
result = getUrl(url).result
pz = json.loads(result)['result']['hosterlist']
pz = [i.rsplit('.' ,1)[0].lower() for i in pz]
return pz
except:
return
def resolve(self, url):
try:
if self.status() == False: raise Exception()
url = 'http://api.premiumize.me/pm-api/v1.php?method=directdownloadlink¶ms[login]=%s¶ms[pass]=%s¶ms[link]=%s' % (self.user, self.password, urllib.quote_plus(url))
result = getUrl(url, close=False).result
url = json.loads(result)['result']['location']
return url
except:
return
class realdebrid:
def __init__(self):
self.user = xbmcaddon.Addon().getSetting("realdedrid_user")
self.password = xbmcaddon.Addon().getSetting("realdedrid_password")
def info(self):
return {
'netloc': ['bitshare.com', 'filefactory.com', 'k2s.cc', 'oboom.com', 'rapidgator.net', 'uploaded.net'],
'host': ['Bitshare', 'Filefactory', 'K2S', 'Oboom', 'Rapidgator', 'Uploaded'],
'quality': 'High',
'captcha': False,
'a/c': True
}
def status(self):
if (self.user == '' or self.password == ''): return False
else: return True
def hosts(self):
try:
if self.status() == False: raise Exception()
url = 'http://real-debrid.com/api/hosters.php'
result = getUrl(url).result
rd = json.loads('[%s]' % result)
rd = [i.rsplit('.' ,1)[0].lower() for i in rd]
return rd
except:
return
def resolve(self, url):
try:
if self.status() == False: raise Exception()
login_data = urllib.urlencode({'user' : self.user, 'pass' : self.password})
login_link = 'http://real-debrid.com/ajax/login.php?%s' % login_data
result = getUrl(login_link, close=False).result
result = json.loads(result)
error = result['error']
if not error == 0: raise Exception()
url = 'http://real-debrid.com/ajax/unrestrict.php?link=%s' % url
url = url.replace('filefactory.com/stream/', 'filefactory.com/file/')
result = getUrl(url).result
result = json.loads(result)
url = result['generated_links'][0][-1]
return url
except:
return
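# Most hoster classes below follow the same XFileSharing-style pattern:
# normalise the link to an embed/download page, collect the hidden <input>
# fields of the page form, POST them back (adding 'method_free' and a solved
# captcha where required), then pull the stream URL either straight from the
# response or by unpacking an eval()'d JavaScript blob via js().worker().
# Assumed usage sketch: _180upload().resolve('http://180upload.com/xyz123').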
class _180upload:
def info(self):
return {
'netloc': ['180upload.com'],
'host': ['180upload'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://180upload.com/embed-%s.html' % url
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "form", attrs = { "id": "captchaForm" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class allmyvideos:
def info(self):
return {
'netloc': ['allmyvideos.net'],
'host': ['Allmyvideos'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://allmyvideos.net/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('"file" *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class bestreams:
def info(self):
return {
'netloc': ['bestreams.net'],
'host': ['Bestreams'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://bestreams.net/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class clicknupload:
def info(self):
return {
'netloc': ['clicknupload.com'],
'host': ['Clicknupload'],
'quality': 'High',
'captcha': True,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Free Download'})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Free Download'})
post.update(captcha().worker(result))
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = common.parseDOM(result, "a", ret="onClick")
url = [i for i in url if i.startswith('window.open')][0]
url = re.compile('[\'|\"](.+?)[\'|\"]').findall(url)[0]
return url
except:
return
class cloudzilla:
def info(self):
return {
'netloc': ['cloudzilla.to'],
'host': ['Cloudzilla'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/share/file/', '/embed/')
result = getUrl(url).result
url = re.compile('var\s+vurl *= *"(http.+?)"').findall(result)[0]
return url
except:
return
class coolcdn:
def info(self):
return {
'netloc': ['movshare.net', 'novamov.com', 'nowvideo.sx', 'videoweed.es'],
'host': ['Movshare', 'Novamov', 'Nowvideo', 'Videoweed'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
netloc = urlparse.urlparse(url).netloc
netloc = netloc.replace('www.', '').replace('embed.', '')
netloc = netloc.lower()
id = re.compile('//.+?/.+?/([\w]+)').findall(url)
id += re.compile('//.+?/.+?v=([\w]+)').findall(url)
id = id[0]
url = 'http://embed.%s/embed.php?v=%s' % (netloc, id)
result = getUrl(url).result
key = re.compile('flashvars.filekey=(.+?);').findall(result)[-1]
try: key = re.compile('\s+%s="(.+?)"' % key).findall(result)[-1]
except: pass
url = 'http://www.%s/api/player.api.php?key=%s&file=%s' % (netloc, key, id)
result = getUrl(url).result
url = re.compile('url=(.+?)&').findall(result)[0]
return url
except:
return
class daclips:
def info(self):
return {
'netloc': ['daclips.in'],
'host': ['Daclips'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class datemule:
def info(self):
return {
'netloc': ['datemule.com']
}
def resolve(self, url):
try:
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[0]
return url
except:
return
class fastvideo:
def info(self):
return {
'netloc': ['fastvideo.in', 'faststream.in'],
'host': ['Fastvideo', 'Faststream'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://fastvideo.in/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class filehoot:
def info(self):
return {
'netloc': ['filehoot.com'],
'host': ['Filehoot'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://filehoot.com/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[0]
return url
except:
return
class filenuke:
def info(self):
return {
'netloc': ['filenuke.com', 'sharesix.com'],
'host': ['Filenuke', 'Sharesix'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
try: f = common.parseDOM(result, "form", attrs = { "method": "POST" })[0]
except: f = ''
k = common.parseDOM(f, "input", ret="name")
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = re.compile("var\s+lnk\d* *= *'(http.+?)'").findall(result)[0]
return url
except:
return
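# googledocs parses the "fmt_stream_map" of a Drive/Docs preview page and maps
# each stream's itag number to a coarse quality bucket (1080p / HD / SD) in
# tag(); the googleplus class further down reuses the same itag buckets for
# Picasa / Google+ hosted videos.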
class googledocs:
def info(self):
return {
'netloc': ['docs.google.com', 'drive.google.com']
}
def resolve(self, url):
try:
url = url.split('/preview', 1)[0]
url = url.replace('drive.google.com', 'docs.google.com')
result = getUrl(url).result
result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
u = json.loads(result)
u = [i.split('|')[-1] for i in u.split(',')]
u = sum([self.tag(i) for i in u], [])
url = []
try: url += [[i for i in u if i['quality'] == '1080p'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'HD'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'SD'][0]]
except: pass
if url == []: return
return url
except:
return
def tag(self, url):
quality = re.compile('itag=(\d*)').findall(url)
quality += re.compile('=m(\d*)$').findall(url)
try: quality = quality[0]
except: return []
if quality in ['37', '137', '299', '96', '248', '303', '46']:
return [{'quality': '1080p', 'url': url}]
elif quality in ['22', '84', '136', '298', '120', '95', '247', '302', '45', '102']:
return [{'quality': 'HD', 'url': url}]
elif quality in ['35', '44', '135', '244', '94']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['18', '34', '43', '82', '100', '101', '134', '243', '93']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['5', '6', '36', '83', '133', '242', '92', '132']:
return [{'quality': 'SD', 'url': url}]
else:
return []
class googleplus:
def info(self):
return {
'netloc': ['plus.google.com', 'picasaweb.google.com']
}
def resolve(self, url):
try:
if 'picasaweb' in url.lower():
result = getUrl(url).result
aid = re.compile('aid=(\d*)').findall(result)[0]
pid = urlparse.urlparse(url).fragment
oid = re.compile('/(\d*)/').findall(urlparse.urlparse(url).path)[0]
key = urlparse.parse_qs(urlparse.urlparse(url).query)['authkey'][0]
url = 'http://plus.google.com/photos/%s/albums/%s/%s?authkey=%s' % (oid, aid, pid, key)
result = getUrl(url, mobile=True).result
u = re.compile('"(http[s]*://.+?videoplayback[?].+?)"').findall(result)[::-1]
u = [i.replace('\\u003d','=').replace('\\u0026','&') for i in u]
u = sum([self.tag(i) for i in u], [])
url = []
try: url += [[i for i in u if i['quality'] == '1080p'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'HD'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'SD'][0]]
except: pass
if url == []: return
return url
except:
return
def tag(self, url):
quality = re.compile('itag=(\d*)').findall(url)
quality += re.compile('=m(\d*)$').findall(url)
try: quality = quality[0]
except: return []
if quality in ['37', '137', '299', '96', '248', '303', '46']:
return [{'quality': '1080p', 'url': url}]
elif quality in ['22', '84', '136', '298', '120', '95', '247', '302', '45', '102']:
return [{'quality': 'HD', 'url': url}]
elif quality in ['35', '44', '135', '244', '94']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['18', '34', '43', '82', '100', '101', '134', '243', '93']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['5', '6', '36', '83', '133', '242', '92', '132']:
return [{'quality': 'SD', 'url': url}]
else:
return []
class gorillavid:
def info(self):
return {
'netloc': ['gorillavid.com', 'gorillavid.in'],
'host': ['Gorillavid'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://gorillavid.in/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
request = urllib2.Request(url)
response = urllib2.urlopen(request, timeout=30)
response.close()
type = str(response.info()["Content-Type"])
if type == 'text/html': raise Exception()
return url
except:
return
class grifthost:
def info(self):
return {
'netloc': ['grifthost.com'],
'host': ['Grifthost'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://grifthost.com/embed-%s.html' % url
result = getUrl(url).result
try:
post = {}
f = common.parseDOM(result, "Form", attrs = { "method": "POST" })[0]
f = f.replace('"submit"', '"hidden"')
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
except:
pass
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class hugefiles:
def info(self):
return {
'netloc': ['hugefiles.net'],
'host': ['Hugefiles'],
'quality': 'High',
'captcha': True,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })
f += common.parseDOM(result, "form", attrs = { "action": "" })
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Free Download'})
post.update(captcha().worker(result))
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = re.compile('fileUrl\s*=\s*[\'|\"](.+?)[\'|\"]').findall(result)[0]
return url
except:
return
class ipithos:
def info(self):
return {
'netloc': ['ipithos.to'],
'host': ['Ipithos'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://ipithos.to/embed-%s.html' % url
result = getUrl(url, mobile=True).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class ishared:
def info(self):
return {
'netloc': ['ishared.eu'],
'host': ['iShared'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
url = re.compile('path *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class kingfiles:
def info(self):
return {
'netloc': ['kingfiles.net'],
'host': ['Kingfiles'],
'quality': 'High',
'captcha': True,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': ' '})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': ' '})
post.update(captcha().worker(result))
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = re.compile("var\s+download_url *= *'(.+?)'").findall(result)[0]
return url
except:
return
class mailru:
def info(self):
return {
'netloc': ['mail.ru', 'my.mail.ru', 'videoapi.my.mail.ru']
}
def resolve(self, url):
try:
usr = re.compile('/mail/(.+?)/').findall(url)[0]
vid = re.compile('(\d*)[.]html').findall(url)[0]
url = 'http://videoapi.my.mail.ru/videos/mail/%s/_myvideo/%s.json?ver=0.2.60' % (usr, vid)
import requests
result = requests.get(url).content
cookie = requests.get(url).headers['Set-Cookie']
u = json.loads(result)['videos']
h = "|Cookie=%s" % urllib.quote(cookie)
url = []
try: url += [[{'quality': '1080p', 'url': i['url'] + h} for i in u if i['key'] == '1080p'][0]]
except: pass
try: url += [[{'quality': 'HD', 'url': i['url'] + h} for i in u if i['key'] == '720p'][0]]
except: pass
try: url += [[{'quality': 'SD', 'url': i['url'] + h} for i in u if not (i['key'] == '1080p' or i ['key'] == '720p')][0]]
except: pass
if url == []: return
return url
except:
return
class mightyupload:
def info(self):
return {
'netloc': ['mightyupload.com'],
'host': ['Mightyupload'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://www.mightyupload.com/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile("file *: *'(.+?)'").findall(result)
if len(url) > 0: return url[0]
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class mooshare:
def info(self):
return {
'netloc': ['mooshare.biz'],
'host': ['Mooshare'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://mooshare.biz/embed-%s.html?play=1&confirm=Close+Ad+and+Watch+as+Free+User' % url
result = getUrl(url).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class movdivx:
def info(self):
return {
'netloc': ['movdivx.com'],
'host': ['Movdivx'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://www.movdivx.com/%s' % url
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Free Download'})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class movpod:
def info(self):
return {
'netloc': ['movpod.net', 'movpod.in'],
'host': ['Movpod'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = url.replace('/vid/', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://movpod.in/embed-%s.html' % url
result = getUrl(url).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
request = urllib2.Request(url)
response = urllib2.urlopen(request, timeout=30)
response.close()
type = str(response.info()["Content-Type"])
if type == 'text/html': raise Exception()
return url
except:
return
class movreel:
def info(self):
return {
'netloc': ['movreel.com'],
'host': ['Movreel'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
user = xbmcaddon.Addon().getSetting("movreel_user")
password = xbmcaddon.Addon().getSetting("movreel_password")
login = 'http://movreel.com/login.html'
post = {'op': 'login', 'login': user, 'password': password, 'redirect': url}
post = urllib.urlencode(post)
result = getUrl(url, close=False).result
result += getUrl(login, post=post, close=False).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[-1]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': '', 'method_premium': ''})
post = urllib.urlencode(post)
import time
request = urllib2.Request(url, post)
for i in range(0, 3):
try:
response = urllib2.urlopen(request, timeout=10)
result = response.read()
response.close()
url = re.compile('(<a .+?</a>)').findall(result)
url = [i for i in url if 'Download Link' in i][-1]
url = common.parseDOM(url, "a", ret="href")[0]
return url
except:
time.sleep(1)
except:
return
class mrfile:
def info(self):
return {
'netloc': ['mrfile.me'],
'host': ['Mrfile'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[-1]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': '', 'method_premium': ''})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = re.compile('(<a\s+href=.+?>Download\s+.+?</a>)').findall(result)[-1]
url = common.parseDOM(url, "a", ret="href")[0]
return url
except:
return
class mybeststream:
def info(self):
return {
'netloc': ['mybeststream.xyz']
}
def resolve(self, url):
try:
referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
page = url.replace(referer, '').replace('&referer=', '').replace('referer=', '')
result = getUrl(url, referer=referer).result
result = re.compile("}[(]('.+?' *, *'.+?' *, *'.+?' *, *'.+?')[)]").findall(result)[-1]
result = unwise().worker(result)
strm = re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
strm = [i for i in strm if i.startswith('rtmp')][0]
url = '%s pageUrl=%s live=1 timeout=10' % (strm, page)
return url
except:
return
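# mybeststream decodes the wise-packed player script with unwise() and returns
# an rtmp:// stream with librtmp options appended ("pageUrl=... live=1
# timeout=10"), the space-separated key=value form XBMC/Kodi's RTMP input
# accepts.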
class nosvideo:
def info(self):
return {
'netloc': ['nosvideo.com'],
'host': ['Nosvideo'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "method": "POST" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Free Download'})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
result = re.compile('(eval.*?\)\)\))').findall(result)[0]
url = js().worker(result)
result = getUrl(url).result
url = common.parseDOM(result, "file")[0]
return url
except:
return
class openload:
def info(self):
return {
'netloc': ['openload.io'],
'host': ['Openload'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
url = common.parseDOM(result, "span", attrs = { "id": "realdownload" })[0]
url = common.parseDOM(url, "a", ret="href")[0]
return url
except:
return
class played:
def info(self):
return {
'netloc': ['played.to'],
'host': ['Played'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = url.replace('//', '/')
url = re.compile('/.+?/([\w]+)').findall(url)[0]
url = 'http://played.to/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class primeshare:
def info(self):
return {
'netloc': ['primeshare.tv'],
'host': ['Primeshare'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url, mobile=True).result
url = common.parseDOM(result, "video")[0]
url = common.parseDOM(url, "source", ret="src", attrs = { "type": ".+?" })[0]
return url
except:
return
class sharerepo:
def info(self):
return {
'netloc': ['sharerepo.com'],
'host': ['Sharerepo'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
url = re.compile("file *: *'(http.+?)'").findall(result)[-1]
return url
except:
return
class stagevu:
def info(self):
return {
'netloc': ['stagevu.com'],
'host': ['StageVu'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
url = common.parseDOM(result, "embed", ret="src", attrs = { "type": "video.+?" })[0]
return url
except:
return
class streamcloud:
def info(self):
return {
'netloc': ['streamcloud.eu'],
'host': ['Streamcloud'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://streamcloud.eu/%s' % url
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "form", attrs = { "class": "proform" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
post = post.replace('op=download1', 'op=download2')
result = getUrl(url, post=post).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class streamin:
def info(self):
return {
'netloc': ['streamin.to'],
'host': ['Streamin'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://streamin.to/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile("file *: *[\'|\"](http.+?)[\'|\"]").findall(result)[-1]
return url
except:
return
class thefile:
def info(self):
return {
'netloc': ['thefile.me'],
'host': ['Thefile'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://thefile.me/embed-%s.html' % url
result = getUrl(url, mobile=True).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class thevideo:
def info(self):
return {
'netloc': ['thevideo.me'],
'host': ['Thevideo'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://thevideo.me/embed-%s.html' % url
result = getUrl(url).result
result = result.replace('\n','')
import ast
url = re.compile("'sources' *: *(\[.+?\])").findall(result)[-1]
url = ast.literal_eval(url)
url = url[-1]['file']
return url
except:
return
class tusfiles:
def info(self):
return {
'netloc': ['tusfiles.net'],
'host': ['Tusfiles'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class uploadc:
def info(self):
return {
'netloc': ['uploadc.com', 'zalaa.com'],
'host': ['Uploadc', 'Zalaa'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://uploadc.com/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile("'file' *, *'(.+?)'").findall(result)
if len(url) > 0: return url[0]
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class uploadrocket:
def info(self):
return {
'netloc': ['uploadrocket.net'],
'host': ['Uploadrocket'],
'quality': 'High',
'captcha': True,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
result = result.decode('iso-8859-1').encode('utf-8')
post = {}
f = common.parseDOM(result, "Form", attrs = { "name": "freeorpremium" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_isfree': 'Click for Free Download'})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
result = result.decode('iso-8859-1').encode('utf-8')
post = {}
f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update(captcha().worker(result))
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
result = result.decode('iso-8859-1').encode('utf-8')
url = common.parseDOM(result, "a", ret="href", attrs = { "onclick": "DL.+?" })[0]
return url
except:
return
class uptobox:
def info(self):
return {
'netloc': ['uptobox.com'],
'host': ['Uptobox'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "form", attrs = { "name": "F1" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = common.parseDOM(result, "div", attrs = { "align": ".+?" })
url = [i for i in url if 'button_upload' in i][0]
url = common.parseDOM(url, "a", ret="href")[0]
url = ['http' + i for i in url.split('http') if 'uptobox.com' in i][0]
return url
except:
return
class v_vids:
def info(self):
return {
'netloc': ['v-vids.com'],
'host': ['V-vids'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': '', 'method_premium': ''})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = common.parseDOM(result, "a", ret="href", attrs = { "id": "downloadbutton" })[0]
return url
except:
return
class veehd:
def info(self):
return {
'netloc': ['veehd.com'],
}
def resolve(self, url):
try:
result = getUrl(url, close=False).result
result = result.replace('\n','')
url = re.compile('function\s*load_download.+?src\s*:\s*"(.+?)"').findall(result)[0]
url = urlparse.urljoin('http://veehd.com', url)
result = getUrl(url, close=False).result
i = common.parseDOM(result, "iframe", ret="src")
if len(i) > 0:
i = urlparse.urljoin('http://veehd.com', i[0])
getUrl(i, close=False).result
result = getUrl(url).result
url = re.compile('href *= *"([^"]+(?:mkv|mp4|avi))"').findall(result)
url += re.compile('src *= *"([^"]+(?:divx|avi))"').findall(result)
url += re.compile('"url" *: *"(.+?)"').findall(result)
url = urllib.unquote(url[0])
return url
except:
return
class vidbull:
def info(self):
return {
'netloc': ['vidbull.com'],
'host': ['Vidbull'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url, mobile=True).result
url = common.parseDOM(result, "source", ret="src", attrs = { "type": "video.+?" })[0]
return url
except:
return
class videomega:
def info(self):
return {
'netloc': ['videomega.tv']
}
def resolve(self, url):
try:
url = urlparse.urlparse(url).query
url = urlparse.parse_qsl(url)[0][1]
url = 'http://videomega.tv/cdn.php?ref=%s' % url
result = getUrl(url, mobile=True).result
url = common.parseDOM(result, "source", ret="src", attrs = { "type": "video.+?" })[0]
return url
except:
return
class vidplay:
def info(self):
return {
'netloc': ['vidplay.net'],
'host': ['Vidplay'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
u = 'http://vidplay.net/vidembed-%s' % url
url = getUrl(u, output='geturl').result
if u == url: raise Exception()
return url
except:
return
class vidspot:
def info(self):
return {
'netloc': ['vidspot.net'],
'host': ['Vidspot'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://vidspot.net/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('"file" *: *"(http.+?)"').findall(result)[-1]
query = urlparse.urlparse(url).query
url = url[:url.find('?')]
url = '%s?%s&direct=false' % (url, query)
return url
except:
return
class vidto:
def info(self):
return {
'netloc': ['vidto.me'],
'host': ['Vidto'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://vidto.me/embed-%s.html' % url
result = getUrl(url).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
result = re.sub(r'(\',\d*,\d*,)', r';\1', result)
url = js().worker(result)
return url
except:
return
class vidzi:
def info(self):
return {
'netloc': ['vidzi.tv'],
'host': ['Vidzi'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url, mobile=True).result
result = result.replace('\n','')
result = re.compile('sources *: *\[.+?\]').findall(result)[-1]
result = re.compile('file *: *"(http.+?)"').findall(result)
url = [i for i in result if '.m3u8' in i]
if len(url) > 0: return url[0]
url = [i for i in result if not '.m3u8' in i]
if len(url) > 0: return url[0]
except:
return
class vimeo:
def info(self):
return {
'netloc': ['vimeo.com']
}
def resolve(self, url):
try:
url = [i for i in url.split('/') if i.isdigit()][-1]
url = 'http://player.vimeo.com/video/%s/config' % url
result = getUrl(url).result
result = json.loads(result)
u = result['request']['files']['h264']
url = None
try: url = u['hd']['url']
except: pass
try: url = u['sd']['url']
except: pass
return url
except:
return
class vk:
def info(self):
return {
'netloc': ['vk.com']
}
def resolve(self, url):
try:
url = url.replace('https://', 'http://')
result = getUrl(url).result
u = re.compile('url(720|540|480|360|240)=(.+?)&').findall(result)
url = []
try: url += [[{'quality': 'HD', 'url': i[1]} for i in u if i[0] == '720'][0]]
except: pass
try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '540'][0]]
except: pass
try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '480'][0]]
except: pass
if not url == []: return url
try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '360'][0]]
except: pass
if not url == []: return url
try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '240'][0]]
except: pass
if url == []: return
return url
except:
return
class vodlocker:
def info(self):
return {
'netloc': ['vodlocker.com'],
'host': ['Vodlocker'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://vodlocker.com/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class xfileload:
def info(self):
return {
'netloc': ['xfileload.com'],
'host': ['Xfileload'],
'quality': 'High',
'captcha': True,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url, close=False).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update(captcha().worker(result))
post = urllib.urlencode(post)
import time
request = urllib2.Request(url, post)
for i in range(0, 5):
try:
response = urllib2.urlopen(request, timeout=10)
result = response.read()
response.close()
if 'download2' in result: raise Exception()
url = common.parseDOM(result, "a", ret="href", attrs = { "target": "" })[0]
return url
except:
time.sleep(1)
except:
return
class xvidstage:
def info(self):
return {
'netloc': ['xvidstage.com'],
'host': ['Xvidstage'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://xvidstage.com/embed-%s.html' % url
result = getUrl(url, mobile=True).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class youtube:
def info(self):
return {
'netloc': ['youtube.com'],
'host': ['Youtube'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
id = url.split("?v=")[-1].split("/")[-1].split("?")[0].split("&")[0]
result = getUrl('http://www.youtube.com/watch?v=%s' % id).result
message = common.parseDOM(result, "div", attrs = { "id": "unavailable-submessage" })
message = ''.join(message)
alert = common.parseDOM(result, "div", attrs = { "id": "watch7-notification-area" })
if len(alert) > 0: raise Exception()
if re.search('[a-zA-Z]', message): raise Exception()
url = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % id
return url
except:
return
class zettahost:
def info(self):
return {
'netloc': ['zettahost.tv'],
'host': ['Zettahost'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://zettahost.tv/embed-%s.html' % url
result = getUrl(url, mobile=True).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
| gpl-2.0 |
aemal/westcat | api/rest/viewsets/article.py | 2 | 8270 | ###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
import json
from amcat.models import Medium, Article
from api.rest.mixins import DatatablesMixin
from api.rest.serializer import AmCATModelSerializer
from api.rest.viewset import AmCATViewSetMixin
__all__ = ("ArticleSerializer", "ArticleViewSet")
class ArticleViewSetMixin(AmCATViewSetMixin):
model_key = "article"
model = Article
class ArticleSerializer(AmCATModelSerializer):
def __init__(self, instance=None, data=None, files=None, **kwargs):
kwargs['many'] = isinstance(data, list)
super(ArticleSerializer, self).__init__(instance, data, files, **kwargs)
def restore_fields(self, data, files):
# convert media from name to id, if needed
data = data.copy() # make data mutable
if 'medium' in data:
try:
int(data['medium'])
except ValueError:
if not hasattr(self, 'media'):
self.media = {}
m = data['medium']
if m not in self.media:
self.media[m] = Medium.get_or_create(m).id
data['medium'] = self.media[m]
# add time part to date, if needed
if 'date' in data and len(data['date']) == 10:
data['date'] += "T00:00"
if 'project' not in data:
data['project'] = self.context['view'].project.id
return super(ArticleSerializer, self).restore_fields(data, files)
def from_native(self, data, files):
result = super(ArticleSerializer, self).from_native(data, files)
# deserialize children (if needed)
children = data.get('children')# TODO: children can be a multi-value GET param as well, e.g. handle getlist
if isinstance(children, (str, unicode)):
children = json.loads(children)
if children:
self.many = True
def get_child(obj):
child = self.from_native(obj, None)
child.parent = result
return child
return [result] + [get_child(child) for child in children]
return result
def save(self, **kwargs):
import collections
def _flatten(l):
"""Turn either an object or a (recursive/irregular/jagged) list-of-lists into a flat list"""
# inspired by http://stackoverflow.com/questions/2158395/flatten-an-irregular-list-of-lists-in-python
if isinstance(l, collections.Iterable) and not isinstance(l, basestring):
for el in l:
for sub in _flatten(el):
yield sub
else:
yield l
# flatten articles list (children in a many call yields a list of lists)
self.object = list(_flatten(self.object))
Article.create_articles(self.object, self.context['view'].articleset)
# make sure that self.many is True for serializing result
self.many = True
return self.object
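# Note: from_native()/save() above accept a nested 'children' value (either a
# list or a JSON encoded string, see the tests at the bottom of this file),
# attach each child to its parent and flatten the tree into a single
# Article.create_articles() call against the articleset of the current view.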
from api.rest.viewsets.articleset import ArticleSetViewSetMixin
from rest_framework.viewsets import ModelViewSet
from api.rest.viewsets.project import ProjectViewSetMixin
from amcat.models import Article, ArticleSet, ROLE_PROJECT_READER
from api.rest.viewsets.project import CannotEditLinkedResource, NotFoundInProject
class ArticleViewSetMixin(AmCATViewSetMixin):
model = Article
model_key = "article"
class ArticleViewSet(ProjectViewSetMixin, ArticleSetViewSetMixin, ArticleViewSetMixin, DatatablesMixin, ModelViewSet):
model = Article
model_key = "article"
permission_map = {'GET' : ROLE_PROJECT_READER}
model_serializer_class = ArticleSerializer
def check_permissions(self, request):
        # make sure that the requested set is available in the project, raise 404 otherwise
# sets linked_set to indicate whether the current set is owned by the project
if self.articleset.project == self.project:
pass
elif self.project.articlesets.filter(pk=self.articleset.id).exists():
if request.method == 'POST':
raise CannotEditLinkedResource()
else:
raise NotFoundInProject()
return super(ArticleViewSet, self).check_permissions(request)
@property
def articleset(self):
if not hasattr(self, '_articleset'):
articleset_id = int(self.kwargs['articleset'])
self._articleset = ArticleSet.objects.get(pk=articleset_id)
return self._articleset
def filter_queryset(self, queryset):
queryset = super(ArticleViewSet, self).filter_queryset(queryset)
return queryset.filter(articlesets_set=self.articleset)
###########################################################################
# U N I T T E S T S #
###########################################################################
from api.rest.apitestcase import ApiTestCase
from amcat.tools import amcattest, toolkit
class TestArticleViewSet(ApiTestCase):
def test_post(self):
"""Test whether posting and retrieving an article works correctly"""
import datetime
p = amcattest.create_test_project()
s = amcattest.create_test_set(project=p)
a = {
'date': datetime.datetime.now().isoformat(),
'headline': 'Test child',
'medium': 'Fantasy',
'text': 'Hello Universe',
'pagenr': 1,
'url': 'http://example.org',
}
url = "/api/v4/projects/{p.id}/articlesets/{s.id}/articles/".format(**locals())
self.post(url, a, as_user=self.user)
res = self.get(url)["results"]
self.assertEqual(len(res), 1)
self.assertEqual(res[0]["headline"], a['headline'])
self.assertEqual(toolkit.readDate(res[0]["date"]), toolkit.readDate(a['date']))
@amcattest.use_elastic
def test_children(self):
p = amcattest.create_test_project()
s = amcattest.create_test_set(project=p)
# need to json dump the children because the django client does weird stuff with post data
children = json.dumps([{'date': '2001-01-02', 'headline': 'Test child',
'medium': 'Fantasy', 'text': 'Hello Universe'}])
a = {
'date': '2001-01-01',
'headline': 'Test parent',
'medium': 'My Imagination',
'text': 'Hello World',
'children': children
}
url = "/api/v4/projects/{p.id}/articlesets/{s.id}/articles/".format(**locals())
self.post(url, a, as_user=self.user)
res = self.get(url)["results"]
headlines = {a['headline'] : a for a in res}
self.assertEqual(set(headlines), {'Test parent', 'Test child'})
self.assertEqual(headlines['Test child']['parent'], headlines['Test parent']['id'])
| agpl-3.0 |
Passtechsoft/TPEAlpGen | blender/release/scripts/addons_contrib/amaranth/animation/jump_frames.py | 3 | 6967 | # This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Jump X Frames on Shift Up/Down
When you hit Shift Up/Down, you'll jump 10 frames forward/backwards.
Sometimes it is nice to tweak that value.
In the User Preferences, Editing tab, you'll find a "Frames to Jump"
slider where you can adjust how many frames you'd like to move
forwards/backwards.
Make sure you save your user settings if you want to use this value from
now on.
Find it on the User Preferences, Editing.
"""
import bpy
KEYMAPS = list()
# FUNCTION: Check if object has keyframes for a specific frame
def is_keyframe(ob, frame):
if ob is not None and ob.animation_data is not None and ob.animation_data.action is not None:
for fcu in ob.animation_data.action.fcurves:
if frame in (p.co.x for p in fcu.keyframe_points):
return True
return False
# monkey patch is_keyframe function
bpy.types.Object.is_keyframe = is_keyframe
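# Usage sketch: bpy.context.object.is_keyframe(12) is True when the active
# object has a keyframe on frame 12 of its action.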
# FEATURE: Jump to frame in-between next and previous keyframe
class AMTH_SCREEN_OT_keyframe_jump_inbetween(bpy.types.Operator):
"""Jump to half in-between keyframes"""
bl_idname = "screen.amth_keyframe_jump_inbetween"
bl_label = "Jump to Keyframe In-between"
backwards = bpy.props.BoolProperty()
def execute(self, context):
back = self.backwards
scene = context.scene
ob = bpy.context.object
frame_start = scene.frame_start
frame_end = scene.frame_end
if not context.scene.get("amth_keyframes_jump"):
context.scene["amth_keyframes_jump"] = list()
keyframes_list = context.scene["amth_keyframes_jump"]
for f in range(frame_start, frame_end):
if ob.is_keyframe(f):
keyframes_list = list(keyframes_list)
keyframes_list.append(f)
if keyframes_list:
keyframes_list_half = []
for i, item in enumerate(keyframes_list):
try:
next_item = keyframes_list[i + 1]
keyframes_list_half.append(int((item + next_item) / 2))
except:
pass
if len(keyframes_list_half) > 1:
if back:
v = (scene.frame_current == keyframes_list_half[::-1][-1],
scene.frame_current < keyframes_list_half[::-1][-1])
if any(v):
self.report({"INFO"}, "No keyframes behind")
else:
for i in keyframes_list_half[::-1]:
if scene.frame_current > i:
scene.frame_current = i
break
else:
v = (scene.frame_current == keyframes_list_half[-1],
scene.frame_current > keyframes_list_half[-1])
if any(v):
self.report({"INFO"}, "No keyframes ahead")
else:
for i in keyframes_list_half:
if scene.frame_current < i:
scene.frame_current = i
break
else:
self.report({"INFO"}, "Object has only 1 keyframe")
else:
self.report({"INFO"}, "Object has no keyframes")
return {"FINISHED"}
# FEATURE: Jump forward/backward every N frames
class AMTH_SCREEN_OT_frame_jump(bpy.types.Operator):
"""Jump a number of frames forward/backwards"""
bl_idname = "screen.amaranth_frame_jump"
bl_label = "Jump Frames"
forward = bpy.props.BoolProperty(default=True)
def execute(self, context):
scene = context.scene
preferences = context.user_preferences.addons["amaranth"].preferences
if preferences.use_framerate:
framedelta = scene.render.fps
else:
framedelta = preferences.frames_jump
if self.forward:
scene.frame_current = scene.frame_current + framedelta
else:
scene.frame_current = scene.frame_current - framedelta
return {"FINISHED"}
def ui_userpreferences_edit(self, context):
preferences = context.user_preferences.addons["amaranth"].preferences
col = self.layout.column()
split = col.split(percentage=0.21)
split.prop(preferences, "frames_jump",
text="Frames to Jump")
def label(self, context):
preferences = context.user_preferences.addons["amaranth"].preferences
layout = self.layout
if preferences.use_timeline_extra_info:
row = layout.row(align=True)
row.operator(AMTH_SCREEN_OT_keyframe_jump_inbetween.bl_idname,
icon="PREV_KEYFRAME", text="").backwards = True
row.operator(AMTH_SCREEN_OT_keyframe_jump_inbetween.bl_idname,
icon="NEXT_KEYFRAME", text="").backwards = False
def register():
bpy.utils.register_class(AMTH_SCREEN_OT_frame_jump)
bpy.utils.register_class(AMTH_SCREEN_OT_keyframe_jump_inbetween)
bpy.types.USERPREF_PT_edit.append(ui_userpreferences_edit)
bpy.types.USERPREF_PT_edit.append(label)
# register keyboard shortcuts
wm = bpy.context.window_manager
kc = wm.keyconfigs.addon
km = kc.keymaps.new(name="Frames")
kmi = km.keymap_items.new('screen.amth_keyframe_jump_inbetween', 'UP_ARROW', 'PRESS', shift=True, ctrl=True)
kmi.properties.backwards = False
KEYMAPS.append((km, kmi))
kmi = km.keymap_items.new('screen.amth_keyframe_jump_inbetween', 'DOWN_ARROW', 'PRESS', shift=True, ctrl=True)
kmi.properties.backwards = True
KEYMAPS.append((km, kmi))
kmi = km.keymap_items.new(
"screen.amaranth_frame_jump", "UP_ARROW", "PRESS", shift=True)
kmi.properties.forward = True
KEYMAPS.append((km, kmi))
kmi = km.keymap_items.new(
"screen.amaranth_frame_jump", "DOWN_ARROW", "PRESS", shift=True)
kmi.properties.forward = False
KEYMAPS.append((km, kmi))
def unregister():
bpy.utils.unregister_class(AMTH_SCREEN_OT_frame_jump)
bpy.utils.unregister_class(AMTH_SCREEN_OT_keyframe_jump_inbetween)
bpy.types.USERPREF_PT_edit.remove(ui_userpreferences_edit)
for km, kmi in KEYMAPS:
km.keymap_items.remove(kmi)
KEYMAPS.clear()
| gpl-3.0 |
htdevices/linux-2.6-imx | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: run in debug mode; it shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint events related to this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a correspoinding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
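# The handle_* functions below replay the collected event list in order and
# correlate the raw tracepoints into higher-level records:
#   irq_dic    : per-CPU stack of hard-irq records currently being handled
#   net_rx_dic : per-CPU context of the NET_RX softirq that is in flight
#   rx_skb_list / tx_queue_list / tx_xmit_list : recently seen skbs, bounded by
#   buffer_budget, used to match later free/consume events to their origin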
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
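# kfree_skb is matched against three buffers in turn: an skb still on the
# qdisc queue was dropped before transmit, an skb in the xmit list is a
# completed transmit whose free time we record, and an skb in the receive
# list was freed without reaching skb_copy_datagram_iovec.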
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
jmartinezchaine/OpenERP | openerp/addons/caldav/wizard/calendar_event_import.py | 9 | 3434 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from tools.translate import _
import netsvc
import pooler
import time
import tools
import wizard
import base64
class calendar_event_import(osv.osv_memory):
"""
Import Calendar Event.
"""
cnt = 0
def process_imp_ics(self, cr, uid, ids, context=None):
"""
Process Import ics File.
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of calendar event import’s IDs
        @return: dictionary of calendar event import window with Import successful msg.
"""
if context is None:
context = {}
else:
context = context.copy()
context['uid'] = uid
for data in self.read(cr, uid, ids, context=context):
model = data.get('model', 'basic.calendar')
model_obj = self.pool.get(model)
context.update({'model': model})
data_obj = self.pool.get('ir.model.data')
id2 = data_obj._get_id(cr, uid, 'caldav', 'view_calendar_event_import_display')
if id2:
id2 = data_obj.browse(cr, uid, id2, context=context).res_id
vals = None
try:
vals = model_obj.import_cal(cr, uid, base64.decodestring(data['file_path']), context['active_id'], context)
except:
raise osv.except_osv(_('Warning !'),_('Invalid format of the ics, file can not be imported'))
global cnt
if vals:
cnt = len(vals)
value = {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'calendar.event.import',
'views': [(id2,'form'),(False,'tree'),(False,'calendar'),(False,'graph')],
'type': 'ir.actions.act_window',
'target': 'new'
}
return value
_name = "calendar.event.import"
_description = "Event Import"
_columns = {
'file_path': fields.binary('Select ICS file', filters='*.ics', required=True),
'msg': fields.text('', readonly=True),
}
def _get_msg(self, cr, uid, context):
        return _('Import Successful')
_defaults = {
'msg': _get_msg,
}
calendar_event_import()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
johnseekins/carbon | lib/carbon/writer.py | 1 | 6892 | """Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import time
from carbon import state
from carbon.cache import MetricCache
from carbon.storage import loadStorageSchemas, loadAggregationSchemas
from carbon.conf import settings
from carbon import log, events, instrumentation
from carbon.util import TokenBucket
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.application.service import Service
try:
import signal
except ImportError:
log.msg("Couldn't import signal module")
SCHEMAS = loadStorageSchemas()
AGGREGATION_SCHEMAS = loadAggregationSchemas()
# Initialize token buckets so that we can enforce rate limits on creates and
# updates if the config wants them.
CREATE_BUCKET = None
UPDATE_BUCKET = None
if settings.MAX_CREATES_PER_MINUTE != float('inf'):
capacity = settings.MAX_CREATES_PER_MINUTE
fill_rate = float(settings.MAX_CREATES_PER_MINUTE) / 60
CREATE_BUCKET = TokenBucket(capacity, fill_rate)
if settings.MAX_UPDATES_PER_SECOND != float('inf'):
capacity = settings.MAX_UPDATES_PER_SECOND
fill_rate = settings.MAX_UPDATES_PER_SECOND
UPDATE_BUCKET = TokenBucket(capacity, fill_rate)
def optimalWriteOrder():
"""Generates metrics with the most cached values first and applies a soft
rate limit on new metrics"""
while MetricCache:
(metric, datapoints) = MetricCache.drain_metric()
dbFileExists = state.database.exists(metric)
if not dbFileExists and CREATE_BUCKET:
# If our tokenbucket has enough tokens available to create a new metric
# file then yield the metric data to complete that operation. Otherwise
# we'll just drop the metric on the ground and move on to the next
# metric.
      # XXX This behavior should probably be configurable to not drop metrics
      # when rate limiting unless our cache is too big or some other legit
# reason.
if CREATE_BUCKET.drain(1):
yield (metric, datapoints, dbFileExists)
continue
yield (metric, datapoints, dbFileExists)
def writeCachedDataPoints():
"Write datapoints until the MetricCache is completely empty"
while MetricCache:
dataWritten = False
for (metric, datapoints, dbFileExists) in optimalWriteOrder():
dataWritten = True
if not dbFileExists:
archiveConfig = None
xFilesFactor, aggregationMethod = None, None
for schema in SCHEMAS:
if schema.matches(metric):
log.creates('new metric %s matched schema %s' % (metric, schema.name))
archiveConfig = [archive.getTuple() for archive in schema.archives]
break
for schema in AGGREGATION_SCHEMAS:
if schema.matches(metric):
log.creates('new metric %s matched aggregation schema %s' % (metric, schema.name))
xFilesFactor, aggregationMethod = schema.archives
break
if not archiveConfig:
raise Exception("No storage schema matched the metric '%s', check your storage-schemas.conf file." % metric)
log.creates("creating database metric %s (archive=%s xff=%s agg=%s)" %
(metric, archiveConfig, xFilesFactor, aggregationMethod))
try:
state.database.create(metric, archiveConfig, xFilesFactor, aggregationMethod)
instrumentation.increment('creates')
        except Exception, e:
log.msg("Error creating %s: %s" % (metric, e))
continue
      # If we've got a rate limit configured let's make sure we enforce it
if UPDATE_BUCKET:
UPDATE_BUCKET.drain(1, blocking=True)
try:
t1 = time.time()
# If we have duplicated points, always pick the last. update_many()
        # has no guaranteed behavior for that, and in fact the current implementation
# will keep the first point in the list.
datapoints = dict(datapoints).items()
state.database.write(metric, datapoints)
updateTime = time.time() - t1
except Exception, e:
log.msg("Error writing to %s: %s" % (metric, e))
instrumentation.increment('errors')
else:
pointCount = len(datapoints)
instrumentation.increment('committedPoints', pointCount)
instrumentation.append('updateTimes', updateTime)
if settings.LOG_UPDATES:
log.updates("wrote %d datapoints for %s in %.5f seconds" % (pointCount, metric, updateTime))
# Avoid churning CPU when only new metrics are in the cache
if not dataWritten:
time.sleep(0.1)
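# Writer thread main loop: keep draining the cache while the reactor runs,
# pausing briefly after an error so a failing backend cannot busy-spin.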
def writeForever():
while reactor.running:
try:
writeCachedDataPoints()
except Exception:
log.err()
time.sleep(1) # The writer thread only sleeps when the cache is empty or an error occurs
def reloadStorageSchemas():
global SCHEMAS
try:
SCHEMAS = loadStorageSchemas()
except Exception, e:
log.msg("Failed to reload storage SCHEMAS: %s" % (e))
def reloadAggregationSchemas():
global AGGREGATION_SCHEMAS
try:
AGGREGATION_SCHEMAS = loadAggregationSchemas()
except Exception, e:
log.msg("Failed to reload aggregation SCHEMAS: %s" % (e))
def shutdownModifyUpdateSpeed():
try:
shut = settings.MAX_UPDATES_PER_SECOND_ON_SHUTDOWN
if UPDATE_BUCKET:
UPDATE_BUCKET.setCapacityAndFillRate(shut,shut)
if CREATE_BUCKET:
CREATE_BUCKET.setCapacityAndFillRate(shut,shut)
log.msg("Carbon shutting down. Changed the update rate to: " + str(settings.MAX_UPDATES_PER_SECOND_ON_SHUTDOWN))
except KeyError:
log.msg("Carbon shutting down. Update rate not changed")
class WriterService(Service):
def __init__(self):
self.storage_reload_task = LoopingCall(reloadStorageSchemas)
self.aggregation_reload_task = LoopingCall(reloadAggregationSchemas)
def startService(self):
if 'signal' in globals().keys():
log.msg("Installing SIG_IGN for SIGHUP")
signal.signal(signal.SIGHUP, signal.SIG_IGN)
self.storage_reload_task.start(60, False)
self.aggregation_reload_task.start(60, False)
reactor.addSystemEventTrigger('before', 'shutdown', shutdownModifyUpdateSpeed)
reactor.callInThread(writeForever)
Service.startService(self)
def stopService(self):
self.storage_reload_task.stop()
self.aggregation_reload_task.stop()
Service.stopService(self)
| apache-2.0 |
kz/pybattle | main.py | 1 | 4013 | #!/usr/bin/env python
from time import sleep
import yaml
from player import Player
from attack import Attack
def main():
# Load YAML config
config_path = "./attack.yml"
f = open(config_path, 'r')
attack_config = yaml.load(f)
f.close()
# Load attacks
attacks = {}
for name, properties in attack_config.items():
attacks[name] = (Attack(name, int(properties['strength']), int(properties['accuracy'])))
# Introduce the game
print("""
PyBattle
===
Each player is given a python with 100HP (health points).
The first player whose python depletes the other python's health points wins.
===
""")
# Obtain player names
player_one = Player(input("What is Player 1's name? "))
player_two = Player(input("What is Player 2's name? "))
# Start the game
print("\n{!s} challenges {!s} to a python battle!\n".format(player_one.name, player_two.name))
sleep(0.5)
# Assign players to current/target players
current_player = player_one
target_player = player_two
# Start a round
while player_one.health > 0 and player_two.health > 0:
# Output player's turn
sleep(0.5)
print("It's {!s}'s turn --\n".format(current_player.name))
sleep(0.5)
# Output player HPs
sleep(0.5)
print("{!s} - {!s}/100 HP".format(player_one.name, player_one.health))
print("{!s} - {!s}/100 HP\n".format(player_two.name, player_two.health))
sleep(0.5)
# Output available attacks
sleep(0.5)
print("Available attacks:")
print("==================")
for name, attack in attacks.items():
print("{!s} - Strength: {!s} - Accuracy: {!s}".format(attack.name, attack.strength, attack.accuracy))
print("==================\n")
sleep(0.5)
# Player inputs attack
while True:
input_attack = input("{!s}, enter the name of the attack you want to use: ".format(current_player.name)) \
.strip() \
.lower() \
.capitalize()
if input_attack in attacks:
chosen_attack = attacks[input_attack]
current_player.set_previous_attack_name(input_attack)
break
else:
print("The selected attack is invalid. Try again!\n")
# Show chosen attack
sleep(1)
print("\n{!s} chose {!s}!\n".format(current_player.name, chosen_attack.name))
sleep(1)
# Process attack
attack_result = chosen_attack.attempt(target_player.previous_attack_name)
target_player.deplete(attack_result['strength'])
# Display attack result to players
print(attack_result['message']
.replace('{current}', current_player.name)
.replace('{target}', target_player.name)
.replace('{strength}', str(attack_result['strength']))
.replace('{health}', str(target_player.health)))
# Update player objects and switch around current/target players
if current_player.name == player_one.name:
player_one = current_player
player_two = target_player
current_player = player_two
target_player = player_one
else:
player_two = current_player
player_one = target_player
current_player = player_one
target_player = player_two
sleep(1.5)
print("\n=======================================================\n")
# Calculate winning/losing player
if player_one.health <= 0:
winning_player = player_two
losing_player = player_one
else:
winning_player = player_one
losing_player = player_two
# Display results to players
print("{!s}'s python falls to the ground in defeat.".format(losing_player.name))
print("{!s} is the winner!".format(winning_player.name))
if __name__ == '__main__':
main()
| mit |
mlcommons/training | reinforcement/tensorflow/minigo/oneoffs/joseki/joseki_query.py | 5 | 6293 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '.') # nopep8
import logging
import sqlite3
import collections
import datetime as dt
from flask import Flask, g, jsonify
from timeit import default_timer as timer
import os
import flask
import oneoffs.joseki.opening_freqs as openings
# static_folder is location of npm build
app = Flask(__name__, static_url_path="", static_folder="./build")
# Path to database relative to joseki_query.py, with pre-extracted joseki
# information (see 'opening_frequencies.py')
DATABASE = ''
assert os.path.exists(DATABASE)
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
def get_sequence_hour_counts(seq):
cur = get_db().execute('''
select run, hour, count from joseki_counts
where seq_id in (select id from joseki where seq = ?);
''', (seq, ))
return list(cur.fetchall())
def seq_id_or_none(db, seq):
s_id = db.execute("select id from joseki where seq = ?", (seq,)).fetchone()
return s_id[0] if s_id else None
@app.route("/")
def index():
return app.send_static_file("index.html")
@app.route("/nexts", methods=["POST"])
def next_moves():
d = flask.request.get_json()
prefix = d['params']['prefix']
run = d['params']['run']
db = get_db()
# If error, return this default result.
res = {'count': 0, 'next_moves': {}}
# For the blank board, there's no 'prefix' string sent, and so there's no
# 'next_move' information to be had. This selects and counts all the unique
# opening moves from the joseki table.
# TODO: Once the db is rebuilt with empty opening stats, this special case
# could be removed.
if not prefix:
nexts = db.execute("select distinct(substr(seq, 0, 7)), count(*) from joseki group by 1;").fetchall()
total = sum([n[1] for n in nexts])
else:
s_id = seq_id_or_none(db, prefix)
if not s_id:
return jsonify(res)
if run is None:
nexts = db.execute("""
select next_move, sum(count) from next_moves where seq_id = ? group by 1
""", (s_id,)).fetchall()
total = db.execute("select sum(count) from joseki_counts where seq_id = ?",
(s_id,)).fetchone()[0]
else:
start = timer()
nexts = db.execute("""
select next_move, sum(nm.count)
from next_moves as nm join joseki_counts as jc
on jc.id == nm.joseki_hour_id
where nm.seq_id = ? and jc.run = ? group by 1
""", (s_id, run)).fetchall()
end = timer()
print('%.4f seconds for fancy join.' % (end-start,))
total = db.execute("select sum(count) from joseki_counts where seq_id = ? and run = ?",
(s_id, run)).fetchone()[0]
if not nexts:
print("No next moves found, post params:", d['params'])
return jsonify(res)
next_moves = dict(nexts)
tot = sum(next_moves.values())
for k in next_moves:
next_moves[k] /= tot
max_v = max(next_moves.values())
next_moves = {k:v / max_v for k,v in next_moves.items() if v > 0.001}
res = {'count': total,
'next_moves': next_moves}
return jsonify(res)
@app.route("/games", methods=["POST"])
def games():
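    # Return one 30-row page of example games for the given joseki prefix,
    # optionally restricted to a single run and ordered by hour.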
d = flask.request.get_json()
prefix = d['params']['sgf']
sort_hour = d['params']['sort']
run = d['params']['run']
# "page" is 1-indexed, so subtract 1 to get the proper OFFSET.
page = d['params']['page'] - 1
db = get_db()
if sort_hour.lower() not in ('desc', 'asc'):
print("Invalid input for sort_hour param: ", sort_hour)
return jsonify({'rows': []})
s_id = seq_id_or_none(db, prefix)
if not s_id:
return jsonify({'rows': []})
q = """select example_sgf, hour, run, b_wins*1.0/count from joseki_counts
where seq_id = ? {} order by hour {} limit 30 offset ?""".format(
"and run = ?" if run else "", sort_hour)
if run:
rows = db.execute(q, (s_id, run, page * 30)).fetchall()
else:
rows = db.execute(q, (s_id, page * 30)).fetchall()
res = [ {'game': os.path.basename(r[0]), 'hour': r[1],
'run': r[2], 'winrate': r[3]} for r in rows]
return jsonify({'rows': res})
@app.route("/search", methods=["POST"])
def search():
d = flask.request.get_json()
print(d)
query = d['params']['sgf']
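    # Hour strings like "2019-01-01-13" are converted to Unix timestamps, and
    # each run gets a transformer that maps absolute time onto a shared
    # "% of Training" axis so runs of different lengths can be compared.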
ts = lambda hr: int(dt.datetime.strptime(hr, "%Y-%m-%d-%H").timestamp())
ranges = openings.run_time_ranges(get_db())
interps = openings.build_run_time_transformers(ranges)
runs = sorted(ranges.keys())
cols = []
cols.append({'id': 'time', 'label': '% of Training', 'type': 'number'})
for run in runs:
cols.append({'id': run + ' count', 'label': run + ' times seen', 'type': 'number'})
sequence_counts = get_sequence_hour_counts(query)
rows = collections.defaultdict(lambda: [0 for i in range(len(runs))])
for r, hr, ct in sequence_counts:
key = interps[r](ts(hr))
idx = runs.index(r)
rows[key][idx] = ct
row_data = [ {'c': [ {'v': key} ] + [{'v': v if v else None} for v in value ] }
for key,value in rows.items()]
obj = {'cols': cols, "rows": row_data, "sequence": query}
return jsonify(obj)
| apache-2.0 |
akashsinghal/Speech-Memorization-App | Python_Backend/lib/urllib3/packages/ordered_dict.py | 2040 | 8935 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
| apache-2.0 |
qevo/py_data_helper | tests/check.py | 1 | 6291 | """Tests for the data_helper.check module"""
import sys, unittest
from BaseTest import BaseTestWrapper
class IsBoolTestCase(BaseTestWrapper.BaseTest):
"""check.is_bool() test cases"""
def test_string(self):
"""Test if string is False"""
x = 'y'
self.assertFalse(self._bt['func'](x))
def test_number(self):
"""Test if a number is False"""
x = 12345
self.assertFalse(self._bt['func'](x))
def test_list(self):
"""Test if a list is False"""
x = []
self.assertFalse(self._bt['func'](x))
def test_bool(self):
"""Test if a bool is True"""
x = True
self.assertTrue(self._bt['func'](x))
class IsStrTestCase(BaseTestWrapper.BaseTest):
"""check.is_str() test cases"""
def test_string(self):
"""Test if string is True"""
x = 'y'
self.assertTrue(self._bt['func'](x))
def test_number(self):
"""Test if a number is False"""
x = 12345
self.assertFalse(self._bt['func'](x))
def test_list(self):
"""Test if a list is False"""
x = []
self.assertFalse(self._bt['func'](x))
class IsStrEmptyTestCase(BaseTestWrapper.BaseTest):
"""check.is_str_empty() test cases"""
def test_empty_string(self):
"""Test if an empty string is True"""
x = ''
self.assertTrue(self._bt['func'](x))
def test_string(self):
"""Test if non empty string is False"""
x = 'y'
self.assertFalse(self._bt['func'](x))
def test_number(self):
"""Test if a number is False"""
x = 12345
self.assertFalse(self._bt['func'](x))
def test_list(self):
"""Test if a list is False"""
x = []
self.assertFalse(self._bt['func'](x))
class IsStrNotEmptyTestCase(BaseTestWrapper.BaseTest):
"""check.is_str_not_empty() test cases"""
def test_empty_string(self):
"""Test if an empty string is False"""
x = ''
self.assertFalse(self._bt['func'](x))
def test_string(self):
"""Test if non empty string is True"""
x = 'y'
self.assertTrue(self._bt['func'](x))
def test_number(self):
"""Test if a number is False"""
x = 12345
self.assertFalse(self._bt['func'](x))
def test_list(self):
"""Test if a list is False"""
x = []
self.assertFalse(self._bt['func'](x))
class IsIntTestCase(BaseTestWrapper.BaseTest):
"""check.is_int() test cases"""
def test_string(self):
"""Test if string is False"""
x = 'y'
self.assertFalse(self._bt['func'](x))
def test_positive_int(self):
"""Test if a positive int is detected"""
x = 12345
self.assertTrue(self._bt['func'](x))
def test_negative_int(self):
"""Test if a negative int is detected"""
x = -12345
self.assertTrue(self._bt['func'](x))
class IsIntNotNegTestCase(BaseTestWrapper.BaseTest):
"""check.is_int_not_neg() test cases"""
def test_string(self):
"""Test if string is False"""
x = 'y'
self.assertFalse(self._bt['func'](x))
def test_positive_int(self):
"""Test if a positive int is detected"""
x = 12345
self.assertTrue(self._bt['func'](x))
def test_zero(self):
"""Test if zero is detected"""
x = 0
self.assertTrue(self._bt['func'](x))
def test_negative_int(self):
"""Test if a negative int is detected"""
x = -12345
self.assertFalse(self._bt['func'](x))
class IsIntPosTestCase(BaseTestWrapper.BaseTest):
"""check.is_int_pos() test cases"""
def test_string(self):
"""Test if string is False"""
x = 'y'
self.assertFalse(self._bt['func'](x))
def test_positive_int(self):
"""Test if a positive int is detected"""
x = 12345
self.assertTrue(self._bt['func'](x))
def test_zero(self):
"""Test if zero is detected"""
x = 0
self.assertFalse(self._bt['func'](x))
def test_negative_int(self):
"""Test if a negative int is detected"""
x = -12345
self.assertFalse(self._bt['func'](x))
class IsIntNegTestCase(BaseTestWrapper.BaseTest):
"""check.is_int_pos() test cases"""
def test_string(self):
"""Test if string is False"""
x = 'y'
self.assertFalse(self._bt['func'](x))
def test_positive_int(self):
"""Test if a positive int is detected"""
x = 12345
self.assertFalse(self._bt['func'](x))
def test_zero(self):
"""Test if zero is detected"""
x = 0
self.assertFalse(self._bt['func'](x))
def test_negative_int(self):
"""Test if a negative int is detected"""
x = -12345
self.assertTrue(self._bt['func'](x))
class IsListTestCase(BaseTestWrapper.BaseTest):
"""check.is_list() test cases"""
def test_string(self):
"""Test if string is False"""
x = 'y'
self.assertFalse(self._bt['func'](x))
def test_dict(self):
"""Test if dict is False"""
x = {}
self.assertFalse(self._bt['func'](x))
def test_list(self):
"""Test if list is True"""
x = []
self.assertTrue(self._bt['func'](x))
class HasWhitespaceTestCase(BaseTestWrapper.BaseTest):
"""check.has_whitespace() test cases"""
def test_space(self):
"""Test if whitespace is detected"""
l = [
'hello world',
' ',
' space'
]
for s in l:
self.assertTrue(self._bt['func'](s))
def test_no_space(self):
"""Test if no whitespace is detected"""
l = [
'hello',
'',
'none'
]
for s in l:
self.assertFalse(self._bt['func'](s))
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.result.TestResult()
suite.run(result)
print result
for f in result.failures:
for t in f:
print t
print ''
for e in result.errors:
for t in e:
print t
print ''
| mit |
Prachigarg1/Prachi | flags_test.py | 2 | 2567 |
import flags
import unittest
import re
import source
class FlagTestCase(unittest.TestCase):
def testParseParamDescription(self):
desc = '{!bbb|ccc?} aaa This \nis the desc. '
self.assertEquals(
('aaa', '!bbb|ccc?', 'This \nis the desc.'),
flags.ParseParameterDescription(desc))
desc = '{...*} var_args The items to substitute into the pattern.'
self.assertEquals(
('var_args', '...*', 'The items to substitute into the pattern.'),
flags.ParseParameterDescription(desc))
desc = '{string} aaa'
self.assertEquals(
('aaa', 'string', ''),
flags.ParseParameterDescription(desc))
self.assertRaises(
ValueError,
lambda: flags.ParseParameterDescription('desc without type'))
def testParseReturnDescription(self):
desc = ' {!bbb|ccc?} This \nis the desc. '
self.assertEquals(
('!bbb|ccc?', 'This \nis the desc.'),
flags.ParseReturnDescription(desc))
self.assertRaises(
ValueError,
lambda: flags.ParseReturnDescription('desc without type'))
def testMabyeParseTypeFromDescription(self):
self.assertEquals(
'aaa',
flags.MaybeParseTypeFromDescription(' {aaa} bbb ccc'))
self.assertEquals(
None,
flags.MaybeParseTypeFromDescription('aaa bbb ccc'))
@staticmethod
def GetFlags(script):
desc, flags = source._GetDescriptionAndFlags(script)
return flags
def testGetSymbolType(self):
self.assertEquals(
'aaa', flags.GetSymbolType(self.GetFlags("""@const {aaa}""")))
self.assertEquals(
'bbb', flags.GetSymbolType(self.GetFlags("""@private {bbb}""")))
self.assertEquals(
'ccc', flags.GetSymbolType(self.GetFlags("""@protected {ccc}""")))
self.assertEquals(
'ddd', flags.GetSymbolType(self.GetFlags("""@const {ddd}""")))
def testGetVisibility(self):
test_source = source.ScanScript("""\
goog.provide('abc');
/**
* @private
*/
abc.def;
""")
symbol = list(test_source.symbols)[0]
self.assertEquals(flags.PRIVATE, flags.GetVisibility(symbol.comment.flags))
test_source = source.ScanScript("""\
goog.provide('abc');
/**
* @protected
*/
abc.def;
""")
symbol = list(test_source.symbols)[0]
self.assertEquals(flags.PROTECTED, flags.GetVisibility(symbol.comment.flags))
test_source = source.ScanScript("""\
goog.provide('abc');
/**
*/
abc.def;
""")
symbol = list(test_source.symbols)[0]
self.assertEquals(flags.PUBLIC, flags.GetVisibility(symbol.comment.flags))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
maghoff/node-gyp | gyp/test/mac/gyptest-framework.py | 102 | 1459 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that app bundles are built correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('framework.gyp', chdir='framework')
test.build('framework.gyp', 'test_framework', chdir='framework')
# Binary
test.built_file_must_exist(
'Test Framework.framework/Versions/A/Test Framework',
chdir='framework')
# Info.plist
test.built_file_must_exist(
'Test Framework.framework/Versions/A/Resources/Info.plist',
chdir='framework')
# Resources
test.built_file_must_exist(
'Test Framework.framework/Versions/A/Resources/English.lproj/'
'InfoPlist.strings',
chdir='framework')
# Symlinks created by packaging process
test.built_file_must_exist('Test Framework.framework/Versions/Current',
chdir='framework')
test.built_file_must_exist('Test Framework.framework/Resources',
chdir='framework')
test.built_file_must_exist('Test Framework.framework/Test Framework',
chdir='framework')
# PkgInfo.
test.built_file_must_not_exist(
'Test Framework.framework/Versions/A/Resources/PkgInfo',
chdir='framework')
test.pass_test()
| mit |
denisenkom/django | tests/test_runner/test_discover_runner.py | 3 | 2122 | from contextlib import contextmanager
import os
import sys
from unittest import expectedFailure
from django.test import TestCase
from django.test.runner import DiscoverRunner
def expectedFailureIf(condition):
"""Marks a test as an expected failure if ``condition`` is met."""
if condition:
return expectedFailure
return lambda func: func
class DiscoverRunnerTest(TestCase):
def test_dotted_test_module(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample"],
).countTestCases()
self.assertEqual(count, 2)
def test_dotted_test_class_vanilla_unittest(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestVanillaUnittest"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_class_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_method_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase.test_sample"],
).countTestCases()
self.assertEqual(count, 1)
def test_pattern(self):
count = DiscoverRunner(
pattern="*_tests.py",
).build_suite(["test_discovery_sample"]).countTestCases()
self.assertEqual(count, 1)
def test_file_path(self):
@contextmanager
def change_cwd_to_tests():
"""Change CWD to tests directory (one level up from this file)"""
current_dir = os.path.abspath(os.path.dirname(__file__))
tests_dir = os.path.join(current_dir, '..')
old_cwd = os.getcwd()
os.chdir(tests_dir)
yield
os.chdir(old_cwd)
with change_cwd_to_tests():
count = DiscoverRunner().build_suite(
["test_discovery_sample/"],
).countTestCases()
self.assertEqual(count, 3)
| bsd-3-clause |
rdowinton/sphinx-php | sensio/sphinx/bestpractice.py | 6 | 1386 | from docutils.parsers.rst import Directive, directives
from docutils import nodes
from string import upper
from sphinx.util.compat import make_admonition
from sphinx import addnodes
from sphinx.locale import _
class bestpractice(nodes.Admonition, nodes.Element):
pass
class BestPractice(Directive):
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
ret = make_admonition(
bestpractice, self.name, [_('Best Practice')], self.options,
self.content, self.lineno, self.content_offset, self.block_text,
self.state, self.state_machine)
if self.arguments:
argnodes, msgs = self.state.inline_text(self.arguments[0],
self.lineno)
para = nodes.paragraph()
para += argnodes
para += msgs
ret[0].insert(1, para)
return ret
def visit_bestpractice_node(self, node):
self.body.append(self.starttag(node, 'div', CLASS=('admonition best-practice')))
self.set_first_last(node)
def depart_bestpractice_node(self, node):
self.depart_admonition(node)
def setup(app):
app.add_node(bestpractice, html=(visit_bestpractice_node, depart_bestpractice_node))
app.add_directive('best-practice', BestPractice)
| mit |
lucidmotifs/auto-aoc | .venv/lib/python3.5/site-packages/pylint/test/functional/inconsistent_returns.py | 1 | 4474 | #pylint: disable=missing-docstring, no-else-return, invalid-name, unused-variable, superfluous-parens
"""Testing inconsistent returns"""
import math
import sys
# These ones are consistent
def explicit_returns(var):
if var >= 0:
return math.sqrt(var)
else:
return None
def explicit_returns2(var):
if var < 0:
return None
return math.sqrt(var)
def empty_implicit_returns(var):
if var < 0:
return
def returns_in_exceptions():
try:
raise ValueError('test')
except ValueError:
return 1
except (OSError, TypeError):
return 2
def returns_and_exceptions(var):
if var < 10:
return var**2
else:
raise ValueError("Incorrect value")
def returns_and_exceptions_issue1770(var):
try:
if var == 1:
return 'a'
elif var == 2:
return 'b'
else:
raise ValueError
except AssertionError:
return None
def explicit_returns3(arg):
if arg:
return False
else:
if arg < 3:
print('arg < 3')
return True
def explicit_returns4(arg):
if arg:
if arg > 2:
print('arg > 2')
return False
else:
if arg < 3:
print('arg < 3')
return True
def explicit_returns5(arg):
if arg:
if arg > 2:
print('arg > 2')
return False
else:
return True
def nested_function():
def dummy_return():
return
return dummy_return
def explicit_returns6(x, y, z):
if x: # pylint: disable=no-else-return
a = 1
if y: # pylint: disable=no-else-return
b = 2
return y
else:
c = 3
return x
else:
d = 4
return z
def explicit_returns7(arg):
if arg < 0:
arg = 2 * arg
return 'below 0'
elif arg == 0:
print("Null arg")
return '0'
else:
arg = 3 * arg
return 'above 0'
def bug_1772():
"""Don't check inconsistent return statements inside while loop"""
counter = 1
while True:
counter += 1
if counter == 100:
return 7
def bug_1771(var):
if var == 1:
sys.exit(1)
else:
return var * 2
def bug_1771_with_user_config(var):
# sys.getdefaultencoding is considered as a never
# returning function in the inconsistent_returns.rc file.
if var == 1:
sys.getdefaultencoding()
else:
return var * 2
# Next ones are not consistent
def explicit_implicit_returns(var): # [inconsistent-return-statements]
if var >= 0:
return math.sqrt(var)
def empty_explicit_returns(var): # [inconsistent-return-statements]
if var < 0:
return
return math.sqrt(var)
def explicit_implicit_returns2(arg): # [inconsistent-return-statements]
if arg:
if arg > 2:
print('arg > 2')
return False
else:
return True
def explicit_implicit_returns3(arg): # [inconsistent-return-statements]
if arg:
if arg > 2:
print('arg > 2')
return False
else:
return True
def returns_missing_in_catched_exceptions(arg): # [inconsistent-return-statements]
try:
arg = arg**2
raise ValueError('test')
except ValueError:
print('ValueError')
arg = 0
except (OSError, TypeError):
return 2
def complex_func(arg): # [inconsistent-return-statements]
for i in range(arg):
if i > arg / 2:
break
else:
return arg
def inconsistent_returns_in_nested_function():
def not_consistent_returns_inner(arg): # [inconsistent-return-statements]
for i in range(arg):
if i > arg / 2:
break
else:
return arg
return not_consistent_returns_inner
class BlargException(Exception):
pass
def blarg(someval):
try:
if someval:
raise BlargException()
return 5
except BlargException:
raise
def bug_1772_counter_example(): # [inconsistent-return-statements]
counter = 1
if counter == 1:
while True:
counter += 1
if counter == 100:
return 7
def bug_1771_counter_example(var): # [inconsistent-return-statements]
if var == 1:
inconsistent_returns_in_nested_function()
else:
return var * 2
| mit |
jonaqp/heroku | core/utils/uploads.py | 1 | 2729 | import os
import sys
from PIL import Image
from django.conf import settings
from django.core.files.storage import default_storage as storage
from core.utils import resize_image
prefix_profile = 'uploads/profiles/'
prefix_container = 'uploads/container/'
prefix_upload_company = 'upload/logo_company'
def upload_location_profile(instance, filename):
file_base, extension = filename.split(".")
path_file = u"{0:s}/{1:s}.{2:s}".format(
str(instance.user.id), str(instance.id), extension)
return os.path.join(prefix_profile, path_file)
def upload_location_trip(instance, filename):
file_base, extension = filename.split(".")
path_file = u"{0:s}/shellcatch_{1:s}_{2:s}-{3:s}.{4:s}".format(
str(instance.container.identifier_mac),
str(instance.container.identifier_mac),
str(instance.datetime_image.strftime("%Y_%m_%d")),
str(instance.datetime_image.strftime("%H-%M-%S")),
extension).lower()
return os.path.join(prefix_container, path_file)
def upload_location_company(instance, filename):
file_base, extension = filename.split(".")
return "{0}/{1}.{2}".format(
prefix_upload_company, instance.name, extension)
def handle_upload_remove(current_image):
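    # Only removes the file from the local MEDIA_ROOT when DEBUG is set;
    # outside DEBUG the storage backend is left untouched.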
if settings.DEBUG:
if current_image:
image_path = "{0}/{1}".format(str(settings.MEDIA_ROOT),
str(current_image))
if os.path.isfile(image_path):
os.remove(image_path)
else:
pass
def handle_upload_profile(name_image, resize_height=100):
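    # Saves a copy of the image resized to `resize_height` pixels alongside
    # the original, appending the height to the filename. In DEBUG the file is
    # handled on the local filesystem; otherwise it goes through the default
    # storage backend.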
if settings.DEBUG:
url = "{0}/{1}".format(str(settings.MEDIA_ROOT).replace('\\', '/'),
str(name_image))
image = Image.open(url)
filename_base, filename_ext = os.path.splitext(url)
filename = url.rsplit('/', 1)[1].rsplit('.', 1)[0]
fullpath = url.rsplit('/', 1)[0]
if filename_ext not in ['.jpg', '.jpeg', '.png']:
sys.exit()
image = resize_image.resize_height(image, resize_height)
new_resize_image = filename + "_" + str(resize_height) + filename_ext
image.save(fullpath + '/' + new_resize_image)
else:
file_path = name_image.name
filename_base, filename_ext = os.path.splitext(file_path)
thumb_file_path = filename_base + "_" + str(resize_height) + filename_ext
f = storage.open(file_path, 'r')
image = Image.open(f)
if filename_ext not in ['.jpg', '.jpeg', '.png']:
sys.exit()
image = resize_image.resize_height(image, resize_height)
f_thumb = storage.open(thumb_file_path, "w")
image.save(f_thumb, "jpeg")
f_thumb.close()
| mit |
saturday-shi/spark | examples/src/main/python/mllib/decision_tree_classification_example.py | 106 | 2372 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Decision Tree Classification Example.
"""
from __future__ import print_function
from pyspark import SparkContext
# $example on$
from pyspark.mllib.tree import DecisionTree, DecisionTreeModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonDecisionTreeClassificationExample")
# $example on$
# Load and parse the data file into an RDD of LabeledPoint.
data = MLUtils.loadLibSVMFile(sc, 'data/mllib/sample_libsvm_data.txt')
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a DecisionTree model.
# Empty categoricalFeaturesInfo indicates all features are continuous.
model = DecisionTree.trainClassifier(trainingData, numClasses=2, categoricalFeaturesInfo={},
impurity='gini', maxDepth=5, maxBins=32)
# Evaluate model on test instances and compute test error
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
testErr = labelsAndPredictions.filter(
lambda lp: lp[0] != lp[1]).count() / float(testData.count())
print('Test Error = ' + str(testErr))
print('Learned classification tree model:')
print(model.toDebugString())
# Save and load model
model.save(sc, "target/tmp/myDecisionTreeClassificationModel")
sameModel = DecisionTreeModel.load(sc, "target/tmp/myDecisionTreeClassificationModel")
# $example off$
| apache-2.0 |
darkryder/django | django/contrib/staticfiles/utils.py | 335 | 1976 | import fnmatch
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def matches_patterns(path, patterns=None):
"""
Return True or False depending on whether the ``path`` should be
ignored (if it matches any pattern in ``ignore_patterns``).
"""
if patterns is None:
patterns = []
for pattern in patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
def get_files(storage, ignore_patterns=None, location=''):
"""
Recursively walk the storage directories yielding the paths
of all files that should be copied.
"""
if ignore_patterns is None:
ignore_patterns = []
directories, files = storage.listdir(location)
for fn in files:
if matches_patterns(fn, ignore_patterns):
continue
if location:
fn = os.path.join(location, fn)
yield fn
for dir in directories:
if matches_patterns(dir, ignore_patterns):
continue
if location:
dir = os.path.join(location, dir)
for fn in get_files(storage, ignore_patterns, dir):
yield fn
def check_settings(base_url=None):
"""
Checks if the staticfiles settings have sane values.
"""
if base_url is None:
base_url = settings.STATIC_URL
if not base_url:
raise ImproperlyConfigured(
"You're using the staticfiles app "
"without having set the required STATIC_URL setting.")
if settings.MEDIA_URL == base_url:
raise ImproperlyConfigured("The MEDIA_URL and STATIC_URL "
"settings must have different values")
if ((settings.MEDIA_ROOT and settings.STATIC_ROOT) and
(settings.MEDIA_ROOT == settings.STATIC_ROOT)):
raise ImproperlyConfigured("The MEDIA_ROOT and STATIC_ROOT "
"settings must have different values")
| bsd-3-clause |
manuelm/pyload | module/plugins/crypter/EmbeduploadCom.py | 5 | 2449 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.Crypter import Crypter
from module.network.HTTPRequest import BadHeader
class EmbeduploadCom(Crypter):
__name__ = "EmbeduploadCom"
__type__ = "crypter"
__version__ = "0.07"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?embedupload\.com/\?d=.+'
__config__ = [("activated" , "bool" , "Activated" , True ),
("use_premium" , "bool" , "Use premium account if available" , True ),
("folder_per_package", "Default;Yes;No", "Create folder for each package" , "Default" ),
("preferedHoster" , "str" , "Prefered hoster list (bar-separated)", "embedupload"),
("ignoredHoster" , "str" , "Ignored hoster list (bar-separated)" , "" )]
__description__ = """EmbedUpload.com decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]")]
LINK_PATTERN = r'<div id="(.+?)".*?>\s*<a href="(.+?)" target="_blank" (?:class="DownloadNow"|style="color:red")>'
def decrypt(self, pyfile):
self.data = self.load(pyfile.url)
tmp_links = []
m = re.findall(self.LINK_PATTERN, self.data)
if m is not None:
prefered_set = set(self.config.get('preferedHoster').split('|'))
prefered_set = map(lambda s: s.lower().split('.')[0], prefered_set)
self.log_debug("PF: %s" % prefered_set)
tmp_links.extend(x[1] for x in m if x[0] in prefered_set)
self.links = self.get_location(tmp_links)
if not self.links:
ignored_set = set(self.config.get('ignoredHoster').split('|'))
ignored_set = map(lambda s: s.lower().split('.')[0], ignored_set)
self.log_debug("IG: %s" % ignored_set)
tmp_links.extend(x[1] for x in m if x[0] not in ignored_set)
self.links = self.get_location(tmp_links)
def get_location(self, tmp_links):
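        # Resolve each intermediate link to its final URL by reading the
        # Location response header; links that raise BadHeader are skipped.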
new_links = []
for link in tmp_links:
try:
header = self.load(link, just_header=True)
if 'location' in header:
new_links.append(header.get('location'))
except BadHeader:
pass
return new_links
| gpl-3.0 |
xzYue/odoo | addons/account/wizard/account_automatic_reconcile.py | 340 | 11604 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_automatic_reconcile(osv.osv_memory):
_name = 'account.automatic.reconcile'
_description = 'Automatic Reconcile'
_columns = {
'account_ids': fields.many2many('account.account', 'reconcile_account_rel', 'reconcile_id', 'account_id', 'Accounts to Reconcile', domain = [('reconcile','=',1)],),
'writeoff_acc_id': fields.many2one('account.account', 'Account'),
'journal_id': fields.many2one('account.journal', 'Journal'),
'period_id': fields.many2one('account.period', 'Period'),
'max_amount': fields.float('Maximum write-off amount'),
'power': fields.selection([(p, str(p)) for p in range(2, 5)], 'Power', required=True, help='Number of partial amounts that can be combined to find a balance point can be chosen as the power of the automatic reconciliation'),
'reconciled': fields.integer('Reconciled transactions', readonly=True),
'unreconciled': fields.integer('Not reconciled transactions', readonly=True),
'allow_write_off': fields.boolean('Allow write off')
}
def _get_reconciled(self, cr, uid, context=None):
if context is None:
context = {}
return context.get('reconciled', 0)
def _get_unreconciled(self, cr, uid, context=None):
if context is None:
context = {}
return context.get('unreconciled', 0)
_defaults = {
'reconciled': _get_reconciled,
'unreconciled': _get_unreconciled,
'power': 2
}
#TODO: cleanup and comment this code... For now, it is awfulllll
# (way too complex, and really slow)...
def do_reconcile(self, cr, uid, credits, debits, max_amount, power, writeoff_acc_id, period_id, journal_id, context=None):
"""
        For one credit value, check all debits and combinations of them,
        depending on the power. It starts with a power of one and goes up
        to the max power allowed.
"""
move_line_obj = self.pool.get('account.move.line')
if context is None:
context = {}
def check2(value, move_list, power):
def check(value, move_list, power):
for i in range(len(move_list)):
move = move_list[i]
if power == 1:
if abs(value - move[1]) <= max_amount + 0.00001:
return [move[0]]
else:
del move_list[i]
res = check(value - move[1], move_list, power-1)
move_list[i:i] = [move]
if res:
res.append(move[0])
return res
return False
for p in range(1, power+1):
res = check(value, move_list, p)
if res:
return res
return False
def check4(list1, list2, power):
"""
            For a list of credits and debits and a given power, check whether
            there are matching tuples of credits and debits whose amounts
            balance. It starts with a power of one and goes up to the max
            power allowed.
"""
def check3(value, list1, list2, list1power, power):
for i in range(len(list1)):
move = list1[i]
if list1power == 1:
res = check2(value + move[1], list2, power - 1)
if res:
return ([move[0]], res)
else:
del list1[i]
res = check3(value + move[1], list1, list2, list1power-1, power-1)
list1[i:i] = [move]
if res:
x, y = res
x.append(move[0])
return (x, y)
return False
for p in range(1, power):
res = check3(0, list1, list2, p, power)
if res:
return res
return False
def check5(list1, list2, max_power):
for p in range(2, max_power+1):
res = check4(list1, list2, p)
if res:
return res
return False
ok = True
reconciled = 0
while credits and debits and ok:
res = check5(credits, debits, power)
if res:
move_line_obj.reconcile(cr, uid, res[0] + res[1], 'auto', writeoff_acc_id, period_id, journal_id, context)
reconciled += len(res[0]) + len(res[1])
credits = [(id, credit) for (id, credit) in credits if id not in res[0]]
debits = [(id, debit) for (id, debit) in debits if id not in res[1]]
else:
ok = False
return (reconciled, len(credits)+len(debits))
def reconcile(self, cr, uid, ids, context=None):
move_line_obj = self.pool.get('account.move.line')
obj_model = self.pool.get('ir.model.data')
if context is None:
context = {}
form = self.browse(cr, uid, ids, context=context)[0]
max_amount = form.max_amount or 0.0
power = form.power
allow_write_off = form.allow_write_off
reconciled = unreconciled = 0
if not form.account_ids:
raise osv.except_osv(_('User Error!'), _('You must select accounts to reconcile.'))
for account_id in form.account_ids:
params = (account_id.id,)
if not allow_write_off:
query = """SELECT partner_id FROM account_move_line WHERE account_id=%s AND reconcile_id IS NULL
AND state <> 'draft' GROUP BY partner_id
HAVING ABS(SUM(debit-credit)) = 0.0 AND count(*)>0"""
else:
query = """SELECT partner_id FROM account_move_line WHERE account_id=%s AND reconcile_id IS NULL
AND state <> 'draft' GROUP BY partner_id
HAVING ABS(SUM(debit-credit)) < %s AND count(*)>0"""
params += (max_amount,)
# reconcile automatically all transactions from partners whose balance is 0
cr.execute(query, params)
partner_ids = [id for (id,) in cr.fetchall()]
for partner_id in partner_ids:
cr.execute(
"SELECT id " \
"FROM account_move_line " \
"WHERE account_id=%s " \
"AND partner_id=%s " \
"AND state <> 'draft' " \
"AND reconcile_id IS NULL",
(account_id.id, partner_id))
line_ids = [id for (id,) in cr.fetchall()]
if line_ids:
reconciled += len(line_ids)
if allow_write_off:
move_line_obj.reconcile(cr, uid, line_ids, 'auto', form.writeoff_acc_id.id, form.period_id.id, form.journal_id.id, context)
else:
move_line_obj.reconcile_partial(cr, uid, line_ids, 'manual', context=context)
# get the list of partners who have more than one unreconciled transaction
cr.execute(
"SELECT partner_id " \
"FROM account_move_line " \
"WHERE account_id=%s " \
"AND reconcile_id IS NULL " \
"AND state <> 'draft' " \
"AND partner_id IS NOT NULL " \
"GROUP BY partner_id " \
"HAVING count(*)>1",
(account_id.id,))
partner_ids = [id for (id,) in cr.fetchall()]
#filter?
for partner_id in partner_ids:
# get the list of unreconciled 'debit transactions' for this partner
cr.execute(
"SELECT id, debit " \
"FROM account_move_line " \
"WHERE account_id=%s " \
"AND partner_id=%s " \
"AND reconcile_id IS NULL " \
"AND state <> 'draft' " \
"AND debit > 0 " \
"ORDER BY date_maturity",
(account_id.id, partner_id))
debits = cr.fetchall()
# get the list of unreconciled 'credit transactions' for this partner
cr.execute(
"SELECT id, credit " \
"FROM account_move_line " \
"WHERE account_id=%s " \
"AND partner_id=%s " \
"AND reconcile_id IS NULL " \
"AND state <> 'draft' " \
"AND credit > 0 " \
"ORDER BY date_maturity",
(account_id.id, partner_id))
credits = cr.fetchall()
(rec, unrec) = self.do_reconcile(cr, uid, credits, debits, max_amount, power, form.writeoff_acc_id.id, form.period_id.id, form.journal_id.id, context)
reconciled += rec
unreconciled += unrec
# add the number of transactions for partners who have only one
            # unreconciled transaction to the unreconciled count
partner_filter = partner_ids and 'AND partner_id not in (%s)' % ','.join(map(str, filter(None, partner_ids))) or ''
cr.execute(
"SELECT count(*) " \
"FROM account_move_line " \
"WHERE account_id=%s " \
"AND reconcile_id IS NULL " \
"AND state <> 'draft' " + partner_filter,
(account_id.id,))
additional_unrec = cr.fetchone()[0]
unreconciled = unreconciled + additional_unrec
context = dict(context, reconciled=reconciled, unreconciled=unreconciled)
model_data_ids = obj_model.search(cr,uid,[('model','=','ir.ui.view'),('name','=','account_automatic_reconcile_view1')])
resource_id = obj_model.read(cr, uid, model_data_ids, fields=['res_id'])[0]['res_id']
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'account.automatic.reconcile',
'views': [(resource_id,'form')],
'type': 'ir.actions.act_window',
'target': 'new',
'context': context,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
YcheLanguageStudio/PythonStudy | crpytography/libs/extended_euclidean.py | 1 | 1387 | class ExtendedGcdEuclidean:
def __init__(self, modulo_num, another_num):
self.modulo_num = modulo_num
self.r_list = list([modulo_num, another_num])
self.q_list = list([None, None])
self.x_list = list([1, 0])
self.y_list = list([0, 1])
self.iter_list = list([-1, 0])
self.is_break = False
self.compute_final_result()
def do_one_iteration(self):
next_tail_index = len(self.iter_list)
self.iter_list.append(self.iter_list[next_tail_index - 1] + 1)
self.q_list.append(self.r_list[next_tail_index - 2] / self.r_list[next_tail_index - 1])
self.r_list.append(self.r_list[next_tail_index - 2] % self.r_list[next_tail_index - 1])
if self.r_list[next_tail_index] == 0:
self.is_break = True
else:
self.x_list.append(self.x_list[next_tail_index - 2] -
self.q_list[next_tail_index] * self.x_list[next_tail_index - 1])
self.y_list.append(self.y_list[next_tail_index - 2] -
self.q_list[next_tail_index] * self.y_list[next_tail_index - 1])
def compute_final_result(self):
while not self.is_break:
self.do_one_iteration()
def get_result(self):
cur_y_val = self.y_list[-1]
return cur_y_val + self.modulo_num if cur_y_val < 0 else cur_y_val
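if __name__ == "__main__":
    # Usage sketch (assumption: this demo block is illustrative and not part of the
    # original module).  When gcd(modulo_num, another_num) == 1, get_result() is the
    # modular inverse of another_num modulo modulo_num; here 7 * 15 == 105 == 1 (mod 26).
    assert ExtendedGcdEuclidean(26, 7).get_result() == 15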
| mit |
0xTony/Web-Filters | merger.py | 1 | 2808 | # -*- coding: utf-8 -*-
__version__ = "0.0.0.0.1b Pre Alpha Alpha"
__author__ = "Tony Martin"
# Creating a host file for blocking ads
# Based on the idea of various adblocking sw running on TomatoUSB
# The goal is to simplify the adblocking sw by having a central point
# that merges and hosts the host file - this will allow the SW
# to simply download the file and with a quick sanitization (always sanitize - never trust)
# get the most updated host list to block
#
# This program will grab the host files from the sources file in the github project,
# download and merge them into a single file.
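# Illustrative example (assumed format, not copied from the actual sources file): the
# sources list is plain text with one host-file URL per line and '#' for comments, e.g.
#
#   # primary lists
#   http://example.com/ad-hosts.txt
#
# Each URL is fetched into a local "source-<hash>" file, and the merged "hosts" output
# contains one "0.0.0.0 <hostname>" line per blocked host.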
import sys
import urllib2
import subprocess
import hashlib
# location of sources of host file data
sourcelist = "https://raw.githubusercontent.com/0xTony/Web-Filters/master/sources"
# a few of the sources need use agent headers or the close the connection
headers = { 'User-Agent' : 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.6) Gecko/20070802 SeaMonkey/1.1.4' }
# Trash old file and overwrite with new contents
def writeToFile(filename, data):
target = open(filename, 'w')
target.truncate()
target.write(data)
target.close()
#download list of sources from github
def getSources():
response = urllib2.urlopen(sourcelist)
return response.read()
# With the list of source host files, download each and save
# if a file doesn't download, the previously saved copy is kept so no data is lost.
# will download contents into a file named source-<hash of the URL>
def downloadSources(sources):
data = ""
print sources
for line in sources.splitlines():
if not line.startswith("#"):
sourcehash = "source-" + hashlib.md5(line).hexdigest()[:8] # for file name
# get data. if it exists, overwrite source file - else if error it wil use old
try:
req = urllib2.Request(line, None, headers)
data = urllib2.urlopen(req).read()
data = data.replace('127.0.0.1', '0.0.0.0')
data = data.replace("\r", "\n")
data = data.replace(" ", " ")
print "writing to " + sourcehash + " for line " + line
writeToFile(sourcehash, data)
except urllib2.URLError, e:
print "An error %s " %e
print line
except:
print "Bad Source for line " + line
# Take all source-* files, merge and remove duplicates and remove unwanted data
def mergeSources():
# Merge the files and filter out unwanted data
print "Merging"
process = subprocess.Popen('sort -u source-* | grep -v "#" | grep -v "localhost" | grep -v "broadcasthost" | grep "0.0.0.0" > hosts',
shell=True,stdout=subprocess.PIPE)
#force a wait for Popen to finish
process.communicate()
# Main program entry here
def main(argv):
sources = getSources()
downloadSources(sources)
mergeSources()
if __name__ == "__main__":
main(sys.argv[1:])
print("Done")
| mit |
sarakha63/persomov | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/oktoberfesttv.py | 168 | 1500 | # encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class OktoberfestTVIE(InfoExtractor):
_VALID_URL = r'https?://www\.oktoberfest-tv\.de/[^/]+/[^/]+/video/(?P<id>[^/?#]+)'
_TEST = {
'url': 'http://www.oktoberfest-tv.de/de/kameras/video/hb-zelt',
'info_dict': {
'id': 'hb-zelt',
'ext': 'mp4',
'title': 're:^Live-Kamera: Hofbräuzelt [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'thumbnail': 're:^https?://.*\.jpg$',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._live_title(self._html_search_regex(
r'<h1><strong>.*?</strong>(.*?)</h1>', webpage, 'title'))
clip = self._search_regex(
r"clip:\s*\{\s*url:\s*'([^']+)'", webpage, 'clip')
ncurl = self._search_regex(
r"netConnectionUrl:\s*'([^']+)'", webpage, 'rtmp base')
video_url = ncurl + clip
thumbnail = self._search_regex(
r"canvas:\s*\{\s*backgroundImage:\s*'url\(([^)]+)\)'", webpage,
'thumbnail', fatal=False)
return {
'id': video_id,
'title': title,
'url': video_url,
'ext': 'mp4',
'is_live': True,
'thumbnail': thumbnail,
}
| gpl-3.0 |
priseborough/InertialNav | code/plot_states.py | 6 | 2287 | #!/bin/python
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import numpy as np
import math
# State vector:
# 0-3: quaternions (q0, q1, q2, q3)
# 4-6: Velocity - m/sec (North, East, Down)
# 7-9: Position - m (North, East, Down)
# 10-12: Delta Angle bias - rad (X,Y,Z)
# 13: Accel offset
# 14-15: Wind Vector - m/sec (North,East)
# 16-18: Earth Magnetic Field Vector - milligauss (North, East, Down)
# 19-21: Body Magnetic Field Vector - milligauss (X,Y,Z)
# 22: Terrain
try:
data = np.genfromtxt('StateDataOut.txt', delimiter=' ', skip_header=1,
skip_footer=1, names=['time', 'q1', 'q2', 'q3', 'q4', 'Vn', 'Ve', 'Vd', 'Pn', 'Pe', 'Pd',
'Bx', 'By', 'Bz', 'Aoff', 'Wn', 'We', 'Mn', 'Me', 'Md', 'Mbn', 'Mbe', 'Mbd', 'dist'])
except ValueError:
try:
data = np.genfromtxt('StateDataOut.txt', delimiter=' ', skip_header=1,
skip_footer=1, names=['time', 'q1', 'q2', 'q3', 'q4', 'Vn', 'Ve', 'Vd', 'Pn', 'Pe', 'Pd',
'Bx', 'By', 'Bz', 'Aoff', 'Wn', 'We', 'Mn', 'Me', 'Md', 'Mbn', 'Mbe', 'Mbd'])
except ValueError:
data = np.genfromtxt('StateDataOut.txt', delimiter=' ', skip_header=1,
skip_footer=1, names=['time', 'q1', 'q2', 'q3', 'q4', 'Vn', 'Ve', 'Vd', 'Pn', 'Pe', 'Pd',
'Bx', 'By', 'Bz', 'Wn', 'We', 'Mn', 'Me', 'Md', 'Mbn', 'Mbe', 'Mbd'])
fig = plt.figure()
ax1 = fig.add_subplot(611)
ax1.set_title("Offsets")
ax1.set_ylabel('X gyro offset')
ax1.set_ylim([-0.0025,0.0025])
ax1.plot(data['time'], data['Bx'], color='r', label='Pn')
ax2 = fig.add_subplot(612)
ax2.set_ylabel('Y gyro offset')
ax2.set_ylim([-0.0025,0.0025])
ax2.plot(data['time'], data['By'], color='g', label='Pe')
ax3 = fig.add_subplot(613)
ax3.set_ylabel('Z gyro offset')
ax3.set_ylim([-0.0025,0.0025])
ax3.plot(data['time'], data['Bz'], color='b', label='Pd')
ax4 = fig.add_subplot(614)
ax4.set_ylabel('Mag offset N')
ax4.set_ylim([-0.4,0.4])
ax4.plot(data['time'], data['Mbn'], color='b', label='Pd')
ax5 = fig.add_subplot(615)
ax5.set_ylabel('Mag offset E')
ax5.set_ylim([-0.4,0.4])
ax5.plot(data['time'], data['Mbe'], color='b', label='Pd')
ax6 = fig.add_subplot(616)
ax6.set_xlabel('time (s)')
ax6.set_ylabel('Mag offset D')
ax6.set_ylim([-0.4,0.4])
ax6.plot(data['time'], data['Mbd'], color='b', label='Pd')
plt.show() | bsd-3-clause |
ThomasMiconi/nupic.research | projects/l2_pooling/multi_column_convergence.py | 2 | 22360 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file plots the convergence of L4-L2 as you increase the number of columns,
or adjust the confusion between objects.
"""
import random
import os
from math import ceil
import pprint
import numpy
import cPickle
from multiprocessing import Pool
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
from htmresearch.frameworks.layers.l2_l4_inference import L4L2Experiment
from htmresearch.frameworks.layers.object_machine_factory import (
createObjectMachine
)
def locateConvergencePoint(stats, minOverlap, maxOverlap):
"""
Walk backwards through stats until you locate the first point that diverges
from target overlap values. We need this to handle cases where it might get
to target values, diverge, and then get back again. We want the last
convergence point.
"""
for i,v in enumerate(stats[::-1]):
if not (v >= minOverlap and v <= maxOverlap):
return len(stats)-i + 1
# Never differs - converged in one iteration
return 1
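# Worked example (illustrative): locateConvergencePoint([10, 20, 35, 38, 36], 30, 40)
# returns 3.  Scanning backwards, the last value outside [30, 40] is the 20 at
# (1-based) position 2, so the final run of converged values starts at position 3.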
def averageConvergencePoint(inferenceStats, prefix, minOverlap, maxOverlap,
settlingTime):
"""
inferenceStats contains activity traces while the system visits each object.
Given the i'th object, inferenceStats[i] contains activity statistics for
each column for each region for the entire sequence of sensations.
For each object, compute the convergence time - the first point when all
L2 columns have converged.
Return the average convergence time across all objects.
Given inference statistics for a bunch of runs, locate all traces with the
given prefix. For each trace locate the iteration where it finally settles
  within [minOverlap, maxOverlap]. Return the average settling iteration across all runs.
"""
convergenceSum = 0.0
# For each object
for stats in inferenceStats:
# For each L2 column locate convergence time
convergencePoint = 0.0
for key in stats.iterkeys():
if prefix in key:
columnConvergence = locateConvergencePoint(
stats[key], minOverlap, maxOverlap)
# Ensure this column has converged by the last iteration
# assert(columnConvergence <= len(stats[key]))
convergencePoint = max(convergencePoint, columnConvergence)
convergenceSum += ceil(float(convergencePoint)/settlingTime)
return convergenceSum/len(inferenceStats)
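# Example (illustrative): with settlingTime=3, a column whose trace first stays inside
# [minOverlap, maxOverlap] from raw iteration 7 onwards contributes
# ceil(7.0 / 3) == 3 touches for that object; the slowest column of each object is the
# one that counts.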
def objectConfusion(objects):
"""
For debugging, print overlap between each pair of objects.
"""
sumCommonLocations = 0
sumCommonFeatures = 0
sumCommonPairs = 0
numObjects = 0
commonPairHistogram = numpy.zeros(len(objects[0]), dtype=numpy.int32)
for o1,s1 in objects.iteritems():
for o2,s2 in objects.iteritems():
if o1 != o2:
# Count number of common locations id's and common feature id's
commonLocations = 0
commonFeatures = 0
for pair1 in s1:
for pair2 in s2:
if pair1[0] == pair2[0]: commonLocations += 1
if pair1[1] == pair2[1]: commonFeatures += 1
# print "Confusion",o1,o2,", common pairs=",len(set(s1)&set(s2)),
# print ", common locations=",commonLocations,"common features=",commonFeatures
assert(len(set(s1)&set(s2)) != len(s1) ), "Two objects are identical!"
sumCommonPairs += len(set(s1)&set(s2))
sumCommonLocations += commonLocations
sumCommonFeatures += commonFeatures
commonPairHistogram[len(set(s1)&set(s2))] += 1
numObjects += 1
print "Average common pairs=", sumCommonPairs / float(numObjects),
print ", locations=",sumCommonLocations / float(numObjects),
print ", features=",sumCommonFeatures / float(numObjects)
print "Common pair histogram=",commonPairHistogram
def runExperiment(args):
"""
Run experiment. What did you think this does?
args is a dict representing the parameters. We do it this way to support
multiprocessing. args contains one or more of the following keys:
@param noiseLevel (float) Noise level to add to the locations and features
during inference. Default: None
@param profile (bool) If True, the network will be profiled after
learning and inference. Default: False
@param numObjects (int) The number of objects we will train.
Default: 10
@param numPoints (int) The number of points on each object.
Default: 10
@param pointRange (int) Creates objects each with points ranging from
[numPoints,...,numPoints+pointRange-1]
A total of numObjects * pointRange objects will be
created.
Default: 1
@param numLocations (int) For each point, the number of locations to choose
from. Default: 10
@param numFeatures (int) For each point, the number of features to choose
from. Default: 10
@param numColumns (int) The total number of cortical columns in network.
Default: 2
@param settlingTime (int) Number of iterations we wait to let columns
stabilize. Important for multicolumn experiments
with lateral connections.
@param includeRandomLocation (bool) If True, a random location SDR will be
generated during inference for each feature.
The method returns the args dict updated with two additional keys:
convergencePoint (int) The average number of iterations it took
to converge across all objects
objects (pairs) The list of objects we trained on
"""
numObjects = args.get("numObjects", 10)
numLocations = args.get("numLocations", 10)
numFeatures = args.get("numFeatures", 10)
numColumns = args.get("numColumns", 2)
profile = args.get("profile", False)
noiseLevel = args.get("noiseLevel", None) # TODO: implement this?
numPoints = args.get("numPoints", 10)
trialNum = args.get("trialNum", 42)
pointRange = args.get("pointRange", 1)
plotInferenceStats = args.get("plotInferenceStats", True)
settlingTime = args.get("settlingTime", 3)
includeRandomLocation = args.get("includeRandomLocation", False)
# Create the objects
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=150,
externalInputSize=2400,
numCorticalColumns=numColumns,
numFeatures=numFeatures,
seed=trialNum
)
for p in range(pointRange):
objects.createRandomObjects(numObjects, numPoints=numPoints+p,
numLocations=numLocations,
numFeatures=numFeatures)
objectConfusion(objects.getObjects())
# print "Total number of objects created:",len(objects.getObjects())
# print "Objects are:"
# for o in objects:
# pairs = objects[o]
# pairs.sort()
# print str(o) + ": " + str(pairs)
# Setup experiment and train the network
name = "convergence_O%03d_L%03d_F%03d_C%03d_T%03d" % (
numObjects, numLocations, numFeatures, numColumns, trialNum
)
exp = L4L2Experiment(
name,
numCorticalColumns=numColumns,
inputSize=150,
externalInputSize=2400,
numInputBits=20,
seed=trialNum
)
exp.learnObjects(objects.provideObjectsToLearn())
if profile:
exp.printProfile(reset=True)
# For inference, we will check and plot convergence for each object. For each
# object, we create a sequence of random sensations for each column. We will
# present each sensation for settlingTime time steps to let it settle and
# ensure it converges.
for objectId in objects:
obj = objects[objectId]
objectSensations = {}
for c in range(numColumns):
objectSensations[c] = []
if numColumns > 1:
      # Create sequence of random sensations for this object for all columns. At
# any point in time, ensure each column touches a unique loc,feature pair
# on the object. It is ok for a given column to sense a loc,feature pair
# more than once. The total number of sensations is equal to the number of
# points on the object.
for sensationNumber in range(len(obj)):
# Randomly shuffle points for each sensation
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
for c in range(numColumns):
# stay multiple steps on each sensation
for _ in xrange(settlingTime):
objectSensations[c].append(objectCopy[c])
else:
# Create sequence of sensations for this object for one column. The total
# number of sensations is equal to the number of points on the object. No
# point should be visited more than once.
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
for pair in objectCopy:
# stay multiple steps on each sensation
for _ in xrange(settlingTime):
objectSensations[0].append(pair)
inferConfig = {
"object": objectId,
"numSteps": len(objectSensations[0]),
"pairs": objectSensations,
"includeRandomLocation": includeRandomLocation,
}
inferenceSDRs = objects.provideObjectToInfer(inferConfig)
exp.infer(inferenceSDRs, objectName=objectId)
if profile:
exp.printProfile(reset=True)
if plotInferenceStats:
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
experimentID=objectId,
onePlot=False,
)
convergencePoint = averageConvergencePoint(
exp.getInferenceStats(),"L2 Representation", 30, 40, settlingTime)
print
print "# objects {} # features {} # locations {} # columns {} trial # {}".format(
numObjects, numFeatures, numLocations, numColumns, trialNum)
print "Average convergence point=",convergencePoint
# Return our convergence point as well as all the parameters and objects
args.update({"objects": objects.getObjects()})
args.update({"convergencePoint":convergencePoint})
# Can't pickle experiment so can't return it for batch multiprocessing runs.
# However this is very useful for debugging when running in a single thread.
if plotInferenceStats:
args.update({"experiment": exp})
return args
def runExperimentPool(numObjects,
numLocations,
numFeatures,
numColumns,
numWorkers=7,
nTrials=1,
pointRange=1,
numPoints=10,
includeRandomLocation=False,
resultsName="convergence_results.pkl"):
"""
Allows you to run a number of experiments using multiple processes.
For each parameter except numWorkers, pass in a list containing valid values
for that parameter. The cross product of everything is run, and each
combination is run nTrials times.
Returns a list of dict containing detailed results from each experiment.
Also pickles and saves the results in resultsName for later analysis.
Example:
results = runExperimentPool(
numObjects=[10],
numLocations=[5],
numFeatures=[5],
numColumns=[2,3,4,5,6],
numWorkers=8,
nTrials=5)
"""
# Create function arguments for every possibility
args = []
for t in range(nTrials):
for c in numColumns:
for o in numObjects:
for l in numLocations:
for f in numFeatures:
args.append(
{"numObjects": o,
"numLocations": l,
"numFeatures": f,
"numColumns": c,
"trialNum": t,
"pointRange": pointRange,
"numPoints": numPoints,
"plotInferenceStats": False,
"includeRandomLocation": includeRandomLocation,
"settlingTime": 3,
}
)
print "{} experiments to run, {} workers".format(len(args), numWorkers)
# Run the pool
if numWorkers > 1:
pool = Pool(processes=numWorkers)
result = pool.map(runExperiment, args)
else:
result = []
for arg in args:
result.append(runExperiment(arg))
# print "Full results:"
# pprint.pprint(result, width=150)
# Pickle results for later use
with open(resultsName,"wb") as f:
cPickle.dump(result,f)
return result
def plotConvergenceByColumn(results, columnRange, featureRange, numTrials):
"""
Plots the convergence graph: iterations vs number of columns.
Each curve shows the convergence for a given number of unique features.
"""
########################################################################
#
# Accumulate all the results per column in a convergence array.
#
  # Convergence[f,c] = how long it took to converge with f unique features
# and c columns.
convergence = numpy.zeros((max(featureRange), max(columnRange) + 1))
for r in results:
convergence[r["numFeatures"] - 1,
r["numColumns"]] += r["convergencePoint"]
convergence /= numTrials
# For each column, print convergence as fct of number of unique features
for c in range(1, max(columnRange) + 1):
print c, convergence[:, c]
# Print everything anyway for debugging
print "Average convergence array=", convergence
########################################################################
#
  # Create the plot.  x-axis = number of columns.
plt.figure()
plotPath = os.path.join("plots", "convergence_by_column.pdf")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(featureRange)):
f = featureRange[i]
print columnRange
print convergence[f-1,columnRange]
legendList.append('Unique features={}'.format(f))
plt.plot(columnRange, convergence[f-1,columnRange],
color=colorList[i])
# format
plt.legend(legendList, loc="upper right")
plt.xlabel("Number of columns")
plt.xticks(columnRange)
plt.yticks(range(0,int(convergence.max())+1))
plt.ylabel("Average number of touches")
plt.title("Number of touches to recognize one object (multiple columns)")
# save
plt.savefig(plotPath)
plt.close()
def plotConvergenceByObject(results, objectRange, featureRange):
"""
Plots the convergence graph: iterations vs number of objects.
Each curve shows the convergence for a given number of unique features.
"""
########################################################################
#
# Accumulate all the results per column in a convergence array.
#
  # Convergence[f,o] = how long it took to converge with f unique features
# and o objects.
convergence = numpy.zeros((max(featureRange), max(objectRange) + 1))
for r in results:
if r["numFeatures"] in featureRange:
convergence[r["numFeatures"] - 1, r["numObjects"]] += r["convergencePoint"]
convergence /= numTrials
########################################################################
#
  # Create the plot.  x-axis = number of objects.
plt.figure()
plotPath = os.path.join("plots", "convergence_by_object_random_location.pdf")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(featureRange)):
f = featureRange[i]
print "features={} objectRange={} convergence={}".format(
f,objectRange, convergence[f-1,objectRange])
legendList.append('Unique features={}'.format(f))
plt.plot(objectRange, convergence[f-1,objectRange],
color=colorList[i])
# format
plt.legend(legendList, loc="lower right", prop={'size':10})
plt.xlabel("Number of objects in training set")
plt.xticks(range(0,max(objectRange)+1,10))
plt.yticks(range(0,int(convergence.max())+2))
plt.ylabel("Average number of touches")
plt.title("Number of touches to recognize one object (single column)")
# save
plt.savefig(plotPath)
plt.close()
def plotConvergenceByObjectMultiColumn(results, objectRange, columnRange):
"""
Plots the convergence graph: iterations vs number of objects.
Each curve shows the convergence for a given number of columns.
"""
########################################################################
#
# Accumulate all the results per column in a convergence array.
#
  # Convergence[c,o] = how long it took to converge with c columns
  # and o objects.
convergence = numpy.zeros((max(columnRange), max(objectRange) + 1))
for r in results:
if r["numColumns"] in columnRange:
convergence[r["numColumns"] - 1, r["numObjects"]] += r["convergencePoint"]
convergence /= numTrials
# print "Average convergence array=", convergence
########################################################################
#
  # Create the plot.  x-axis = number of objects.
plt.figure()
plotPath = os.path.join("plots", "convergence_by_object_multicolumn.jpg")
# Plot each curve
legendList = []
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i in range(len(columnRange)):
c = columnRange[i]
print "columns={} objectRange={} convergence={}".format(
c, objectRange, convergence[c-1,objectRange])
if c == 1:
legendList.append('1 column')
else:
legendList.append('{} columns'.format(c))
plt.plot(objectRange, convergence[c-1,objectRange],
color=colorList[i])
# format
plt.legend(legendList, loc="upper left", prop={'size':10})
plt.xlabel("Number of objects in training set")
plt.xticks(range(0,max(objectRange)+1,10))
plt.yticks(range(0,int(convergence.max())+2))
plt.ylabel("Average number of touches")
plt.title("Object recognition with multiple columns (unique features = 5)")
# save
plt.savefig(plotPath)
plt.close()
if __name__ == "__main__":
# This is how you run a specific experiment in single process mode. Useful
# for debugging, profiling, etc.
if True:
results = runExperiment(
{
"numObjects": 30,
"numPoints": 10,
"numLocations": 10,
"numFeatures": 10,
"numColumns": 1,
"trialNum": 4,
"pointRange": 1,
"plotInferenceStats": True, # Outputs detailed graphs
"settlingTime": 3,
"includeRandomLocation": False
}
)
# Here we want to see how the number of columns affects convergence.
# This experiment is run using a process pool
if False:
columnRange = [1, 2, 3, 4, 5, 6, 7, 8]
featureRange = [5, 10, 20, 30]
objectRange = [100]
numTrials = 10
# Comment this out if you are re-running analysis on already saved results
# Very useful for debugging the plots
runExperimentPool(
numObjects=objectRange,
numLocations=[10],
numFeatures=featureRange,
numColumns=columnRange,
numPoints=10,
nTrials=numTrials,
numWorkers=7,
resultsName="column_convergence_results.pkl")
with open("column_convergence_results.pkl","rb") as f:
results = cPickle.load(f)
plotConvergenceByColumn(results, columnRange, featureRange,
numTrials=numTrials)
# Here we want to see how the number of objects affects convergence for a
# single column.
# This experiment is run using a process pool
if False:
# We run 10 trials for each column number and then analyze results
numTrials = 10
columnRange = [1]
featureRange = [5,10,20,30]
objectRange = [2,10,20,30,40,50,60,80,100]
# Comment this out if you are re-running analysis on already saved results.
# Very useful for debugging the plots
runExperimentPool(
numObjects=objectRange,
numLocations=[10],
numFeatures=featureRange,
numColumns=columnRange,
numPoints=10,
nTrials=numTrials,
numWorkers=7,
resultsName="object_convergence_results.pkl")
# Analyze results
with open("object_convergence_results.pkl","rb") as f:
results = cPickle.load(f)
plotConvergenceByObject(results, objectRange, featureRange)
# Here we want to see how the number of objects affects convergence for
# multiple columns.
if False:
# We run 10 trials for each column number and then analyze results
numTrials = 10
columnRange = [1,2,4,6]
featureRange = [5]
objectRange = [2,5,10,20,30,40,50,60,80,100]
# Comment this out if you are re-running analysis on already saved results.
# Very useful for debugging the plots
runExperimentPool(
numObjects=objectRange,
numLocations=[10],
numFeatures=featureRange,
numColumns=columnRange,
numPoints=10,
numWorkers=7,
nTrials=numTrials,
resultsName="object_convergence_multi_column_results.pkl")
# Analyze results
with open("object_convergence_multi_column_results.pkl","rb") as f:
results = cPickle.load(f)
plotConvergenceByObjectMultiColumn(results, objectRange, columnRange)
| agpl-3.0 |
meego-tablet-ux/meego-app-browser | third_party/mesa/MesaLib/src/mapi/glapi/gen/gl_XML.py | 33 | 24745 | #!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <[email protected]>
import libxml2
import re, sys, string
import typeexpr
def parse_GL_API( file_name, factory = None ):
doc = libxml2.readFile( file_name, None, libxml2.XML_PARSE_XINCLUDE + libxml2.XML_PARSE_NOBLANKS + libxml2.XML_PARSE_DTDVALID + libxml2.XML_PARSE_DTDATTR + libxml2.XML_PARSE_DTDLOAD + libxml2.XML_PARSE_NOENT )
ret = doc.xincludeProcess()
if not factory:
factory = gl_item_factory()
api = factory.create_item( "api", None, None )
api.process_element( doc )
# After the XML has been processed, we need to go back and assign
# dispatch offsets to the functions that request that their offsets
# be assigned by the scripts. Typically this means all functions
# that are not part of the ABI.
for func in api.functionIterateByCategory():
if func.assign_offset:
func.offset = api.next_offset;
api.next_offset += 1
doc.freeDoc()
return api
def is_attr_true( element, name ):
"""Read a name value from an element's attributes.
The value read from the attribute list must be either 'true' or
'false'. If the value is 'false', zero will be returned. If the
value is 'true', non-zero will be returned. An exception will be
raised for any other value."""
value = element.nsProp( name, None )
if value == "true":
return 1
elif value == "false":
return 0
else:
raise RuntimeError('Invalid value "%s" for boolean "%s".' % (value, name))
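# Example (illustrative): for a <param> element carrying counter="true",
# is_attr_true(element, "counter") returns 1; for counter="false" it returns 0; any
# other value raises the RuntimeError above.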
class gl_print_base:
"""Base class of all API pretty-printers.
In the model-view-controller pattern, this is the view. Any derived
	class will want to over-ride the printBody, printRealHeader, and
printRealFooter methods. Some derived classes may want to over-ride
printHeader and printFooter, or even Print (though this is unlikely).
"""
def __init__(self):
# Name of the script that is generating the output file.
# Every derived class should set this to the name of its
# source file.
self.name = "a"
# License on the *generated* source file. This may differ
# from the license on the script that is generating the file.
# Every derived class should set this to some reasonable
# value.
#
# See license.py for an example of a reasonable value.
self.license = "The license for this file is unspecified."
# The header_tag is the name of the C preprocessor define
# used to prevent multiple inclusion. Typically only
# generated C header files need this to be set. Setting it
# causes code to be generated automatically in printHeader
# and printFooter.
self.header_tag = None
# List of file-private defines that must be undefined at the
# end of the file. This can be used in header files to define
# names for use in the file, then undefine them at the end of
# the header file.
self.undef_list = []
return
def Print(self, api):
self.printHeader()
self.printBody(api)
self.printFooter()
return
def printHeader(self):
"""Print the header associated with all files and call the printRealHeader method."""
print '/* DO NOT EDIT - This file generated automatically by %s script */' \
% (self.name)
print ''
print '/*'
print ' * ' + self.license.replace('\n', '\n * ')
print ' */'
print ''
if self.header_tag:
print '#if !defined( %s )' % (self.header_tag)
print '# define %s' % (self.header_tag)
print ''
self.printRealHeader();
return
def printFooter(self):
"""Print the header associated with all files and call the printRealFooter method."""
self.printRealFooter()
if self.undef_list:
print ''
for u in self.undef_list:
print "# undef %s" % (u)
if self.header_tag:
print ''
print '#endif /* !defined( %s ) */' % (self.header_tag)
def printRealHeader(self):
"""Print the "real" header for the created file.
In the base class, this function is empty. All derived
classes should over-ride this function."""
return
def printRealFooter(self):
"""Print the "real" footer for the created file.
In the base class, this function is empty. All derived
classes should over-ride this function."""
return
def printPure(self):
"""Conditionally define `PURE' function attribute.
Conditionally defines a preprocessor macro `PURE' that wraps
GCC's `pure' function attribute. The conditional code can be
		easily adapted to other compilers that support a similar
feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append("PURE")
print """# if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
# define PURE __attribute__((pure))
# else
# define PURE
# endif"""
return
def printFastcall(self):
"""Conditionally define `FASTCALL' function attribute.
Conditionally defines a preprocessor macro `FASTCALL' that
wraps GCC's `fastcall' function attribute. The conditional
		code can be easily adapted to other compilers that support a
similar feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append("FASTCALL")
print """# if defined(__i386__) && defined(__GNUC__) && !defined(__CYGWIN__) && !defined(__MINGW32__)
# define FASTCALL __attribute__((fastcall))
# else
# define FASTCALL
# endif"""
return
def printVisibility(self, S, s):
"""Conditionally define visibility function attribute.
Conditionally defines a preprocessor macro name S that wraps
GCC's visibility function attribute. The visibility used is
		the parameter s.  The conditional code can be easily adapted
to other compilers that support a similar feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append(S)
print """# if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)) && defined(__ELF__)
# define %s __attribute__((visibility("%s")))
# else
# define %s
# endif""" % (S, s, S)
return
def printNoinline(self):
"""Conditionally define `NOINLINE' function attribute.
Conditionally defines a preprocessor macro `NOINLINE' that
wraps GCC's `noinline' function attribute. The conditional
		code can be easily adapted to other compilers that support a
similar feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append("NOINLINE")
print """# if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
# define NOINLINE __attribute__((noinline))
# else
# define NOINLINE
# endif"""
return
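# Minimal subclass sketch (assumption -- illustrative only, not part of the Mesa build
# system).  A concrete generator normally only overrides printBody() and, if needed,
# printRealHeader()/printRealFooter():
#
#   class example_printer(gl_print_base):
#       def __init__(self):
#           gl_print_base.__init__(self)
#           self.name = "example.py"
#           self.license = "MIT"
#
#       def printBody(self, api):
#           for func in api.functionIterateByOffset():
#               print '/* %s -> dispatch offset %u */' % (func.name, func.offset)
#
#   example_printer().Print(parse_GL_API("gl_API.xml"))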
def real_function_name(element):
name = element.nsProp( "name", None )
alias = element.nsProp( "alias", None )
if alias:
return alias
else:
return name
def real_category_name(c):
if re.compile("[1-9][0-9]*[.][0-9]+").match(c):
return "GL_VERSION_" + c.replace(".", "_")
else:
return c
def classify_category(name, number):
"""Based on the category name and number, select a numerical class for it.
Categories are divided into four classes numbered 0 through 3. The
classes are:
0. Core GL versions, sorted by version number.
1. ARB extensions, sorted by extension number.
2. Non-ARB extensions, sorted by extension number.
3. Un-numbered extensions, sorted by extension name.
"""
try:
core_version = float(name)
except Exception,e:
core_version = 0.0
if core_version > 0.0:
cat_type = 0
key = name
elif name.startswith("GL_ARB_") or name.startswith("GLX_ARB_") or name.startswith("WGL_ARB_"):
cat_type = 1
key = int(number)
else:
if number != None:
cat_type = 2
key = int(number)
else:
cat_type = 3
key = name
return [cat_type, key]
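# Illustrative examples (added for clarity):
#   classify_category("1.5", None)                -> [0, "1.5"]            (core GL version)
#   classify_category("GL_ARB_multitexture", "1") -> [1, 1]                (ARB, by number)
#   classify_category("GL_EXT_texture", "4")      -> [2, 4]                (non-ARB, by number)
#   classify_category("GL_XYZ_example", None)     -> [3, "GL_XYZ_example"] (un-numbered, by name)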
def create_parameter_string(parameters, include_names):
"""Create a parameter string from a list of gl_parameters."""
list = []
for p in parameters:
if p.is_padding:
continue
if include_names:
list.append( p.string() )
else:
list.append( p.type_string() )
if len(list) == 0: list = ["void"]
return string.join(list, ", ")
class gl_item:
def __init__(self, element, context):
self.context = context
self.name = element.nsProp( "name", None )
self.category = real_category_name( element.parent.nsProp( "name", None ) )
return
class gl_type( gl_item ):
def __init__(self, element, context):
gl_item.__init__(self, element, context)
self.size = int( element.nsProp( "size", None ), 0 )
te = typeexpr.type_expression( None )
tn = typeexpr.type_node()
tn.size = int( element.nsProp( "size", None ), 0 )
tn.integer = not is_attr_true( element, "float" )
tn.unsigned = is_attr_true( element, "unsigned" )
tn.name = "GL" + self.name
te.set_base_type_node( tn )
self.type_expr = te
return
def get_type_expression(self):
return self.type_expr
class gl_enum( gl_item ):
def __init__(self, element, context):
gl_item.__init__(self, element, context)
self.value = int( element.nsProp( "value", None ), 0 )
temp = element.nsProp( "count", None )
if not temp or temp == "?":
self.default_count = -1
else:
try:
c = int(temp)
except Exception,e:
				raise RuntimeError('Invalid count value "%s" for enum "%s" when an integer was expected.' % (temp, self.name))
self.default_count = c
return
def priority(self):
"""Calculate a 'priority' for this enum name.
When an enum is looked up by number, there may be many
		possible names, but only one is the 'preferred' name.  The
priority is used to select which name is the 'best'.
Highest precedence is given to core GL name. ARB extension
names have the next highest, followed by EXT extension names.
Vendor extension names are the lowest.
"""
if self.name.endswith( "_BIT" ):
bias = 1
else:
bias = 0
if self.category.startswith( "GL_VERSION_" ):
priority = 0
elif self.category.startswith( "GL_ARB_" ):
priority = 2
elif self.category.startswith( "GL_EXT_" ):
priority = 4
else:
priority = 6
return priority + bias
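# For example (illustrative): an enum defined by a GL_VERSION_* category gets
# priority 0, a GL_ARB_* alias gets 2, a GL_EXT_* alias 4 and a vendor alias 6;
# names ending in _BIT are pushed one step down so the non-_BIT spelling wins.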
class gl_parameter:
def __init__(self, element, context):
self.name = element.nsProp( "name", None )
ts = element.nsProp( "type", None )
self.type_expr = typeexpr.type_expression( ts, context )
temp = element.nsProp( "variable_param", None )
if temp:
self.count_parameter_list = temp.split( ' ' )
else:
self.count_parameter_list = []
# The count tag can be either a numeric string or the name of
# a variable. If it is the name of a variable, the int(c)
# statement will throw an exception, and the except block will
# take over.
c = element.nsProp( "count", None )
try:
count = int(c)
self.count = count
self.counter = None
except Exception,e:
count = 1
self.count = 0
self.counter = c
self.count_scale = int(element.nsProp( "count_scale", None ))
elements = (count * self.count_scale)
if elements == 1:
elements = 0
#if ts == "GLdouble":
# print '/* stack size -> %s = %u (before)*/' % (self.name, self.type_expr.get_stack_size())
# print '/* # elements = %u */' % (elements)
self.type_expr.set_elements( elements )
#if ts == "GLdouble":
# print '/* stack size -> %s = %u (after) */' % (self.name, self.type_expr.get_stack_size())
self.is_client_only = is_attr_true( element, 'client_only' )
self.is_counter = is_attr_true( element, 'counter' )
self.is_output = is_attr_true( element, 'output' )
# Pixel data has special parameters.
self.width = element.nsProp('img_width', None)
self.height = element.nsProp('img_height', None)
self.depth = element.nsProp('img_depth', None)
self.extent = element.nsProp('img_extent', None)
self.img_xoff = element.nsProp('img_xoff', None)
self.img_yoff = element.nsProp('img_yoff', None)
self.img_zoff = element.nsProp('img_zoff', None)
self.img_woff = element.nsProp('img_woff', None)
self.img_format = element.nsProp('img_format', None)
self.img_type = element.nsProp('img_type', None)
self.img_target = element.nsProp('img_target', None)
self.img_pad_dimensions = is_attr_true( element, 'img_pad_dimensions' )
self.img_null_flag = is_attr_true( element, 'img_null_flag' )
self.img_send_null = is_attr_true( element, 'img_send_null' )
self.is_padding = is_attr_true( element, 'padding' )
return
def compatible(self, other):
return 1
def is_array(self):
return self.is_pointer()
def is_pointer(self):
return self.type_expr.is_pointer()
def is_image(self):
if self.width:
return 1
else:
return 0
def is_variable_length(self):
return len(self.count_parameter_list) or self.counter
def is_64_bit(self):
count = self.type_expr.get_element_count()
if count:
if (self.size() / count) == 8:
return 1
else:
if self.size() == 8:
return 1
return 0
def string(self):
return self.type_expr.original_string + " " + self.name
def type_string(self):
return self.type_expr.original_string
def get_base_type_string(self):
return self.type_expr.get_base_name()
def get_dimensions(self):
if not self.width:
return [ 0, "0", "0", "0", "0" ]
dim = 1
w = self.width
h = "1"
d = "1"
e = "1"
if self.height:
dim = 2
h = self.height
if self.depth:
dim = 3
d = self.depth
if self.extent:
dim = 4
e = self.extent
return [ dim, w, h, d, e ]
def get_stack_size(self):
return self.type_expr.get_stack_size()
def size(self):
if self.is_image():
return 0
else:
return self.type_expr.get_element_size()
def get_element_count(self):
c = self.type_expr.get_element_count()
if c == 0:
return 1
return c
def size_string(self, use_parens = 1):
s = self.size()
if self.counter or self.count_parameter_list:
list = [ "compsize" ]
if self.counter and self.count_parameter_list:
list.append( self.counter )
elif self.counter:
list = [ self.counter ]
if s > 1:
list.append( str(s) )
if len(list) > 1 and use_parens :
return "(%s)" % (string.join(list, " * "))
else:
return string.join(list, " * ")
elif self.is_image():
return "compsize"
else:
return str(s)
def format_string(self):
if self.type_expr.original_string == "GLenum":
return "0x%x"
else:
return self.type_expr.format_string()
class gl_function( gl_item ):
def __init__(self, element, context):
self.context = context
self.name = None
self.entry_points = []
self.return_type = "void"
self.parameters = []
self.offset = -1
self.initialized = 0
self.images = []
self.assign_offset = 0
self.static_entry_points = []
# Track the parameter string (for the function prototype)
# for each entry-point. This is done because some functions
# change their prototype slightly when promoted from extension
# to ARB extension to core. glTexImage3DEXT and glTexImage3D
# are good examples of this. Scripts that need to generate
		# code for these differing aliases need the real prototype
# for each entry-point. Otherwise, they may generate code
# that won't compile.
self.parameter_strings = {}
self.process_element( element )
return
def process_element(self, element):
name = element.nsProp( "name", None )
alias = element.nsProp( "alias", None )
if is_attr_true(element, "static_dispatch"):
self.static_entry_points.append(name)
self.entry_points.append( name )
if alias:
true_name = alias
else:
true_name = name
# Only try to set the offset when a non-alias
# entry-point is being processes.
offset = element.nsProp( "offset", None )
if offset:
try:
o = int( offset )
self.offset = o
except Exception, e:
self.offset = -1
if offset == "assign":
self.assign_offset = 1
if not self.name:
self.name = true_name
elif self.name != true_name:
raise RuntimeError("Function true name redefined. Was %s, now %s." % (self.name, true_name))
# There are two possible cases. The first time an entry-point
# with data is seen, self.initialized will be 0. On that
# pass, we just fill in the data. The next time an
# entry-point with data is seen, self.initialized will be 1.
		# On that pass we have to make sure that the new values match the
		# values from the previous entry-point.
parameters = []
return_type = "void"
child = element.children
while child:
if child.type == "element":
if child.name == "return":
return_type = child.nsProp( "type", None )
elif child.name == "param":
param = self.context.factory.create_item( "parameter", child, self.context)
parameters.append( param )
child = child.next
if self.initialized:
if self.return_type != return_type:
raise RuntimeError( "Return type changed in %s. Was %s, now %s." % (name, self.return_type, return_type))
if len(parameters) != len(self.parameters):
raise RuntimeError( "Parameter count mismatch in %s. Was %d, now %d." % (name, len(self.parameters), len(parameters)))
for j in range(0, len(parameters)):
p1 = parameters[j]
p2 = self.parameters[j]
if not p1.compatible( p2 ):
raise RuntimeError( 'Parameter type mismatch in %s. "%s" was "%s", now "%s".' % (name, p2.name, p2.type_expr.original_string, p1.type_expr.original_string))
if true_name == name or not self.initialized:
self.return_type = return_type
self.parameters = parameters
for param in self.parameters:
if param.is_image():
self.images.append( param )
if element.children:
self.initialized = 1
self.parameter_strings[name] = create_parameter_string(parameters, 1)
else:
self.parameter_strings[name] = None
return
def get_images(self):
"""Return potentially empty list of input images."""
return self.images
def parameterIterator(self):
return self.parameters.__iter__();
def get_parameter_string(self, entrypoint = None):
if entrypoint:
s = self.parameter_strings[ entrypoint ]
if s:
return s
return create_parameter_string( self.parameters, 1 )
def get_called_parameter_string(self):
p_string = ""
comma = ""
for p in self.parameterIterator():
p_string = p_string + comma + p.name
comma = ", "
return p_string
def is_abi(self):
return (self.offset >= 0 and not self.assign_offset)
def is_static_entry_point(self, name):
return name in self.static_entry_points
def dispatch_name(self):
if self.name in self.static_entry_points:
return self.name
else:
return "_dispatch_stub_%u" % (self.offset)
def static_name(self, name):
if name in self.static_entry_points:
return name
else:
return "_dispatch_stub_%u" % (self.offset)
class gl_item_factory:
"""Factory to create objects derived from gl_item."""
def create_item(self, item_name, element, context):
if item_name == "function":
return gl_function(element, context)
if item_name == "type":
return gl_type(element, context)
elif item_name == "enum":
return gl_enum(element, context)
elif item_name == "parameter":
return gl_parameter(element, context)
elif item_name == "api":
return gl_api(self)
else:
return None
class gl_api:
def __init__(self, factory):
self.functions_by_name = {}
self.enums_by_name = {}
self.types_by_name = {}
self.category_dict = {}
self.categories = [{}, {}, {}, {}]
self.factory = factory
self.next_offset = 0
typeexpr.create_initial_types()
return
def process_element(self, doc):
element = doc.children
while element.type != "element" or element.name != "OpenGLAPI":
element = element.next
if element:
self.process_OpenGLAPI(element)
return
def process_OpenGLAPI(self, element):
child = element.children
while child:
if child.type == "element":
if child.name == "category":
self.process_category( child )
elif child.name == "OpenGLAPI":
self.process_OpenGLAPI( child )
child = child.next
return
def process_category(self, cat):
cat_name = cat.nsProp( "name", None )
cat_number = cat.nsProp( "number", None )
[cat_type, key] = classify_category(cat_name, cat_number)
self.categories[cat_type][key] = [cat_name, cat_number]
child = cat.children
while child:
if child.type == "element":
if child.name == "function":
func_name = real_function_name( child )
temp_name = child.nsProp( "name", None )
self.category_dict[ temp_name ] = [cat_name, cat_number]
if self.functions_by_name.has_key( func_name ):
func = self.functions_by_name[ func_name ]
func.process_element( child )
else:
func = self.factory.create_item( "function", child, self )
self.functions_by_name[ func_name ] = func
if func.offset >= self.next_offset:
self.next_offset = func.offset + 1
elif child.name == "enum":
enum = self.factory.create_item( "enum", child, self )
self.enums_by_name[ enum.name ] = enum
elif child.name == "type":
t = self.factory.create_item( "type", child, self )
self.types_by_name[ "GL" + t.name ] = t
child = child.next
return
def functionIterateByCategory(self, cat = None):
"""Iterate over functions by category.
If cat is None, all known functions are iterated in category
order. See classify_category for details of the ordering.
Within a category, functions are sorted by name. If cat is
not None, then only functions in that category are iterated.
"""
lists = [{}, {}, {}, {}]
for func in self.functionIterateAll():
[cat_name, cat_number] = self.category_dict[func.name]
if (cat == None) or (cat == cat_name):
[func_cat_type, key] = classify_category(cat_name, cat_number)
if not lists[func_cat_type].has_key(key):
lists[func_cat_type][key] = {}
lists[func_cat_type][key][func.name] = func
functions = []
for func_cat_type in range(0,4):
keys = lists[func_cat_type].keys()
keys.sort()
for key in keys:
names = lists[func_cat_type][key].keys()
names.sort()
for name in names:
functions.append(lists[func_cat_type][key][name])
return functions.__iter__()
def functionIterateByOffset(self):
max_offset = -1
for func in self.functions_by_name.itervalues():
if func.offset > max_offset:
max_offset = func.offset
temp = [None for i in range(0, max_offset + 1)]
for func in self.functions_by_name.itervalues():
if func.offset != -1:
temp[ func.offset ] = func
list = []
for i in range(0, max_offset + 1):
if temp[i]:
list.append(temp[i])
return list.__iter__();
def functionIterateAll(self):
return self.functions_by_name.itervalues()
def enumIterateByName(self):
keys = self.enums_by_name.keys()
keys.sort()
list = []
for enum in keys:
list.append( self.enums_by_name[ enum ] )
return list.__iter__()
def categoryIterate(self):
"""Iterate over categories.
Iterate over all known categories in the order specified by
classify_category. Each iterated value is a tuple of the
name and number (which may be None) of the category.
"""
list = []
for cat_type in range(0,4):
keys = self.categories[cat_type].keys()
keys.sort()
for key in keys:
list.append(self.categories[cat_type][key])
return list.__iter__()
def get_category_for_name( self, name ):
if self.category_dict.has_key(name):
return self.category_dict[name]
else:
return ["<unknown category>", None]
def typeIterate(self):
return self.types_by_name.itervalues()
def find_type( self, type_name ):
if type_name in self.types_by_name:
return self.types_by_name[ type_name ].type_expr
else:
print "Unable to find base type matching \"%s\"." % (type_name)
return None
| bsd-3-clause |
googleads/google-ads-python | google/ads/googleads/v8/services/services/campaign_shared_set_service/transports/base.py | 1 | 4228 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v8.resources.types import campaign_shared_set
from google.ads.googleads.v8.services.types import campaign_shared_set_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class CampaignSharedSetServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for CampaignSharedSetService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_campaign_shared_set: gapic_v1.method.wrap_method(
self.get_campaign_shared_set,
default_timeout=None,
client_info=client_info,
),
self.mutate_campaign_shared_sets: gapic_v1.method.wrap_method(
self.mutate_campaign_shared_sets,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_campaign_shared_set(
self,
) -> typing.Callable[
[campaign_shared_set_service.GetCampaignSharedSetRequest],
campaign_shared_set.CampaignSharedSet,
]:
raise NotImplementedError
@property
def mutate_campaign_shared_sets(
self,
) -> typing.Callable[
[campaign_shared_set_service.MutateCampaignSharedSetsRequest],
campaign_shared_set_service.MutateCampaignSharedSetsResponse,
]:
raise NotImplementedError
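# --- Illustrative sketch only; not part of the generated client. ---
# A minimal concrete transport with stubbed RPC callables, showing how a
# subclass satisfies the abstract properties so that __init__ can build the
# wrapped-method table. A real transport (e.g. the gRPC transport) returns
# channel-bound callables instead of the lambdas used here, and
# AnonymousCredentials() is only used to avoid an ambient
# google.auth.default() lookup.
class _StubCampaignSharedSetServiceTransport(CampaignSharedSetServiceTransport):
    @property
    def get_campaign_shared_set(self):
        return lambda request, **kwargs: campaign_shared_set.CampaignSharedSet()

    @property
    def mutate_campaign_shared_sets(self):
        return lambda request, **kwargs: (
            campaign_shared_set_service.MutateCampaignSharedSetsResponse()
        )


# Example (not executed):
#   transport = _StubCampaignSharedSetServiceTransport(
#       credentials=ga_credentials.AnonymousCredentials())
#   transport._wrapped_methods   # stub callables wrapped by gapic_v1.method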
__all__ = ("CampaignSharedSetServiceTransport",)
| apache-2.0 |
airware/jsbsim | tests/TestScriptOutput.py | 2 | 3376 | # TestScriptOutput.py
#
# Check that <output> tags specified in a script are properly handled
#
# Copyright (c) 2015 Bertrand Coconnier
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>
#
import sys, unittest
import xml.etree.ElementTree as et
import pandas as pd
import numpy as np
from JSBSim_utils import CreateFDM, SandBox, ExecuteUntil
class TestScriptOutput(unittest.TestCase):
def setUp(self):
self.sandbox = SandBox()
self.script_path = self.sandbox.path_to_jsbsim_file('scripts',
'c1722.xml')
def tearDown(self):
self.sandbox.erase()
def test_no_output(self):
fdm = CreateFDM(self.sandbox)
fdm.load_script(self.script_path)
fdm.run_ic()
ExecuteUntil(fdm, 10.)
self.assertFalse(self.sandbox.exists('output.csv'),
msg="Results have unexpectedly been written to 'output.csv'")
def test_output_from_file(self):
tree = et.parse(self.sandbox.elude(self.script_path))
output_tag = et.SubElement(tree.getroot(), 'output')
output_tag.attrib['file'] = self.sandbox.elude(self.sandbox.path_to_jsbsim_file('tests', 'output.xml'))
tree.write(self.sandbox('c1722_0.xml'))
fdm = CreateFDM(self.sandbox)
fdm.load_script('c1722_0.xml')
fdm.run_ic()
ExecuteUntil(fdm, 10.)
self.assertTrue(self.sandbox.exists('output.csv'),
msg="The file 'output.csv' has not been created")
def test_output(self):
tree = et.parse(self.sandbox.elude(self.script_path))
output_tag = et.SubElement(tree.getroot(), 'output')
output_tag.attrib['name'] = 'test.csv'
output_tag.attrib['type'] = 'CSV'
output_tag.attrib['rate'] = '10'
property_tag = et.SubElement(output_tag, 'property')
property_tag.text = 'position/vrp-radius-ft'
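        # The elements built above amount to appending this <output> block to
        # the script's root element:
        #
        #   <output name="test.csv" type="CSV" rate="10">
        #     <property>position/vrp-radius-ft</property>
        #   </output>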
tree.write(self.sandbox('c1722_0.xml'))
fdm = CreateFDM(self.sandbox)
fdm.load_script('c1722_0.xml')
fdm.run_ic()
ExecuteUntil(fdm, 10.)
self.assertTrue(self.sandbox.exists(output_tag.attrib['name']),
msg="The file 'output.csv' has not been created")
orig = pd.read_csv(self.sandbox('JSBout172B.csv'))
test = pd.read_csv(self.sandbox('test.csv'))
self.assertEqual(np.max(orig['Time']-test['Time']), 0.0)
pname = '/fdm/jsbsim/' + property_tag.text
self.assertEqual(np.max(orig[pname]-test[pname]), 0.0)
suite = unittest.TestLoader().loadTestsFromTestCase(TestScriptOutput)
test_result = unittest.TextTestRunner(verbosity=2).run(suite)
if test_result.failures or test_result.errors:
sys.exit(-1) # 'make test' will report the test failed.
| lgpl-2.1 |
michaelpacer/scikit-image | skimage/morphology/greyreconstruct.py | 36 | 8409 | """
This morphological reconstruction routine was adapted from CellProfiler, code
licensed under both GPL and BSD licenses.
Website: http://www.cellprofiler.org
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2011 Broad Institute
All rights reserved.
Original author: Lee Kamentsky
"""
import numpy as np
from ..filters._rank_order import rank_order
def reconstruction(seed, mask, method='dilation', selem=None, offset=None):
"""Perform a morphological reconstruction of an image.
Morphological reconstruction by dilation is similar to basic morphological
dilation: high-intensity values will replace nearby low-intensity values.
The basic dilation operator, however, uses a structuring element to
determine how far a value in the input image can spread. In contrast,
reconstruction uses two images: a "seed" image, which specifies the values
that spread, and a "mask" image, which gives the maximum allowed value at
each pixel. The mask image, like the structuring element, limits the spread
of high-intensity values. Reconstruction by erosion is simply the inverse:
low-intensity values spread from the seed image and are limited by the mask
image, which represents the minimum allowed value.
Alternatively, you can think of reconstruction as a way to isolate the
connected regions of an image. For dilation, reconstruction connects
regions marked by local maxima in the seed image: neighboring pixels
less-than-or-equal-to those seeds are connected to the seeded region.
Local maxima with values larger than the seed image will get truncated to
the seed value.
Parameters
----------
seed : ndarray
The seed image (a.k.a. marker image), which specifies the values that
are dilated or eroded.
mask : ndarray
The maximum (dilation) / minimum (erosion) allowed value at each pixel.
method : {'dilation'|'erosion'}
Perform reconstruction by dilation or erosion. In dilation (or
erosion), the seed image is dilated (or eroded) until limited by the
mask image. For dilation, each seed value must be less than or equal
to the corresponding mask value; for erosion, the reverse is true.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
Returns
-------
reconstructed : ndarray
The result of morphological reconstruction.
Examples
--------
>>> import numpy as np
>>> from skimage.morphology import reconstruction
First, we create a sinusoidal mask image with peaks at middle and ends.
>>> x = np.linspace(0, 4 * np.pi)
>>> y_mask = np.cos(x)
Then, we create a seed image initialized to the minimum mask value (for
reconstruction by dilation, min-intensity values don't spread) and add
"seeds" to the left and right peak, but at a fraction of peak value (1).
>>> y_seed = y_mask.min() * np.ones_like(x)
>>> y_seed[0] = 0.5
>>> y_seed[-1] = 0
>>> y_rec = reconstruction(y_seed, y_mask)
The reconstructed image (or curve, in this case) is exactly the same as the
mask image, except that the peaks are truncated to 0.5 and 0. The middle
peak disappears completely: Since there were no seed values in this peak
region, its reconstructed value is truncated to the surrounding value (-1).
As a more practical example, we try to extract the bright features of an
image by subtracting a background image created by reconstruction.
>>> y, x = np.mgrid[:20:0.5, :20:0.5]
>>> bumps = np.sin(x) + np.sin(y)
To create the background image, set the mask image to the original image,
and the seed image to the original image with an intensity offset, `h`.
>>> h = 0.3
>>> seed = bumps - h
>>> background = reconstruction(seed, bumps)
The resulting reconstructed image looks exactly like the original image,
but with the peaks of the bumps cut off. Subtracting this reconstructed
image from the original image leaves just the peaks of the bumps
>>> hdome = bumps - background
This operation is known as the h-dome of the image and leaves features
of height `h` in the subtracted image.
Notes
-----
The algorithm is taken from [1]_. Applications for greyscale reconstruction
are discussed in [2]_ and [3]_.
References
----------
.. [1] Robinson, "Efficient morphological reconstruction: a downhill
filter", Pattern Recognition Letters 25 (2004) 1759-1767.
.. [2] Vincent, L., "Morphological Grayscale Reconstruction in Image
Analysis: Applications and Efficient Algorithms", IEEE Transactions
on Image Processing (1993)
.. [3] Soille, P., "Morphological Image Analysis: Principles and
Applications", Chapter 6, 2nd edition (2003), ISBN 3540429883.
"""
assert tuple(seed.shape) == tuple(mask.shape)
if method == 'dilation' and np.any(seed > mask):
raise ValueError("Intensity of seed image must be less than that "
"of the mask image for reconstruction by dilation.")
elif method == 'erosion' and np.any(seed < mask):
raise ValueError("Intensity of seed image must be greater than that "
"of the mask image for reconstruction by erosion.")
try:
from ._greyreconstruct import reconstruction_loop
except ImportError:
raise ImportError("_greyreconstruct extension not available.")
if selem is None:
selem = np.ones([3] * seed.ndim, dtype=bool)
else:
selem = selem.astype(bool)
if offset is None:
if not all([d % 2 == 1 for d in selem.shape]):
raise ValueError("Footprint dimensions must all be odd")
offset = np.array([d // 2 for d in selem.shape])
# Cross out the center of the selem
selem[[slice(d, d + 1) for d in offset]] = False
# Make padding for edges of reconstructed image so we can ignore boundaries
padding = (np.array(selem.shape) / 2).astype(int)
dims = np.zeros(seed.ndim + 1, dtype=int)
dims[1:] = np.array(seed.shape) + 2 * padding
dims[0] = 2
inside_slices = [slice(p, -p) for p in padding]
# Set padded region to minimum image intensity and mask along first axis so
# we can interleave image and mask pixels when sorting.
if method == 'dilation':
pad_value = np.min(seed)
elif method == 'erosion':
pad_value = np.max(seed)
else:
raise ValueError("Reconstruction method can be one of 'erosion' "
"or 'dilation'. Got '%s'." % method)
images = np.ones(dims) * pad_value
images[[0] + inside_slices] = seed
images[[1] + inside_slices] = mask
# Create a list of strides across the array to get the neighbors within
# a flattened array
value_stride = np.array(images.strides[1:]) // images.dtype.itemsize
image_stride = images.strides[0] // images.dtype.itemsize
selem_mgrid = np.mgrid[[slice(-o, d - o)
for d, o in zip(selem.shape, offset)]]
selem_offsets = selem_mgrid[:, selem].transpose()
nb_strides = np.array([np.sum(value_stride * selem_offset)
for selem_offset in selem_offsets], np.int32)
images = images.flatten()
# Erosion goes smallest to largest; dilation goes largest to smallest.
index_sorted = np.argsort(images).astype(np.int32)
if method == 'dilation':
index_sorted = index_sorted[::-1]
# Make a linked list of pixels sorted by value. -1 is the list terminator.
prev = -np.ones(len(images), np.int32)
next = -np.ones(len(images), np.int32)
prev[index_sorted[1:]] = index_sorted[:-1]
next[index_sorted[:-1]] = index_sorted[1:]
# Cython inner-loop compares the rank of pixel values.
if method == 'dilation':
value_rank, value_map = rank_order(images)
elif method == 'erosion':
value_rank, value_map = rank_order(-images)
value_map = -value_map
start = index_sorted[0]
reconstruction_loop(value_rank, prev, next, nb_strides, start,
image_stride)
# Reshape reconstructed image to original image shape and remove padding.
rec_img = value_map[value_rank[:image_stride]]
rec_img.shape = np.array(seed.shape) + 2 * padding
return rec_img[inside_slices]
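# Illustrative usage sketch (comments only, not executed here): reconstruction
# by erosion can fill holes whose boundary does not touch the image border.
# The seed equals the image maximum everywhere except on the border, where it
# equals the image itself:
#
#   img = np.zeros((7, 7))
#   img[1:-1, 1:-1] = 1.
#   img[3, 3] = 0.                       # a one-pixel "hole"
#   seed = np.copy(img)
#   seed[1:-1, 1:-1] = img.max()
#   filled = reconstruction(seed, img, method='erosion')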
| bsd-3-clause |
batoulapps/ITL | new_method/tests/run_tests.py | 2 | 2608 | #!/usr/bin/env python
# This script is used to test the new method implementation
# It does that by comparing the computed prayer times for specific
# locations on specific dates to the "official" times on these dates.
# "official" turns out to be a tricky one for locations in places
# that are in non-Muslim countries.
import sys
import os
import subprocess
import json
import datetime
import unittest
program_under_test=os.path.join("..", "build", "prayer")
locations="locations"
references="reference_times"
# We require all the computed times to be within 3 minutes
# of the reference values
threshold = datetime.timedelta(minutes=3)
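# A reference file under reference_times/ is expected to look roughly like the
# following (hypothetical values; "times" is a list of single-entry objects
# whose values are "HH:MM" strings):
#
#   {
#     "country": "egypt", "location": "cairo", "date": "2015-06-12",
#     "tz": 2, "dst": 0,
#     "times": [{"fajr": "03:07"}, {"shurooq": "04:52"}, {"zuhr": "11:53"}]
#   }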
class TestPrayerNewMethod(unittest.TestCase):
def setUp(self):
self.test_files = []
for f in os.listdir(references):
fn = os.path.join(references,f)
if os.path.isfile(fn):
self.test_files.append(fn)
def test_locations(self):
for f in self.test_files:
fp = open(f)
json_data = json.load(fp)
fp.close()
country = json_data["country"]
location = json_data["location"]
date = json_data["date"]
timezone = json_data["tz"] # Not used for now
dst = json_data["dst"] # Not used for now
command = program_under_test + \
" -d " + date + \
" -f " + \
os.path.join(locations, country, \
country + "-" + location + ".conf") + \
" -j"
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
(output, errors) = p.communicate()
self.assertEqual(p.returncode, 0)
self.assertEqual(errors, "")
ref_times = list(json_data["times"])
computed_times = list(json.loads(output)["times"])
self.assertEqual(len(ref_times), len(computed_times))
for i in range(len(computed_times)):
ref_x = ref_times[i].items()[0][1]
comp_x = computed_times[i].items()[0][1]
time_format = "%H:%M"
t_r = datetime.datetime.strptime(ref_x, time_format)
t_c = datetime.datetime.strptime(comp_x, time_format)
if t_r > t_c:
tdelta = t_r - t_c
else:
tdelta = t_c - t_r
self.assertTrue(tdelta < threshold)
if __name__ == "__main__":
unittest.main()
| lgpl-2.1 |
sabel83/metashell | 3rd/templight/llvm/tools/clang/tools/clang-format/clang-format.py | 12 | 4683 | # This file is a minimal clang-format vim-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Add to your .vimrc:
#
# map <C-I> :pyf <path-to-this-file>/clang-format.py<cr>
# imap <C-I> <c-o>:pyf <path-to-this-file>/clang-format.py<cr>
#
# The first line enables clang-format for NORMAL and VISUAL mode, the second
# line adds support for INSERT mode. Change "C-I" to another binding if you
# need clang-format on a different key (C-I stands for Ctrl+i).
#
# With this integration you can press the bound key and clang-format will
# format the current line in NORMAL and INSERT mode or the selected region in
# VISUAL mode. The line or region is extended to the next bigger syntactic
# entity.
#
# You can also pass in the variable "l:lines" to choose the range for
# formatting. This variable can either contain "<start line>:<end line>" or
# "all" to format the full file. So, to format the full file, write a function
# like:
# :function FormatFile()
# : let l:lines="all"
# : pyf <path-to-this-file>/clang-format.py
# :endfunction
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
from __future__ import print_function
import difflib
import json
import platform
import subprocess
import sys
import vim
# set g:clang_format_path to the path to clang-format if it is not on the path
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
if vim.eval('exists("g:clang_format_path")') == "1":
binary = vim.eval('g:clang_format_path')
# Change this to format according to other formatting styles. See the output of
# 'clang-format --help' for a list of supported styles. The default looks for
# a '.clang-format' or '_clang-format' file to indicate the style that should be
# used.
style = 'file'
fallback_style = None
if vim.eval('exists("g:clang_format_fallback_style")') == "1":
fallback_style = vim.eval('g:clang_format_fallback_style')
def get_buffer(encoding):
if platform.python_version_tuple()[0] == '3':
return vim.current.buffer
return [ line.decode(encoding) for line in vim.current.buffer ]
def main():
# Get the current text.
encoding = vim.eval("&encoding")
buf = get_buffer(encoding)
text = '\n'.join(buf)
# Determine range to format.
if vim.eval('exists("l:lines")') == '1':
lines = ['-lines', vim.eval('l:lines')]
elif vim.eval('exists("l:formatdiff")') == '1':
with open(vim.current.buffer.name, 'r') as f:
ondisk = f.read().splitlines();
sequence = difflib.SequenceMatcher(None, ondisk, vim.current.buffer)
lines = []
for op in reversed(sequence.get_opcodes()):
if op[0] not in ['equal', 'delete']:
lines += ['-lines', '%s:%s' % (op[3] + 1, op[4])]
if lines == []:
return
else:
lines = ['-lines', '%s:%s' % (vim.current.range.start + 1,
vim.current.range.end + 1)]
# Determine the cursor position.
cursor = int(vim.eval('line2byte(line("."))+col(".")')) - 2
if cursor < 0:
print('Couldn\'t determine cursor position. Is your file empty?')
return
# Avoid flashing an ugly, ugly cmd prompt on Windows when invoking clang-format.
startupinfo = None
if sys.platform.startswith('win32'):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call formatter.
command = [binary, '-style', style, '-cursor', str(cursor)]
if lines != ['-lines', 'all']:
command += lines
if fallback_style:
command.extend(['-fallback-style', fallback_style])
if vim.current.buffer.name:
command.extend(['-assume-filename', vim.current.buffer.name])
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, startupinfo=startupinfo)
stdout, stderr = p.communicate(input=text.encode(encoding))
# If successful, replace buffer contents.
if stderr:
print(stderr)
if not stdout:
print(
'No output from clang-format (crashed?).\n'
'Please report to bugs.llvm.org.'
)
else:
lines = stdout.decode(encoding).split('\n')
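    # The first line of clang-format's output with '-cursor' is a small JSON
    # header, typically of the form:
    #   { "Cursor": 72, "IncompleteFormat": false }
    # The remaining lines are the formatted buffer contents.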
output = json.loads(lines[0])
lines = lines[1:]
sequence = difflib.SequenceMatcher(None, buf, lines)
for op in reversed(sequence.get_opcodes()):
      if op[0] != 'equal':
vim.current.buffer[op[1]:op[2]] = lines[op[3]:op[4]]
if output.get('IncompleteFormat'):
print('clang-format: incomplete (syntax errors)')
vim.command('goto %d' % (output['Cursor'] + 1))
main()
| gpl-3.0 |
astorije/ansible | plugins/inventory/spacewalk.py | 137 | 4999 | #!/bin/env python
"""
Spacewalk external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
This script is dependent upon the spacealk-reports package being installed
on the same machine. It is basically a CSV-to-JSON converter from the
output of "spacewalk-report system-groups-systems|inventory".
Tested with Ansible 1.1
"""
#
# Author:: Jon Miller <[email protected]>
# Copyright:: Copyright (c) 2013, Jon Miller
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import time
from optparse import OptionParser
import subprocess
try:
import json
except:
import simplejson as json
base_dir = os.path.dirname(os.path.realpath(__file__))
SW_REPORT = '/usr/bin/spacewalk-report'
CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports")
CACHE_AGE = 300 # 5min
# Sanity check
if not os.path.exists(SW_REPORT):
print >> sys.stderr, 'Error: %s is required for operation.' % (SW_REPORT)
sys.exit(1)
# Pre-startup work
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
    os.chmod(CACHE_DIR, 02775)  # octal: setgid bit plus rwxrwxr-x
# Helper functions
#------------------------------
def spacewalk_report(name):
"""Yield a dictionary form of each CSV output produced by the specified
spacewalk-report
"""
cache_filename = os.path.join(CACHE_DIR, name)
if not os.path.exists(cache_filename) or \
(time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE:
# Update the cache
fh = open(cache_filename, 'w')
p = subprocess.Popen([SW_REPORT, name], stdout=fh)
p.wait()
fh.close()
lines = open(cache_filename, 'r').readlines()
keys = lines[0].strip().split(',')
for line in lines[1:]:
values = line.strip().split(',')
if len(keys) == len(values):
yield dict(zip(keys, values))
# Options
#------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of servers for Ansible")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
parser.add_option('-H', '--human', dest="human",
default=False, action="store_true",
help="Produce a friendlier version of either server list or host detail")
(options, args) = parser.parse_args()
# List out the known server from Spacewalk
#------------------------------
if options.list:
groups = {}
try:
for system in spacewalk_report('system-groups-systems'):
if system['group_name'] not in groups:
groups[system['group_name']] = set()
groups[system['group_name']].add(system['server_name'])
except (OSError), e:
print >> sys.stderr, 'Problem executing the command "%s system-groups-systems": %s' % \
(SW_REPORT, str(e))
sys.exit(2)
if options.human:
for group, systems in groups.iteritems():
print '[%s]\n%s\n' % (group, '\n'.join(systems))
else:
print json.dumps(dict([ (k, list(s)) for k, s in groups.iteritems() ]))
sys.exit(0)
# Return a details information concerning the spacewalk server
#------------------------------
elif options.host:
host_details = {}
try:
for system in spacewalk_report('inventory'):
if system['hostname'] == options.host:
host_details = system
break
except (OSError), e:
print >> sys.stderr, 'Problem executing the command "%s inventory": %s' % \
(SW_REPORT, str(e))
sys.exit(2)
if options.human:
print 'Host: %s' % options.host
for k, v in host_details.iteritems():
print ' %s: %s' % (k, '\n '.join(v.split(';')))
else:
print json.dumps(host_details)
sys.exit(0)
else:
parser.print_help()
sys.exit(1)
| gpl-3.0 |
ebertti/nospam | contator.py | 1 | 1974 | # coding=utf-8
import socket
import json
import logging
import configuracao
logger = logging.getLogger()
class Contadores(object):
def __init__(self):
self.idiomas = {}
self.com_link = 0
self.sem_link = 0
        # Port the monitoring server is listening on
self.tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
destino = (configuracao.HOST_MONITOR, configuracao.HOST_PORTA)
try:
self.tcp.connect(destino)
self.conectado = True
except:
self.conectado = False
@property
def qtd(self):
return self.com_link + self.sem_link
def mais_um(self, idioma, com_link):
if idioma not in self.idiomas:
self.idiomas[idioma] = Contador(idioma)
self.idiomas[idioma].mais_um(com_link)
if com_link:
self.com_link += 1
else:
self.sem_link += 1
if self.qtd % 50000 == 0:
logger.debug(self)
if self.conectado:
self.tcp.send(json.dumps({'idioma': idioma, 'com_link':com_link}))
def __str__(self):
return "%s idiomas:%s qtd:%s com_link:%s sem_link:%s\n\t%s" % (
'conectado' if self.conectado else 'erro',
len(self.idiomas), self.qtd, self.com_link, self.sem_link,
'\n\t'.join([str(contador) for contador in self.idiomas.values()])
)
def __int__(self):
return self.qtd
def finalizar(self):
self.tcp.close()
class Contador(object):
def __init__(self, idioma):
self.idioma = idioma
self.com_link = 0
self.sem_link = 0
@property
def qtd(self):
return self.com_link + self.sem_link
def mais_um(self, com_link):
if com_link:
self.com_link += 1
else:
self.sem_link += 1
def __str__(self):
return "idioma:%s qtd:%s com_link:%s sem_link:%s" % (self.idioma, self.qtd, self.com_link, self.sem_link) | mit |
tlakshman26/cinder-new-branch | cinder/tests/unit/api/v1/test_volumes.py | 2 | 52093 | # Copyright 2013 Josh Durgin
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
from lxml import etree
import mock
from oslo_config import cfg
from oslo_utils import timeutils
import webob
from cinder.api import extensions
from cinder.api.v1 import volumes
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import stubs
from cinder.tests.unit import fake_notifier
from cinder.tests.unit import fake_volume
from cinder.tests.unit.image import fake as fake_image
from cinder.volume import api as volume_api
NS = '{http://docs.openstack.org/api/openstack-block-storage/1.0/content}'
TEST_SNAPSHOT_UUID = '00000000-0000-0000-0000-000000000001'
CONF = cfg.CONF
def stub_snapshot_get(self, context, snapshot_id):
if snapshot_id != TEST_SNAPSHOT_UUID:
raise exception.NotFound
return {'id': snapshot_id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description', }
class VolumeApiTest(test.TestCase):
def setUp(self):
super(VolumeApiTest, self).setUp()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
fake_image.stub_out_image_service(self.stubs)
self.controller = volumes.VolumeController(self.ext_mgr)
self.flags(host='fake',
notification_driver=[fake_notifier.__name__])
self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all)
self.stubs.Set(db, 'service_get_all_by_topic',
stubs.stub_service_get_all_by_topic)
self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete)
def test_volume_create(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_api_create)
self.stubs.Set(db, 'volume_type_get', stubs.stub_volume_type_get)
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.create(req, body)
expected = {'volume': {'status': 'fakestatus',
'display_description': 'Volume Test Desc',
'availability_zone': 'zone1:host1',
'display_name': 'Volume Test Name',
'attachments': [],
'multiattach': 'false',
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(
1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'size': 100,
'encrypted': False}}
self.assertEqual(expected, res_dict)
def test_volume_create_with_type(self):
vol_type = CONF.default_volume_type
db.volume_type_create(context.get_admin_context(),
dict(name=vol_type, extra_specs={}))
db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
vol_type)
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
"volume_type": "FakeTypeName"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
# Raise 404 when type name isn't valid
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, body)
# Use correct volume type name
vol.update(dict(volume_type=CONF.default_volume_type))
body.update(dict(volume=vol))
res_dict = self.controller.create(req, body)
self.assertIn('id', res_dict['volume'])
self.assertEqual(1, len(res_dict))
self.assertEqual(db_vol_type['name'],
res_dict['volume']['volume_type'])
# Use correct volume type id
vol.update(dict(volume_type=db_vol_type['id']))
body.update(dict(volume=vol))
res_dict = self.controller.create(req, body)
self.assertIn('id', res_dict['volume'])
self.assertEqual(1, len(res_dict))
self.assertEqual(db_vol_type['name'],
res_dict['volume']['volume_type'])
def test_volume_creation_fails_with_bad_size(self):
vol = {"size": '',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(exception.InvalidInput,
self.controller.create,
req,
body)
def test_volume_creation_fails_with_bad_availability_zone(self):
vol = {"size": '1',
"name": "Volume Test Name",
"description": "Volume Test Desc",
"availability_zone": "zonen:hostn"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.assertRaises(exception.InvalidInput,
self.controller.create,
req, body)
def test_volume_create_with_image_id(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_api_create)
self.stubs.Set(db, 'volume_type_get', stubs.stub_volume_type_get)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
test_id = "c905cedb-7281-47e4-8a62-f26bc5fc4c77"
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "nova",
"imageRef": test_id}
expected = {'volume': {'status': 'fakestatus',
'display_description': 'Volume Test Desc',
'availability_zone': 'nova',
'display_name': 'Volume Test Name',
'encrypted': False,
'attachments': [],
'multiattach': 'false',
'bootable': 'false',
'volume_type': 'vol_type_name',
'image_id': test_id,
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(
1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'size': 1}}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.create(req, body)
self.assertEqual(expected, res_dict)
def test_volume_create_with_image_id_is_integer(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": 1234}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_volume_create_with_image_id_not_uuid_format(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": '12345'}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_volume_create_with_image_id_with_empty_string(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": 1,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": ''}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
@mock.patch.object(db, 'volume_admin_metadata_get',
return_value={'attached_mode': 'rw',
'readonly': 'False'})
@mock.patch.object(db, 'volume_type_get',
side_effect=stubs.stub_volume_type_get)
@mock.patch.object(volume_api.API, 'get',
side_effect=stubs.stub_volume_api_get, autospec=True)
@mock.patch.object(volume_api.API, 'update',
side_effect=stubs.stub_volume_update, autospec=True)
def test_volume_update(self, *args):
updates = {
"display_name": "Updated Test Name",
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertEqual(0, len(self.notifier.notifications))
res_dict = self.controller.update(req, '1', body)
expected = {'volume': {
'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'Updated Test Name',
'encrypted': False,
'attachments': [],
'multiattach': 'false',
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'size': 1}}
self.assertEqual(expected, res_dict)
self.assertEqual(2, len(self.notifier.notifications))
@mock.patch.object(db, 'volume_admin_metadata_get',
return_value={"qos_max_iops": 2000,
"readonly": "False",
"attached_mode": "rw"})
@mock.patch.object(db, 'volume_type_get',
side_effect=stubs.stub_volume_type_get)
@mock.patch.object(volume_api.API, 'get',
side_effect=stubs.stub_volume_api_get, autospec=True)
@mock.patch.object(volume_api.API, 'update',
side_effect=stubs.stub_volume_update, autospec=True)
def test_volume_update_metadata(self, *args):
updates = {
"metadata": {"qos_max_iops": 2000}
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertEqual(0, len(self.notifier.notifications))
res_dict = self.controller.update(req, '1', body)
expected = {'volume': {
'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [],
'multiattach': 'false',
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {"qos_max_iops": '2000',
"readonly": "False",
"attached_mode": "rw"},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'size': 1
}}
self.assertEqual(expected, res_dict)
self.assertEqual(2, len(self.notifier.notifications))
def test_volume_update_with_admin_metadata(self):
def stubs_volume_admin_metadata_get(context, volume_id):
return {'key': 'value',
'readonly': 'True'}
self.stubs.Set(db, 'volume_admin_metadata_get',
stubs_volume_admin_metadata_get)
self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
volume = stubs.stub_volume("1")
del volume['name']
del volume['volume_type']
del volume['volume_type_id']
volume['metadata'] = {'key': 'value'}
db.volume_create(context.get_admin_context(), volume)
db.volume_admin_metadata_update(context.get_admin_context(), "1",
{"readonly": "True",
"invisible_key": "invisible_value"},
False)
values = {'volume_id': '1', }
attachment = db.volume_attach(context.get_admin_context(), values)
db.volume_attached(context.get_admin_context(),
attachment['id'], stubs.FAKE_UUID, None, '/')
updates = {
"display_name": "Updated Test Name",
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertEqual(0, len(self.notifier.notifications))
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
req.environ['cinder.context'] = admin_ctx
res_dict = self.controller.update(req, '1', body)
expected = {'volume': {
'status': 'in-use',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'Updated Test Name',
'encrypted': False,
'attachments': [{
'attachment_id': attachment['id'],
'id': '1',
'volume_id': '1',
'server_id': stubs.FAKE_UUID,
'host_name': None,
'device': '/'
}],
'multiattach': 'false',
'bootable': 'false',
'volume_type': None,
'snapshot_id': None,
'source_volid': None,
'metadata': {'key': 'value',
'readonly': 'True'},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'size': 1}}
self.assertEqual(expected, res_dict)
self.assertEqual(2, len(self.notifier.notifications))
def test_update_empty_body(self):
body = {}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.update,
req, '1', body)
def test_update_invalid_body(self):
body = {'display_name': 'missing top level volume key'}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.update,
req, '1', body)
def test_update_not_found(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
updates = {
"display_name": "Updated Test Name",
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
req, '1', body)
def test_volume_list(self):
def stubs_volume_admin_metadata_get(context, volume_id):
return {'attached_mode': 'rw',
'readonly': 'False'}
self.stubs.Set(db, 'volume_admin_metadata_get',
stubs_volume_admin_metadata_get)
self.stubs.Set(volume_api.API, 'get_all',
stubs.stub_volume_api_get_all_by_project)
self.stubs.Set(db, 'volume_type_get', stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.index(req)
expected = {'volumes': [{'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [],
'multiattach': 'false',
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(
1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'size': 1}]}
self.assertEqual(expected, res_dict)
# Finally test that we cached the returned volumes
self.assertEqual(1, len(req.cached_resource()))
def test_volume_list_with_admin_metadata(self):
volume = stubs.stub_volume("1")
del volume['name']
del volume['volume_type']
del volume['volume_type_id']
volume['metadata'] = {'key': 'value'}
db.volume_create(context.get_admin_context(), volume)
db.volume_admin_metadata_update(context.get_admin_context(), "1",
{"readonly": "True",
"invisible_key": "invisible_value"},
False)
values = {'volume_id': '1', }
attachment = db.volume_attach(context.get_admin_context(), values)
db.volume_attached(context.get_admin_context(),
attachment['id'], stubs.FAKE_UUID, None, '/')
req = fakes.HTTPRequest.blank('/v1/volumes')
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
req.environ['cinder.context'] = admin_ctx
res_dict = self.controller.index(req)
expected = {'volumes': [{'status': 'in-use',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [
{'attachment_id': attachment['id'],
'device': '/',
'server_id': stubs.FAKE_UUID,
'host_name': None,
'id': '1',
'volume_id': '1'}],
'multiattach': 'false',
'bootable': 'false',
'volume_type': None,
'snapshot_id': None,
'source_volid': None,
'metadata': {'key': 'value',
'readonly': 'True'},
'id': '1',
'created_at': datetime.datetime(
1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'size': 1}]}
self.assertEqual(expected, res_dict)
@mock.patch.object(db, 'volume_admin_metadata_get',
return_value={'attached_mode': 'rw',
'readonly': 'False'})
def test_volume_list_detail(self, *args):
self.stubs.Set(volume_api.API, 'get_all',
stubs.stub_volume_api_get_all_by_project)
self.stubs.Set(db, 'volume_type_get', stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v1/volumes/detail')
res_dict = self.controller.index(req)
expected = {'volumes': [{'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [],
'multiattach': 'false',
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(
1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'size': 1}]}
self.assertEqual(expected, res_dict)
# Finally test that we cached the returned volumes
self.assertEqual(1, len(req.cached_resource()))
def test_volume_list_detail_with_admin_metadata(self):
volume = stubs.stub_volume("1")
del volume['name']
del volume['volume_type']
del volume['volume_type_id']
volume['metadata'] = {'key': 'value'}
db.volume_create(context.get_admin_context(), volume)
db.volume_admin_metadata_update(context.get_admin_context(), "1",
{"readonly": "True",
"invisible_key": "invisible_value"},
False)
values = {'volume_id': '1', }
attachment = db.volume_attach(context.get_admin_context(), values)
db.volume_attached(context.get_admin_context(),
attachment['id'], stubs.FAKE_UUID, None, '/')
req = fakes.HTTPRequest.blank('/v1/volumes/detail')
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
req.environ['cinder.context'] = admin_ctx
res_dict = self.controller.index(req)
expected = {'volumes': [{'status': 'in-use',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [
{'attachment_id': attachment['id'],
'device': '/',
'server_id': stubs.FAKE_UUID,
'host_name': None,
'id': '1',
'volume_id': '1'}],
'multiattach': 'false',
'bootable': 'false',
'volume_type': None,
'snapshot_id': None,
'source_volid': None,
'metadata': {'key': 'value',
'readonly': 'True'},
'id': '1',
'created_at': datetime.datetime(
1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'size': 1}]}
self.assertEqual(expected, res_dict)
@mock.patch.object(db, 'volume_admin_metadata_get',
return_value={'attached_mode': 'rw',
'readonly': 'False'})
@mock.patch.object(volume_api.API, 'get',
side_effect=stubs.stub_volume_api_get, autospec=True)
@mock.patch.object(db, 'volume_type_get',
side_effect=stubs.stub_volume_type_get, autospec=True)
def test_volume_show(self, *args):
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, '1')
expected = {'volume': {'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [],
'multiattach': 'false',
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(
1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'size': 1}}
self.assertEqual(expected, res_dict)
# Finally test that we cached the returned volume
self.assertIsNotNone(req.cached_resource_by_id('1'))
def test_volume_show_no_attachments(self):
def stub_volume_get(self, context, volume_id, **kwargs):
vol = stubs.stub_volume(volume_id, attach_status='detached')
return fake_volume.fake_volume_obj(context, **vol)
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
self.stubs.Set(db, 'volume_type_get', stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, '1')
expected = {'volume': {'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [],
'multiattach': 'false',
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(
1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'size': 1}}
self.assertEqual(expected, res_dict)
def test_volume_show_bootable(self):
def stub_volume_get(self, context, volume_id, **kwargs):
vol = (stubs.stub_volume(volume_id,
volume_glance_metadata=dict(foo='bar')))
return fake_volume.fake_volume_obj(context, **vol)
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
self.stubs.Set(db, 'volume_type_get', stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, '1')
expected = {'volume': {'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [],
'multiattach': 'false',
'bootable': 'true',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(
1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'size': 1}}
self.assertEqual(expected, res_dict)
def test_volume_show_no_volume(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
req,
1)
# Finally test that we did not cache anything
self.assertIsNone(req.cached_resource_by_id('1'))
def test_volume_detail_limit_offset(self):
def volume_detail_limit_offset(is_admin):
def stub_volume_get_all_by_project(context, project_id, marker,
limit, sort_keys=None,
sort_dirs=None, filters=None,
viewable_admin_meta=False,
offset=None):
return [
stubs.stub_volume(1, display_name='vol1'),
stubs.stub_volume(2, display_name='vol2'),
]
self.stubs.Set(db, 'volume_get_all_by_project',
stub_volume_get_all_by_project)
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
self.stubs.Set(db, 'volume_type_get', stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v1/volumes/detail?limit=2\
&offset=1',
use_admin_context=is_admin)
res_dict = self.controller.index(req)
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
self.assertEqual('2', volumes[0]['id'])
# admin case
volume_detail_limit_offset(is_admin=True)
# non_admin case
volume_detail_limit_offset(is_admin=False)
def test_volume_show_with_admin_metadata(self):
volume = stubs.stub_volume("1")
del volume['name']
del volume['volume_type']
del volume['volume_type_id']
volume['metadata'] = {'key': 'value'}
db.volume_create(context.get_admin_context(), volume)
db.volume_admin_metadata_update(context.get_admin_context(), "1",
{"readonly": "True",
"invisible_key": "invisible_value"},
False)
values = {'volume_id': '1', }
attachment = db.volume_attach(context.get_admin_context(), values)
db.volume_attached(context.get_admin_context(),
attachment['id'], stubs.FAKE_UUID, None, '/')
req = fakes.HTTPRequest.blank('/v1/volumes/1')
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
req.environ['cinder.context'] = admin_ctx
res_dict = self.controller.show(req, '1')
expected = {'volume': {'status': 'in-use',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [
{'attachment_id': attachment['id'],
'device': '/',
'server_id': stubs.FAKE_UUID,
'host_name': None,
'id': '1',
'volume_id': '1'}],
'multiattach': 'false',
'bootable': 'false',
'volume_type': None,
'snapshot_id': None,
'source_volid': None,
'metadata': {'key': 'value',
'readonly': 'True'},
'id': '1',
'created_at': datetime.datetime(
1900, 1, 1, 1, 1, 1,
tzinfo=iso8601.iso8601.Utc()),
'size': 1}}
self.assertEqual(expected, res_dict)
def test_volume_show_with_encrypted_volume(self):
def stub_volume_get(self, context, volume_id, **kwargs):
vol = stubs.stub_volume(volume_id, encryption_key_id='fake_id')
return fake_volume.fake_volume_obj(context, **vol)
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
self.stubs.Set(db, 'volume_type_get', stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(True, res_dict['volume']['encrypted'])
def test_volume_show_with_unencrypted_volume(self):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get)
self.stubs.Set(db, 'volume_type_get', stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(False, res_dict['volume']['encrypted'])
def test_volume_delete(self):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_api_get)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
resp = self.controller.delete(req, 1)
self.assertEqual(202, resp.status_int)
def test_volume_delete_no_volume(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete,
req,
1)
def test_admin_list_volumes_limited_to_project(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(db, 'volume_type_get', stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v1/fake/volumes',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
def test_admin_list_volumes_all_tenants(self):
self.stubs.Set(db, 'volume_type_get', stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(3, len(res['volumes']))
def test_all_tenants_non_admin_gets_all_tenants(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
self.stubs.Set(db, 'volume_type_get', stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1')
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
def test_non_admin_get_by_project(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
self.stubs.Set(db, 'volume_type_get', stubs.stub_volume_type_get)
req = fakes.HTTPRequest.blank('/v1/fake/volumes')
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_string(self, get_all):
req = mock.MagicMock()
req.GET.copy.return_value = {'display_name': 'Volume-573108026'}
context = mock.Mock()
req.environ = {'cinder.context': context}
self.controller._items(req, mock.Mock)
get_all.assert_called_once_with(
context, sort_dirs=['desc'], viewable_admin_meta=True,
sort_keys=['created_at'], limit=None,
filters={'display_name': 'Volume-573108026'}, marker=None)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_list(self, get_all):
req = mock.MagicMock()
req.GET.copy.return_value = {'id': "['1', '2', '3']"}
context = mock.Mock()
req.environ = {'cinder.context': context}
self.controller._items(req, mock.Mock)
get_all.assert_called_once_with(
context, sort_dirs=['desc'], viewable_admin_meta=True,
sort_keys=['created_at'], limit=None,
filters={'id': ['1', '2', '3']}, marker=None)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_expression(self, get_all):
req = mock.MagicMock()
req.GET.copy.return_value = {'id': "d+"}
context = mock.Mock()
req.environ = {'cinder.context': context}
self.controller._items(req, mock.Mock)
get_all.assert_called_once_with(
context, sort_dirs=['desc'], viewable_admin_meta=True,
sort_keys=['created_at'], limit=None, filters={'id': 'd+'},
marker=None)
class VolumeSerializerTest(test.TestCase):
def _verify_volume_attachment(self, attach, tree):
for attr in ('id', 'volume_id', 'server_id', 'device'):
self.assertEqual(str(attach[attr]), tree.get(attr))
def _verify_volume(self, vol, tree):
self.assertEqual(NS + 'volume', tree.tag)
for attr in ('id', 'status', 'size', 'availability_zone', 'created_at',
'display_name', 'display_description', 'volume_type',
'bootable', 'snapshot_id'):
self.assertEqual(str(vol[attr]), tree.get(attr))
for child in tree:
self.assertIn(child.tag, (NS + 'attachments', NS + 'metadata'))
if child.tag == 'attachments':
self.assertEqual(1, len(child))
self.assertEqual('attachment', child[0].tag)
self._verify_volume_attachment(vol['attachments'][0], child[0])
elif child.tag == 'metadata':
not_seen = set(vol['metadata'].keys())
for gr_child in child:
self.assertIn(gr_child.get("key"), not_seen)
self.assertEqual(str(vol['metadata'][gr_child.get("key")]),
gr_child.text)
not_seen.remove(gr_child.get('key'))
self.assertEqual(0, len(not_seen))
def test_volume_show_create_serializer(self):
serializer = volumes.VolumeTemplate()
raw_volume = dict(
id='vol_id',
status='vol_status',
size=1024,
availability_zone='vol_availability',
bootable='false',
created_at=timeutils.utcnow(),
attachments=[dict(id='vol_id',
volume_id='vol_id',
server_id='instance_uuid',
device='/foo')],
display_name='vol_name',
display_description='vol_desc',
volume_type='vol_type',
snapshot_id='snap_id',
source_volid='source_volid',
metadata=dict(foo='bar',
baz='quux', ), )
text = serializer.serialize(dict(volume=raw_volume))
tree = etree.fromstring(text)
self._verify_volume(raw_volume, tree)
def test_volume_index_detail_serializer(self):
serializer = volumes.VolumesTemplate()
raw_volumes = [dict(id='vol1_id',
status='vol1_status',
size=1024,
availability_zone='vol1_availability',
bootable='true',
created_at=timeutils.utcnow(),
attachments=[dict(id='vol1_id',
volume_id='vol1_id',
server_id='instance_uuid',
device='/foo1')],
display_name='vol1_name',
display_description='vol1_desc',
volume_type='vol1_type',
snapshot_id='snap1_id',
source_volid=None,
metadata=dict(foo='vol1_foo',
bar='vol1_bar', ), ),
dict(id='vol2_id',
status='vol2_status',
size=1024,
availability_zone='vol2_availability',
bootable='true',
created_at=timeutils.utcnow(),
attachments=[dict(id='vol2_id',
volume_id='vol2_id',
server_id='instance_uuid',
device='/foo2')],
display_name='vol2_name',
display_description='vol2_desc',
volume_type='vol2_type',
snapshot_id='snap2_id',
source_volid=None,
metadata=dict(foo='vol2_foo',
bar='vol2_bar', ), )]
text = serializer.serialize(dict(volumes=raw_volumes))
tree = etree.fromstring(text)
self.assertEqual(NS + 'volumes', tree.tag)
self.assertEqual(len(raw_volumes), len(tree))
for idx, child in enumerate(tree):
self._verify_volume(raw_volumes[idx], child)
class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
def setUp(self):
super(TestVolumeCreateRequestXMLDeserializer, self).setUp()
self.deserializer = volumes.CreateDeserializer()
def test_minimal_volume(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {"volume": {"size": "1", }, }
self.assertEqual(expected, request['body'])
def test_display_name(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
},
}
self.assertEqual(expected, request['body'])
def test_display_description(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
},
}
self.assertEqual(expected, request['body'])
def test_volume_type(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
},
}
self.assertEqual(expected, request['body'])
def test_availability_zone(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
availability_zone="us-east1"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
"availability_zone": "us-east1",
},
}
self.assertEqual(expected, request['body'])
def test_metadata(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
display_name="Volume-xml"
size="1">
<metadata><meta key="Type">work</meta></metadata></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"display_name": "Volume-xml",
"size": "1",
"metadata": {
"Type": "work",
},
},
}
self.assertEqual(expected, request['body'])
def test_full_volume(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
availability_zone="us-east1">
<metadata><meta key="Type">work</meta></metadata></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
"availability_zone": "us-east1",
"metadata": {
"Type": "work",
},
},
}
self.assertEqual(expected, request['body'])
def test_imageref(self):
self_request = """
<volume xmlns="http://docs.openstack.org/volume/api/v1"
size="1"
display_name="Volume-xml"
display_description="description"
imageRef="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"imageRef": "4a90189d-d702-4c7c-87fc-6608c554d737",
},
}
self.assertEqual(expected, request['body'])
def test_snapshot_id(self):
self_request = """
<volume xmlns="http://docs.openstack.org/volume/api/v1"
size="1"
display_name="Volume-xml"
display_description="description"
snapshot_id="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"snapshot_id": "4a90189d-d702-4c7c-87fc-6608c554d737",
},
}
self.assertEqual(expected, request['body'])
def test_source_volid(self):
self_request = """
<volume xmlns="http://docs.openstack.org/volume/api/v1"
size="1"
display_name="Volume-xml"
display_description="description"
source_volid="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"source_volid": "4a90189d-d702-4c7c-87fc-6608c554d737",
},
}
self.assertEqual(expected, request['body'])
class VolumesUnprocessableEntityTestCase(test.TestCase):
"""Tests of places we throw 422 Unprocessable Entity from."""
def setUp(self):
super(VolumesUnprocessableEntityTestCase, self).setUp()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.controller = volumes.VolumeController(self.ext_mgr)
def _unprocessable_volume_create(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/volumes')
req.method = 'POST'
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, body)
def test_create_no_body(self):
self._unprocessable_volume_create(body=None)
def test_create_missing_volume(self):
body = {'foo': {'a': 'b'}}
self._unprocessable_volume_create(body=body)
def test_create_malformed_entity(self):
body = {'volume': 'string'}
self._unprocessable_volume_create(body=body)
| apache-2.0 |
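The XML deserializer tests above fully specify the expected request mapping; as a quick illustration, the same attribute-to-dict mapping can be reproduced with only the standard library. This sketch only mimics the expected output and is not the volume API code itself.

# Illustration only: the mapping the CreateDeserializer tests above assert on,
# mimicked with xml.etree so the sketch runs without the volume API code.
from xml.etree import ElementTree as etree

xml = ('<volume xmlns="http://docs.openstack.org/compute/api/v1.1" '
       'size="1" display_name="Volume-xml"/>')
root = etree.fromstring(xml)
body = {"volume": dict(root.attrib)}
print(body)  # {'volume': {'size': '1', 'display_name': 'Volume-xml'}}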
LxiaoGirl/wySpider | thirdparty/requests/auth.py | 70 | 6179 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + to_native_string(
b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
)
return authstr
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
self.last_nonce = ''
self.nonce_count = 0
self.chal = {}
self.pos = None
def build_digest_header(self, method, url):
realm = self.chal['realm']
nonce = self.chal['nonce']
qop = self.chal.get('qop')
algorithm = self.chal.get('algorithm')
opaque = self.chal.get('opaque')
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
        hash_utf8 = None
        if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
path = p_parsed.path
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self.pos)
num_401_calls = getattr(self, 'num_401_calls', 1)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and num_401_calls < 2:
setattr(self, 'num_401_calls', num_401_calls + 1)
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.raw.release_conn()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
setattr(self, 'num_401_calls', 1)
return r
def __call__(self, r):
# If we have a saved nonce, skip the 401
if self.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self.pos = r.body.tell()
except AttributeError:
pass
r.register_hook('response', self.handle_401)
return r
| gpl-2.0 |
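The auth module above only defines the handler classes; a minimal, hedged sketch of how they are typically attached to a request follows. The URL and credentials are placeholders, and the import path assumes the standard requests package layout rather than this vendored thirdparty copy.

# Hypothetical usage of the handlers defined above; host and credentials are placeholders.
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth

# Basic auth: the Authorization header is computed up front by _basic_auth_str().
resp = requests.get('https://example.com/protected', auth=HTTPBasicAuth('user', 'secret'))

# Digest auth: the first 401 is consumed by handle_401(), which parses the
# WWW-Authenticate challenge and replays the request with a Digest header.
resp = requests.get('https://example.com/digest', auth=HTTPDigestAuth('user', 'secret'))
print(resp.status_code)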
LordDamionDevil/Lony | lib/youtube_dl/extractor/cinchcast.py | 177 | 1678 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
unified_strdate,
xpath_text,
)
class CinchcastIE(InfoExtractor):
_VALID_URL = r'https?://player\.cinchcast\.com/.*?assetId=(?P<id>[0-9]+)'
_TEST = {
# Actual test is run in generic, look for undergroundwellness
'url': 'http://player.cinchcast.com/?platformId=1&assetType=single&assetId=7141703',
'only_matching': True,
}
def _real_extract(self, url):
video_id = self._match_id(url)
doc = self._download_xml(
'http://www.blogtalkradio.com/playerasset/mrss?assetType=single&assetId=%s' % video_id,
video_id)
item = doc.find('.//item')
title = xpath_text(item, './title', fatal=True)
date_str = xpath_text(
item, './{http://developer.longtailvideo.com/trac/}date')
upload_date = unified_strdate(date_str, day_first=False)
# duration is present but wrong
formats = [{
'format_id': 'main',
'url': item.find('./{http://search.yahoo.com/mrss/}content').attrib['url'],
}]
backup_url = xpath_text(
item, './{http://developer.longtailvideo.com/trac/}backupContent')
if backup_url:
formats.append({
'preference': 2, # seems to be more reliable
'format_id': 'backup',
'url': backup_url,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'upload_date': upload_date,
'formats': formats,
}
| gpl-3.0 |
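The extractor above is normally driven through youtube-dl's public API rather than instantiated directly; a hedged sketch of that entry point follows, reusing the asset URL from the _TEST block. The remote service's behaviour is not guaranteed.

# Hedged sketch: exercising CinchcastIE through youtube-dl's public API.
import youtube_dl

url = 'http://player.cinchcast.com/?platformId=1&assetType=single&assetId=7141703'
with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
    info = ydl.extract_info(url, download=False)
    # info carries the fields built in _real_extract(): id, title, upload_date, formats.
    print(info.get('id'), info.get('title'))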
adit-chandra/tensorflow | tensorflow/lite/testing/op_tests/pack.py | 3 | 2994 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for pack."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_pack_tests(options):
"""Make a set of tests to do stack."""
test_parameters = [
# Avoid creating all combinations to keep the test size small.
{
"dtype": [tf.float32],
"base_shape": [[3, 4, 3], [3, 4], [5]],
"num_tensors": [1, 2, 3, 4, 5, 6],
"axis": [0, 1, 2, 3],
"additional_shape": [1, 2, 3],
},
{
"dtype": [tf.int32],
"base_shape": [[3, 4, 3], [3, 4], [5]],
"num_tensors": [6],
"axis": [0, 1, 2, 3],
"additional_shape": [1, 2, 3],
},
{
"dtype": [tf.int64],
"base_shape": [[3, 4, 3], [3, 4], [5]],
"num_tensors": [5],
"axis": [0, 1, 2, 3],
"additional_shape": [1, 2, 3],
}
]
def get_shape(parameters):
"""Return a tweaked version of 'base_shape'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
if axis < len(shape):
shape[axis] += parameters["additional_shape"]
return shape
def build_graph(parameters):
all_tensors = []
for n in range(0, parameters["num_tensors"]):
input_tensor = tf.compat.v1.placeholder(
dtype=parameters["dtype"],
name=("input%d" % n),
shape=get_shape(parameters))
all_tensors.append(input_tensor)
out = tf.stack(all_tensors, parameters["axis"])
return all_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
all_values = []
for _ in range(0, parameters["num_tensors"]):
input_values = create_tensor_data(np.float32, get_shape(parameters))
all_values.append(input_values)
return all_values, sess.run(
outputs, feed_dict=dict(zip(inputs, all_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=72)
| apache-2.0 |
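The zip tests above ultimately exercise a plain tf.stack call; for orientation, a minimal graph-mode sketch equivalent to one parameter combination is shown below. It assumes TF 1.x-style sessions, or TF 2.x with eager execution disabled.

# Minimal sketch of the op the pack tests above generate (graph mode assumed).
import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # required when running under TF 2.x

a = tf.compat.v1.placeholder(tf.float32, shape=[3, 4], name='input0')
b = tf.compat.v1.placeholder(tf.float32, shape=[3, 4], name='input1')
out = tf.stack([a, b], axis=0)  # the same op emitted by build_graph()

with tf.compat.v1.Session() as sess:
    result = sess.run(out, feed_dict={a: np.ones((3, 4), np.float32),
                                      b: np.zeros((3, 4), np.float32)})
    print(result.shape)  # (2, 3, 4)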
RPGOne/Skynet | numpy-master/numpy/distutils/command/build_clib.py | 152 | 12217 | """ Modified version of build_clib that handles fortran source files.
"""
from __future__ import division, absolute_import, print_function
import os
from glob import glob
import shutil
from distutils.command.build_clib import build_clib as old_build_clib
from distutils.errors import DistutilsSetupError, DistutilsError, \
DistutilsFileError
from numpy.distutils import log
from distutils.dep_util import newer_group
from numpy.distutils.misc_util import filter_sources, has_f_sources,\
has_cxx_sources, all_strings, get_lib_source_files, is_sequence, \
get_numpy_include_dirs
# Fix Python distutils bug sf #1718574:
_l = old_build_clib.user_options
for _i in range(len(_l)):
if _l[_i][0] in ['build-clib', 'build-temp']:
_l[_i] = (_l[_i][0]+'=',)+_l[_i][1:]
#
class build_clib(old_build_clib):
description = "build C/C++/F libraries used by Python extensions"
user_options = old_build_clib.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
('inplace', 'i', 'Build in-place'),
('parallel=', 'j',
"number of parallel jobs"),
]
boolean_options = old_build_clib.boolean_options + ['inplace']
def initialize_options(self):
old_build_clib.initialize_options(self)
self.fcompiler = None
self.inplace = 0
self.parallel = None
def finalize_options(self):
if self.parallel:
try:
self.parallel = int(self.parallel)
except ValueError:
raise ValueError("--parallel/-j argument must be an integer")
old_build_clib.finalize_options(self)
self.set_undefined_options('build', ('parallel', 'parallel'))
def have_f_sources(self):
for (lib_name, build_info) in self.libraries:
if has_f_sources(build_info.get('sources', [])):
return True
return False
def have_cxx_sources(self):
for (lib_name, build_info) in self.libraries:
if has_cxx_sources(build_info.get('sources', [])):
return True
return False
def run(self):
if not self.libraries:
return
# Make sure that library sources are complete.
languages = []
# Make sure that extension sources are complete.
self.run_command('build_src')
for (lib_name, build_info) in self.libraries:
l = build_info.get('language', None)
if l and l not in languages: languages.append(l)
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run,
force=self.force)
self.compiler.customize(self.distribution,
need_cxx=self.have_cxx_sources())
libraries = self.libraries
self.libraries = None
self.compiler.customize_cmd(self)
self.libraries = libraries
self.compiler.show_customization()
if self.have_f_sources():
from numpy.distutils.fcompiler import new_fcompiler
self._f_compiler = new_fcompiler(compiler=self.fcompiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force,
requiref90='f90' in languages,
c_compiler=self.compiler)
if self._f_compiler is not None:
self._f_compiler.customize(self.distribution)
libraries = self.libraries
self.libraries = None
self._f_compiler.customize_cmd(self)
self.libraries = libraries
self._f_compiler.show_customization()
else:
self._f_compiler = None
self.build_libraries(self.libraries)
if self.inplace:
for l in self.distribution.installed_libraries:
libname = self.compiler.library_filename(l.name)
source = os.path.join(self.build_clib, libname)
target = os.path.join(l.target_dir, libname)
self.mkpath(l.target_dir)
shutil.copy(source, target)
def get_source_files(self):
self.check_library_list(self.libraries)
filenames = []
for lib in self.libraries:
filenames.extend(get_lib_source_files(lib))
return filenames
def build_libraries(self, libraries):
for (lib_name, build_info) in libraries:
self.build_a_library(build_info, lib_name, libraries)
def build_a_library(self, build_info, lib_name, libraries):
# default compilers
compiler = self.compiler
fcompiler = self._f_compiler
sources = build_info.get('sources')
if sources is None or not is_sequence(sources):
raise DistutilsSetupError(("in 'libraries' option (library '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % lib_name)
sources = list(sources)
c_sources, cxx_sources, f_sources, fmodule_sources \
= filter_sources(sources)
requiref90 = not not fmodule_sources or \
build_info.get('language', 'c')=='f90'
# save source type information so that build_ext can use it.
source_languages = []
if c_sources: source_languages.append('c')
if cxx_sources: source_languages.append('c++')
if requiref90: source_languages.append('f90')
elif f_sources: source_languages.append('f77')
build_info['source_languages'] = source_languages
lib_file = compiler.library_filename(lib_name,
output_dir=self.build_clib)
depends = sources + build_info.get('depends', [])
if not (self.force or newer_group(depends, lib_file, 'newer')):
log.debug("skipping '%s' library (up-to-date)", lib_name)
return
else:
log.info("building '%s' library", lib_name)
config_fc = build_info.get('config_fc', {})
if fcompiler is not None and config_fc:
log.info('using additional config_fc from setup script '\
'for fortran compiler: %s' \
% (config_fc,))
from numpy.distutils.fcompiler import new_fcompiler
fcompiler = new_fcompiler(compiler=fcompiler.compiler_type,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force,
requiref90=requiref90,
c_compiler=self.compiler)
if fcompiler is not None:
dist = self.distribution
base_config_fc = dist.get_option_dict('config_fc').copy()
base_config_fc.update(config_fc)
fcompiler.customize(base_config_fc)
# check availability of Fortran compilers
if (f_sources or fmodule_sources) and fcompiler is None:
raise DistutilsError("library %s has Fortran sources"\
" but no Fortran compiler found" % (lib_name))
if fcompiler is not None:
fcompiler.extra_f77_compile_args = build_info.get('extra_f77_compile_args') or []
fcompiler.extra_f90_compile_args = build_info.get('extra_f90_compile_args') or []
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
if include_dirs is None:
include_dirs = []
extra_postargs = build_info.get('extra_compiler_args') or []
include_dirs.extend(get_numpy_include_dirs())
# where compiled F90 module files are:
module_dirs = build_info.get('module_dirs') or []
module_build_dir = os.path.dirname(lib_file)
if requiref90: self.mkpath(module_build_dir)
if compiler.compiler_type=='msvc':
# this hack works around the msvc compiler attributes
# problem, msvc uses its own convention :(
c_sources += cxx_sources
cxx_sources = []
objects = []
if c_sources:
log.info("compiling C sources")
objects = compiler.compile(c_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs)
if cxx_sources:
log.info("compiling C++ sources")
cxx_compiler = compiler.cxx_compiler()
cxx_objects = cxx_compiler.compile(cxx_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs)
objects.extend(cxx_objects)
if f_sources or fmodule_sources:
extra_postargs = []
f_objects = []
if requiref90:
if fcompiler.module_dir_switch is None:
existing_modules = glob('*.mod')
extra_postargs += fcompiler.module_options(\
module_dirs, module_build_dir)
if fmodule_sources:
log.info("compiling Fortran 90 module sources")
f_objects += fcompiler.compile(fmodule_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs)
if requiref90 and self._f_compiler.module_dir_switch is None:
# move new compiled F90 module files to module_build_dir
for f in glob('*.mod'):
if f in existing_modules:
continue
t = os.path.join(module_build_dir, f)
if os.path.abspath(f)==os.path.abspath(t):
continue
if os.path.isfile(t):
os.remove(t)
try:
self.move_file(f, module_build_dir)
except DistutilsFileError:
log.warn('failed to move %r to %r' \
% (f, module_build_dir))
if f_sources:
log.info("compiling Fortran sources")
f_objects += fcompiler.compile(f_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs)
else:
f_objects = []
objects.extend(f_objects)
# assume that default linker is suitable for
# linking Fortran object files
compiler.create_static_lib(objects, lib_name,
output_dir=self.build_clib,
debug=self.debug)
# fix library dependencies
clib_libraries = build_info.get('libraries', [])
for lname, binfo in libraries:
if lname in clib_libraries:
clib_libraries.extend(binfo.get('libraries', []))
if clib_libraries:
build_info['libraries'] = clib_libraries
| bsd-3-clause |
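build_clib above consumes the libraries entries declared through numpy.distutils; a hedged minimal setup.py sketch showing the shape of that configuration follows. Package and source file names are placeholders, and a Fortran compiler is assumed for the .f90 source.

# Hypothetical setup.py fragment producing the (lib_name, build_info) pairs that
# build_clib.build_libraries() iterates over; file names are placeholders.
from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration

def configuration(parent_package='', top_path=None):
    config = Configuration('mypkg', parent_package, top_path)
    config.add_library('fortran_helpers',
                       sources=['mypkg/helpers.f90'],
                       extra_f90_compile_args=['-O2'])
    config.add_extension('_core',
                         sources=['mypkg/_coremodule.c'],
                         libraries=['fortran_helpers'])
    return config

if __name__ == '__main__':
    setup(configuration=configuration)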
CospanDesign/nysa | test/unit/test_sdb_component.py | 1 | 7028 | #!/usr/bin/python
import unittest
import json
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
from nysa.cbuilder import sdb_component
SDB_DATA = \
" Set the Vendor ID (Hexidecimal 64-bit Number)\n" \
" SDB_VENDOR_ID:800000000000C594\n" \
"\n" \
" Set the Product ID\n" \
" SDB_DEVICE_ID:0001\n" \
"\n" \
" Set the Version of the core\n" \
" SDB_CORE_VERSION:00.000.001\n" \
"\n" \
" Set the name of the core\n" \
" SDB_NAME:sdb_module\n" \
"\n" \
" Set ABI Class\n" \
" SDB_ABI_CLASS:0000\n" \
" Undocumented Device\n" \
"\n" \
" Set API Version Major\n" \
" SDB_ABI_VERSION_MAJOR:01\n" \
"\n" \
" Set ABI Version Minor\n" \
" SDB_ABI_VERSION_MINOR:00\n" \
"\n" \
" Set Endian BIG, LITTLE\n" \
" SDB_ABI_ENDIAN:BIG\n" \
"\n" \
" Set Device Width (8, 16, 32, 64)\n" \
" SDB_ABI_DEVICE_WIDTH:32\n" \
"\n" \
" Set the Modules URL\n" \
" SDB_MODULE_URL:http://www.example.com\n" \
"\n" \
" Date\n" \
" SDB_DATE:2015/01/05\n" \
"\n" \
" Device is executable\n" \
" SDB_EXECUTABLE:True\n" \
"\n" \
" Device is writeable\n" \
" SDB_WRITEABLE:True\n" \
"\n" \
" Device is readable\n" \
" SDB_READABLE:True\n" \
"\n"
TEST_INTERCONNECT_ROM = ""\
"5344422D\n" \
"00100100\n" \
"00000000\n" \
"01000000\n" \
"00000000\n" \
"01000005\n" \
"80000000\n" \
"0000C594\n" \
"00000001\n" \
"00000001\n" \
"140F0105\n" \
"7364625F\n" \
"6D6F6475\n" \
"6C650000\n" \
"00000000\n" \
"00000000"
TEST_BRIDGE_ROM = ""\
"10000000\n" \
"00000000\n" \
"00000000\n" \
"01000000\n" \
"00000000\n" \
"01000005\n" \
"80000000\n" \
"0000C594\n" \
"00000001\n" \
"00000001\n" \
"140F0105\n" \
"7364625F\n" \
"6D6F6475\n" \
"6C650000\n" \
"00000000\n" \
"00000002"
TEST_DEVICE_ROM = ""\
"00000100\n" \
"00000207\n" \
"00000000\n" \
"01000000\n" \
"00000000\n" \
"01000005\n" \
"80000000\n" \
"0000C594\n" \
"00000001\n" \
"00000001\n" \
"140F0105\n" \
"7364625F\n" \
"6D6F6475\n" \
"6C650000\n" \
"00000000\n" \
"00000001"
class Test (unittest.TestCase):
"""Unit test for SDB Component"""
def setUp(self):
self.dbg = False
self.sdbc = sdb_component.SDBComponent()
self.sdbc.parse_buffer(SDB_DATA)
self.sdbc.set_start_address(0x01000000)
self.sdbc.set_size(5)
self.sdbc.set_number_of_records(10)
self.sdbc.set_bridge_child_addr(0x1000000000000000)
def test_parse_buffer(self):
od = self.sdbc.generated_ordered_dict()
self.assertEqual(od["SDB_VENDOR_ID"] , "800000000000C594")
self.assertEqual(od["SDB_DEVICE_ID"] , "0001")
self.assertEqual(od["SDB_CORE_VERSION"] , "00.000.001")
self.assertEqual(od["SDB_NAME"] , "sdb_module")
self.assertEqual(od["SDB_ABI_CLASS"] , "0000")
self.assertEqual(od["SDB_ABI_VERSION_MAJOR"] , "01")
self.assertEqual(od["SDB_ABI_VERSION_MINOR"] , "00")
self.assertEqual(od["SDB_ABI_ENDIAN"] , "BIG")
self.assertEqual(od["SDB_ABI_DEVICE_WIDTH"] , "32")
self.assertEqual(od["SDB_MODULE_URL"] , "http://www.example.com")
self.assertEqual(od["SDB_DATE"] , "2015/01/05")
self.assertEqual(od["SDB_EXECUTABLE"] , "True")
self.assertEqual(od["SDB_WRITEABLE"] , "True")
self.assertEqual(od["SDB_READABLE"] , "True")
self.assertEqual(od["SDB_START_ADDRESS"] , "0x1000000")
self.assertEqual(od["SDB_LAST_ADDRESS"] , "0x1000005L")
self.assertEqual(od["SDB_NRECS"] , "10")
self.assertEqual(od["SDB_BRIDGE_CHILD_ADDR"] , "0x1000000000000000")
for e in od:
#print "%s:%s" % (e, od[e])
pass
def test_create_device_record(self):
device = sdb_component.create_device_record(name = "test device",
vendor_id = 0x1000000000000000,
device_id = 0x00000000,
core_version = "1.0",
abi_class = 0,
version_major = 1,
version_minor = 0)
self.assertTrue(device.is_device())
def test_create_interconnect_record(self):
interconnect = sdb_component.create_interconnect_record(
name = "peripherals",
vendor_id = 0x1000000000000000,
device_id = 0x00000000,
start_address = 0x01,
size = 10)
self.assertTrue(interconnect.is_interconnect())
def test_create_bridge_record(self):
bridge = sdb_component.create_bridge_record(name = "bridge",
vendor_id = 0x1000000000000000,
device_id = 0x00000000,
start_address = 0x01,
size = 10)
self.assertTrue(bridge.is_bridge())
def test_create_integration_record(self):
integration = sdb_component.create_integration_record(
information = "test integration",
vendor_id = 0x1000000000000000,
device_id = 0x00000000)
self.assertTrue(integration.is_integration_record())
def test_create_synthesis_record(self):
synth_record = sdb_component.create_synthesis_record(
synthesis_name = "name of names",
commit_id = 0000,
tool_name = "xilinx",
tool_version = 14.1,
user_name = "Dave McCoy")
self.assertTrue(synth_record.is_synthesis_record())
def test_create_repo_url_record(self):
url = sdb_component.create_repo_url_record("wwww.cospandesign.com")
self.assertTrue(url.is_url_record())
| mit |
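Outside the unittest harness the same helpers can be called directly; a short hedged sketch using the factory exercised above follows (IDs and names are arbitrary).

# Hedged sketch: building a device record with the factory tested above.
from nysa.cbuilder import sdb_component

device = sdb_component.create_device_record(name="example device",
                                            vendor_id=0x800000000000C594,
                                            device_id=0x00000001,
                                            core_version="1.0",
                                            abi_class=0,
                                            version_major=1,
                                            version_minor=0)
print(device.is_device())  # True for a device record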
ghchinoy/tensorflow | tensorflow/python/client/session_partial_run_test.py | 14 | 11707 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session's partial run APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
class PartialRunTest(test_util.TensorFlowTestCase):
def RunTestPartialRun(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 17
res = sess.partial_run(h, r2, feed_dict={c: temp})
self.assertEqual(153, res)
# Call again on the same graph.
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 18
res = sess.partial_run(h2, r2, feed_dict={c: temp})
self.assertEqual(162, res)
def RunTestPartialRunIncomplete(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
def RunTestConcurrentPartialRun(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h1 = sess.partial_run_setup([r1], [a, b, c])
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 19
res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
self.assertEqual(66, res)
res = sess.partial_run(h2, r2, feed_dict={c: 7})
self.assertEqual(462, res)
def RunTestManyPartialRun(self, sess):
steps = 200
inputs = []
outputs = []
a = constant_op.constant(2.0, dtypes.float32)
for i in xrange(steps):
inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
a = math_ops.multiply(a, inputs[i])
outputs.append(a)
h = sess.partial_run_setup(outputs, inputs)
for i in xrange(steps):
res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0})
self.assertEqual(2.0, res)
feed_dict = {}
for i in xrange(steps):
feed_dict[inputs[i]] = 1.0
res = sess.run(outputs, feed_dict)
self.assertEqual(steps, len(res))
self.assertEqual(2.0, res[-1])
def RunTestRunAndPartialRun(self, sess):
a = constant_op.constant(2.0, dtypes.float32)
b = a * 2
c = b * 3
r1 = self.evaluate([b, c])
h = sess.partial_run_setup([b, c], [])
r2 = sess.partial_run(h, [b, c])
self.assertEqual(r1, r2)
def RunTestPartialRunMissingPlaceholderFeedException(self, sess):
x = array_ops.placeholder(dtypes.float32, shape=())
fetches = [x * 2, x * 3]
handle = sess.partial_run_setup(fetches=fetches, feeds=[])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'You must feed a value for placeholder'):
sess.partial_run(handle, fetches[0])
def RunTestPartialRunUnspecifiedFeed(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
h = sess.partial_run_setup([r1], [a, b])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'was not specified in partial_run_setup.$'):
sess.partial_run(h, r1, feed_dict={a: 1, b: 2, c: 3})
def RunTestPartialRunUnspecifiedFetch(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(a, c)
h = sess.partial_run_setup([r1], [a, b, c])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'was not specified in partial_run_setup.$'):
sess.partial_run(h, r2, feed_dict={a: 1, c: 3})
def RunTestPartialRunAlreadyFed(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(a, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'has already been fed.$'):
sess.partial_run(h, r2, feed_dict={a: 1, c: 3})
def RunTestPartialRunAlreadyFetched(self, sess):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(a, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
with self.assertRaisesRegexp(errors.InvalidArgumentError,
'has already been fetched.$'):
sess.partial_run(h, r1, feed_dict={c: 3})
def RunTestPartialRunEmptyFetches(self, sess):
a = array_ops.placeholder(dtypes.float32)
b = a * 2.0
h = sess.partial_run_setup(fetches=[b], feeds=[a])
sess.partial_run(h, [], {a: 3.0})
r = sess.partial_run(h, [b], {})
self.assertEqual([6.0], r)
@test_util.run_deprecated_v1
def testInvalidPartialRunSetup(self):
sess = session.Session()
x = array_ops.placeholder(dtypes.float32, shape=[])
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
'specify at least one target to fetch or execute.'):
sess.partial_run_setup(fetches=[], feeds=[x])
@test_util.run_deprecated_v1
def testPartialRunSetupNoFeedsPassed(self):
sess = session.Session()
r1 = constant_op.constant([6.0])
h = sess.partial_run_setup([r1])
result1 = sess.partial_run(h, r1)
self.assertEqual([6.0], result1)
@test_util.run_deprecated_v1
def testPartialRunDirect(self):
self.RunTestPartialRun(session.Session())
@test_util.run_deprecated_v1
def testPartialRunIncompleteDirect(self):
self.RunTestPartialRunIncomplete(session.Session())
@test_util.run_deprecated_v1
def testConcurrentPartialRunDirect(self):
self.RunTestConcurrentPartialRun(session.Session())
@test_util.run_deprecated_v1
def testManyPartialRunDirect(self):
self.RunTestManyPartialRun(session.Session())
@test_util.run_deprecated_v1
def testRunAndPartialRunDirect(self):
self.RunTestRunAndPartialRun(session.Session())
@test_util.run_deprecated_v1
def testPartialRunMissingPlaceholderFeedExceptionDirect(self):
self.RunTestPartialRunMissingPlaceholderFeedException(session.Session())
@test_util.run_deprecated_v1
def testPartialRunUnspecifiedFeedDirect(self):
self.RunTestPartialRunUnspecifiedFeed(session.Session())
@test_util.run_deprecated_v1
def testPartialRunUnspecifiedFetchDirect(self):
self.RunTestPartialRunUnspecifiedFetch(session.Session())
@test_util.run_deprecated_v1
def testPartialRunAlreadyFedDirect(self):
self.RunTestPartialRunAlreadyFed(session.Session())
@test_util.run_deprecated_v1
def testPartialRunAlreadyFetchedDirect(self):
self.RunTestPartialRunAlreadyFetched(session.Session())
@test_util.run_deprecated_v1
def testPartialRunEmptyFetchesDirect(self):
self.RunTestPartialRunEmptyFetches(session.Session())
@test_util.run_deprecated_v1
def testPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRun(session.Session(server.target))
@test_util.run_deprecated_v1
def testPartialRunIncompleteDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRunIncomplete(session.Session(server.target))
@test_util.run_deprecated_v1
def testConcurrentPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.RunTestConcurrentPartialRun(session.Session(server.target))
@test_util.run_deprecated_v1
def testManyPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.RunTestManyPartialRun(session.Session(server.target))
@test_util.run_deprecated_v1
def testRunAndPartialRunDist(self):
server = server_lib.Server.create_local_server()
self.RunTestRunAndPartialRun(session.Session(server.target))
@test_util.run_deprecated_v1
def testPartialRunMissingPlaceholderFeedExceptionDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRunMissingPlaceholderFeedException(
session.Session(server.target))
@test_util.run_deprecated_v1
def testPartialRunUnspecifiedFeedDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRunUnspecifiedFeed(session.Session(server.target))
@test_util.run_deprecated_v1
def testPartialRunUnspecifiedFetchDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRunUnspecifiedFetch(session.Session(server.target))
@test_util.run_deprecated_v1
def testPartialRunAlreadyFedDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRunAlreadyFed(session.Session(server.target))
@test_util.run_deprecated_v1
def testPartialRunAlreadyFetchedDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRunAlreadyFetched(session.Session(server.target))
@test_util.run_deprecated_v1
def testPartialRunEmptyFetchesDist(self):
server = server_lib.Server.create_local_server()
self.RunTestPartialRunEmptyFetches(session.Session(server.target))
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
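The test matrix above reduces to a three-call pattern: set up, feed part of the graph, then finish it. A minimal hedged sketch of that pattern outside the test harness follows (graph mode assumed).

# Minimal partial_run pattern distilled from the tests above (graph mode assumed).
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # required when running under TF 2.x

a = tf.compat.v1.placeholder(tf.float32, shape=[])
b = tf.compat.v1.placeholder(tf.float32, shape=[])
c = tf.compat.v1.placeholder(tf.float32, shape=[])
r1 = a + b
r2 = r1 * c

with tf.compat.v1.Session() as sess:
    # Declare every fetch and feed up front, then run the graph in stages.
    h = sess.partial_run_setup([r1, r2], [a, b, c])
    res1 = sess.partial_run(h, r1, feed_dict={a: 1.0, b: 2.0})  # 3.0
    res2 = sess.partial_run(h, r2, feed_dict={c: res1 * 17.0})  # 153.0
    print(res1, res2)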
dangayle/substitutions | templatetags/substitutions.py | 1 | 1593 | # encoding: utf-8
"""
Make reading the news more fun.
http://xkcd.com/1288/
"""
from __future__ import unicode_literals
import re
from django import template
from django.template.defaultfilters import stringfilter
register = template.Library()
substitutes = {
"witnesses": "these dudes I know",
"Witnesses": "These dudes I know",
"allegedly": "kinda probably",
"Allegedly": "Kinda probably",
"new study": "Tumblr post",
"New study": "Tumblr post",
"rebuild": "avenge",
"Rebuild": "Avenge",
"space": "spaace",
"Space": "Spaace",
"google glass": "Virtual Boy",
"Google glass": "Virtual Boy",
"Google Glass": "Virtual Boy",
"Smartphone": "Pokédex",
"smartphone": "Pokédex",
"electric": "atomic",
"Electric": "Atomic",
"senator": "elf-lord",
"Senator": "Elf-lord",
"car": "cat",
"Car": "Cat",
"election": "eating contest",
"Election": "Eating contest",
"congressional leaders": "river spirits",
"Congressional leaders": "River spirits",
"homeland security": "Homestar Runner",
"Homeland security": "Homestar Runner",
"Homeland Security": "Homestar Runner",
"could not be reached for comment": "is guilty and everyone knows it",
}
@register.filter(is_safe=True)
@stringfilter
def substitute(value):
"""
Substitute words in a string with replacements from substitutes dict.
"""
pattern = re.compile('|'.join(substitutes.keys()))
try:
result = pattern.sub(lambda x: substitutes[x.group()], value)
except:
result = value
return result
| mit |
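Because the filter is an ordinary function, it can also be exercised outside a template; the import path below is an assumption about how the package is installed. In a template the equivalent would be {% load substitutions %}{{ headline|substitute }}.

# Hedged sketch: calling the filter directly (import path is an assumption).
from substitutions.templatetags.substitutions import substitute

print(substitute("Witnesses say the senator saw a new study on space"))
# -> "These dudes I know say the elf-lord saw a Tumblr post on spaace"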
kvar/ansible | lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset.py | 14 | 16746 | #!/usr/bin/python
#
# Copyright (c) 2017 Obezimnaka Boms, <[email protected]>
# Copyright (c) 2017 Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_dnsrecordset
version_added: "2.4"
short_description: Create, delete and update DNS record sets and records
description:
    - Creates, deletes, and updates DNS record sets and records within an existing Azure DNS Zone.
options:
resource_group:
description:
- Name of resource group.
required: true
zone_name:
description:
- Name of the existing DNS zone in which to manage the record set.
required: true
relative_name:
description:
- Relative name of the record set.
required: true
record_type:
description:
- The type of record set to create or delete.
choices:
- A
- AAAA
- CNAME
- MX
- NS
- SRV
- TXT
- PTR
- CAA
- SOA
required: true
record_mode:
description:
- Whether existing record values not sent to the module should be purged.
default: purge
choices:
- append
- purge
state:
description:
- Assert the state of the record set. Use C(present) to create or update and C(absent) to delete.
default: present
choices:
- absent
- present
time_to_live:
description:
- Time to live of the record set in seconds.
default: 3600
records:
description:
- List of records to be created depending on the type of record (set).
suboptions:
preference:
description:
- Used for creating an C(MX) record set/records.
priority:
description:
- Used for creating an C(SRV) record set/records.
weight:
description:
- Used for creating an C(SRV) record set/records.
port:
description:
- Used for creating an C(SRV) record set/records.
entry:
description:
- Primary data value for all record types.
extends_documentation_fragment:
- azure
- azure_tags
author:
- Obezimnaka Boms (@ozboms)
- Matt Davis (@nitzmahone)
'''
EXAMPLES = '''
- name: ensure an "A" record set with multiple records
azure_rm_dnsrecordset:
resource_group: myResourceGroup
relative_name: www
zone_name: testing.com
record_type: A
records:
- entry: 192.168.100.101
- entry: 192.168.100.102
- entry: 192.168.100.103
- name: delete a record set
azure_rm_dnsrecordset:
resource_group: myResourceGroup
record_type: A
relative_name: www
zone_name: testing.com
state: absent
- name: create multiple "A" record sets with multiple records
azure_rm_dnsrecordset:
resource_group: myResourceGroup
zone_name: testing.com
relative_name: "{{ item.name }}"
record_type: "{{ item.type }}"
records: "{{ item.records }}"
with_items:
- { name: 'servera', type: 'A', records: [ { entry: '10.10.10.20' }, { entry: '10.10.10.21' }] }
- { name: 'serverb', type: 'A', records: [ { entry: '10.10.10.30' }, { entry: '10.10.10.41' }] }
- { name: 'serverc', type: 'A', records: [ { entry: '10.10.10.40' }, { entry: '10.10.10.41' }] }
- name: create SRV records in a new record set
azure_rm_dnsrecordset:
resource_group: myResourceGroup
relative_name: _sip._tcp.testing.com
zone_name: testing.com
time_to_live: 7200
record_type: SRV
records:
- entry: sip.testing.com
preference: 10
priority: 20
weight: 10
port: 5060
- name: create PTR record in a new record set
azure_rm_dnsrecordset:
resource_group: myResourceGroup
relative_name: 192.168.100.101.in-addr.arpa
zone_name: testing.com
record_type: PTR
records:
- entry: servera.testing.com
- name: create TXT record in a new record set
azure_rm_dnsrecordset:
resource_group: myResourceGroup
relative_name: mail.testing.com
zone_name: testing.com
record_type: TXT
records:
- entry: 'v=spf1 a -all'
'''
RETURN = '''
state:
description:
- Current state of the DNS record set.
returned: always
type: complex
contains:
id:
description:
- The DNS record set ID.
returned: always
type: str
sample: "/subscriptions/xxxx......xxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/dnszones/b57dc95985712e4523282.com/A/www"
name:
description:
- Relate name of the record set.
returned: always
type: str
sample: 'www'
fqdn:
description:
- Fully qualified domain name of the record set.
returned: always
type: str
sample: www.b57dc95985712e4523282.com
etag:
description:
- The etag of the record set.
returned: always
type: str
sample: 692c3e92-a618-46fc-aecd-8f888807cd6c
provisioning_state:
description:
- The DNS record set state.
returned: always
type: str
sample: Succeeded
target_resource:
description:
- The target resource of the record set.
returned: always
type: dict
sample: {}
ttl:
description:
- The TTL(time-to-live) of the records in the records set.
returned: always
type: int
sample: 3600
type:
description:
- The type of DNS record in this record set.
returned: always
type: str
sample: A
arecords:
description:
- A list of records in the record set.
returned: always
type: list
sample: [
{
"ipv4_address": "192.0.2.2"
},
{
"ipv4_address": "192.0.2.4"
},
{
"ipv4_address": "192.0.2.8"
}
]
'''
import inspect
import sys
from ansible.module_utils.basic import _load_params
from ansible.module_utils.six import iteritems
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, HAS_AZURE
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
RECORD_ARGSPECS = dict(
A=dict(
ipv4_address=dict(type='str', required=True, aliases=['entry'])
),
AAAA=dict(
ipv6_address=dict(type='str', required=True, aliases=['entry'])
),
CNAME=dict(
cname=dict(type='str', required=True, aliases=['entry'])
),
MX=dict(
preference=dict(type='int', required=True),
exchange=dict(type='str', required=True, aliases=['entry'])
),
NS=dict(
nsdname=dict(type='str', required=True, aliases=['entry'])
),
PTR=dict(
ptrdname=dict(type='str', required=True, aliases=['entry'])
),
SRV=dict(
priority=dict(type='int', required=True),
port=dict(type='int', required=True),
weight=dict(type='int', required=True),
target=dict(type='str', required=True, aliases=['entry'])
),
TXT=dict(
value=dict(type='list', required=True, aliases=['entry'])
),
SOA=dict(
host=dict(type='str', aliases=['entry']),
email=dict(type='str'),
serial_number=dict(type='long'),
refresh_time=dict(type='long'),
retry_time=dict(type='long'),
expire_time=dict(type='long'),
minimum_ttl=dict(type='long')
),
CAA=dict(
value=dict(type='str', aliases=['entry']),
flags=dict(type='int'),
tag=dict(type='str')
)
# FUTURE: ensure all record types are supported (see https://github.com/Azure/azure-sdk-for-python/tree/master/azure-mgmt-dns/azure/mgmt/dns/models)
)
RECORDSET_VALUE_MAP = dict(
A=dict(attrname='arecords', classobj='ARecord', is_list=True),
AAAA=dict(attrname='aaaa_records', classobj='AaaaRecord', is_list=True),
CNAME=dict(attrname='cname_record', classobj='CnameRecord', is_list=False),
MX=dict(attrname='mx_records', classobj='MxRecord', is_list=True),
NS=dict(attrname='ns_records', classobj='NsRecord', is_list=True),
PTR=dict(attrname='ptr_records', classobj='PtrRecord', is_list=True),
SRV=dict(attrname='srv_records', classobj='SrvRecord', is_list=True),
TXT=dict(attrname='txt_records', classobj='TxtRecord', is_list=True),
SOA=dict(attrname='soa_record', classobj='SoaRecord', is_list=False),
CAA=dict(attrname='caa_records', classobj='CaaRecord', is_list=True)
# FUTURE: add missing record types from https://github.com/Azure/azure-sdk-for-python/blob/master/azure-mgmt-dns/azure/mgmt/dns/models/record_set.py
) if HAS_AZURE else {}
class AzureRMRecordSet(AzureRMModuleBase):
def __init__(self):
# we're doing two-pass arg validation, sample and store the args internally to allow this
_load_params()
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
relative_name=dict(type='str', required=True),
zone_name=dict(type='str', required=True),
record_type=dict(choices=RECORD_ARGSPECS.keys(), required=True, type='str'),
record_mode=dict(choices=['append', 'purge'], default='purge'),
state=dict(choices=['present', 'absent'], default='present', type='str'),
time_to_live=dict(type='int', default=3600),
records=dict(type='list', elements='dict')
)
required_if = [
('state', 'present', ['records'])
]
self.results = dict(
changed=False
)
# first-pass arg validation so we can get the record type- skip exec_module
super(AzureRMRecordSet, self).__init__(self.module_arg_spec, required_if=required_if, supports_check_mode=True, skip_exec=True)
# look up the right subspec and metadata
record_subspec = RECORD_ARGSPECS.get(self.module.params['record_type'])
# patch the right record shape onto the argspec
self.module_arg_spec['records']['options'] = record_subspec
self.resource_group = None
self.relative_name = None
self.zone_name = None
self.record_type = None
self.record_mode = None
self.state = None
self.time_to_live = None
self.records = None
# rerun validation and actually run the module this time
super(AzureRMRecordSet, self).__init__(self.module_arg_spec, required_if=required_if, supports_check_mode=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec.keys():
setattr(self, key, kwargs[key])
zone = self.dns_client.zones.get(self.resource_group, self.zone_name)
if not zone:
self.fail('The zone {0} does not exist in the resource group {1}'.format(self.zone_name, self.resource_group))
try:
self.log('Fetching Record Set {0}'.format(self.relative_name))
record_set = self.dns_client.record_sets.get(self.resource_group, self.zone_name, self.relative_name, self.record_type)
self.results['state'] = self.recordset_to_dict(record_set)
except CloudError:
record_set = None
# FUTURE: fail on anything other than ResourceNotFound
record_type_metadata = RECORDSET_VALUE_MAP.get(self.record_type)
# FUTURE: implement diff mode
if self.state == 'present':
# convert the input records to SDK objects
self.input_sdk_records = self.create_sdk_records(self.records, self.record_type)
if not record_set:
changed = True
else:
# and use it to get the type-specific records
server_records = getattr(record_set, record_type_metadata.get('attrname'))
# compare the input records to the server records
self.input_sdk_records, changed = self.records_changed(self.input_sdk_records, server_records)
# also check top-level recordset properties
changed |= record_set.ttl != self.time_to_live
# FUTURE: add metadata/tag check on recordset
self.results['changed'] |= changed
elif self.state == 'absent':
if record_set:
self.results['changed'] = True
if self.check_mode:
return self.results
if self.results['changed']:
if self.state == 'present':
record_set_args = dict(
ttl=self.time_to_live
)
record_set_args[record_type_metadata['attrname']] = self.input_sdk_records if record_type_metadata['is_list'] else self.input_sdk_records[0]
record_set = self.dns_models.RecordSet(**record_set_args)
self.results['state'] = self.create_or_update(record_set)
elif self.state == 'absent':
# delete record set
self.delete_record_set()
return self.results
def create_or_update(self, record_set):
try:
record_set = self.dns_client.record_sets.create_or_update(resource_group_name=self.resource_group,
zone_name=self.zone_name,
relative_record_set_name=self.relative_name,
record_type=self.record_type,
parameters=record_set)
return self.recordset_to_dict(record_set)
except Exception as exc:
self.fail("Error creating or updating dns record {0} - {1}".format(self.relative_name, exc.message or str(exc)))
def delete_record_set(self):
try:
# delete the record set
self.dns_client.record_sets.delete(resource_group_name=self.resource_group,
zone_name=self.zone_name,
relative_record_set_name=self.relative_name,
record_type=self.record_type)
except Exception as exc:
self.fail("Error deleting record set {0} - {1}".format(self.relative_name, exc.message or str(exc)))
return None
def create_sdk_records(self, input_records, record_type):
record = RECORDSET_VALUE_MAP.get(record_type)
if not record:
self.fail('record type {0} is not supported now'.format(record_type))
record_sdk_class = getattr(self.dns_models, record.get('classobj'))
return [record_sdk_class(**x) for x in input_records]
def records_changed(self, input_records, server_records):
# ensure we're always comparing a list, even for the single-valued types
if not isinstance(server_records, list):
server_records = [server_records]
input_set = set([self.module.jsonify(x.as_dict()) for x in input_records])
server_set = set([self.module.jsonify(x.as_dict()) for x in server_records])
if self.record_mode == 'append': # only a difference if the server set is missing something from the input set
input_set = server_set.union(input_set)
# non-append mode; any difference in the sets is a change
changed = input_set != server_set
records = [self.module.from_json(x) for x in input_set]
return self.create_sdk_records(records, self.record_type), changed
def recordset_to_dict(self, recordset):
result = recordset.as_dict()
        # str.strip() removes characters, not a prefix, and would mangle types like 'NS'
        prefix = 'Microsoft.Network/dnszones/'
        if result['type'].startswith(prefix):
            result['type'] = result['type'][len(prefix):]
return result
def main():
AzureRMRecordSet()
if __name__ == '__main__':
main()
| gpl-3.0 |
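The append-versus-purge comparison in records_changed() above is easy to misread; the standalone illustration below (made-up record values, plain Python sets) shows why append mode only reports a change when the server is missing a requested record.

# Standalone illustration of the set logic in records_changed(); values are made up.
input_set = {'{"ipv4_address": "10.0.0.1"}', '{"ipv4_address": "10.0.0.2"}'}
server_set = {'{"ipv4_address": "10.0.0.1"}', '{"ipv4_address": "10.0.0.9"}'}

record_mode = 'append'
if record_mode == 'append':
    # union the server records in first, so extra server records never count as a change
    input_set = server_set.union(input_set)

changed = input_set != server_set
print(changed)  # True: 10.0.0.2 must still be added; 10.0.0.9 is left alone in append mode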
marcellodesales/svnedge-console | svn-server/lib/suds/mx/core.py | 210 | 4839 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides I{marshaller} core classes.
"""
from logging import getLogger
from suds import *
from suds.mx import *
from suds.mx.appender import ContentAppender
from suds.sax.element import Element
from suds.sax.document import Document
from suds.sudsobject import Property
log = getLogger(__name__)
class Core:
"""
    An I{abstract} marshaller. This class implements the core
functionality of the marshaller.
@ivar appender: A content appender.
@type appender: L{ContentAppender}
"""
def __init__(self):
"""
"""
self.appender = ContentAppender(self)
def process(self, content):
"""
Process (marshal) the tag with the specified value using the
optional type information.
@param content: The content to process.
@type content: L{Object}
"""
log.debug('processing:\n%s', content)
self.reset()
if content.tag is None:
content.tag = content.value.__class__.__name__
document = Document()
if isinstance(content.value, Property):
root = self.node(content)
self.append(document, content)
else:
self.append(document, content)
return document.root()
def append(self, parent, content):
"""
Append the specified L{content} to the I{parent}.
@param parent: The parent node to append to.
@type parent: L{Element}
@param content: The content to append.
@type content: L{Object}
"""
log.debug('appending parent:\n%s\ncontent:\n%s', parent, content)
if self.start(content):
self.appender.append(parent, content)
self.end(parent, content)
def reset(self):
"""
Reset the marshaller.
"""
pass
def node(self, content):
"""
Create and return an XML node.
        @param content: The content for which the node is being created.
@type content: L{Object}
@return: An element.
@rtype: L{Element}
"""
return Element(content.tag)
def start(self, content):
"""
Appending this content has started.
@param content: The content for which proccessing has started.
@type content: L{Content}
@return: True to continue appending
@rtype: boolean
"""
return True
def suspend(self, content):
"""
Appending this content has suspended.
@param content: The content for which proccessing has been suspended.
@type content: L{Content}
"""
pass
def resume(self, content):
"""
Appending this content has resumed.
@param content: The content for which proccessing has been resumed.
@type content: L{Content}
"""
pass
def end(self, parent, content):
"""
Appending this content has ended.
@param parent: The parent node ending.
@type parent: L{Element}
@param content: The content for which proccessing has ended.
@type content: L{Content}
"""
pass
def setnil(self, node, content):
"""
        Set the value of the I{node} to nil.
@param node: A I{nil} node.
@type node: L{Element}
@param content: The content to set nil.
@type content: L{Content}
"""
pass
def setdefault(self, node, content):
"""
Set the value of the I{node} to a default value.
@param node: A I{nil} node.
@type node: L{Element}
@param content: The content to set the default value.
@type content: L{Content}
@return: The default.
"""
pass
def optional(self, content):
"""
Get whether the specified content is optional.
@param content: The content which to check.
@type content: L{Content}
"""
return False
| agpl-3.0 |
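Core above is rarely driven by hand; in practice it sits behind suds' client-side marshalling, sketched below in hedged form. The WSDL URL, type name and operation are placeholders, and fetching the WSDL requires network access.

# Hedged sketch of the layer that ultimately drives Core.process(): a suds client
# marshals the request object into XML when the service operation is invoked.
from suds.client import Client

client = Client('http://example.com/service?wsdl')  # placeholder WSDL (fetched over HTTP)
person = client.factory.create('Person')            # placeholder complex type from the WSDL
person.name = 'Ada'
# Invoking the operation serializes 'person' via the marshaller core shown above.
result = client.service.addPerson(person)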
bleib1dj/boto | tests/unit/cloudsearch2/test_connection.py | 114 | 8278 | #!/usr/bin env python
from tests.unit import AWSMockServiceTestCase
from boto.cloudsearch2.domain import Domain
from boto.cloudsearch2.layer1 import CloudSearchConnection
class TestCloudSearchCreateDomain(AWSMockServiceTestCase):
connection_class = CloudSearchConnection
def default_body(self):
return b"""
{
"CreateDomainResponse": {
"CreateDomainResult": {
"DomainStatus": {
"SearchInstanceType": null,
"DomainId": "1234567890/demo",
"DomainName": "demo",
"Deleted": false,
"SearchInstanceCount": 0,
"Created": true,
"SearchService": {
"Endpoint": "search-demo.us-east-1.cloudsearch.amazonaws.com"
},
"RequiresIndexDocuments": false,
"Processing": false,
"DocService": {
"Endpoint": "doc-demo.us-east-1.cloudsearch.amazonaws.com"
},
"ARN": "arn:aws:cs:us-east-1:1234567890:domain/demo",
"SearchPartitionCount": 0
}
},
"ResponseMetadata": {
"RequestId": "00000000-0000-0000-0000-000000000000"
}
}
}
"""
def test_create_domain(self):
self.set_http_response(status_code=200)
self.service_connection.create_domain('demo')
self.assert_request_parameters({
'Action': 'CreateDomain',
'ContentType': 'JSON',
'DomainName': 'demo',
'Version': '2013-01-01',
})
def test_cloudsearch_connect_result_endpoints(self):
"""Check that endpoints & ARNs are correctly returned from AWS"""
self.set_http_response(status_code=200)
api_response = self.service_connection.create_domain('demo')
domain = Domain(self, api_response['CreateDomainResponse']
['CreateDomainResult']
['DomainStatus'])
self.assertEqual(
domain.doc_service_endpoint,
"doc-demo.us-east-1.cloudsearch.amazonaws.com")
self.assertEqual(domain.service_arn,
"arn:aws:cs:us-east-1:1234567890:domain/demo")
self.assertEqual(
domain.search_service_endpoint,
"search-demo.us-east-1.cloudsearch.amazonaws.com")
def test_cloudsearch_connect_result_statuses(self):
"""Check that domain statuses are correctly returned from AWS"""
self.set_http_response(status_code=200)
api_response = self.service_connection.create_domain('demo')
domain = Domain(self, api_response['CreateDomainResponse']
['CreateDomainResult']
['DomainStatus'])
self.assertEqual(domain.created, True)
self.assertEqual(domain.processing, False)
self.assertEqual(domain.requires_index_documents, False)
self.assertEqual(domain.deleted, False)
def test_cloudsearch_connect_result_details(self):
"""Check that the domain information is correctly returned from AWS"""
self.set_http_response(status_code=200)
api_response = self.service_connection.create_domain('demo')
domain = Domain(self, api_response['CreateDomainResponse']
['CreateDomainResult']
['DomainStatus'])
self.assertEqual(domain.id, "1234567890/demo")
self.assertEqual(domain.name, "demo")
def test_cloudsearch_documentservice_creation(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_domain('demo')
domain = Domain(self, api_response['CreateDomainResponse']
['CreateDomainResult']
['DomainStatus'])
document = domain.get_document_service()
self.assertEqual(
document.endpoint,
"doc-demo.us-east-1.cloudsearch.amazonaws.com")
def test_cloudsearch_searchservice_creation(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_domain('demo')
domain = Domain(self, api_response['CreateDomainResponse']
['CreateDomainResult']
['DomainStatus'])
search = domain.get_search_service()
self.assertEqual(
search.endpoint,
"search-demo.us-east-1.cloudsearch.amazonaws.com")
class CloudSearchConnectionDeletionTest(AWSMockServiceTestCase):
connection_class = CloudSearchConnection
def default_body(self):
return b"""
{
"DeleteDomainResponse": {
"DeleteDomainResult": {
"DomainStatus": {
"SearchInstanceType": null,
"DomainId": "1234567890/demo",
"DomainName": "test",
"Deleted": true,
"SearchInstanceCount": 0,
"Created": true,
"SearchService": {
"Endpoint": null
},
"RequiresIndexDocuments": false,
"Processing": false,
"DocService": {
"Endpoint": null
},
"ARN": "arn:aws:cs:us-east-1:1234567890:domain/demo",
"SearchPartitionCount": 0
}
},
"ResponseMetadata": {
"RequestId": "00000000-0000-0000-0000-000000000000"
}
}
}
"""
def test_cloudsearch_deletion(self):
"""
Check that the correct arguments are sent to AWS when creating a
cloudsearch connection.
"""
self.set_http_response(status_code=200)
self.service_connection.delete_domain('demo')
self.assert_request_parameters({
'Action': 'DeleteDomain',
'ContentType': 'JSON',
'DomainName': 'demo',
'Version': '2013-01-01',
})
class CloudSearchConnectionIndexDocumentTest(AWSMockServiceTestCase):
connection_class = CloudSearchConnection
def default_body(self):
return b"""
{
"IndexDocumentsResponse": {
"IndexDocumentsResult": {
"FieldNames": [
"average_score",
"brand_id",
"colors",
"context",
"context_owner",
"created_at",
"creator_id",
"description",
"file_size",
"format",
"has_logo",
"has_messaging",
"height",
"image_id",
"ingested_from",
"is_advertising",
"is_photo",
"is_reviewed",
"modified_at",
"subject_date",
"tags",
"title",
"width"
]
},
"ResponseMetadata": {
"RequestId": "42e618d9-c4d9-11e3-8242-c32da3041159"
}
}
}
"""
def test_cloudsearch_index_documents(self):
"""
Check that the correct arguments are sent to AWS when indexing a
domain.
"""
self.set_http_response(status_code=200)
self.service_connection.index_documents('demo')
self.assert_request_parameters({
'Action': 'IndexDocuments',
'ContentType': 'JSON',
'DomainName': 'demo',
'Version': '2013-01-01',
})
def test_cloudsearch_index_documents_resp(self):
"""
Check that the AWS response is being parsed correctly when indexing a
domain.
"""
self.set_http_response(status_code=200)
api_response = self.service_connection.index_documents('demo')
fields = (api_response['IndexDocumentsResponse']
['IndexDocumentsResult']
['FieldNames'])
self.assertEqual(fields, ['average_score', 'brand_id', 'colors',
'context', 'context_owner',
'created_at', 'creator_id',
'description', 'file_size', 'format',
'has_logo', 'has_messaging', 'height',
'image_id', 'ingested_from',
'is_advertising', 'is_photo',
'is_reviewed', 'modified_at',
'subject_date', 'tags', 'title',
'width'])
| mit |
maartenq/ansible | lib/ansible/module_utils/scaleway.py | 4 | 4064 | import json
import sys
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.urls import fetch_url
def scaleway_argument_spec():
return dict(
api_token=dict(required=True, fallback=(env_fallback, ['SCW_TOKEN', 'SCW_API_KEY', 'SCW_OAUTH_TOKEN', 'SCW_API_TOKEN']),
no_log=True, aliases=['oauth_token']),
api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://api.scaleway.com', aliases=['base_url']),
api_timeout=dict(type='int', default=30, aliases=['timeout']),
validate_certs=dict(default=True, type='bool'),
)
class ScalewayException(Exception):
def __init__(self, message):
self.message = message
class Response(object):
def __init__(self, resp, info):
self.body = None
if resp:
self.body = resp.read()
self.info = info
@property
def json(self):
if not self.body:
if "body" in self.info:
return json.loads(self.info["body"])
return None
try:
return json.loads(self.body)
except ValueError:
return None
@property
def status_code(self):
return self.info["status"]
@property
def ok(self):
return self.status_code in (200, 201, 202, 204)
class Scaleway(object):
def __init__(self, module):
self.module = module
self.headers = {
'X-Auth-Token': self.module.params.get('api_token'),
'User-Agent': self.get_user_agent_string(module),
'Content-type': 'application/json',
}
self.name = None
def get_resources(self):
results = self.get('/%s' % self.name)
if not results.ok:
raise ScalewayException('Error fetching {0} ({1}) [{2}: {3}]'.format(
self.name, '%s/%s' % (self.module.params.get('api_url'), self.name),
results.status_code, results.json['message']
))
return results.json.get(self.name)
def _url_builder(self, path):
if path[0] == '/':
path = path[1:]
return '%s/%s' % (self.module.params.get('api_url'), path)
def send(self, method, path, data=None, headers=None):
url = self._url_builder(path)
data = self.module.jsonify(data)
if headers is not None:
self.headers.update(headers)
resp, info = fetch_url(
self.module, url, data=data, headers=self.headers, method=method,
timeout=self.module.params.get('api_timeout')
)
        # Exceptions in fetch_url may result in a status -1, this ensures a proper error to the user in all cases
if info['status'] == -1:
self.module.fail_json(msg=info['msg'])
return Response(resp, info)
@staticmethod
def get_user_agent_string(module):
return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0])
def get(self, path, data=None, headers=None):
return self.send('GET', path, data, headers)
def put(self, path, data=None, headers=None):
return self.send('PUT', path, data, headers)
def post(self, path, data=None, headers=None):
return self.send('POST', path, data, headers)
def delete(self, path, data=None, headers=None):
return self.send('DELETE', path, data, headers)
def patch(self, path, data=None, headers=None):
return self.send("PATCH", path, data, headers)
def update(self, path, data=None, headers=None):
return self.send("UPDATE", path, data, headers)
SCALEWAY_LOCATION = {
'par1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://cp-par1.scaleway.com'},
'EMEA-FR-PAR1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://cp-par1.scaleway.com'},
'ams1': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://cp-ams1.scaleway.com'},
'EMEA-NL-EVS': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://cp-ams1.scaleway.com'}
}
| gpl-3.0 |
maxogden/npm-www | node_modules/npm/node_modules/node-gyp/gyp/test/variants/gyptest-variants.py | 240 | 1315 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify handling of build variants.
TODO: Right now, only the SCons generator supports this, so the
test case is SCons-specific. In particular, it relies on SCons'
ability to rebuild in response to changes on the command line. It
may be simpler to just drop this feature if the other generators
can't be made to behave the same way.
"""
import TestGyp
test = TestGyp.TestGyp(formats=['scons'])
test.run_gyp('variants.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('variants.gyp', chdir='relocate/src')
test.run_built_executable('variants',
chdir='relocate/src',
stdout="Hello, world!\n")
test.sleep()
test.build('variants.gyp', 'VARIANT1=1', chdir='relocate/src')
test.run_built_executable('variants',
chdir='relocate/src',
stdout="Hello from VARIANT1\n")
test.sleep()
test.build('variants.gyp', 'VARIANT2=1', chdir='relocate/src')
test.run_built_executable('variants',
chdir='relocate/src',
stdout="Hello from VARIANT2\n")
test.pass_test()
| bsd-2-clause |
domalexxx/shop | node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/flock_tool.py | 1835 | 1748 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-flock-tool when using the Makefile
generator. Used on systems that don't have a built-in flock."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
executor = FlockTool()
executor.Dispatch(args)
class FlockTool(object):
"""This class emulates the 'flock' command."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
# Note that the stock python on SunOS has a bug
# where fcntl.flock(fd, LOCK_EX) always fails
# with EBADF, that's why we use this F_SETLK
# hack instead.
fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666)
if sys.platform.startswith('aix'):
# Python on AIX is compiled with LARGEFILE support, which changes the
# struct size.
op = struct.pack('hhIllqq', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
else:
op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
fcntl.fcntl(fd, fcntl.F_SETLK, op)
return subprocess.call(cmd_list)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
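# Illustrative invocation sketch (not part of the original tool). The Makefile
# generator calls this script roughly as shown below; the concrete paths and
# command are made up for illustration.
#
#   gyp-flock-tool flock /tmp/build.lock cc -o out/foo.o foo.c
#
# which Dispatch() maps to:
#
#   FlockTool().ExecFlock('/tmp/build.lock', 'cc', '-o', 'out/foo.o', 'foo.c')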
| gpl-2.0 |
ayuopy/Planbot | src/facebook.py | 1 | 5626 | #!/usr/bin/python3
"""
Planbot's Facebook application. Handles requests and responses through the
Facebook Graph API. Converts location data to postcode with Postcodes.io API.
"""
import os
import logging
import requests
from bottle import Bottle, request, debug
from engine import Engine
# set environmental variables
FB_PAGE_TOKEN = os.environ.get('FB_PAGE_TOKEN')
FB_VERIFY_TOKEN = os.environ.get('FB_VERIFY_TOKEN')
# setup Bottle Server
debug(True)
app = application = Bottle()
# setup logging
logging.basicConfig(level=logging.INFO)
nlp_entities = {
'greetings': 'GET_STARTED_PAYLOAD',
'thanks': 'Thanks, bye!',
'bye': 'Thanks, bye!'
}
@app.get('/facebook')
def messenger_webhook():
verify_token = request.query.get('hub.verify_token')
if verify_token == FB_VERIFY_TOKEN:
challenge = request.query.get('hub.challenge')
return challenge
else:
return 'Invalid Request or Verification Token'
@app.post('/facebook')
def messenger_post():
responses, fb_id = parse_response(request.json)
logging.info('responses: {}'.format(responses))
if responses:
text = responses[0]
else:
text = 'NO_PAYLOAD'
bot = Engine()
for response in bot.response(user=fb_id, message=text):
logging.info(response)
sender_action(fb_id)
send(response)
return None
def parse_response(data):
responses = []
fb_id = None
if data['object'] == 'page':
for entry in data['entry']:
messages = entry['messaging']
if messages[0]:
message = messages[0]
fb_id = message['sender']['id']
if message.get('message'):
text = parse_text(message['message'])
logging.info('parsed text: {}'.format(text))
elif message.get('postback'):
text = message['postback']['payload']
logging.info('Message received: {}'.format(text))
responses.append(text)
else:
return 'Received Different Event'
return responses, fb_id
def parse_text(message):
if message.get('attachments'):
attachment = message['attachments'][0]
if attachment['title'] == 'Pinned Location':
long = attachment['coordinates']['long']
lat = attachment['coordinates']['lat']
text = geo_convert(longitude=long, latitude=lat)
else:
text = 'NO_PAYLOAD'
else:
if message.get('nlp'):
text = find_entity(message)
else:
text = message['message']['text']
return text
def find_entity(message):
logging.info(message)
entities = message['nlp']['entities']
entity = {ent: entities[ent][0]['confidence'] for ent in entities
if ent in nlp_entities}
logging.info('found entities: {}'.format(entity))
if entity:
match = sorted(entity, key=entity.get, reverse=True)[0]
text = nlp_entities[match]
else:
try:
text = message['text']
except KeyError:
text = 'NO_PAYLOAD'
logging.info('find_entity: {}'.format(text))
return text
def sender_action(sender_id):
data = {'recipient': {'id': sender_id}, 'sender_action': 'typing_on'}
qs = 'access_token=' + FB_PAGE_TOKEN
resp = requests.post('https://graph.facebook.com/v2.9/me/messages?' + qs,
json=data)
return resp.content
def send(response):
fb_id = response['id']
text = response['text']
quickreplies = cards = None
# check for quickreplies
if response.get('quickreplies'):
quickreplies = format_qr(response['quickreplies'])
# check for urls
if text.startswith('http'):
urls = text.split()
title = response['title']
titles = [title] if not isinstance(title, list) else title
cards = template(titles, urls)
fb_message(fb_id, text, quickreplies, cards)
return None
def fb_message(sender_id, text, quickreplies, cards):
data = {'recipient': {'id': sender_id}}
data['message'] = cards if cards \
        else {'text': text, 'quick_replies': quickreplies} if quickreplies \
else {'text': text}
# logging.info('response = {}'.format(data))
qs = 'access_token=' + FB_PAGE_TOKEN
resp = requests.post('https://graph.facebook.com/v2.9/me/messages?' + qs,
json=data)
return resp.content
def format_qr(quickreplies):
return [{
'title': qr,
'content_type': 'text',
'payload': 'empty'}
for qr in quickreplies]
def template(titles, urls):
button_titles = ['Download' if url.endswith('pdf') else 'View'
for url in urls]
elements = [{
'title': titles[i],
'default_action': {
'type': 'web_url',
'url': urls[i]},
'buttons': [{
'type': 'web_url',
'url': urls[i],
'title': button_titles[i]}]}
for i in range(len(titles))]
return {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": elements}}}
def geo_convert(longitude=None, latitude=None):
    url = 'https://api.postcodes.io/postcodes?lon={}&lat={}'.format(longitude, latitude)
    res = requests.get(url).json()
try:
text = res['result'][0]['admin_district']
    except (KeyError, IndexError, TypeError):
logging.info('Invalid coordinates: long={}; lat={}'.format(
longitude, latitude))
text = 'NO_PAYLOAD'
return text
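# Illustrative usage sketch (not part of the original module). The coordinates
# below are made up; postcodes.io returns a JSON body whose
# result[0]['admin_district'] names the local authority.
#
#   district = geo_convert(longitude=-0.1276, latitude=51.5072)
#   # e.g. 'Westminster', or 'NO_PAYLOAD' when the lookup fails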
| gpl-3.0 |
lino-framework/xl | lino_xl/lib/polls/utils.py | 1 | 1281 | # -*- coding: UTF-8 -*-
# Copyright 2013-2018 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from django.utils.translation import gettext_lazy as _
from lino.mixins import RegistrableState
from lino.api import dd
class PollStates(dd.Workflow):
item_class = RegistrableState
verbose_name_plural = _("Poll states")
required_roles = dd.login_required(dd.SiteStaff)
add = PollStates.add_item
add('10', _("Draft"), 'draft')
add('20', _("Active"), 'active')
add('30', _("Closed"), 'closed')
PollStates.active.add_transition(
_("Publish"), required_states='draft')
PollStates.closed.add_transition(
_("Close"), required_states='draft active')
PollStates.draft.add_transition(
_("Reopen"), required_states='active closed')
class ResponseStates(dd.Workflow):
item_class = RegistrableState
verbose_name_plural = _("Response states")
required_roles = dd.login_required(dd.SiteStaff)
add = ResponseStates.add_item
add('10', _("Draft"), 'draft', is_editable=True)
add('20', _("Registered"), 'registered', is_editable=False)
ResponseStates.registered.add_transition(
_("Register"), required_states='draft')
ResponseStates.draft.add_transition(
_("Deregister"), required_states="registered")
| bsd-2-clause |
mozilla/olympia | src/olympia/lib/akismet/migrations/0001_initial.py | 7 | 2842 | # Generated by Django 2.2.5 on 2019-09-12 15:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import olympia.amo.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('bandwagon', '0001_initial'),
('addons', '0002_addon_fk'),
('ratings', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('files', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AkismetReport',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('comment_type', models.CharField(max_length=255)),
('user_ip', models.CharField(max_length=255)),
('user_agent', models.CharField(max_length=255)),
('referrer', models.CharField(max_length=255)),
('user_name', models.CharField(max_length=255)),
('user_email', models.CharField(max_length=255)),
('user_homepage', models.CharField(max_length=255)),
('comment', models.TextField()),
('comment_modified', models.DateTimeField()),
('content_link', models.CharField(max_length=255, null=True)),
('content_modified', models.DateTimeField(null=True)),
('result', models.PositiveSmallIntegerField(choices=[(3, 'Unknown'), (0, 'Ham'), (1, 'Definite Spam'), (2, 'Maybe Spam')], null=True)),
('reported', models.BooleanField(default=False)),
('addon_instance', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='addons.Addon')),
('collection_instance', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='bandwagon.Collection')),
('rating_instance', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='ratings.Rating')),
('upload_instance', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='files.FileUpload')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'akismet_reports',
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
]
| bsd-3-clause |
schatten/logan | app/wsgi.py | 1 | 1416 | """
WSGI config for logan project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "logan.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "logan.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
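# A minimal middleware sketch (assumption: you want to decorate every response
# with an extra header; the class name and header are made up for illustration).
#
# class HeaderMiddleware(object):
#     def __init__(self, app):
#         self.app = app
#
#     def __call__(self, environ, start_response):
#         def _start_response(status, headers, exc_info=None):
#             headers.append(('X-Logan', '1'))
#             return start_response(status, headers, exc_info)
#         return self.app(environ, _start_response)
#
# application = HeaderMiddleware(application)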
| mit |
zeptonaut/catapult | tracing/third_party/tvcm/third_party/rcssmin/_setup/py3/setup.py | 29 | 14135 | # -*- coding: ascii -*-
#
# Copyright 2007 - 2013
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
===================
Main setup runner
===================
This module provides a wrapper around the distutils core setup.
"""
__author__ = "Andr\xe9 Malo"
__docformat__ = "restructuredtext en"
import configparser as _config_parser
from distutils import core as _core
import os as _os
import posixpath as _posixpath
import sys as _sys
from _setup import commands as _commands
from _setup import data as _data
from _setup import ext as _ext
from _setup import util as _util
from _setup import shell as _shell
def check_python_version(impl, version_min, version_max):
""" Check python version """
if impl == 'python':
version_info = _sys.version_info
elif impl == 'pypy':
version_info = getattr(_sys, 'pypy_version_info', None)
if not version_info:
return
elif impl == 'jython':
if not 'java' in _sys.platform.lower():
return
version_info = _sys.version_info
else:
raise AssertionError("impl not in ('python', 'pypy', 'jython')")
pyversion = list(map(int, version_info[:3]))
if version_min:
min_required = list(
map(int, '.'.join((version_min, '0.0.0')).split('.')[:3])
)
if pyversion < min_required:
raise EnvironmentError("Need at least %s %s (vs. %s)" % (
impl, version_min, '.'.join(map(str, pyversion))
))
if version_max:
max_required = list(map(int, version_max.split('.')))
max_required[-1] += 1
if pyversion >= max_required:
raise EnvironmentError("Need at max %s %s (vs. %s)" % (
impl,
version_max,
'.'.join(map(str, pyversion))
))
def find_description(docs):
"""
Determine the package description from DESCRIPTION
:Parameters:
`docs` : ``dict``
Docs config section
:Return: Tuple of summary, description and license
(``('summary', 'description', 'license')``)
(all may be ``None``)
:Rtype: ``tuple``
"""
summary = None
filename = docs.get('meta.summary', 'SUMMARY').strip()
if filename and _os.path.isfile(filename):
fp = open(filename, encoding='utf-8')
try:
try:
summary = fp.read().strip().splitlines()[0].rstrip()
except IndexError:
summary = ''
finally:
fp.close()
description = None
filename = docs.get('meta.description', 'DESCRIPTION').strip()
if filename and _os.path.isfile(filename):
fp = open(filename, encoding='utf-8')
try:
description = fp.read().rstrip()
finally:
fp.close()
if summary is None and description:
from docutils import core
summary = core.publish_parts(
source=description,
source_path=filename,
writer_name='html',
)['title'].encode('utf-8')
return summary, description
def find_classifiers(docs):
"""
Determine classifiers from CLASSIFIERS
:return: List of classifiers (``['classifier', ...]``)
:rtype: ``list``
"""
filename = docs.get('meta.classifiers', 'CLASSIFIERS').strip()
if filename and _os.path.isfile(filename):
fp = open(filename, encoding='utf-8')
try:
content = fp.read()
finally:
fp.close()
content = [item.strip() for item in content.splitlines()]
return [item for item in content if item and not item.startswith('#')]
return []
def find_provides(docs):
"""
Determine provides from PROVIDES
:return: List of provides (``['provides', ...]``)
:rtype: ``list``
"""
filename = docs.get('meta.provides', 'PROVIDES').strip()
if filename and _os.path.isfile(filename):
fp = open(filename, encoding='utf-8')
try:
content = fp.read()
finally:
fp.close()
content = [item.strip() for item in content.splitlines()]
return [item for item in content if item and not item.startswith('#')]
return []
def find_license(docs):
"""
Determine license from LICENSE
:return: License text
:rtype: ``str``
"""
filename = docs.get('meta.license', 'LICENSE').strip()
if filename and _os.path.isfile(filename):
fp = open(filename, encoding='utf-8')
try:
return fp.read().rstrip()
finally:
fp.close()
return None
def find_packages(manifest):
""" Determine packages and subpackages """
packages = {}
collect = manifest.get('packages.collect', '').split()
lib = manifest.get('packages.lib', '.')
try:
sep = _os.path.sep
except AttributeError:
sep = _os.path.join('1', '2')[1:-1]
for root in collect:
for dirpath, _, filenames in _shell.walk(_os.path.join(lib, root)):
if dirpath.find('.svn') >= 0 or dirpath.find('.git') >= 0:
continue
if '__init__.py' in filenames:
packages[
_os.path.normpath(dirpath).replace(sep, '.')
] = None
packages = list(packages.keys())
packages.sort()
return packages
def find_data(name, docs):
""" Determine data files """
result = []
if docs.get('extra', '').strip():
result.append(_data.Documentation(docs['extra'].split(),
prefix='share/doc/%s' % name,
))
if docs.get('examples.dir', '').strip():
tpl = ['recursive-include %s *' % docs['examples.dir']]
if docs.get('examples.ignore', '').strip():
tpl.extend(["global-exclude %s" % item
for item in docs['examples.ignore'].split()
])
strip = int(docs.get('examples.strip', '') or 0)
result.append(_data.Documentation.from_templates(*tpl, **{
'strip': strip,
'prefix': 'share/doc/%s' % name,
'preserve': 1,
}))
if docs.get('userdoc.dir', '').strip():
tpl = ['recursive-include %s *' % docs['userdoc.dir']]
if docs.get('userdoc.ignore', '').strip():
tpl.extend(["global-exclude %s" % item
for item in docs['userdoc.ignore'].split()
])
strip = int(docs.get('userdoc.strip', '') or 0)
result.append(_data.Documentation.from_templates(*tpl, **{
'strip': strip,
'prefix': 'share/doc/%s' % name,
'preserve': 1,
}))
if docs.get('apidoc.dir', '').strip():
tpl = ['recursive-include %s *' % docs['apidoc.dir']]
if docs.get('apidoc.ignore', '').strip():
tpl.extend(["global-exclude %s" % item
for item in docs['apidoc.ignore'].split()
])
strip = int(docs.get('apidoc.strip', '') or 0)
result.append(_data.Documentation.from_templates(*tpl, **{
'strip': strip,
'prefix': 'share/doc/%s' % name,
'preserve': 1,
}))
if docs.get('man', '').strip():
result.extend(_data.Manpages.dispatch(docs['man'].split()))
return result
def make_manifest(manifest, config, docs, kwargs):
""" Create file list to pack up """
# pylint: disable = R0912
kwargs = kwargs.copy()
kwargs['script_args'] = ['install']
kwargs['packages'] = list(kwargs.get('packages') or ()) + [
'_setup', '_setup.py2', '_setup.py3',
] + list(manifest.get('packages.extra', '').split() or ())
_core._setup_stop_after = "commandline"
try:
dist = _core.setup(**kwargs)
finally:
_core._setup_stop_after = None
result = ['MANIFEST', 'PKG-INFO', 'setup.py'] + list(config)
# TODO: work with default values:
for key in ('classifiers', 'description', 'summary', 'provides',
'license'):
filename = docs.get('meta.' + key, '').strip()
if filename and _os.path.isfile(filename):
result.append(filename)
cmd = dist.get_command_obj("build_py")
cmd.ensure_finalized()
#from pprint import pprint; pprint(("build_py", cmd.get_source_files()))
for item in cmd.get_source_files():
result.append(_posixpath.sep.join(
_os.path.normpath(item).split(_os.path.sep)
))
cmd = dist.get_command_obj("build_ext")
cmd.ensure_finalized()
#from pprint import pprint; pprint(("build_ext", cmd.get_source_files()))
for item in cmd.get_source_files():
result.append(_posixpath.sep.join(
_os.path.normpath(item).split(_os.path.sep)
))
for ext in cmd.extensions:
if ext.depends:
result.extend([_posixpath.sep.join(
_os.path.normpath(item).split(_os.path.sep)
) for item in ext.depends])
cmd = dist.get_command_obj("build_clib")
cmd.ensure_finalized()
if cmd.libraries:
#import pprint; pprint.pprint(("build_clib", cmd.get_source_files()))
for item in cmd.get_source_files():
result.append(_posixpath.sep.join(
_os.path.normpath(item).split(_os.path.sep)
))
for lib in cmd.libraries:
if lib[1].get('depends'):
result.extend([_posixpath.sep.join(
_os.path.normpath(item).split(_os.path.sep)
) for item in lib[1]['depends']])
cmd = dist.get_command_obj("build_scripts")
cmd.ensure_finalized()
#import pprint; pprint.pprint(("build_scripts", cmd.get_source_files()))
if cmd.get_source_files():
for item in cmd.get_source_files():
result.append(_posixpath.sep.join(
_os.path.normpath(item).split(_os.path.sep)
))
cmd = dist.get_command_obj("install_data")
cmd.ensure_finalized()
#from pprint import pprint; pprint(("install_data", cmd.get_inputs()))
try:
strings = str
except NameError:
strings = (str, str)
for item in cmd.get_inputs():
if isinstance(item, strings):
result.append(item)
else:
result.extend(item[1])
for item in manifest.get('dist', '').split():
result.append(item)
if _os.path.isdir(item):
for filename in _shell.files(item):
result.append(filename)
result = list(dict([(item, None) for item in result]).keys())
result.sort()
return result
def run(config=('package.cfg',), ext=None, script_args=None, manifest_only=0):
""" Main runner """
if ext is None:
ext = []
cfg = _util.SafeConfigParser()
cfg.read(config, encoding='utf-8')
pkg = dict(cfg.items('package'))
python_min = pkg.get('python.min') or None
python_max = pkg.get('python.max') or None
check_python_version('python', python_min, python_max)
pypy_min = pkg.get('pypy.min') or None
pypy_max = pkg.get('pypy.max') or None
check_python_version('pypy', pypy_min, pypy_max)
jython_min = pkg.get('jython.min') or None
jython_max = pkg.get('jython.max') or None
check_python_version('jython', jython_min, jython_max)
manifest = dict(cfg.items('manifest'))
try:
docs = dict(cfg.items('docs'))
except _config_parser.NoSectionError:
docs = {}
summary, description = find_description(docs)
scripts = manifest.get('scripts', '').strip() or None
if scripts:
scripts = scripts.split()
modules = manifest.get('modules', '').strip() or None
if modules:
modules = modules.split()
keywords = docs.get('meta.keywords', '').strip() or None
if keywords:
keywords = keywords.split()
revision = pkg.get('version.revision', '').strip()
if revision:
revision = "-r%s" % (revision,)
kwargs = {
'name': pkg['name'],
'version': "%s%s" % (
pkg['version.number'],
["", "-dev%s" % (revision,)][_util.humanbool(
'version.dev', pkg.get('version.dev', 'false')
)],
),
'provides': find_provides(docs),
'description': summary,
'long_description': description,
'classifiers': find_classifiers(docs),
'keywords': keywords,
'author': pkg['author.name'],
'author_email': pkg['author.email'],
'maintainer': pkg.get('maintainer.name'),
'maintainer_email': pkg.get('maintainer.email'),
'url': pkg.get('url.homepage'),
'download_url': pkg.get('url.download'),
'license': find_license(docs),
'package_dir': {'': manifest.get('packages.lib', '.')},
'packages': find_packages(manifest),
'py_modules': modules,
'ext_modules': ext,
'scripts': scripts,
'script_args': script_args,
'data_files': find_data(pkg['name'], docs),
'cmdclass': {
'build' : _commands.Build,
'build_ext' : _commands.BuildExt,
'install' : _commands.Install,
'install_data': _commands.InstallData,
'install_lib' : _commands.InstallLib,
}
}
for key in ('provides',):
if key not in _core.setup_keywords:
del kwargs[key]
if manifest_only:
return make_manifest(manifest, config, docs, kwargs)
# monkey-patch crappy manifest writer away.
from distutils.command import sdist
sdist.sdist.get_file_list = sdist.sdist.read_manifest
return _core.setup(**kwargs)
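# Illustrative sketch (not part of the original module): a minimal package.cfg
# that run() above can parse. The section and key names are taken from the
# lookups in this file; the values are made up for illustration.
#
#   [package]
#   name = examplepkg
#   version.number = 1.0
#   author.name = Jane Doe
#   author.email = jane@example.com
#
#   [manifest]
#   packages.lib = .
#   packages.collect = examplepkg
#
#   [docs]
#   meta.summary = SUMMARY
#   meta.description = DESCRIPTION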
| bsd-3-clause |
YufeiZhang/Principles-of-Programming-Python-3 | Labs/lab6/sum_of_digits.py | 1 | 1045 | '''
$ python3 sum_of_digits.py
Input a number that we will use as available digits: 12234
Input a number that represents the desired sum: 5
There are 4 solutions.
$ python3 sum_of_digits.py
Input a number that we will use as available digits: 11111
Input a number that represents the desired sum: 5
There is a unique solution
'''
from itertools import combinations as cb
try:
a = input("Input a number that we will use as available digits: ")
b = input("Input a number that represents the desired sum: ")
if int(a) < 0 or int(b) < 0: raise ValueError
a_list = list(a)
sums = []
for i in range(len(a_list)):
combies = list(cb(a_list,i+1))
for ch in combies:
_sum = 0
ch = list(ch)
for c in ch:
_sum += int(c)
sums.append(_sum)
count = 0
for ch in sums:
if ch == int(b):
count+=1
if count > 1:
print("There are {:d} solutions.".format(count))
elif count == 1:
print("There is a unique solution")
else:
print("There is no solution.")
except ValueError:
print("Both inputs cannot be negative.") | gpl-3.0 |
rven/odoo | addons/website_slides_survey/tests/test_course_certification_failure.py | 4 | 6417 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.survey.tests.common import TestSurveyCommon
class TestCourseCertificationFailureFlow(TestSurveyCommon):
def test_course_certification_failure_flow(self):
# Step 1: create a simple certification
# --------------------------------------------------
with self.with_user('survey_user'):
certification = self.env['survey.survey'].create({
'title': 'Small course certification',
'access_mode': 'public',
'users_login_required': True,
'scoring_type': 'scoring_with_answers',
'certification': True,
'is_attempts_limited': True,
'scoring_success_min': 100.0,
'attempts_limit': 2,
'state': 'open',
})
self._add_question(
None, 'Question 1', 'simple_choice',
sequence=1,
survey_id=certification.id,
labels=[
{'value': 'Wrong answer'},
{'value': 'Correct answer', 'is_correct': True, 'answer_score': 1.0}
])
self._add_question(
None, 'Question 2', 'simple_choice',
sequence=2,
survey_id=certification.id,
labels=[
{'value': 'Wrong answer'},
{'value': 'Correct answer', 'is_correct': True, 'answer_score': 1.0}
])
# Step 1.1: create a simple channel
self.channel = self.env['slide.channel'].sudo().create({
'name': 'Test Channel',
'channel_type': 'training',
'enroll': 'public',
'visibility': 'public',
'is_published': True,
})
# Step 2: link the certification to a slide of type 'certification'
self.slide_certification = self.env['slide.slide'].sudo().create({
'name': 'Certification slide',
'channel_id': self.channel.id,
'slide_type': 'certification',
'survey_id': certification.id,
'is_published': True,
})
# Step 3: add public user as member of the channel
self.channel._action_add_members(self.user_public.partner_id)
# forces recompute of partner_ids as we create directly in relation
self.channel.invalidate_cache()
slide_partner = self.slide_certification._action_set_viewed(self.user_public.partner_id)
self.slide_certification.with_user(self.user_public)._generate_certification_url()
self.assertEqual(1, len(slide_partner.user_input_ids), 'A user input should have been automatically created upon slide view')
# Step 4: fill in the created user_input with wrong answers
self.fill_in_answer(slide_partner.user_input_ids[0], certification.question_ids)
        self.assertFalse(slide_partner.survey_scoring_success, 'Quiz should not be marked as passed with wrong answers')
# forces recompute of partner_ids as we delete directly in relation
self.channel.invalidate_cache()
self.assertIn(self.user_public.partner_id, self.channel.partner_ids, 'Public user should still be a member of the course because he still has attempts left')
# Step 5: simulate a 'retry'
retry_user_input = self.slide_certification.survey_id.sudo()._create_answer(
partner=self.user_public.partner_id,
**{
'slide_id': self.slide_certification.id,
'slide_partner_id': slide_partner.id
},
invite_token=slide_partner.user_input_ids[0].invite_token
)
# Step 6: fill in the new user_input with wrong answers again
self.fill_in_answer(retry_user_input, certification.question_ids)
# forces recompute of partner_ids as we delete directly in relation
self.channel.invalidate_cache()
self.assertNotIn(self.user_public.partner_id, self.channel.partner_ids, 'Public user should have been kicked out of the course because he failed his last attempt')
# Step 7: add public user as member of the channel once again
self.channel._action_add_members(self.user_public.partner_id)
# forces recompute of partner_ids as we create directly in relation
self.channel.invalidate_cache()
self.assertIn(self.user_public.partner_id, self.channel.partner_ids, 'Public user should be a member of the course once again')
new_slide_partner = self.slide_certification._action_set_viewed(self.user_public.partner_id)
self.slide_certification.with_user(self.user_public)._generate_certification_url()
self.assertEqual(1, len(new_slide_partner.user_input_ids.filtered(lambda user_input: user_input.state != 'done')), 'A new user input should have been automatically created upon slide view')
# Step 8: fill in the created user_input with correct answers this time
self.fill_in_answer(new_slide_partner.user_input_ids.filtered(lambda user_input: user_input.state != 'done')[0], certification.question_ids, good_answers=True)
        self.assertTrue(new_slide_partner.survey_scoring_success, 'Quiz should be marked as passed with correct answers')
# forces recompute of partner_ids as we delete directly in relation
self.channel.invalidate_cache()
self.assertIn(self.user_public.partner_id, self.channel.partner_ids, 'Public user should still be a member of the course')
def fill_in_answer(self, answer, questions, good_answers=False):
""" Fills in the user_input with answers for all given questions.
You can control whether the answer will be correct or not with the 'good_answers' param.
(It's assumed that wrong answers are at index 0 of question.suggested_answer_ids and good answers at index 1) """
answer.write({
'state': 'done',
'user_input_line_ids': [
(0, 0, {
'question_id': question.id,
'answer_type': 'suggestion',
'answer_score': 1 if good_answers else 0,
'suggested_answer_id': question.suggested_answer_ids[1 if good_answers else 0].id
}) for question in questions
]
})
| agpl-3.0 |
ruuk/script.web.viewer2 | lib/webviewer/bs4/builder/_lxml.py | 12 | 6981 | __all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from StringIO import StringIO
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
TreeBuilder,
XML)
from bs4.dammit import UnicodeDammit
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
DEFAULT_PARSER_CLASS = etree.XMLParser
is_xml = True
# Well, it's permissive by XML parser standards.
features = [LXML, XML, FAST, PERMISSIVE]
CHUNK_SIZE = 512
# This namespace mapping is specified in the XML Namespace
# standard.
DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
@property
def default_parser(self):
# This can either return a parser object or a class, which
# will be instantiated with default arguments.
return etree.XMLParser(target=self, strip_cdata=False, recover=True)
def __init__(self, parser=None, empty_element_tags=None):
if empty_element_tags is not None:
self.empty_element_tags = set(empty_element_tags)
if parser is None:
# Use the default parser.
parser = self.default_parser
if isinstance(parser, collections.Callable):
# Instantiate the parser with default arguments
parser = parser(target=self, strip_cdata=False)
self.parser = parser
self.soup = None
self.nsmaps = [self.DEFAULT_NSMAPS]
def _getNsTag(self, tag):
# Split the namespace URL out of a fully-qualified lxml tag
# name. Copied from lxml's src/lxml/sax.py.
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
        :return: A 4-tuple (markup, original encoding, encoding
            declared within markup, whether any characters had to be
            replaced during decoding).
"""
if isinstance(markup, unicode):
return markup, None, None, False
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
return (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
if isinstance(markup, basestring):
markup = StringIO(markup)
# Call feed() at least once, even if the markup is empty,
# or the parser won't be initialized.
data = markup.read(self.CHUNK_SIZE)
self.parser.feed(data)
while data != '':
# Now call feed() on the rest of the data, chunk by chunk.
data = markup.read(self.CHUNK_SIZE)
if data != '':
self.parser.feed(data)
self.parser.close()
def close(self):
self.nsmaps = [self.DEFAULT_NSMAPS]
def start(self, name, attrs, nsmap={}):
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
attrs = dict(attrs)
nsprefix = None
# Invert each namespace map as it comes in.
if len(self.nsmaps) > 1:
# There are no new namespaces for this tag, but
# non-default namespaces are in play, so we need a
# separate tag stack to know when they end.
self.nsmaps.append(None)
elif len(nsmap) > 0:
# A new namespace mapping has come into play.
inverted_nsmap = dict((value, key) for key, value in nsmap.items())
self.nsmaps.append(inverted_nsmap)
# Also treat the namespace mapping as a set of attributes on the
# tag, so we can recreate it later.
attrs = attrs.copy()
for prefix, namespace in nsmap.items():
attribute = NamespacedAttribute(
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
attrs[attribute] = namespace
# Namespaces are in play. Find any attributes that came in
# from lxml with namespaces attached to their names, and
# turn then into NamespacedAttribute objects.
new_attrs = {}
for attr, value in attrs.items():
namespace, attr = self._getNsTag(attr)
if namespace is None:
new_attrs[attr] = value
else:
nsprefix = self._prefix_for_namespace(namespace)
attr = NamespacedAttribute(nsprefix, attr, namespace)
new_attrs[attr] = value
attrs = new_attrs
namespace, name = self._getNsTag(name)
nsprefix = self._prefix_for_namespace(namespace)
self.soup.handle_starttag(name, namespace, nsprefix, attrs)
def _prefix_for_namespace(self, namespace):
"""Find the currently active prefix for the given namespace."""
if namespace is None:
return None
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
return inverted_nsmap[namespace]
return None
def end(self, name):
self.soup.endData()
completed_tag = self.soup.tagStack[-1]
namespace, name = self._getNsTag(name)
nsprefix = None
if namespace is not None:
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
nsprefix = inverted_nsmap[namespace]
break
self.soup.handle_endtag(name, nsprefix)
if len(self.nsmaps) > 1:
# This tag, or one of its parents, introduced a namespace
# mapping, so pop it off the stack.
self.nsmaps.pop()
def pi(self, target, data):
pass
def data(self, content):
self.soup.handle_data(content)
def doctype(self, name, pubid, system):
self.soup.endData()
doctype = Doctype.for_name_and_ids(name, pubid, system)
self.soup.object_was_parsed(doctype)
def comment(self, content):
"Handle comments as Comment objects."
self.soup.endData()
self.soup.handle_data(content)
self.soup.endData(Comment)
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
features = [LXML, HTML, FAST, PERMISSIVE]
is_xml = False
@property
def default_parser(self):
return etree.HTMLParser
def feed(self, markup):
self.parser.feed(markup)
self.parser.close()
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<html><body>%s</body></html>' % fragment
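# Illustrative usage sketch (not part of the original module). These builders
# are normally selected through BeautifulSoup feature strings rather than
# instantiated directly: "lxml" resolves to LXMLTreeBuilder and "xml" to
# LXMLTreeBuilderForXML.
#
#   from bs4 import BeautifulSoup
#   soup = BeautifulSoup("<p>Hello</p>", "lxml")
#   doc = BeautifulSoup("<root><a>1</a></root>", "xml")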
| gpl-2.0 |
tatyankaZSGX/addressbook | fixture/db.py | 1 | 1791 | __author__ = 'ZSGX'
import mysql.connector
from model.group import Group
from model.contact import Contact
import re
class DbFixture:
def __init__(self, host, name, user, password):
self.host = host
self.name = name
self.user = user
self.password = password
self.connection = mysql.connector.connect(host=host, database=name, user=user, password=password)
self.connection.autocommit = True
def get_group_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
for row in cursor:
(id, name, header, footer) = row
list.append(Group(id=str(id), name=name, header=header, footer=footer))
finally:
cursor.close()
return list
def get_contact_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("select id, firstname, lastname, address, home, mobile, work, phone2, email, email2, "
"email3 from addressbook where deprecated='0000-00-00 "
"00:00:00'")
for row in cursor:
(id, firstname, lastname, address, home, mobile, work, phone2, email, email2, email3) = row
list.append(Contact(id=str(id), firstname=firstname, lastname=lastname, address=address,
homephone=home, mobilephone=mobile, workphone=work, phone2=phone2, email=email,
email2=email2, email3=email3, tel='', mails=''))
finally:
cursor.close()
return list
def destroy(self):
self.connection.close()
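# Illustrative usage sketch (not part of the original fixture). The connection
# parameters are placeholders; real values normally come from the test
# runner's configuration.
#
#   db = DbFixture(host="127.0.0.1", name="addressbook", user="root", password="")
#   try:
#       groups = db.get_group_list()
#       contacts = db.get_contact_list()
#   finally:
#       db.destroy()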
| apache-2.0 |
Baumelbi/IntroPython2016 | students/darrylsw/session03/list_lab.py | 3 | 1279 | #!/usr/bin/env python3
# Author: Darryl Wong
# Date: 10/15/2016
# Week 3 homework
list = ["apples", "pears", "oranges", "peaches"]
print (list)
# prompt the user for a fruit to add to our list
fruit = input ("Add a fruit: ")
list.append(fruit)
print (list)
# prompt the user for a fruit by index number
index = 0
while (index <=0) or (index > len(list)):
index = int (input ("Enter a number from your list: " ))
index = index - 1
print ("list[index] = ", list[index])
# prompt the user for a fruit to add to the beginning of the list using "+"
fruit = input ("Add a fruit to the front: ")
list = [fruit] + list
print (list)
# prompt the user for a fruit to add to the beginning of the list using insert()
fruit = input ("Add a fruit to the front: ")
list.insert(0, fruit)
print (list)
# using a for loop display all the fruits that begin with "P"
for fruit in list:
if fruit[0] == "p":
print (fruit)
print ("Current list")
print (list)
# remove the last fruit from the list
list.pop()
print ("New list")
print (list)
# prompt the user for a fruit and remove it from the list
user_input = input ("Enter a fruit to remove from the list: ")
newlist = []
for fruit in list:
if user_input != fruit:
newlist = newlist + [fruit]
list = newlist[:]
print (list)
| unlicense |
veridiam/Madcow-Waaltz | build/lib/madcow/include/simplejson/tests/test_pass1.py | 259 | 1903 | from unittest import TestCase
import simplejson as json
# from http://json.org/JSON_checker/test/pass1.json
JSON = r'''
[
"JSON Test Pattern pass1",
{"object with 1 member":["array with 1 element"]},
{},
[],
-42,
true,
false,
null,
{
"integer": 1234567890,
"real": -9876.543210,
"e": 0.123456789e-12,
"E": 1.234567890E+34,
"": 23456789012E666,
"zero": 0,
"one": 1,
"space": " ",
"quote": "\"",
"backslash": "\\",
"controls": "\b\f\n\r\t",
"slash": "/ & \/",
"alpha": "abcdefghijklmnopqrstuvwyz",
"ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
"digit": "0123456789",
"special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
"hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
"true": true,
"false": false,
"null": null,
"array":[ ],
"object":{ },
"address": "50 St. James Street",
"url": "http://www.JSON.org/",
"comment": "// /* <!-- --",
"# -- --> */": " ",
" s p a c e d " :[1,2 , 3
,
4 , 5 , 6 ,7 ],
"compact": [1,2,3,4,5,6,7],
"jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
"quotes": "" \u0022 %22 0x22 034 "",
"\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
: "A key can be any string"
},
0.5 ,98.6
,
99.44
,
1066
,"rosebud"]
'''
class TestPass1(TestCase):
def test_parse(self):
# test in/out equivalence and parsing
res = json.loads(JSON)
out = json.dumps(res)
self.assertEquals(res, json.loads(out))
try:
json.dumps(res, allow_nan=False)
except ValueError:
pass
else:
self.fail("23456789012E666 should be out of range")
| gpl-3.0 |
liuqr/edx-xiaodun | lms/djangoapps/wechat/tests/test_video_handlers.py | 12 | 12263 | # -*- coding: utf-8 -*-
"""Video xmodule tests in mongo."""
from mock import patch
import os
import tempfile
import textwrap
import json
from datetime import timedelta
from webob import Request
from xmodule.contentstore.content import StaticContent
from xmodule.modulestore import Location
from xmodule.contentstore.django import contentstore
from . import BaseTestXmodule
from .test_video_xml import SOURCE_XML
from cache_toolbox.core import del_cached_content
from xmodule.exceptions import NotFoundError
def _create_srt_file(content=None):
"""
Create srt file in filesystem.
"""
content = content or textwrap.dedent("""
0
00:00:00,12 --> 00:00:00,100
Привіт, edX вітає вас.
""")
srt_file = tempfile.NamedTemporaryFile(suffix=".srt")
srt_file.content_type = 'application/x-subrip'
srt_file.write(content)
srt_file.seek(0)
return srt_file
def _clear_assets(location):
"""
Clear all assets for location.
"""
store = contentstore()
content_location = StaticContent.compute_location(
location.org, location.course, location.name
)
assets, __ = store.get_all_content_for_course(content_location)
for asset in assets:
asset_location = Location(asset["_id"])
del_cached_content(asset_location)
id = StaticContent.get_id_from_location(asset_location)
store.delete(id)
def _get_subs_id(filename):
basename = os.path.splitext(os.path.basename(filename))[0]
return basename.replace('subs_', '').replace('.srt', '')
def _create_file(content=''):
"""
Create temporary subs_somevalue.srt.sjson file.
"""
sjson_file = tempfile.NamedTemporaryFile(prefix="subs_", suffix=".srt.sjson")
sjson_file.content_type = 'application/json'
sjson_file.write(textwrap.dedent(content))
sjson_file.seek(0)
return sjson_file
def _upload_sjson_file(subs_file, location, default_filename='subs_{}.srt.sjson'):
filename = default_filename.format(_get_subs_id(subs_file.name))
_upload_file(subs_file, location, filename)
def _upload_file(subs_file, location, filename):
mime_type = subs_file.content_type
content_location = StaticContent.compute_location(
location.org, location.course, filename
)
content = StaticContent(content_location, filename, mime_type, subs_file.read())
contentstore().save(content)
del_cached_content(content.location)
class TestVideo(BaseTestXmodule):
"""Integration tests: web client + mongo."""
CATEGORY = "video"
DATA = SOURCE_XML
METADATA = {}
def test_handle_ajax_wrong_dispatch(self):
responses = {
user.username: self.clients[user.username].post(
self.get_url('whatever'),
{},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
for user in self.users
}
self.assertEqual(
set([
response.status_code
for _, response in responses.items()
]).pop(),
404)
def test_handle_ajax(self):
data = [
{'speed': 2.0},
{'saved_video_position': "00:00:10"},
{'transcript_language': json.dumps('uk')},
]
for sample in data:
response = self.clients[self.users[0].username].post(
self.get_url('save_user_state'),
sample,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(self.item_descriptor.speed, None)
self.item_descriptor.handle_ajax('save_user_state', {'speed': json.dumps(2.0)})
self.assertEqual(self.item_descriptor.speed, 2.0)
self.assertEqual(self.item_descriptor.global_speed, 2.0)
self.assertEqual(self.item_descriptor.saved_video_position, timedelta(0))
self.item_descriptor.handle_ajax('save_user_state', {'saved_video_position': "00:00:10"})
self.assertEqual(self.item_descriptor.saved_video_position, timedelta(0, 10))
self.assertEqual(self.item_descriptor.transcript_language, 'en')
self.item_descriptor.handle_ajax('save_user_state', {'transcript_language': json.dumps("uk")})
self.assertEqual(self.item_descriptor.transcript_language, 'uk')
def tearDown(self):
_clear_assets(self.item_descriptor.location)
class TestVideoTranscriptTranslation(TestVideo):
"""
Test video handlers that provide translation transcripts.
"""
non_en_file = _create_srt_file()
DATA = """
<video show_captions="true"
display_name="A Name"
>
<source src="example.mp4"/>
<source src="example.webm"/>
<transcript language="uk" src="{}"/>
</video>
""".format(os.path.split(non_en_file.name)[1])
MODEL_DATA = {
'data': DATA
}
def setUp(self):
super(TestVideoTranscriptTranslation, self).setUp()
self.item_descriptor.render('student_view')
self.item = self.item_descriptor.xmodule_runtime.xmodule_instance
def test_language_is_not_supported(self):
request = Request.blank('/download?language=ru')
response = self.item.transcript(request=request, dispatch='download')
self.assertEqual(response.status, '404 Not Found')
def test_download_transcript_not_exist(self):
request = Request.blank('/download?language=en')
response = self.item.transcript(request=request, dispatch='download')
self.assertEqual(response.status, '404 Not Found')
@patch('xmodule.video_module.VideoModule.get_transcript', return_value='Subs!')
def test_download_exist(self, __):
request = Request.blank('/download?language=en')
response = self.item.transcript(request=request, dispatch='download')
self.assertEqual(response.body, 'Subs!')
def test_translation_fails(self):
# No videoId
request = Request.blank('/translation?language=ru')
response = self.item.transcript(request=request, dispatch='translation')
self.assertEqual(response.status, '400 Bad Request')
# Language is not in available languages
request = Request.blank('/translation?language=ru&videoId=12345')
response = self.item.transcript(request=request, dispatch='translation')
self.assertEqual(response.status, '404 Not Found')
    def test_translation_en_success(self):
subs = {"start": [10], "end": [100], "text": ["Hi, welcome to Edx."]}
good_sjson = _create_file(json.dumps(subs))
_upload_sjson_file(good_sjson, self.item_descriptor.location)
subs_id = _get_subs_id(good_sjson.name)
self.item.sub = subs_id
request = Request.blank('/translation?language=en&videoId={}'.format(subs_id))
response = self.item.transcript(request=request, dispatch='translation')
self.assertDictEqual(json.loads(response.body), subs)
    def test_translation_non_en_non_youtube_success(self):
subs = {
u'end': [100],
u'start': [12],
u'text': [
u'\u041f\u0440\u0438\u0432\u0456\u0442, edX \u0432\u0456\u0442\u0430\u0454 \u0432\u0430\u0441.'
]
}
self.non_en_file.seek(0)
_upload_file(self.non_en_file, self.item_descriptor.location, os.path.split(self.non_en_file.name)[1])
subs_id = _get_subs_id(self.non_en_file.name)
# manually clean youtube_id_1_0, as it has default value
self.item.youtube_id_1_0 = ""
request = Request.blank('/translation?language=uk&videoId={}'.format(subs_id))
response = self.item.transcript(request=request, dispatch='translation')
self.assertDictEqual(json.loads(response.body), subs)
def test_translation_non_en_youtube(self):
subs = {
u'end': [100],
u'start': [12],
u'text': [
u'\u041f\u0440\u0438\u0432\u0456\u0442, edX \u0432\u0456\u0442\u0430\u0454 \u0432\u0430\u0441.'
]}
self.non_en_file.seek(0)
_upload_file(self.non_en_file, self.item_descriptor.location, os.path.split(self.non_en_file.name)[1])
subs_id = _get_subs_id(self.non_en_file.name)
        # a youtube 1_0 request will generate subs for all speeds for existing ids
self.item.youtube_id_1_0 = subs_id
self.item.youtube_id_0_75 = '0_75'
request = Request.blank('/translation?language=uk&videoId={}'.format(subs_id))
response = self.item.transcript(request=request, dispatch='translation')
self.assertDictEqual(json.loads(response.body), subs)
        # 0_75 subs already exist
request = Request.blank('/translation?language=uk&videoId={}'.format('0_75'))
response = self.item.transcript(request=request, dispatch='translation')
calculated_0_75 = {
u'end': [75],
u'start': [9],
u'text': [
u'\u041f\u0440\u0438\u0432\u0456\u0442, edX \u0432\u0456\u0442\u0430\u0454 \u0432\u0430\u0441.'
]
}
self.assertDictEqual(json.loads(response.body), calculated_0_75)
# 1_5 will be generated from 1_0
self.item.youtube_id_1_5 = '1_5'
request = Request.blank('/translation?language=uk&videoId={}'.format('1_5'))
response = self.item.transcript(request=request, dispatch='translation')
calculated_1_5 = {
u'end': [150],
u'start': [18],
u'text': [
u'\u041f\u0440\u0438\u0432\u0456\u0442, edX \u0432\u0456\u0442\u0430\u0454 \u0432\u0430\u0441.'
]
}
self.assertDictEqual(json.loads(response.body), calculated_1_5)
class TestVideoTranscriptsDownload(TestVideo):
"""
    Make sure that the `get_transcript` method works correctly.
"""
DATA = """
<video show_captions="true"
display_name="A Name"
>
<source src="example.mp4"/>
<source src="example.webm"/>
</video>
"""
MODEL_DATA = {
'data': DATA
}
METADATA = {}
def setUp(self):
super(TestVideoTranscriptsDownload, self).setUp()
self.item_descriptor.render('student_view')
self.item = self.item_descriptor.xmodule_runtime.xmodule_instance
def test_good_transcript(self):
good_sjson = _create_file(content=textwrap.dedent("""\
{
"start": [
270,
2720
],
"end": [
2720,
5430
],
"text": [
"Hi, welcome to Edx.",
"Let's start with what is on your screen right now."
]
}
"""))
_upload_sjson_file(good_sjson, self.item.location)
self.item.sub = _get_subs_id(good_sjson.name)
text = self.item.get_transcript()
expected_text = textwrap.dedent("""\
0
00:00:00,270 --> 00:00:02,720
Hi, welcome to Edx.
1
00:00:02,720 --> 00:00:05,430
Let's start with what is on your screen right now.
""")
self.assertEqual(text, expected_text)
def test_not_found_error(self):
with self.assertRaises(NotFoundError):
self.item.get_transcript()
def test_value_error(self):
good_sjson = _create_file(content='bad content')
_upload_sjson_file(good_sjson, self.item.location)
self.item.sub = _get_subs_id(good_sjson.name)
with self.assertRaises(ValueError):
self.item.get_transcript()
def test_key_error(self):
good_sjson = _create_file(content="""
{
"start": [
270,
2720
],
"end": [
2720,
5430
]
}
""")
_upload_sjson_file(good_sjson, self.item.location)
self.item.sub = _get_subs_id(good_sjson.name)
with self.assertRaises(KeyError):
self.item.get_transcript()
| agpl-3.0 |
SerCeMan/intellij-community | plugins/hg4idea/testData/bin/hgext/rebase.py | 90 | 32420 | # rebase.py - rebasing feature for mercurial
#
# Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to move sets of revisions to a different ancestor
This extension lets you rebase changesets in an existing Mercurial
repository.
For more information:
http://mercurial.selenic.com/wiki/RebaseExtension
'''
from mercurial import hg, util, repair, merge, cmdutil, commands, bookmarks
from mercurial import extensions, patch, scmutil, phases, obsolete, error
from mercurial.commands import templateopts
from mercurial.node import nullrev
from mercurial.lock import release
from mercurial.i18n import _
import os, errno
nullmerge = -2
revignored = -3
cmdtable = {}
command = cmdutil.command(cmdtable)
testedwith = 'internal'
@command('rebase',
[('s', 'source', '',
_('rebase from the specified changeset'), _('REV')),
('b', 'base', '',
_('rebase from the base of the specified changeset '
'(up to greatest common ancestor of base and dest)'),
_('REV')),
('r', 'rev', [],
_('rebase these revisions'),
_('REV')),
('d', 'dest', '',
_('rebase onto the specified changeset'), _('REV')),
('', 'collapse', False, _('collapse the rebased changesets')),
('m', 'message', '',
_('use text as collapse commit message'), _('TEXT')),
('e', 'edit', False, _('invoke editor on commit messages')),
('l', 'logfile', '',
_('read collapse commit message from file'), _('FILE')),
('', 'keep', False, _('keep original changesets')),
('', 'keepbranches', False, _('keep original branch names')),
('D', 'detach', False, _('(DEPRECATED)')),
('t', 'tool', '', _('specify merge tool')),
('c', 'continue', False, _('continue an interrupted rebase')),
('a', 'abort', False, _('abort an interrupted rebase'))] +
templateopts,
_('[-s REV | -b REV] [-d REV] [OPTION]'))
def rebase(ui, repo, **opts):
"""move changeset (and descendants) to a different branch
Rebase uses repeated merging to graft changesets from one part of
history (the source) onto another (the destination). This can be
useful for linearizing *local* changes relative to a master
development tree.
You should not rebase changesets that have already been shared
with others. Doing so will force everybody else to perform the
same rebase or they will end up with duplicated changesets after
pulling in your rebased changesets.
In its default configuration, Mercurial will prevent you from
rebasing published changes. See :hg:`help phases` for details.
If you don't specify a destination changeset (``-d/--dest``),
rebase uses the tipmost head of the current named branch as the
destination. (The destination changeset is not modified by
rebasing, but new changesets are added as its descendants.)
You can specify which changesets to rebase in two ways: as a
"source" changeset or as a "base" changeset. Both are shorthand
for a topologically related set of changesets (the "source
branch"). If you specify source (``-s/--source``), rebase will
rebase that changeset and all of its descendants onto dest. If you
specify base (``-b/--base``), rebase will select ancestors of base
back to but not including the common ancestor with dest. Thus,
``-b`` is less precise but more convenient than ``-s``: you can
specify any changeset in the source branch, and rebase will select
the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
uses the parent of the working directory as the base.
For advanced usage, a third way is available through the ``--rev``
option. It allows you to specify an arbitrary set of changesets to
rebase. Descendants of revs you specify with this option are not
automatically included in the rebase.
By default, rebase recreates the changesets in the source branch
as descendants of dest and then destroys the originals. Use
``--keep`` to preserve the original source changesets. Some
changesets in the source branch (e.g. merges from the destination
branch) may be dropped if they no longer contribute any change.
One result of the rules for selecting the destination changeset
and source branch is that, unlike ``merge``, rebase will do
nothing if you are at the latest (tipmost) head of a named branch
with two heads. You need to explicitly specify source and/or
destination (or ``update`` to the other head, if it's the head of
the intended source branch).
If a rebase is interrupted to manually resolve a merge, it can be
continued with --continue/-c or aborted with --abort/-a.
Returns 0 on success, 1 if nothing to rebase.
"""
originalwd = target = None
activebookmark = None
external = nullrev
state = {}
skipped = set()
targetancestors = set()
editor = None
if opts.get('edit'):
editor = cmdutil.commitforceeditor
lock = wlock = None
try:
wlock = repo.wlock()
lock = repo.lock()
# Validate input and define rebasing points
destf = opts.get('dest', None)
srcf = opts.get('source', None)
basef = opts.get('base', None)
revf = opts.get('rev', [])
contf = opts.get('continue')
abortf = opts.get('abort')
collapsef = opts.get('collapse', False)
collapsemsg = cmdutil.logmessage(ui, opts)
extrafn = opts.get('extrafn') # internal, used by e.g. hgsubversion
keepf = opts.get('keep', False)
keepbranchesf = opts.get('keepbranches', False)
# keepopen is not meant for use on the command line, but by
# other extensions
keepopen = opts.get('keepopen', False)
if collapsemsg and not collapsef:
raise util.Abort(
_('message can only be specified with collapse'))
if contf or abortf:
if contf and abortf:
raise util.Abort(_('cannot use both abort and continue'))
if collapsef:
raise util.Abort(
_('cannot use collapse with continue or abort'))
if srcf or basef or destf:
raise util.Abort(
_('abort and continue do not allow specifying revisions'))
if opts.get('tool', False):
ui.warn(_('tool option will be ignored\n'))
(originalwd, target, state, skipped, collapsef, keepf,
keepbranchesf, external, activebookmark) = restorestatus(repo)
if abortf:
return abort(repo, originalwd, target, state)
else:
if srcf and basef:
raise util.Abort(_('cannot specify both a '
'source and a base'))
if revf and basef:
raise util.Abort(_('cannot specify both a '
'revision and a base'))
if revf and srcf:
raise util.Abort(_('cannot specify both a '
'revision and a source'))
cmdutil.bailifchanged(repo)
if not destf:
# Destination defaults to the latest revision in the
# current branch
branch = repo[None].branch()
dest = repo[branch]
else:
dest = scmutil.revsingle(repo, destf)
if revf:
rebaseset = repo.revs('%lr', revf)
elif srcf:
src = scmutil.revrange(repo, [srcf])
rebaseset = repo.revs('(%ld)::', src)
else:
base = scmutil.revrange(repo, [basef or '.'])
rebaseset = repo.revs(
'(children(ancestor(%ld, %d)) and ::(%ld))::',
base, dest, base)
if rebaseset:
root = min(rebaseset)
else:
root = None
if not rebaseset:
repo.ui.debug('base is ancestor of destination\n')
result = None
elif (not (keepf or obsolete._enabled)
and repo.revs('first(children(%ld) - %ld)',
rebaseset, rebaseset)):
raise util.Abort(
_("can't remove original changesets with"
" unrebased descendants"),
hint=_('use --keep to keep original changesets'))
else:
result = buildstate(repo, dest, rebaseset, collapsef)
if not result:
# Empty state built, nothing to rebase
ui.status(_('nothing to rebase\n'))
return 1
elif not keepf and not repo[root].mutable():
raise util.Abort(_("can't rebase immutable changeset %s")
% repo[root],
hint=_('see hg help phases for details'))
else:
originalwd, target, state = result
if collapsef:
targetancestors = repo.changelog.ancestors([target],
inclusive=True)
external = checkexternal(repo, state, targetancestors)
if keepbranchesf:
assert not extrafn, 'cannot use both keepbranches and extrafn'
def extrafn(ctx, extra):
extra['branch'] = ctx.branch()
if collapsef:
branches = set()
for rev in state:
branches.add(repo[rev].branch())
if len(branches) > 1:
raise util.Abort(_('cannot collapse multiple named '
'branches'))
# Rebase
if not targetancestors:
targetancestors = repo.changelog.ancestors([target], inclusive=True)
# Keep track of the current bookmarks in order to reset them later
currentbookmarks = repo._bookmarks.copy()
activebookmark = activebookmark or repo._bookmarkcurrent
if activebookmark:
bookmarks.unsetcurrent(repo)
sortedstate = sorted(state)
total = len(sortedstate)
pos = 0
for rev in sortedstate:
pos += 1
if state[rev] == -1:
ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
_('changesets'), total)
storestatus(repo, originalwd, target, state, collapsef, keepf,
keepbranchesf, external, activebookmark)
p1, p2 = defineparents(repo, rev, target, state,
targetancestors)
if len(repo.parents()) == 2:
repo.ui.debug('resuming interrupted rebase\n')
else:
try:
ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
stats = rebasenode(repo, rev, p1, state, collapsef)
if stats and stats[3] > 0:
raise error.InterventionRequired(
_('unresolved conflicts (see hg '
'resolve, then hg rebase --continue)'))
finally:
ui.setconfig('ui', 'forcemerge', '')
cmdutil.duplicatecopies(repo, rev, target)
if not collapsef:
newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn,
editor=editor)
else:
# Skip commit if we are collapsing
repo.setparents(repo[p1].node())
newrev = None
# Update the state
if newrev is not None:
state[rev] = repo[newrev].rev()
else:
if not collapsef:
ui.note(_('no changes, revision %d skipped\n') % rev)
ui.debug('next revision set to %s\n' % p1)
skipped.add(rev)
state[rev] = p1
ui.progress(_('rebasing'), None)
ui.note(_('rebase merging completed\n'))
if collapsef and not keepopen:
p1, p2 = defineparents(repo, min(state), target,
state, targetancestors)
if collapsemsg:
commitmsg = collapsemsg
else:
commitmsg = 'Collapsed revision'
for rebased in state:
if rebased not in skipped and state[rebased] > nullmerge:
commitmsg += '\n* %s' % repo[rebased].description()
commitmsg = ui.edit(commitmsg, repo.ui.username())
newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
extrafn=extrafn, editor=editor)
if 'qtip' in repo.tags():
updatemq(repo, state, skipped, **opts)
if currentbookmarks:
# Nodeids are needed to reset bookmarks
nstate = {}
for k, v in state.iteritems():
if v > nullmerge:
nstate[repo[k].node()] = repo[v].node()
# XXX this is the same as dest.node() for the non-continue path --
# this should probably be cleaned up
targetnode = repo[target].node()
if not keepf:
collapsedas = None
if collapsef:
collapsedas = newrev
clearrebased(ui, repo, state, skipped, collapsedas)
if currentbookmarks:
updatebookmarks(repo, targetnode, nstate, currentbookmarks)
clearstatus(repo)
ui.note(_("rebase completed\n"))
util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
if skipped:
ui.note(_("%d revisions have been skipped\n") % len(skipped))
if (activebookmark and
repo['tip'].node() == repo._bookmarks[activebookmark]):
bookmarks.setcurrent(repo, activebookmark)
finally:
release(lock, wlock)
def checkexternal(repo, state, targetancestors):
"""Check whether one or more external revisions need to be taken in
consideration. In the latter case, abort.
"""
external = nullrev
source = min(state)
for rev in state:
if rev == source:
continue
# Check externals and fail if there are more than one
for p in repo[rev].parents():
if (p.rev() not in state
and p.rev() not in targetancestors):
if external != nullrev:
raise util.Abort(_('unable to collapse, there is more '
'than one external parent'))
external = p.rev()
return external
def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None):
'Commit the changes and store useful information in extra'
try:
repo.setparents(repo[p1].node(), repo[p2].node())
ctx = repo[rev]
if commitmsg is None:
commitmsg = ctx.description()
extra = {'rebase_source': ctx.hex()}
if extrafn:
extrafn(ctx, extra)
# Commit might fail if unresolved files exist
newrev = repo.commit(text=commitmsg, user=ctx.user(),
date=ctx.date(), extra=extra, editor=editor)
repo.dirstate.setbranch(repo[newrev].branch())
targetphase = max(ctx.phase(), phases.draft)
# retractboundary doesn't overwrite upper phase inherited from parent
newnode = repo[newrev].node()
if newnode:
phases.retractboundary(repo, targetphase, [newnode])
return newrev
except util.Abort:
# Invalidate the previous setparents
repo.dirstate.invalidate()
raise
def rebasenode(repo, rev, p1, state, collapse):
'Rebase a single revision'
# Merge phase
# Update to target and merge it with local
if repo['.'].rev() != repo[p1].rev():
repo.ui.debug(" update to %d:%s\n" % (repo[p1].rev(), repo[p1]))
merge.update(repo, p1, False, True, False)
else:
repo.ui.debug(" already in target\n")
repo.dirstate.write()
repo.ui.debug(" merge against %d:%s\n" % (repo[rev].rev(), repo[rev]))
base = None
if repo[rev].rev() != repo[min(state)].rev():
base = repo[rev].p1().node()
        # When collapsing in-place, the parent is the common ancestor, so we
# have to allow merging with it.
return merge.update(repo, rev, True, True, False, base, collapse)
def nearestrebased(repo, rev, state):
"""return the nearest ancestors of rev in the rebase result"""
rebased = [r for r in state if state[r] > nullmerge]
candidates = repo.revs('max(%ld and (::%d))', rebased, rev)
if candidates:
return state[candidates[0]]
else:
return None
def defineparents(repo, rev, target, state, targetancestors):
'Return the new parent relationship of the revision that will be rebased'
parents = repo[rev].parents()
p1 = p2 = nullrev
P1n = parents[0].rev()
if P1n in targetancestors:
p1 = target
elif P1n in state:
if state[P1n] == nullmerge:
p1 = target
elif state[P1n] == revignored:
p1 = nearestrebased(repo, P1n, state)
if p1 is None:
p1 = target
else:
p1 = state[P1n]
else: # P1n external
p1 = target
p2 = P1n
if len(parents) == 2 and parents[1].rev() not in targetancestors:
P2n = parents[1].rev()
# interesting second parent
if P2n in state:
if p1 == target: # P1n in targetancestors or external
p1 = state[P2n]
elif state[P2n] == revignored:
p2 = nearestrebased(repo, P2n, state)
if p2 is None:
# no ancestors rebased yet, detach
p2 = target
else:
p2 = state[P2n]
else: # P2n external
if p2 != nullrev: # P1n external too => rev is a merged revision
raise util.Abort(_('cannot use revision %d as base, result '
'would have 3 parents') % rev)
p2 = P2n
repo.ui.debug(" future parents are %d and %d\n" %
(repo[p1].rev(), repo[p2].rev()))
return p1, p2
def isagitpatch(repo, patchname):
'Return true if the given patch is in git format'
mqpatch = os.path.join(repo.mq.path, patchname)
for line in patch.linereader(file(mqpatch, 'rb')):
if line.startswith('diff --git'):
return True
return False
def updatemq(repo, state, skipped, **opts):
'Update rebased mq patches - finalize and then import them'
mqrebase = {}
mq = repo.mq
original_series = mq.fullseries[:]
skippedpatches = set()
for p in mq.applied:
rev = repo[p.node].rev()
if rev in state:
repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
(rev, p.name))
mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
else:
# Applied but not rebased, not sure this should happen
skippedpatches.add(p.name)
if mqrebase:
mq.finish(repo, mqrebase.keys())
# We must start import from the newest revision
for rev in sorted(mqrebase, reverse=True):
if rev not in skipped:
name, isgit = mqrebase[rev]
repo.ui.debug('import mq patch %d (%s)\n' % (state[rev], name))
mq.qimport(repo, (), patchname=name, git=isgit,
rev=[str(state[rev])])
else:
# Rebased and skipped
skippedpatches.add(mqrebase[rev][0])
# Patches were either applied and rebased and imported in
# order, applied and removed or unapplied. Discard the removed
# ones while preserving the original series order and guards.
newseries = [s for s in original_series
if mq.guard_re.split(s, 1)[0] not in skippedpatches]
mq.fullseries[:] = newseries
mq.seriesdirty = True
mq.savedirty()
def updatebookmarks(repo, targetnode, nstate, originalbookmarks):
'Move bookmarks to their correct changesets, and delete divergent ones'
marks = repo._bookmarks
for k, v in originalbookmarks.iteritems():
if v in nstate:
# update the bookmarks for revs that have moved
marks[k] = nstate[v]
bookmarks.deletedivergent(repo, [targetnode], k)
marks.write()
def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches,
external, activebookmark):
'Store the current status to allow recovery'
f = repo.opener("rebasestate", "w")
f.write(repo[originalwd].hex() + '\n')
f.write(repo[target].hex() + '\n')
f.write(repo[external].hex() + '\n')
f.write('%d\n' % int(collapse))
f.write('%d\n' % int(keep))
f.write('%d\n' % int(keepbranches))
f.write('%s\n' % (activebookmark or ''))
for d, v in state.iteritems():
oldrev = repo[d].hex()
if v > nullmerge:
newrev = repo[v].hex()
else:
newrev = v
f.write("%s:%s\n" % (oldrev, newrev))
f.close()
repo.ui.debug('rebase status stored\n')
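    # For illustration (not part of the upstream source): the rebasestate
    # file written above looks roughly like
    #   <originalwd hex>
    #   <target hex>
    #   <external hex>
    #   0            (collapse flag)
    #   0            (keep flag)
    #   0            (keepbranches flag)
    #   <active bookmark, possibly empty>
    #   <old node hex>:<new node hex, or -2/-3>   (one line per rebased rev)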
def clearstatus(repo):
'Remove the status files'
util.unlinkpath(repo.join("rebasestate"), ignoremissing=True)
def restorestatus(repo):
'Restore a previously stored status'
try:
target = None
collapse = False
external = nullrev
activebookmark = None
state = {}
f = repo.opener("rebasestate")
for i, l in enumerate(f.read().splitlines()):
if i == 0:
originalwd = repo[l].rev()
elif i == 1:
target = repo[l].rev()
elif i == 2:
external = repo[l].rev()
elif i == 3:
collapse = bool(int(l))
elif i == 4:
keep = bool(int(l))
elif i == 5:
keepbranches = bool(int(l))
elif i == 6 and not (len(l) == 81 and ':' in l):
# line 6 is a recent addition, so for backwards compatibility
# check that the line doesn't look like the oldrev:newrev lines
activebookmark = l
else:
oldrev, newrev = l.split(':')
if newrev in (str(nullmerge), str(revignored)):
state[repo[oldrev].rev()] = int(newrev)
else:
state[repo[oldrev].rev()] = repo[newrev].rev()
skipped = set()
# recompute the set of skipped revs
if not collapse:
seen = set([target])
for old, new in sorted(state.items()):
if new != nullrev and new in seen:
skipped.add(old)
seen.add(new)
repo.ui.debug('computed skipped revs: %s\n' % skipped)
repo.ui.debug('rebase status resumed\n')
return (originalwd, target, state, skipped,
collapse, keep, keepbranches, external, activebookmark)
except IOError, err:
if err.errno != errno.ENOENT:
raise
raise util.Abort(_('no rebase in progress'))
def abort(repo, originalwd, target, state):
'Restore the repository to its original state'
dstates = [s for s in state.values() if s != nullrev]
immutable = [d for d in dstates if not repo[d].mutable()]
if immutable:
raise util.Abort(_("can't abort rebase due to immutable changesets %s")
% ', '.join(str(repo[r]) for r in immutable),
hint=_('see hg help phases for details'))
descendants = set()
if dstates:
descendants = set(repo.changelog.descendants(dstates))
if descendants - set(dstates):
repo.ui.warn(_("warning: new changesets detected on target branch, "
"can't abort\n"))
return -1
else:
# Strip from the first rebased revision
merge.update(repo, repo[originalwd].rev(), False, True, False)
rebased = filter(lambda x: x > -1 and x != target, state.values())
if rebased:
strippoints = [c.node() for c in repo.set('roots(%ld)', rebased)]
# no backup of rebased cset versions needed
repair.strip(repo.ui, repo, strippoints)
clearstatus(repo)
repo.ui.warn(_('rebase aborted\n'))
return 0
def buildstate(repo, dest, rebaseset, collapse):
'''Define which revisions are going to be rebased and where
repo: repo
dest: context
rebaseset: set of rev
'''
# This check isn't strictly necessary, since mq detects commits over an
# applied patch. But it prevents messing up the working directory when
# a partially completed rebase is blocked by mq.
if 'qtip' in repo.tags() and (dest.node() in
[s.node for s in repo.mq.applied]):
raise util.Abort(_('cannot rebase onto an applied mq patch'))
roots = list(repo.set('roots(%ld)', rebaseset))
if not roots:
raise util.Abort(_('no matching revisions'))
roots.sort()
state = {}
detachset = set()
for root in roots:
commonbase = root.ancestor(dest)
if commonbase == root:
raise util.Abort(_('source is ancestor of destination'))
if commonbase == dest:
samebranch = root.branch() == dest.branch()
if not collapse and samebranch and root in dest.children():
repo.ui.debug('source is a child of destination\n')
return None
repo.ui.debug('rebase onto %d starting from %s\n' % (dest, roots))
state.update(dict.fromkeys(rebaseset, nullrev))
# Rebase tries to turn <dest> into a parent of <root> while
# preserving the number of parents of rebased changesets:
#
# - A changeset with a single parent will always be rebased as a
# changeset with a single parent.
#
# - A merge will be rebased as merge unless its parents are both
# ancestors of <dest> or are themselves in the rebased set and
# pruned while rebased.
#
# If one parent of <root> is an ancestor of <dest>, the rebased
# version of this parent will be <dest>. This is always true with
# --base option.
#
# Otherwise, we need to *replace* the original parents with
# <dest>. This "detaches" the rebased set from its former location
# and rebases it onto <dest>. Changes introduced by ancestors of
# <root> not common with <dest> (the detachset, marked as
# nullmerge) are "removed" from the rebased changesets.
#
# - If <root> has a single parent, set it to <dest>.
#
# - If <root> is a merge, we cannot decide which parent to
# replace, the rebase operation is not clearly defined.
#
# The table below sums up this behavior:
#
# +------------------+----------------------+-------------------------+
# | | one parent | merge |
# +------------------+----------------------+-------------------------+
# | parent in | new parent is <dest> | parents in ::<dest> are |
# | ::<dest> | | remapped to <dest> |
# +------------------+----------------------+-------------------------+
# | unrelated source | new parent is <dest> | ambiguous, abort |
# +------------------+----------------------+-------------------------+
#
# The actual abort is handled by `defineparents`
if len(root.parents()) <= 1:
# ancestors of <root> not ancestors of <dest>
detachset.update(repo.changelog.findmissingrevs([commonbase.rev()],
[root.rev()]))
for r in detachset:
if r not in state:
state[r] = nullmerge
if len(roots) > 1:
# If we have multiple roots, we may have "hole" in the rebase set.
# Rebase roots that descend from those "hole" should not be detached as
# other root are. We use the special `revignored` to inform rebase that
# the revision should be ignored but that `defineparents` should search
# a rebase destination that make sense regarding rebased topology.
rebasedomain = set(repo.revs('%ld::%ld', rebaseset, rebaseset))
for ignored in set(rebasedomain) - set(rebaseset):
state[ignored] = revignored
return repo['.'].rev(), dest.rev(), state
def clearrebased(ui, repo, state, skipped, collapsedas=None):
"""dispose of rebased revision at the end of the rebase
If `collapsedas` is not None, the rebase was a collapse whose result if the
`collapsedas` node."""
if obsolete._enabled:
markers = []
for rev, newrev in sorted(state.items()):
if newrev >= 0:
if rev in skipped:
succs = ()
elif collapsedas is not None:
succs = (repo[collapsedas],)
else:
succs = (repo[newrev],)
markers.append((repo[rev], succs))
if markers:
obsolete.createmarkers(repo, markers)
else:
rebased = [rev for rev in state if state[rev] > nullmerge]
if rebased:
stripped = []
for root in repo.set('roots(%ld)', rebased):
if set(repo.changelog.descendants([root.rev()])) - set(state):
ui.warn(_("warning: new changesets detected "
"on source branch, not stripping\n"))
else:
stripped.append(root.node())
if stripped:
# backup the old csets by default
repair.strip(ui, repo, stripped, "all")
def pullrebase(orig, ui, repo, *args, **opts):
'Call rebase after pull if the latter has been invoked with --rebase'
if opts.get('rebase'):
if opts.get('update'):
del opts['update']
ui.debug('--update and --rebase are not compatible, ignoring '
'the update flag\n')
movemarkfrom = repo['.'].node()
cmdutil.bailifchanged(repo)
revsprepull = len(repo)
origpostincoming = commands.postincoming
def _dummy(*args, **kwargs):
pass
commands.postincoming = _dummy
try:
orig(ui, repo, *args, **opts)
finally:
commands.postincoming = origpostincoming
revspostpull = len(repo)
if revspostpull > revsprepull:
            # the --rev option from pull conflicts with rebase's own --rev,
            # so drop it
if 'rev' in opts:
del opts['rev']
rebase(ui, repo, **opts)
branch = repo[None].branch()
dest = repo[branch].rev()
if dest != repo['.'].rev():
                # there was nothing to rebase, so we force an update
hg.update(repo, dest)
if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
ui.status(_("updating bookmark %s\n")
% repo._bookmarkcurrent)
else:
if opts.get('tool'):
raise util.Abort(_('--tool can only be used with --rebase'))
orig(ui, repo, *args, **opts)
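# Example of the behaviour wired up below (illustrative only): once uisetup()
# has wrapped 'pull',
#   hg pull --rebase
# pulls new changesets and then rebases local work onto the branch head,
# rather than leaving the pulled changesets as a separate head.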
def uisetup(ui):
'Replace pull with a decorator to provide --rebase option'
entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
entry[1].append(('', 'rebase', None,
_("rebase working directory to branch head")))
entry[1].append(('t', 'tool', '',
_("specify merge tool for rebase")))
| apache-2.0 |
40223211/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/atexit.py | 743 | 1049 | """allow programmer to define multiple exit functions to be executed upon normal program termination.
Two public functions, register and unregister, are defined.
"""
class __loader__(object):
pass
def _clear(*args,**kw):
"""_clear() -> None
Clear the list of previously registered exit functions."""
pass
def _run_exitfuncs(*args,**kw):
"""_run_exitfuncs() -> None
Run all registered exit functions."""
pass
def register(*args,**kw):
"""register(func, *args, **kwargs) -> func
Register a function to be executed upon normal program termination
func - function to be called at exit
args - optional arguments to pass to func
kwargs - optional keyword arguments to pass to func
func is returned to facilitate usage as a decorator."""
pass
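# Illustrative use of register as a decorator (assumes the standard CPython
# semantics that this Brython stub mirrors):
#   import atexit
#   @atexit.register
#   def goodbye():
#       print("exiting")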
def unregister(*args,**kw):
"""unregister(func) -> None
    Unregister an exit function which was previously registered using
atexit.register
func - function to be unregistered"""
pass
| gpl-3.0 |
pentestfail/TA-FireEye_TAP | bin/ta_fireeye_tap/solnlib/packages/requests/packages/urllib3/util/retry.py | 198 | 9981 | from __future__ import absolute_import
import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
        A redirect is an HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
        idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
:param iterable status_forcelist:
A set of HTTP status codes that we should force a retry on.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts. urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self._observed_errors = _observed_errors # TODO: use .history instead?
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
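    # Worked example (illustrative only): with backoff_factor=0.1 the formula
    # above returns 0 after the first observed error, then
    # 0.1 * 2**1 = 0.2s, 0.1 * 2**2 = 0.4s, and so on, capped at BACKOFF_MAX.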
def sleep(self):
""" Sleep between retry attempts using an exponential backoff.
By default, the backoff factor is 0 and this method will return
immediately.
"""
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
""" Is this method/status code retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None,
_pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
cause = 'unknown'
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
else:
# Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
_observed_errors += 1
cause = ResponseError.GENERIC_ERROR
if response and response.status:
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
| mit |
huggingface/transformers | src/transformers/models/roformer/tokenization_utils.py | 1 | 2651 | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization utils for RoFormer."""
from typing import List
from tokenizers import NormalizedString, PreTokenizedString, normalizers
class JiebaPreTokenizer:
def __init__(self, vocab) -> None:
self.vocab = vocab
self.normalizers = normalizers.BertNormalizer(
clean_text=False,
handle_chinese_chars=True,
strip_accents=False,
lowercase=False,
)
try:
import rjieba
except ImportError:
raise ImportError(
"You need to install rjieba to use RoFormerTokenizer."
"See https://pypi.org/project/rjieba/ for installation."
)
self.jieba = rjieba
def jieba_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
splits = []
        # slicing normalized_string here is slow (~6s), but test_alignement_methods passes
for token, start, end in self.jieba.tokenize(str(normalized_string), hmm=False):
if token in self.vocab:
splits.append(normalized_string[start:end])
else:
token_list = self.normalizers.normalize_str(token).split()
for token in token_list:
if token:
end = start + len(token)
splits.append(normalized_string[start:end])
start = end
        # with the code below, test_alignement_methods can't pass, but it is fast (~300ms)
# for token in self.jieba.cut(str(normalized_string), False):
# if token in self.vocab:
# splits.append(NormalizedString(token))
# else:
# token_list = self.normalizers.normalize_str(token).split()
# for token in token_list:
# if token:
# splits.append(NormalizedString(token))
return splits
def pre_tokenize(self, pretok: PreTokenizedString):
pretok.split(self.jieba_split)
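# Illustrative wiring (not part of this module; assumes the custom
# pre-tokenizer API of the `tokenizers` library): given a tokenizers.Tokenizer
# instance `tok` and a vocab dict, the class above is plugged in roughly as
#   from tokenizers.pre_tokenizers import PreTokenizer
#   tok.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))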
| apache-2.0 |
hwroitzsch/BikersLifeSaver | lib/python3.5/site-packages/numpy/linalg/info.py | 264 | 1198 | """\
Core Linear Algebra Tools
-------------------------
Linear algebra basics:
- norm Vector or matrix norm
- inv Inverse of a square matrix
- solve Solve a linear system of equations
- det Determinant of a square matrix
- lstsq Solve linear least-squares problem
- pinv Pseudo-inverse (Moore-Penrose) calculated using a singular
value decomposition
- matrix_power Integer power of a square matrix
Eigenvalues and decompositions:
- eig Eigenvalues and vectors of a square matrix
- eigh Eigenvalues and eigenvectors of a Hermitian matrix
- eigvals Eigenvalues of a square matrix
- eigvalsh Eigenvalues of a Hermitian matrix
- qr QR decomposition of a matrix
- svd Singular value decomposition of a matrix
- cholesky Cholesky decomposition of a matrix
Tensor operations:
- tensorsolve Solve a linear tensor equation
- tensorinv Calculate an inverse of a tensor
Exceptions:
- LinAlgError Indicates a failed linear algebra operation
"""
from __future__ import division, absolute_import, print_function
depends = ['core']
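# Minimal usage sketch (illustrative, not part of this info module):
#   import numpy as np
#   A = np.array([[3., 1.], [1., 2.]])
#   b = np.array([9., 8.])
#   x = np.linalg.solve(A, b)   # -> array([2., 3.])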
| mit |
AndroidOpenDevelopment/android_external_skia | platform_tools/android/gyp_gen/generate_user_config.py | 67 | 3795 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Function for generating the SkUserConfig file, customized for Android."""
import os
import shutil
AUTOGEN_WARNING = (
"""
///////////////////////////////////////////////////////////////////////////////
//
// THIS FILE IS AUTOGENERATED BY GYP_TO_ANDROID.PY. DO NOT EDIT.
//
// This file contains Skia's upstream include/config/SkUserConfig.h as a
// reference, followed by the actual defines set for Android.
//
///////////////////////////////////////////////////////////////////////////////
"""
)
BUILD_GUARD = 'SkUserConfig_Android_DEFINED'
def generate_user_config(original_sk_user_config, require_sk_user_config,
target_dir, ordered_set):
"""Generate the SkUserConfig file specific to the Android framework.
Android needs its #defines in its skia/include/core directory, so that other
libraries which use Skia's headers get the right definitions. This function
takes the existing sample version of SkUserConfig, checked into Skia, and
appends the defines from ordered_set, which is expected to be a
vars_dict_lib.OrderedSet containing the defines. The result is written to
target_dir/SkUserConfig.h
Args:
original_sk_user_config: Path to original SkUserConfig.h
require_sk_user_config: If True, raise an AssertionError if
SkUserConfig.h does not exist. Either way, if it does exist, copy it
into the new file.
target_dir: Directory within which the modified SkUserConfig.h will be
written. Its name will be the same basename as
original_sk_user_config. If None, the new file will be written to the
working directory.
ordered_set: A vars_dict_lib.OrderedSet, containing a list of defines to
be appended to SkUserConfig.
Raises:
AssertionError: If original_sk_user_config does not exist.
"""
sk_user_config_exists = os.path.exists(original_sk_user_config)
if require_sk_user_config:
assert sk_user_config_exists
dst_filename = os.path.basename(original_sk_user_config)
if target_dir:
dst_filename = os.path.join(target_dir, dst_filename)
with open(dst_filename, 'w') as dst:
dst.write(AUTOGEN_WARNING)
# Copy the original exactly. This is merely for reference. Many of the
# defines written to the file below, either manually or generated from the
# gyp files, have explanations in the original SkUserConfig.h
if sk_user_config_exists:
with open(original_sk_user_config, 'r') as original:
shutil.copyfileobj(original, dst)
# Now add the defines specific to Android. Write a custom build guard to
# ensure they don't get defined more than once.
dst.write('\n// Android defines:\n')
dst.write('#ifndef ' + BUILD_GUARD + '\n')
dst.write('#define ' + BUILD_GUARD + '\n')
# Add conditional defines manually:
# do this build check for other tools that still read this header
dst.write('#ifdef ANDROID\n')
dst.write(' #include <utils/misc.h>\n')
dst.write('#endif\n\n')
dst.write('#if __BYTE_ORDER == __BIG_ENDIAN\n')
dst.write(' #define SK_CPU_BENDIAN\n')
dst.write(' #undef SK_CPU_LENDIAN\n')
dst.write('#else\n')
dst.write(' #define SK_CPU_LENDIAN\n')
dst.write(' #undef SK_CPU_BENDIAN\n')
dst.write('#endif\n\n')
# Now add the defines from the gyp files.
for item in ordered_set:
# Although our defines may have '=' in them, when written to the header
# there should be a space between the macro and what it replaces.
dst.write('#define ' + item.replace('=', ' ') + '\n')
dst.write('\n#endif // ' + BUILD_GUARD + '\n')
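# Sketch of a call site (hypothetical paths; `defines` is assumed to be a
# vars_dict_lib.OrderedSet as described in the docstring above):
#   generate_user_config(
#       original_sk_user_config='include/config/SkUserConfig.h',
#       require_sk_user_config=True,
#       target_dir='out/config',
#       ordered_set=defines)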
| bsd-3-clause |
Lx37/pyqtgraph | examples/ViewBox.py | 6 | 2787 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
ViewBox is the general-purpose graphical container that allows the user to
zoom / pan to inspect any area of a 2D coordinate system.
This unimaginative example demonstrates the construction of a ViewBox-based
plot area with axes, very similar to the way PlotItem is built.
"""
## Add path to library (just for examples; you do not need this)
import initExample
## This example uses a ViewBox to create a PlotWidget-like interface
import numpy as np
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
app = QtGui.QApplication([])
mw = QtGui.QMainWindow()
mw.setWindowTitle('pyqtgraph example: ViewBox')
mw.show()
mw.resize(800, 600)
gv = pg.GraphicsView()
mw.setCentralWidget(gv)
l = QtGui.QGraphicsGridLayout()
l.setHorizontalSpacing(0)
l.setVerticalSpacing(0)
vb = pg.ViewBox()
p1 = pg.PlotDataItem()
vb.addItem(p1)
## Just something to play with inside the ViewBox
class movableRect(QtGui.QGraphicsRectItem):
def __init__(self, *args):
QtGui.QGraphicsRectItem.__init__(self, *args)
self.setAcceptHoverEvents(True)
def hoverEnterEvent(self, ev):
self.savedPen = self.pen()
self.setPen(QtGui.QPen(QtGui.QColor(255, 255, 255)))
ev.ignore()
def hoverLeaveEvent(self, ev):
self.setPen(self.savedPen)
ev.ignore()
def mousePressEvent(self, ev):
if ev.button() == QtCore.Qt.LeftButton:
ev.accept()
self.pressDelta = self.mapToParent(ev.pos()) - self.pos()
else:
ev.ignore()
def mouseMoveEvent(self, ev):
self.setPos(self.mapToParent(ev.pos()) - self.pressDelta)
rect = movableRect(QtCore.QRectF(0, 0, 1, 1))
rect.setPen(QtGui.QPen(QtGui.QColor(100, 200, 100)))
vb.addItem(rect)
l.addItem(vb, 0, 1)
gv.centralWidget.setLayout(l)
xScale = pg.AxisItem(orientation='bottom', linkView=vb)
l.addItem(xScale, 1, 1)
yScale = pg.AxisItem(orientation='left', linkView=vb)
l.addItem(yScale, 0, 0)
xScale.setLabel(text="<span style='color: #ff0000; font-weight: bold'>X</span> <i>Axis</i>", units="s")
yScale.setLabel('Y Axis', units='V')
def rand(n):
data = np.random.random(n)
data[int(n*0.1):int(n*0.13)] += .5
data[int(n*0.18)] += 2
data[int(n*0.1):int(n*0.13)] *= 5
data[int(n*0.18)] *= 20
return data, np.arange(n, n+len(data)) / float(n)
def updateData():
yd, xd = rand(10000)
p1.setData(y=yd, x=xd)
yd, xd = rand(10000)
updateData()
vb.autoRange()
t = QtCore.QTimer()
t.timeout.connect(updateData)
t.start(50)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| mit |
veo-labs/nose-progressive | noseprogressive/plugin.py | 4 | 10478 | from functools import partial
from os import getcwd
import pdb
import sys
from warnings import warn
from nose.plugins import Plugin
from noseprogressive.runner import ProgressiveRunner
from noseprogressive.tracebacks import DEFAULT_EDITOR_SHORTCUT_TEMPLATE
from noseprogressive.wrapping import cmdloop, set_trace, StreamWrapper
class ProgressivePlugin(Plugin):
"""A nose plugin which has a progress bar and formats tracebacks for humans"""
name = 'progressive'
_totalTests = 0
score = 10000 # Grab stdout and stderr before the capture plugin.
def __init__(self, *args, **kwargs):
super(ProgressivePlugin, self).__init__(*args, **kwargs)
# Same wrapping pattern as the built-in capture plugin. The lists
# shouldn't be necessary, but they don't cost much, and I have to
# wonder why capture uses them.
self._stderr, self._stdout, self._set_trace, self._cmdloop = \
[], [], [], []
def begin(self):
"""Make some monkeypatches to dodge progress bar.
Wrap stderr and stdout to keep other users of them from smearing the
progress bar. Wrap some pdb routines to stop showing the bar while in
the debugger.
"""
# The calls to begin/finalize end up like this: a call to begin() on
# instance A of the plugin, then a paired begin/finalize for each test
# on instance B, then a final call to finalize() on instance A.
# TODO: Do only if isatty.
self._stderr.append(sys.stderr)
sys.stderr = StreamWrapper(sys.stderr, self) # TODO: Any point?
self._stdout.append(sys.stdout)
sys.stdout = StreamWrapper(sys.stdout, self)
self._set_trace.append(pdb.set_trace)
pdb.set_trace = set_trace
self._cmdloop.append(pdb.Pdb.cmdloop)
pdb.Pdb.cmdloop = cmdloop
# nosetests changes directories to the tests dir when run from a
# distribution dir, so save the original cwd for relativizing paths.
self._cwd = '' if self.conf.options.absolute_paths else getcwd()
def finalize(self, result):
"""Put monkeypatches back as we found them."""
sys.stderr = self._stderr.pop()
sys.stdout = self._stdout.pop()
pdb.set_trace = self._set_trace.pop()
pdb.Pdb.cmdloop = self._cmdloop.pop()
def options(self, parser, env):
super(ProgressivePlugin, self).options(parser, env)
parser.add_option('--progressive-editor',
type='string',
dest='editor',
default=env.get('NOSE_PROGRESSIVE_EDITOR',
env.get('EDITOR', 'vi')),
help='The editor to use for the shortcuts in '
'tracebacks. Defaults to the value of $EDITOR '
'and then "vi". [NOSE_PROGRESSIVE_EDITOR]')
parser.add_option('--progressive-abs',
action='store_true',
dest='absolute_paths',
default=env.get('NOSE_PROGRESSIVE_ABSOLUTE_PATHS', False),
help='Display paths in traceback as absolute, '
'rather than relative to the current working '
'directory. [NOSE_PROGRESSIVE_ABSOLUTE_PATHS]')
parser.add_option('--progressive-advisories',
action='store_true',
dest='show_advisories',
default=env.get('NOSE_PROGRESSIVE_ADVISORIES', False),
help='Show skips and deprecation exceptions in '
'addition to failures and errors. '
'[NOSE_PROGRESSIVE_ADVISORIES]')
parser.add_option('--progressive-with-styling',
action='store_true',
dest='with_styling',
default=env.get('NOSE_PROGRESSIVE_WITH_STYLING', False),
help='nose-progressive automatically omits bold and '
'color formatting when its output is directed '
'to a non-terminal. Specifying '
'--progressive-with-styling forces such '
'styling to be output regardless. '
'[NOSE_PROGRESSIVE_WITH_STYLING]')
parser.add_option('--progressive-with-bar',
action='store_true',
dest='with_bar',
default=env.get('NOSE_PROGRESSIVE_WITH_BAR', False),
help='nose-progressive automatically omits the '
'progress bar when its output is directed to a '
'non-terminal. Specifying '
'--progressive-with-bar forces the bar to be '
'output regardless. This option implies '
'--progressive-with-styling. '
'[NOSE_PROGRESSIVE_WITH_BAR]')
parser.add_option('--progressive-function-color',
type='int',
dest='function_color',
default=env.get('NOSE_PROGRESSIVE_FUNCTION_COLOR', 12),
help='Color of function names in tracebacks. An '
'ANSI color expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_FUNCTION_COLOR]')
parser.add_option('--progressive-dim-color',
type='int',
dest='dim_color',
default=env.get('NOSE_PROGRESSIVE_DIM_COLOR', 8),
help='Color of de-emphasized text (like editor '
'shortcuts) in tracebacks. An ANSI color '
'expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_DIM_COLOR]')
parser.add_option('--progressive-bar-filled-color',
type='int',
dest='bar_filled_color',
default=env.get('NOSE_PROGRESSIVE_BAR_FILLED_COLOR', 8),
help="Color of the progress bar's filled portion. An "
'ANSI color expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_BAR_FILLED_COLOR]')
parser.add_option('--progressive-bar-empty-color',
type='int',
dest='bar_empty_color',
default=env.get('NOSE_PROGRESSIVE_BAR_EMPTY_COLOR', 7),
help="Color of the progress bar's empty portion. An "
'ANSI color expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_BAR_EMPTY_COLOR]')
parser.add_option('--progressive-editor-shortcut-template',
type='string',
dest='editor_shortcut_template',
default=env.get(
'NOSE_PROGRESSIVE_EDITOR_SHORTCUT_TEMPLATE',
DEFAULT_EDITOR_SHORTCUT_TEMPLATE),
help='A str.format() template for the non-code lines'
' of the traceback. '
'[NOSE_PROGRESSIVE_EDITOR_SHORTCUT_TEMPLATE]')
def configure(self, options, conf):
"""Turn style-forcing on if bar-forcing is on.
It'd be messy to position the bar but still have the rest of the
terminal capabilities emit ''.
"""
super(ProgressivePlugin, self).configure(options, conf)
if (getattr(options, 'verbosity', 0) > 1 and
getattr(options, 'enable_plugin_id', False)):
# TODO: Can we forcibly disable the ID plugin?
print ('Using --with-id and --verbosity=2 or higher with '
'nose-progressive causes visualization errors. Remove one '
'or the other to avoid a mess.')
if options.with_bar:
options.with_styling = True
def prepareTestLoader(self, loader):
"""Insert ourselves into loader calls to count tests.
The top-level loader call often returns lazy results, like a LazySuite.
This is a problem, as we would destroy the suite by iterating over it
to count the tests. Consequently, we monkeypatch the top-level loader
call to do the load twice: once for the actual test running and again
to yield something we can iterate over to do the count.
"""
def capture_suite(orig_method, *args, **kwargs):
"""Intercept calls to the loader before they get lazy.
Re-execute them to grab a copy of the possibly lazy suite, and
count the tests therein.
"""
self._totalTests += orig_method(*args, **kwargs).countTestCases()
# Clear out the loader's cache. Otherwise, it never finds any tests
# for the actual test run:
loader._visitedPaths = set()
return orig_method(*args, **kwargs)
# TODO: If there's ever a practical need, also patch loader.suiteClass
# or even TestProgram.createTests. createTests seems to be main top-
# level caller of loader methods, and nose.core.collector() (which
# isn't even called in nose) is an alternate one.
if hasattr(loader, 'loadTestsFromNames'):
loader.loadTestsFromNames = partial(capture_suite,
loader.loadTestsFromNames)
def prepareTestRunner(self, runner):
"""Replace TextTestRunner with something that prints fewer dots."""
return ProgressiveRunner(self._cwd,
self._totalTests,
runner.stream,
verbosity=self.conf.verbosity,
config=self.conf) # So we don't get a default
# NoPlugins manager
def prepareTestResult(self, result):
"""Hang onto the progress bar so the StreamWrappers can grab it."""
self.bar = result.bar
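# Illustrative command line (assumes nose loads the plugin via its entry
# point; option names follow the definitions in options() above):
#   nosetests --with-progressive --progressive-advisories --progressive-abs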
| mit |
janslow/boto | tests/integration/gs/test_resumable_uploads.py | 101 | 25789 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Tests of Google Cloud Storage resumable uploads.
"""
import StringIO
import errno
import random
import os
import time
import boto
from boto import storage_uri
from boto.gs.resumable_upload_handler import ResumableUploadHandler
from boto.exception import InvalidUriError
from boto.exception import ResumableTransferDisposition
from boto.exception import ResumableUploadException
from cb_test_harness import CallbackTestHarness
from tests.integration.gs.testcase import GSTestCase
SMALL_KEY_SIZE = 2 * 1024 # 2 KB.
LARGE_KEY_SIZE = 500 * 1024 # 500 KB.
LARGEST_KEY_SIZE = 1024 * 1024 # 1 MB.
class ResumableUploadTests(GSTestCase):
"""Resumable upload test suite."""
def build_input_file(self, size):
buf = []
# I manually construct the random data here instead of calling
# os.urandom() because I want to constrain the range of data (in
        # this case to '0'..'9') so the test
# code can easily overwrite part of the StringIO file with
# known-to-be-different values.
for i in range(size):
buf.append(str(random.randint(0, 9)))
file_as_string = ''.join(buf)
return (file_as_string, StringIO.StringIO(file_as_string))
def make_small_file(self):
return self.build_input_file(SMALL_KEY_SIZE)
def make_large_file(self):
return self.build_input_file(LARGE_KEY_SIZE)
def make_tracker_file(self, tmpdir=None):
if not tmpdir:
tmpdir = self._MakeTempDir()
tracker_file = os.path.join(tmpdir, 'tracker')
return tracker_file
def test_non_resumable_upload(self):
"""
Tests that non-resumable uploads work
"""
small_src_file_as_string, small_src_file = self.make_small_file()
        # Seek to the end in case it's the first test.
small_src_file.seek(0, os.SEEK_END)
dst_key = self._MakeKey(set_contents=False)
try:
dst_key.set_contents_from_file(small_src_file)
self.fail("should fail as need to rewind the filepointer")
except AttributeError:
pass
# Now try calling with a proper rewind.
dst_key.set_contents_from_file(small_src_file, rewind=True)
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
def test_upload_without_persistent_tracker(self):
"""
Tests a single resumable upload, with no tracker URI persistence
"""
res_upload_handler = ResumableUploadHandler()
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, res_upload_handler=res_upload_handler)
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
def test_failed_upload_with_persistent_tracker(self):
"""
Tests that failed resumable upload leaves a correct tracker URI file
"""
harness = CallbackTestHarness()
tracker_file_name = self.make_tracker_file()
res_upload_handler = ResumableUploadHandler(
tracker_file_name=tracker_file_name, num_retries=0)
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
try:
dst_key.set_contents_from_file(
small_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
self.fail('Did not get expected ResumableUploadException')
except ResumableUploadException, e:
# We'll get a ResumableUploadException at this point because
# of CallbackTestHarness (above). Check that the tracker file was
# created correctly.
self.assertEqual(e.disposition,
ResumableTransferDisposition.ABORT_CUR_PROCESS)
self.assertTrue(os.path.exists(tracker_file_name))
f = open(tracker_file_name)
uri_from_file = f.readline().strip()
f.close()
self.assertEqual(uri_from_file,
res_upload_handler.get_tracker_uri())
def test_retryable_exception_recovery(self):
"""
Tests handling of a retryable exception
"""
# Test one of the RETRYABLE_EXCEPTIONS.
exception = ResumableUploadHandler.RETRYABLE_EXCEPTIONS[0]
harness = CallbackTestHarness(exception=exception)
res_upload_handler = ResumableUploadHandler(num_retries=1)
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
# Ensure uploaded object has correct content.
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
def test_broken_pipe_recovery(self):
"""
Tests handling of a Broken Pipe (which interacts with an httplib bug)
"""
exception = IOError(errno.EPIPE, "Broken pipe")
harness = CallbackTestHarness(exception=exception)
res_upload_handler = ResumableUploadHandler(num_retries=1)
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
# Ensure uploaded object has correct content.
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
def test_non_retryable_exception_handling(self):
"""
Tests a resumable upload that fails with a non-retryable exception
"""
harness = CallbackTestHarness(
exception=OSError(errno.EACCES, 'Permission denied'))
res_upload_handler = ResumableUploadHandler(num_retries=1)
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
try:
dst_key.set_contents_from_file(
small_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
self.fail('Did not get expected OSError')
except OSError, e:
# Ensure the error was re-raised.
            self.assertEqual(e.errno, errno.EACCES)
def test_failed_and_restarted_upload_with_persistent_tracker(self):
"""
Tests resumable upload that fails once and then completes, with tracker
file
"""
harness = CallbackTestHarness()
tracker_file_name = self.make_tracker_file()
res_upload_handler = ResumableUploadHandler(
tracker_file_name=tracker_file_name, num_retries=1)
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
# Ensure uploaded object has correct content.
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
# Ensure tracker file deleted.
self.assertFalse(os.path.exists(tracker_file_name))
def test_multiple_in_process_failures_then_succeed(self):
"""
Tests resumable upload that fails twice in one process, then completes
"""
res_upload_handler = ResumableUploadHandler(num_retries=3)
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, res_upload_handler=res_upload_handler)
# Ensure uploaded object has correct content.
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
def test_multiple_in_process_failures_then_succeed_with_tracker_file(self):
"""
Tests resumable upload that fails completely in one process,
then when restarted completes, using a tracker file
"""
# Set up test harness that causes more failures than a single
# ResumableUploadHandler instance will handle, writing enough data
# before the first failure that some of it survives that process run.
harness = CallbackTestHarness(
fail_after_n_bytes=LARGE_KEY_SIZE/2, num_times_to_fail=2)
tracker_file_name = self.make_tracker_file()
res_upload_handler = ResumableUploadHandler(
tracker_file_name=tracker_file_name, num_retries=1)
larger_src_file_as_string, larger_src_file = self.make_large_file()
larger_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
try:
dst_key.set_contents_from_file(
larger_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
self.fail('Did not get expected ResumableUploadException')
except ResumableUploadException, e:
self.assertEqual(e.disposition,
ResumableTransferDisposition.ABORT_CUR_PROCESS)
# Ensure a tracker file survived.
self.assertTrue(os.path.exists(tracker_file_name))
# Try it one more time; this time should succeed.
larger_src_file.seek(0)
dst_key.set_contents_from_file(
larger_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
self.assertEqual(LARGE_KEY_SIZE, dst_key.size)
self.assertEqual(larger_src_file_as_string,
dst_key.get_contents_as_string())
self.assertFalse(os.path.exists(tracker_file_name))
# Ensure some of the file was uploaded both before and after failure.
self.assertTrue(len(harness.transferred_seq_before_first_failure) > 1
and
len(harness.transferred_seq_after_first_failure) > 1)
    def test_upload_with_initial_partial_upload_before_failure(self):
"""
Tests resumable upload that successfully uploads some content
before it fails, then restarts and completes
"""
# Set up harness to fail upload after several hundred KB so upload
# server will have saved something before we retry.
harness = CallbackTestHarness(
fail_after_n_bytes=LARGE_KEY_SIZE/2)
res_upload_handler = ResumableUploadHandler(num_retries=1)
larger_src_file_as_string, larger_src_file = self.make_large_file()
larger_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
larger_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
# Ensure uploaded object has correct content.
self.assertEqual(LARGE_KEY_SIZE, dst_key.size)
self.assertEqual(larger_src_file_as_string,
dst_key.get_contents_as_string())
# Ensure some of the file was uploaded both before and after failure.
self.assertTrue(len(harness.transferred_seq_before_first_failure) > 1
and
len(harness.transferred_seq_after_first_failure) > 1)
def test_empty_file_upload(self):
"""
Tests uploading an empty file (exercises boundary conditions).
"""
res_upload_handler = ResumableUploadHandler()
empty_src_file = StringIO.StringIO('')
empty_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
empty_src_file, res_upload_handler=res_upload_handler)
self.assertEqual(0, dst_key.size)
def test_upload_retains_metadata(self):
"""
Tests that resumable upload correctly sets passed metadata
"""
res_upload_handler = ResumableUploadHandler()
headers = {'Content-Type' : 'text/plain', 'x-goog-meta-abc' : 'my meta',
'x-goog-acl' : 'public-read'}
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, headers=headers,
res_upload_handler=res_upload_handler)
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
dst_key.open_read()
self.assertEqual('text/plain', dst_key.content_type)
self.assertTrue('abc' in dst_key.metadata)
self.assertEqual('my meta', str(dst_key.metadata['abc']))
acl = dst_key.get_acl()
for entry in acl.entries.entry_list:
if str(entry.scope) == '<AllUsers>':
                self.assertEqual('READ', str(entry.permission))
return
self.fail('No <AllUsers> scope found')
def test_upload_with_file_size_change_between_starts(self):
"""
        Tests resumable upload on a file that changes size between the
        initial upload start and the restart
"""
harness = CallbackTestHarness(
fail_after_n_bytes=LARGE_KEY_SIZE/2)
tracker_file_name = self.make_tracker_file()
# Set up first process' ResumableUploadHandler not to do any
# retries (initial upload request will establish expected size to
# upload server).
res_upload_handler = ResumableUploadHandler(
tracker_file_name=tracker_file_name, num_retries=0)
larger_src_file_as_string, larger_src_file = self.make_large_file()
larger_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
try:
dst_key.set_contents_from_file(
larger_src_file, cb=harness.call,
res_upload_handler=res_upload_handler)
self.fail('Did not get expected ResumableUploadException')
except ResumableUploadException, e:
# First abort (from harness-forced failure) should be
# ABORT_CUR_PROCESS.
self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT_CUR_PROCESS)
# Ensure a tracker file survived.
self.assertTrue(os.path.exists(tracker_file_name))
        # Try it again, this time with a different size source file.
        # Wait 1 second between retry attempts to give the upload server a
        # chance to save state so it can respond to the changed file size
        # with a 500 response in the next attempt.
time.sleep(1)
try:
largest_src_file = self.build_input_file(LARGEST_KEY_SIZE)[1]
largest_src_file.seek(0)
dst_key.set_contents_from_file(
largest_src_file, res_upload_handler=res_upload_handler)
self.fail('Did not get expected ResumableUploadException')
except ResumableUploadException, e:
# This abort should be a hard abort (file size changing during
# transfer).
self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
self.assertNotEqual(e.message.find('file size changed'), -1, e.message)
def test_upload_with_file_size_change_during_upload(self):
"""
        Tests resumable upload on a file that changes size while the upload
        is in progress
"""
# Create a file we can change during the upload.
test_file_size = 500 * 1024 # 500 KB.
test_file = self.build_input_file(test_file_size)[1]
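        # Positioning the change at EOF makes the harness append extra bytes
        # mid-transfer, so the file grows while the upload is in progress and
        # the handler is expected to hard-abort.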
harness = CallbackTestHarness(fp_to_change=test_file,
fp_change_pos=test_file_size)
res_upload_handler = ResumableUploadHandler(num_retries=1)
dst_key = self._MakeKey(set_contents=False)
try:
dst_key.set_contents_from_file(
test_file, cb=harness.call,
res_upload_handler=res_upload_handler)
self.fail('Did not get expected ResumableUploadException')
except ResumableUploadException, e:
self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
self.assertNotEqual(
e.message.find('File changed during upload'), -1)
def test_upload_with_file_content_change_during_upload(self):
"""
        Tests resumable upload on a file that changes a few bytes of content
        (so, size stays the same) while the upload is in progress.
"""
def Execute():
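            # Runs one upload attempt against a harness that alters a few
            # bytes of the source file mid-transfer (size unchanged). Returns
            # True if the expected hard ABORT (md5/etag mismatch) occurred and
            # no bad object was left behind; returns False if the upload
            # unexpectedly completed (i.e. it was restarted rather than
            # resumed), so the caller can retry with a delay.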
res_upload_handler = ResumableUploadHandler(num_retries=1)
dst_key = self._MakeKey(set_contents=False)
bucket_uri = storage_uri('gs://' + dst_key.bucket.name)
dst_key_uri = bucket_uri.clone_replace_name(dst_key.name)
try:
dst_key.set_contents_from_file(
test_file, cb=harness.call,
res_upload_handler=res_upload_handler)
return False
except ResumableUploadException, e:
self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
# Ensure the file size didn't change.
test_file.seek(0, os.SEEK_END)
self.assertEqual(test_file_size, test_file.tell())
self.assertNotEqual(
e.message.find('md5 signature doesn\'t match etag'), -1)
# Ensure the bad data wasn't left around.
try:
dst_key_uri.get_key()
self.fail('Did not get expected InvalidUriError')
except InvalidUriError, e:
pass
return True
test_file_size = 500 * 1024 # 500 KB
# The sizes of all the blocks written, except the final block, must be a
# multiple of 256K bytes. We need to trigger a failure after the first
# 256K bytes have been uploaded so that at least one block of data is
# written on the server.
# See https://developers.google.com/storage/docs/concepts-techniques#resumable
# for more information about chunking of uploads.
n_bytes = 300 * 1024 # 300 KB
delay = 0
# First, try the test without a delay. If that fails, try it with a
# 15-second delay. The first attempt may fail to recognize that the
# server has a block if the server hasn't yet committed that block
# when we resume the transfer. This would cause a restarted upload
# instead of a resumed upload.
for attempt in range(2):
test_file = self.build_input_file(test_file_size)[1]
harness = CallbackTestHarness(
fail_after_n_bytes=n_bytes,
fp_to_change=test_file,
# Write to byte 1, as the CallbackTestHarness writes
# 3 bytes. This will result in the data on the server
# being different than the local file.
fp_change_pos=1,
delay_after_change=delay)
if Execute():
break
if (attempt == 0 and
0 in harness.transferred_seq_after_first_failure):
# We can confirm the upload was restarted instead of resumed
# by determining if there is an entry of 0 in the
# transferred_seq_after_first_failure list.
# In that case, try again with a 15 second delay.
delay = 15
continue
self.fail('Did not get expected ResumableUploadException')
def test_upload_with_content_length_header_set(self):
"""
Tests resumable upload on a file when the user supplies a
Content-Length header. This is used by gsutil, for example,
to set the content length when gzipping a file.
"""
res_upload_handler = ResumableUploadHandler()
small_src_file_as_string, small_src_file = self.make_small_file()
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
try:
dst_key.set_contents_from_file(
small_src_file, res_upload_handler=res_upload_handler,
headers={'Content-Length' : SMALL_KEY_SIZE})
self.fail('Did not get expected ResumableUploadException')
except ResumableUploadException, e:
self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
self.assertNotEqual(
e.message.find('Attempt to specify Content-Length header'), -1)
def test_upload_with_syntactically_invalid_tracker_uri(self):
"""
Tests resumable upload with a syntactically invalid tracker URI
"""
tmp_dir = self._MakeTempDir()
syntactically_invalid_tracker_file_name = os.path.join(tmp_dir,
'synt_invalid_uri_tracker')
with open(syntactically_invalid_tracker_file_name, 'w') as f:
f.write('ftp://example.com')
res_upload_handler = ResumableUploadHandler(
tracker_file_name=syntactically_invalid_tracker_file_name)
small_src_file_as_string, small_src_file = self.make_small_file()
        # An error should be printed about the invalid URI, but then the
        # upload should run successfully.
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, res_upload_handler=res_upload_handler)
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
def test_upload_with_invalid_upload_id_in_tracker_file(self):
"""
Tests resumable upload with invalid upload ID
"""
invalid_upload_id = ('http://pub.storage.googleapis.com/?upload_id='
'AyzB2Uo74W4EYxyi5dp_-r68jz8rtbvshsv4TX7srJVkJ57CxTY5Dw2')
tmpdir = self._MakeTempDir()
invalid_upload_id_tracker_file_name = os.path.join(tmpdir,
'invalid_upload_id_tracker')
with open(invalid_upload_id_tracker_file_name, 'w') as f:
f.write(invalid_upload_id)
res_upload_handler = ResumableUploadHandler(
tracker_file_name=invalid_upload_id_tracker_file_name)
small_src_file_as_string, small_src_file = self.make_small_file()
        # An error should occur, but then the tracker URI should be
        # regenerated and the upload should succeed.
small_src_file.seek(0)
dst_key = self._MakeKey(set_contents=False)
dst_key.set_contents_from_file(
small_src_file, res_upload_handler=res_upload_handler)
self.assertEqual(SMALL_KEY_SIZE, dst_key.size)
self.assertEqual(small_src_file_as_string,
dst_key.get_contents_as_string())
self.assertNotEqual(invalid_upload_id,
res_upload_handler.get_tracker_uri())
def test_upload_with_unwritable_tracker_file(self):
"""
Tests resumable upload with an unwritable tracker file
"""
# Make dir where tracker_file lives temporarily unwritable.
tmp_dir = self._MakeTempDir()
tracker_file_name = self.make_tracker_file(tmp_dir)
save_mod = os.stat(tmp_dir).st_mode
try:
os.chmod(tmp_dir, 0)
res_upload_handler = ResumableUploadHandler(
tracker_file_name=tracker_file_name)
except ResumableUploadException, e:
self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
self.assertNotEqual(
e.message.find('Couldn\'t write URI tracker file'), -1)
finally:
# Restore original protection of dir where tracker_file lives.
os.chmod(tmp_dir, save_mod)
| mit |
rdonnelly/ultimate-league-app | src/ultimate/leagues/migrations/0017_auto_20200204_2133.py | 2 | 10389 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-02-04 21:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('leagues', '0016_fieldnames_hidden'),
]
operations = [
migrations.AlterField(
model_name='coupon',
name='code',
field=models.CharField(blank=True, help_text='Leaving this field empty will generate a random code.', max_length=30, unique=True),
),
migrations.AlterField(
model_name='coupon',
name='note',
field=models.TextField(blank=True, help_text='What is the coupon for?'),
),
migrations.AlterField(
model_name='coupon',
name='type',
field=models.CharField(choices=[('full', 'Full Value'), ('percentage', 'Percentage'), ('amount', 'Amount')], max_length=20),
),
migrations.AlterField(
model_name='coupon',
name='use_count',
field=models.IntegerField(default=0, help_text='How many times the coupon has been used'),
),
migrations.AlterField(
model_name='coupon',
name='use_limit',
field=models.IntegerField(default=1, help_text='How many uses the coupon should have'),
),
migrations.AlterField(
model_name='coupon',
name='valid_until',
field=models.DateTimeField(blank=True, help_text='Leave empty for coupons that never expire', null=True),
),
migrations.AlterField(
model_name='field',
name='type',
field=models.CharField(choices=[('indoor', 'Indoor'), ('outdoor', 'Outdoor')], max_length=32),
),
migrations.AlterField(
model_name='fieldnames',
name='type',
field=models.CharField(choices=[('grass', 'Grass'), ('turf', 'Turf')], max_length=32),
),
migrations.AlterField(
model_name='league',
name='baggage',
field=models.IntegerField(help_text='max group size'),
),
migrations.AlterField(
model_name='league',
name='captaining_note',
field=models.TextField(blank=True, help_text='note for captaining, typically captain meeting date and time'),
),
migrations.AlterField(
model_name='league',
name='check_cost_increase',
field=models.IntegerField(help_text='amount to be added to paypal_cost if paying by check'),
),
migrations.AlterField(
model_name='league',
name='detailed_info',
field=models.TextField(help_text='details page text, use HTML'),
),
migrations.AlterField(
model_name='league',
name='division_captains_email',
field=models.CharField(blank=True, help_text='email address for league captains', max_length=64, null=True),
),
migrations.AlterField(
model_name='league',
name='division_email',
field=models.CharField(blank=True, help_text='email address for just this league', max_length=64, null=True),
),
migrations.AlterField(
model_name='league',
name='end_time',
field=models.TimeField(help_text='end time for league', null=True),
),
migrations.AlterField(
model_name='league',
name='fields',
field=models.ManyToManyField(help_text='Select the fields these games will be played at, use the green "+" icon if we\'re playing at a new field.', through='leagues.LeagueFields', to='leagues.Field'),
),
migrations.AlterField(
model_name='league',
name='gender',
field=models.CharField(choices=[('capandfill', 'Cap-and-Fill'), ('corec', 'Co-Rec'), ('mens', "Men's"), ('mixed', 'Mixed'), ('open', 'Open'), ('womens', "Women's")], max_length=32),
),
migrations.AlterField(
model_name='league',
name='group_lock_start_date',
field=models.DateTimeField(help_text='date and time that groups are locked'),
),
migrations.AlterField(
model_name='league',
name='late_cost_increase',
field=models.IntegerField(help_text='amount to be added to paypal_cost if paying after price_increase_start_date'),
),
migrations.AlterField(
model_name='league',
name='league_end_date',
field=models.DateField(help_text='date of last game'),
),
migrations.AlterField(
model_name='league',
name='league_start_date',
field=models.DateField(help_text='date of first game'),
),
migrations.AlterField(
model_name='league',
name='level',
field=models.CharField(choices=[('comp', 'Competitive'), ('rec', 'Recreational'), ('youth', 'Youth')], max_length=32),
),
migrations.AlterField(
model_name='league',
name='mail_check_address',
field=models.TextField(help_text='treasurer mailing address'),
),
migrations.AlterField(
model_name='league',
name='max_players',
field=models.IntegerField(help_text='max players for league, extra registrations will be placed on waitlist'),
),
migrations.AlterField(
model_name='league',
name='min_age',
field=models.IntegerField(default=0, help_text='minimum age (in years)'),
),
migrations.AlterField(
model_name='league',
name='night',
field=models.CharField(help_text='lower case, no special characters, e.g. "sunday", "tuesday and thursday", "end of season tournament"', max_length=32),
),
migrations.AlterField(
model_name='league',
name='num_games_per_week',
field=models.IntegerField(default=1, help_text='number of games per week, used to calculate number of games for a league'),
),
migrations.AlterField(
model_name='league',
name='num_skip_weeks',
field=models.IntegerField(default=0, help_text='number of weeks skipped, e.g. skipping 4th of July'),
),
migrations.AlterField(
model_name='league',
name='num_time_slots',
field=models.IntegerField(default=1, help_text='number of time slots'),
),
migrations.AlterField(
model_name='league',
name='paypal_cost',
field=models.IntegerField(help_text='base cost of league if paying by PayPal'),
),
migrations.AlterField(
model_name='league',
name='price_increase_start_date',
field=models.DateTimeField(help_text='date and time when cost increases for league'),
),
migrations.AlterField(
model_name='league',
name='reg_start_date',
field=models.DateTimeField(help_text='date and time that registration process is open (not currently automated)'),
),
migrations.AlterField(
model_name='league',
name='registration_prompt',
field=models.TextField(blank=True, help_text='prompt to show during registration, e.g. to collect data around format preference'),
),
migrations.AlterField(
model_name='league',
name='schedule_note',
field=models.TextField(blank=True, help_text='note to appear under the schedule'),
),
migrations.AlterField(
model_name='league',
name='start_time',
field=models.TimeField(help_text='start time for league', null=True),
),
migrations.AlterField(
model_name='league',
name='state',
field=models.CharField(choices=[('cancelled', 'Cancelled - visible to all, registration closed to all'), ('closed', 'Closed - visible to all, registration closed to all'), ('hidden', 'Hidden - hidden to all, registration closed to all'), ('open', 'Open - visible to all, registration conditionally open to all'), ('preview', 'Preview - visible only to admins, registration conditionally open only to admins')], help_text='state of league, changes whether registration is open or league is visible', max_length=32),
),
migrations.AlterField(
model_name='league',
name='summary_info',
field=models.TextField(help_text='notes for league, e.g. 50-50 league format, showcase league notes'),
),
migrations.AlterField(
model_name='league',
name='tagline',
field=models.TextField(blank=True, help_text='short tagline for description fields, e.g. SEO, Facebook, etc.'),
),
migrations.AlterField(
model_name='league',
name='times',
field=models.TextField(help_text='start to end time, e.g. 6:00-8:00pm'),
),
migrations.AlterField(
model_name='league',
name='type',
field=models.CharField(choices=[('event', 'Event'), ('league', 'League'), ('tournament', 'Tournament')], max_length=32),
),
migrations.AlterField(
model_name='league',
name='waitlist_start_date',
field=models.DateTimeField(help_text='date and time that waitlist is started (regardless of number of registrations)'),
),
migrations.AlterField(
model_name='league',
name='year',
field=models.IntegerField(help_text='four digit year, e.g. 2013'),
),
migrations.AlterField(
model_name='registrations',
name='pay_type',
field=models.CharField(blank=True, choices=[('check', 'Check'), ('paypal', 'PayPal')], max_length=6, null=True),
),
migrations.AlterField(
model_name='registrations',
name='prompt_response',
field=models.CharField(blank=True, help_text='response to the registration prompt for a division', max_length=255, null=True),
),
]
| bsd-3-clause |
EdgarSun/Django-Demo | django/conf/locale/it/formats.py | 232 | 1838 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y' # 25 Ottobre 2006
TIME_FORMAT = 'H:i:s' # 14:30:59
DATETIME_FORMAT = 'l d F Y H:i:s' # Mercoledì 25 Ottobre 2006 14:30:59
YEAR_MONTH_FORMAT = 'F Y' # Ottobre 2006
MONTH_DAY_FORMAT = 'j/F' # 10/2006
SHORT_DATE_FORMAT = 'd/M/Y' # 25/12/2009
SHORT_DATETIME_FORMAT = 'd/M/Y H:i:s' # 25/10/2009 14:30:59
FIRST_DAY_OF_WEEK = 1 # Lunedì
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%Y/%m/%d', # '2008-10-25', '2008/10/25'
'%d-%m-%Y', '%d/%m/%Y', # '25-10-2006', '25/10/2006'
'%d-%m-%y', '%d/%m/%y', # '25-10-06', '25/10/06'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
'%d-%m-%Y %H:%M', # '25-10-2006 14:30'
'%d-%m-%Y', # '25-10-2006'
'%d-%m-%y %H:%M:%S', # '25-10-06 14:30:59'
'%d-%m-%y %H:%M', # '25-10-06 14:30'
'%d-%m-%y', # '25-10-06'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| mit |