max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
src/test/match_pattern.py | elsid/master | 0 | 12787751 | # coding: utf-8
from os.path import dirname, realpath, join
from subprocess import check_output
from hamcrest import assert_that, equal_to
BASE_DIR = dirname(realpath(__file__))
DATA_DIR = join(BASE_DIR, 'data')
MODEL_DIR = join(DATA_DIR, 'model')
PATTERN_DIR = join(DATA_DIR, 'pattern')
MATCH_DIR = join(DATA_DIR, 'match')
MATCH_PATTERN_DIR = join(BASE_DIR, '../match_pattern')
PATTERN_MODEL = join(MATCH_PATTERN_DIR, 'pattern_model.py')
MATCH_PATTERN = join(MATCH_PATTERN_DIR, 'match_pattern.py')
def make_pattern_model(name):
    """Generate a pattern model via pattern_model.py and save it as a YAML file."""
    model = check_output(['python2', PATTERN_MODEL, name])
    pattern_path = join(PATTERN_DIR, name + '.yaml')
    with open(pattern_path, 'w') as pattern_file:
        pattern_file.write(model)
    return pattern_path
def match_pattern(target, pattern, limit=None):
command = ['python2', MATCH_PATTERN]
if limit:
command += ['-l', str(limit)]
return check_output(command + [target, pattern])
def load_match_result(name):
    with open(join(MATCH_DIR, name + '.log')) as log_file:
        return log_file.read()
def base_test_match_pattern(name, target_name, pattern_name, limit=None):
pattern_path = make_pattern_model(pattern_name)
target_path = join(MODEL_DIR, target_name + '.yaml')
match_result = match_pattern(target_path, pattern_path, limit)
assert_that(match_result, equal_to(load_match_result(name)))
def test_match_empty_in_empty():
base_test_match_pattern('empty', 'empty', 'Empty')
def test_match_base_derived():
base_test_match_pattern('base_derived_in_extends', 'extends', 'BaseDerived')
def test_match_overridden_method_call():
base_test_match_pattern('overridden_method_call', 'overridden_method_call',
'OverriddenMethodCall')
def test_match_all_base_derived_in_hierarchy():
base_test_match_pattern('all_base_derived_in_hierarchy', 'hierarchy',
'BaseDerived')
def test_match_one_base_derived_in_hierarchy():
base_test_match_pattern('one_base_derived_in_hierarchy', 'hierarchy',
'BaseDerived', 1)
def test_match_three_base_derived_in_hierarchy():
base_test_match_pattern('three_base_derived_in_hierarchy', 'hierarchy',
'BaseDerived', 3)
| 2.234375 | 2 |
test/IECoreMaya/FnSceneShapeTest.py | bradleyhenke/cortex | 386 | 12787752 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import os
import maya.cmds
import imath
import IECore
import IECoreScene
import IECoreMaya
class FnSceneShapeTest( IECoreMaya.TestCase ) :
__testFile = "test/test.scc"
def setUp( self ) :
scene = IECoreScene.SceneCache( FnSceneShapeTest.__testFile, IECore.IndexedIO.OpenMode.Write )
sc = scene.createChild( str(1) )
mesh = IECoreScene.MeshPrimitive.createBox(imath.Box3f(imath.V3f(0),imath.V3f(1)))
mesh["Cd"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.V3fVectorData( [ imath.V3f( 1, 0, 0 ) ] * 6 ) )
sc.writeObject( mesh, 0.0 )
matrix = imath.M44d().translate( imath.V3d( 1, 0, 0 ) )
sc.writeTransform( IECore.M44dData( matrix ), 0.0 )
sc = sc.createChild( "child" )
mesh = IECoreScene.MeshPrimitive.createBox(imath.Box3f(imath.V3f(0),imath.V3f(1)))
mesh["Cd"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.V3fVectorData( [ imath.V3f( 0, 1, 0 ) ] * 6 ) )
sc.writeObject( mesh, 0.0 )
matrix = imath.M44d().translate( imath.V3d( 2, 0, 0 ) )
sc.writeTransform( IECore.M44dData( matrix ), 0.0 )
sc = sc.createChild( str( 3 ) )
mesh = IECoreScene.MeshPrimitive.createBox(imath.Box3f(imath.V3f(0),imath.V3f(1)))
mesh["Cd"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.V3fVectorData( [ imath.V3f( 0, 0, 1 ) ] * 6 ) )
sc.writeObject( mesh, 0.0 )
matrix = imath.M44d().translate( imath.V3d( 3, 0, 0 ) )
sc.writeTransform( IECore.M44dData( matrix ), 0.0 )
return scene
def __setupTableProp( self ):
boxSize = imath.Box3f( imath.V3f( -.5, -.5, -.5 ), imath.V3f( .5, .5, .5 ) )
table = IECoreScene.SceneCache( FnSceneShapeTest.__testFile, IECore.IndexedIO.Write )
table.writeAttribute( 'scene:visible', IECore.BoolData( True ), 0 )
table.writeAttribute( 'user:testBool', IECore.BoolData( True ), 0 )
table.writeAttribute( 'user:testShort', IECore.ShortData( 2 ), 0 )
table.writeAttribute( 'user:testInt', IECore.IntData( 3 ), 0 )
table.writeAttribute( 'user:testInt64', IECore.Int64Data( 4 ), 0 )
table.writeAttribute( 'user:testFloat', IECore.FloatData( 5 ), 0 )
table.writeAttribute( 'user:testDouble', IECore.DoubleData( 6 ), 0 )
table.writeAttribute( 'user:testString', IECore.StringData( 'seven' ), 0 )
mat = imath.M44d( ( 8, 9, 10, 11 ), ( 12, 13, 14, 15 ), ( 16, 17, 18, 19 ), ( 20, 21, 22, 23 ) )
table.writeAttribute( 'user:testMatrixd', IECore.M44dData(mat), 0 )
mat = imath.M44f( ( 24, 25, 26, 27 ), ( 28, 29, 30, 31 ), ( 32, 33, 34, 35 ), ( 36, 37, 38, 39 ) )
table.writeAttribute( 'user:testMatrixf', IECore.M44fData(mat), 0 )
pedestal_GEO = table.createChild( 'pedestal_GEO' )
pedestal_GEO.writeObject( IECoreScene.MeshPrimitive.createBox(boxSize), 0 )
s = imath.V3d(15, 1, 15)
r = imath.Eulerd()
t = imath.V3d(0, .5, 0)
mat = IECore.TransformationMatrixd( s, r, t )
pedestal_GEO.writeTransform( IECore.TransformationMatrixdData(mat), 0 )
column_GEO = pedestal_GEO.createChild( 'column_GEO' )
column_GEO.writeObject( IECoreScene.MeshPrimitive.createBox(boxSize), 0 )
s = imath.V3d(.25, 20, .25)
r = imath.Eulerd()
t = imath.V3d(0, 10.5, 0)
mat = IECore.TransformationMatrixd( s, r, t )
column_GEO.writeTransform( IECore.TransformationMatrixdData(mat), 0 )
tableTop_GEO = column_GEO.createChild( 'tableTop_GEO' )
tableTop_GEO.writeObject( IECoreScene.MeshPrimitive.createBox(boxSize), 0 )
s = imath.V3d(10, 0.05, 10)
r = imath.Eulerd()
t = imath.V3d(0, .525, 0)
mat = IECore.TransformationMatrixd( s, r, t )
tableTop_GEO.writeTransform( IECore.TransformationMatrixdData(mat), 0 )
def testSceneInterface( self ) :
maya.cmds.file( new=True, f=True )
node = maya.cmds.createNode( "ieSceneShape" )
maya.cmds.setAttr( node+'.file', FnSceneShapeTest.__testFile,type='string' )
fn = IECoreMaya.FnSceneShape( node )
# Check scene for a wrong path
maya.cmds.setAttr( node+'.root', 'blabla', type='string' )
scene = fn.sceneInterface()
self.assertEqual( scene, None )
maya.cmds.setAttr( node+'.root', '/', type='string' )
scene = fn.sceneInterface()
self.assertTrue( isinstance( scene, IECoreScene.SceneCache ) )
self.assertEqual( scene.childNames(), ['1'] )
self.assertFalse( scene.hasObject() )
maya.cmds.setAttr( node+'.root', '/1', type='string' )
scene = fn.sceneInterface()
self.assertTrue( isinstance( scene, IECoreScene.SceneCache ) )
self.assertEqual( scene.childNames(), ['child'] )
self.assertTrue( scene.hasObject() )
def testCreationName( self ) :
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( "bob" )
self.assertEqual( fn.fullPathName(), u"|bob|bobSceneShape" )
fn = IECoreMaya.FnSceneShape.create( "bob1")
self.assertEqual( fn.fullPathName(), u"|bob1|bobSceneShape1" )
fn = IECoreMaya.FnSceneShape.create( "bob" )
self.assertEqual( fn.fullPathName(), u"|bob2|bobSceneShape2" )
def testCreationSetup( self ) :
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( "test" )
self.assertTrue( maya.cmds.sets( fn.fullPathName(), isMember="initialShadingGroup" ) )
self.assertTrue( maya.cmds.getAttr( fn.fullPathName()+".objectOnly", l=True ) )
self.assertFalse( maya.cmds.getAttr( fn.fullPathName()+".objectOnly" ) )
self.assertTrue( maya.cmds.isConnected( "time1.outTime", fn.fullPathName()+".time" ) )
def testExpandOnce( self ) :
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( "test" )
maya.cmds.setAttr( fn.fullPathName()+'.file', FnSceneShapeTest.__testFile,type='string' )
result = fn.expandOnce()
self.assertTrue( maya.cmds.getAttr( fn.fullPathName()+".objectOnly" ) )
self.assertEqual( maya.cmds.getAttr( fn.fullPathName()+".queryPaths[0]" ), "/1" )
self.assertTrue( len(result) == 1 )
childFn = result[0]
self.assertTrue( isinstance( childFn, IECoreMaya.FnSceneShape ) )
self.assertEqual( childFn.fullPathName(), "|test|sceneShape_1|sceneShape_SceneShape1" )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".file" ), FnSceneShapeTest.__testFile )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".root" ), "/1" )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outTranslate", "|test|sceneShape_1.translate" ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outRotate", "|test|sceneShape_1.rotate" ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outScale", "|test|sceneShape_1.scale" ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTime", childFn.fullPathName()+".time" ) )
maya.cmds.setAttr( childFn.fullPathName()+".drawGeometry", 1 )
result = childFn.expandOnce()
self.assertTrue( maya.cmds.getAttr( childFn.fullPathName()+".objectOnly" ) )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".queryPaths[0]" ), "/child" )
self.assertTrue( len(result) == 1 )
self.assertTrue( isinstance( result[0], IECoreMaya.FnSceneShape ) )
self.assertEqual( result[0].fullPathName(), "|test|sceneShape_1|child|childSceneShape" )
self.assertEqual( maya.cmds.getAttr( result[0].fullPathName()+".file" ), FnSceneShapeTest.__testFile )
self.assertEqual( maya.cmds.getAttr( result[0].fullPathName()+".root" ), "/1/child" )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outTranslate", "|test|sceneShape_1|child.translate" ) )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outRotate", "|test|sceneShape_1|child.rotate" ) )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outScale", "|test|sceneShape_1|child.scale" ) )
self.assertEqual( maya.cmds.getAttr( result[0].fullPathName()+".drawGeometry"), 1 )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTime", result[0].fullPathName()+".time" ) )
def testExpandOnceNamespace( self ) :
maya.cmds.file( new=True, f=True )
namespace = "INPUT"
if not maya.cmds.namespace( exists=namespace ):
maya.cmds.namespace( addNamespace=namespace )
def addnamespace( path ):
return path.replace( "|", "|" + namespace + ":" )
fn = IECoreMaya.FnSceneShape.create( namespace + ":" + "test" )
maya.cmds.setAttr( fn.fullPathName()+'.file', FnSceneShapeTest.__testFile, type='string' )
result = fn.expandOnce( preserveNamespace=True )
self.assertTrue( len(result) == 1 )
childFn = result[ 0 ]
self.assertTrue( isinstance( childFn, IECoreMaya.FnSceneShape ) )
self.assertEqual( childFn.fullPathName(), addnamespace ( "|test|sceneShape_1|sceneShape_SceneShape1" ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outTranslate", addnamespace ( "|test|sceneShape_1.translate" ) ) )
def testExpandAll( self ) :
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( "test" )
maya.cmds.setAttr( fn.fullPathName()+'.file', FnSceneShapeTest.__testFile,type='string' )
maya.cmds.setAttr( fn.fullPathName()+".drawGeometry", 1 )
result = fn.expandAll()
self.assertTrue( maya.cmds.getAttr( fn.fullPathName()+".objectOnly" ) )
self.assertEqual( maya.cmds.getAttr( fn.fullPathName()+".queryPaths[0]" ), "/1" )
self.assertTrue( len(result) == 3 )
childFn = result[0]
self.assertTrue( isinstance( childFn, IECoreMaya.FnSceneShape ) )
self.assertEqual( childFn.fullPathName(), "|test|sceneShape_1|sceneShape_SceneShape1" )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".file" ), FnSceneShapeTest.__testFile )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".root" ), "/1" )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outTranslate", "|test|sceneShape_1.translate" ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outRotate", "|test|sceneShape_1.rotate" ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outScale", "|test|sceneShape_1.scale" ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTime", childFn.fullPathName()+".time" ) )
self.assertTrue( maya.cmds.getAttr( childFn.fullPathName()+".objectOnly" ) )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".queryPaths[0]" ), "/child" )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".drawGeometry"), 1 )
self.assertTrue( isinstance( result[1], IECoreMaya.FnSceneShape ) )
self.assertEqual( result[1].fullPathName(), "|test|sceneShape_1|child|childSceneShape" )
self.assertEqual( maya.cmds.getAttr( result[1].fullPathName()+".file" ), FnSceneShapeTest.__testFile )
self.assertEqual( maya.cmds.getAttr( result[1].fullPathName()+".root" ), "/1/child" )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outTranslate", "|test|sceneShape_1|child.translate" ) )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outRotate", "|test|sceneShape_1|child.rotate" ) )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outScale", "|test|sceneShape_1|child.scale" ) )
self.assertEqual( maya.cmds.getAttr( result[1].fullPathName()+".drawGeometry"), 1 )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTime", result[1].fullPathName()+".time" ) )
def testExpandAllNamespace( self ) :
namespace = "INPUT"
if not maya.cmds.namespace( exists=namespace ):
maya.cmds.namespace( addNamespace=namespace )
def addnamespace( path ):
return path.replace( "|", "|" + namespace + ":" )
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( namespace + ":" + "test" )
maya.cmds.setAttr( fn.fullPathName()+'.file', FnSceneShapeTest.__testFile,type='string' )
maya.cmds.setAttr( fn.fullPathName()+".drawGeometry", 1 )
result = fn.expandAll( preserveNamespace=True )
self.assertTrue( maya.cmds.getAttr( fn.fullPathName()+".objectOnly" ) )
self.assertEqual( maya.cmds.getAttr( fn.fullPathName()+".queryPaths[0]" ), "/1" )
self.assertTrue( len(result) == 3 )
childFn = result[0]
self.assertTrue( isinstance( childFn, IECoreMaya.FnSceneShape ) )
self.assertEqual( childFn.fullPathName(), addnamespace( "|test|sceneShape_1|sceneShape_SceneShape1" ) )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".file" ), FnSceneShapeTest.__testFile )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".root" ), "/1" )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outTranslate", addnamespace( "|test|sceneShape_1.translate" ) ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outRotate", addnamespace( "|test|sceneShape_1.rotate" ) ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTransform[0].outScale", addnamespace( "|test|sceneShape_1.scale" ) ) )
self.assertTrue( maya.cmds.isConnected( fn.fullPathName()+".outTime", childFn.fullPathName()+".time" ) )
self.assertTrue( maya.cmds.getAttr( childFn.fullPathName()+".objectOnly" ) )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".queryPaths[0]" ), "/child" )
self.assertEqual( maya.cmds.getAttr( childFn.fullPathName()+".drawGeometry"), 1 )
self.assertTrue( isinstance( result[1], IECoreMaya.FnSceneShape ) )
self.assertEqual( result[1].fullPathName(), addnamespace( "|test|sceneShape_1|child|childSceneShape" ) )
self.assertEqual( maya.cmds.getAttr( result[1].fullPathName()+".file" ), FnSceneShapeTest.__testFile )
self.assertEqual( maya.cmds.getAttr( result[1].fullPathName()+".root" ), "/1/child" )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outTranslate", addnamespace( "|test|sceneShape_1|child.translate" ) ) )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outRotate", addnamespace( "|test|sceneShape_1|child.rotate" ) ) )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTransform[0].outScale", addnamespace( "|test|sceneShape_1|child.scale" ) ) )
self.assertEqual( maya.cmds.getAttr( result[1].fullPathName()+".drawGeometry"), 1 )
self.assertTrue( maya.cmds.isConnected( childFn.fullPathName()+".outTime", result[1].fullPathName()+".time" ) )
def testCollapse( self ) :
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( "test" )
maya.cmds.setAttr( fn.fullPathName()+'.file', FnSceneShapeTest.__testFile,type='string' )
result = fn.expandOnce()
result[0].expandOnce()
children = set( ["|test|testSceneShape", "|test|sceneShape_1", "|test|sceneShape_1|sceneShape_SceneShape1", "|test|sceneShape_1|child", "|test|sceneShape_1|child|childSceneShape"] )
self.assertEqual( set(maya.cmds.listRelatives( "|test", ad=True, f=True )), children )
fn.collapse()
self.assertEqual( maya.cmds.listRelatives( "|test", ad=True, f=True ), ["|test|testSceneShape"] )
self.assertEqual( maya.cmds.getAttr( fn.fullPathName()+".objectOnly" ), 0 )
self.assertEqual( maya.cmds.getAttr( fn.fullPathName()+".visibility" ), 1 )
def testConvertAllToGeometry( self ):
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( "test" )
maya.cmds.setAttr( fn.fullPathName()+'.file', FnSceneShapeTest.__testFile,type='string' )
fn.convertAllToGeometry()
children = ["|test|testSceneShape", "|test|sceneShape_1"]
self.assertEqual( maya.cmds.listRelatives( "|test", f=True ), children )
self.assertEqual( maya.cmds.getAttr( fn.fullPathName()+".intermediateObject" ), 0 )
children = ["|test|sceneShape_1|sceneShape_SceneShape1", "|test|sceneShape_1|child", "|test|sceneShape_1|sceneShape_Shape1"]
self.assertEqual( maya.cmds.listRelatives( "|test|sceneShape_1", f=True ), children )
self.assertEqual( maya.cmds.getAttr( "|test|sceneShape_1|sceneShape_SceneShape1.intermediateObject" ), 1 )
self.assertEqual( maya.cmds.nodeType( "|test|sceneShape_1|sceneShape_Shape1" ), "mesh")
self.assertEqual( maya.cmds.getAttr( "|test|sceneShape_1|sceneShape_SceneShape1.queryPaths[1]" ), "/" )
self.assertTrue( maya.cmds.isConnected( "|test|sceneShape_1|sceneShape_SceneShape1.outObjects[1]", "|test|sceneShape_1|sceneShape_Shape1.inMesh" ) )
def testComponentNames( self ):
maya.cmds.file( new=True, f=True )
fn = IECoreMaya.FnSceneShape.create( "test" )
maya.cmds.setAttr( fn.fullPathName()+'.file', FnSceneShapeTest.__testFile,type='string' )
maya.cmds.setAttr( fn.fullPathName()+".drawGeometry", 0 )
self.assertEqual( fn.componentNames(), [] )
maya.cmds.setAttr( fn.fullPathName()+".drawGeometry", 1 )
self.assertEqual( fn.componentNames(), ['/', '/1', '/1/child', '/1/child/3'] )
fn.selectComponentNames( ['/', '/1', '/1/child/3'] )
self.assertEqual( fn.selectedComponentNames(), set( ['/', '/1', '/1/child/3'] ) )
def testQuery( self ):
maya.cmds.file( new=True, f=True )
def createSceneFile():
scene = IECoreScene.SceneCache( FnSceneShapeTest.__testFile, IECore.IndexedIO.OpenMode.Write )
sc = scene.createChild( str(1) )
curves = IECoreScene.CurvesPrimitive.createBox(imath.Box3f(imath.V3f(0),imath.V3f(1))) # 6 curves.
sc.writeObject( curves, 0.0 )
matrix = imath.M44d().translate( imath.V3d( 0, 0, 0 ) )
sc.writeTransform( IECore.M44dData( matrix ), 0.0 )
createSceneFile()
node = maya.cmds.createNode( "ieSceneShape" )
maya.cmds.setAttr( node+'.file', FnSceneShapeTest.__testFile,type='string' )
maya.cmds.setAttr( node+'.root', '/',type='string' )
fn = IECoreMaya.FnSceneShape( node )
self.assertEqual( maya.cmds.getAttr(fn.fullPathName()+".outObjects[0]", type=True), None )
self.assertEqual( maya.cmds.getAttr(fn.fullPathName()+".outObjects[1]", type=True), None )
maya.cmds.setAttr( fn.fullPathName()+".queryPaths[0]" , "/1", type="string")
maya.cmds.setAttr( fn.fullPathName()+".queryPaths[1]" , "/1", type="string")
maya.cmds.setAttr( fn.fullPathName()+".queryConvertParameters[0]", "-index 0", type="string" ) # Set it to output 0 th box curve.
maya.cmds.setAttr( fn.fullPathName()+".queryConvertParameters[1]", "-index 1", type="string" ) # Set it to output 1 th box curve.
self.assertEqual( maya.cmds.getAttr(fn.fullPathName()+".outObjects[0]", type=True), "nurbsCurve" )
self.assertEqual( maya.cmds.getAttr(fn.fullPathName()+".outObjects[1]", type=True), "nurbsCurve" )
curveShape0 = maya.cmds.createNode( "nurbsCurve" )
curveShape1 = maya.cmds.createNode( "nurbsCurve" )
maya.cmds.connectAttr( fn.fullPathName()+ ".outObjects[0]", curveShape0 + '.create' )
maya.cmds.connectAttr( fn.fullPathName()+ ".outObjects[1]", curveShape1 + '.create' )
self.assertNotEqual( maya.cmds.pointPosition(curveShape0 + '.cv[0]' ), maya.cmds.pointPosition(curveShape1 + '.cv[0]' ) )
maya.cmds.setAttr( fn.fullPathName()+".queryConvertParameters[1]", "-index 0", type="string" )
self.assertEqual( maya.cmds.pointPosition(curveShape0 + '.cv[0]' ), maya.cmds.pointPosition(curveShape1 + '.cv[0]' ) )
def testPromotableAttributeNames( self ):
maya.cmds.file( new=True, force=True )
self.__setupTableProp()
sceneShapeFn = IECoreMaya.FnSceneShape.create( 'table' )
sceneShapeFn.findPlug( 'file' ).setString( FnSceneShapeTest.__testFile )
expectedAttrs = [
'user:testBool', 'user:testShort', 'user:testInt', 'user:testInt64', 'user:testFloat',
'user:testDouble', 'user:testString', 'user:testMatrixd', 'user:testMatrixf', 'scene:visible'
]
self.assertEquals( set( sceneShapeFn.promotableAttributeNames() ), set( expectedAttrs ) )
def testPromoteAttribute( self ):
maya.cmds.file( new=True, force=True )
self.__setupTableProp()
sceneShapeFn = IECoreMaya.FnSceneShape.create( 'table' )
sceneShapeFn.findPlug( 'file' ).setString( FnSceneShapeTest.__testFile )
for pAttr in sceneShapeFn.promotableAttributeNames():
sceneShapeFn.promoteAttribute( pAttr )
sceneShape = sceneShapeFn.fullPathName()
table = maya.cmds.listRelatives( sceneShape, parent=True )[0]
testVisibility = maya.cmds.getAttr( table + '.' + str( IECoreMaya.LiveScene.visibilityOverrideName ) )
testBool = maya.cmds.getAttr( table + '.ieAttr_testBool' )
testShort = maya.cmds.getAttr( table + '.ieAttr_testShort' )
testInt = maya.cmds.getAttr( table + '.ieAttr_testInt' )
testInt64 = maya.cmds.getAttr( table + '.ieAttr_testInt64' )
testFloat = maya.cmds.getAttr( table + '.ieAttr_testFloat' )
testDouble = maya.cmds.getAttr( table + '.ieAttr_testDouble' )
testString = maya.cmds.getAttr( table + '.ieAttr_testString' )
testMatrixd = maya.cmds.getAttr( table + '.ieAttr_testMatrixd' )
testMatrixf = maya.cmds.getAttr( table + '.ieAttr_testMatrixf' )
self.assertTrue( testVisibility )
self.assertTrue( testBool )
self.assertEquals( testShort, 2 )
self.assertEquals( testInt, 3 )
self.assertEquals( testInt64, 4 )
self.assertEquals( testFloat, 5. )
self.assertEquals( testDouble, 6. )
self.assertEquals( testString, 'seven' )
self.assertEquals( testMatrixd, [ 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23. ] )
self.assertEquals( testMatrixf, [ 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39. ] )
def tearDown( self ) :
if os.path.exists( FnSceneShapeTest.__testFile ) :
os.remove( FnSceneShapeTest.__testFile )
if __name__ == "__main__":
IECoreMaya.TestProgram( plugins = [ "ieCore" ] )
| 1.039063 | 1 |
01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Roberto/exc0071.py | moacirsouza/nadas | 1 | 12787753 | print("""
071) Create a program that simulates the operation of an ATM.
At the start, ask the user for the amount to be withdrawn (an integer),
and the program will report how many notes of each denomination will be dispensed.
NOTE: Assume the machine only stocks notes of R$50, R$20, R$10
and R$1.
""")
print('=' * 30)
print('{:^30}'.format('BANCO RM'))
print('=' * 30)
valordosaque = int(input('Enter the amount to be withdrawn: R$'))
total = valordosaque
cedula = 50
totalcedula = 0
while True:
if total >= cedula:
total -= cedula
        totalcedula += 1
else:
if totalcedula > 0:
            print('Total of {} note(s) of R${}'.format(totalcedula, cedula))
if cedula == 50:
cedula = 20
elif cedula == 20:
cedula = 10
elif cedula == 10:
cedula = 1
totalcedula = 0
if total == 0:
break
print('=' * 30)
print('Come back soon!!!')
| 4.1875 | 4 |
models/glove.py | felixnext/disaster_pipeline | 0 | 12787754 | '''Module to load and use GloVe Models.
Code Inspiration from:
https://www.kaggle.com/jhoward/improved-lstm-baseline-glove-dropout
'''
import os
import numpy as np
import pandas as pd
import urllib.request
from zipfile import ZipFile
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cluster import KMeans
folder = os.path.dirname(os.path.realpath(__file__))
def download(name):
'''Downloads the relevant dataset and extracts it.
Args:
name (str): Name of the model to download (options are: [twitter, wikipedia])
Returns:
True if successful, otherwise False
'''
url = None
if name == 'twitter':
url = 'http://nlp.stanford.edu/data/wordvecs/glove.twitter.27B.zip'
elif name == 'wikipedia':
url = 'http://nlp.stanford.edu/data/wordvecs/glove.840B.300d.zip'
if url is not None:
try:
urllib.request.urlretrieve(url, os.path.join(folder, '{}.zip'.format(name)))
        except Exception:
print("download failed")
return False
try:
# Create a ZipFile Object and load sample.zip in it
with ZipFile(os.path.join(folder, '{}.zip'.format(name)), 'r') as zipObj:
# Extract all the contents of zip file in current directory
zipObj.extractall(folder)
return True
        except Exception:
print("extraction failed")
return False
return False
class GloveEmbeddings:
'''Class to load embeddings model and generate it for words or sentences.'''
def __init__(self, name, dim=25):
# load data
self.emb = self.load_vectors(name, dim)
self.emb_size = dim
# calculate items for randomization (explicit convert to list to avoid numpy warning)
all_embs = np.stack(list(self.emb.values()))
self.emb_mean,self.emb_std = all_embs.mean(), all_embs.std()
def get_coefs(self, word, *arr):
'''Helper Function to transform the given vector into a float array.'''
return word, np.asarray(arr, dtype='float32')
def load_vectors(self, name, dim):
'''Load the given vector data.'''
# retrieve file name
file = None
if name == 'twitter':
file = os.path.join(folder, 'glove.{}.27B.{}d.txt'.format(name, dim))
elif name == 'wikipedia':
file = os.path.join(folder, 'glove.840B.{}d.txt'.format(dim))
else:
raise ValueError('Unkown model type ({})'.format(name))
# load the embeddings
with open(file, encoding='utf-8') as file:
embeddings_index = [self.get_coefs(*o.strip().split()) for o in file]
embeddings_index = list(filter(lambda x: len(x[1]) == dim, embeddings_index))
return dict(embeddings_index)
def word_vector(self,word):
'''Tries to retrieve the embedding for the given word, otherwise returns random vector.'''
# generate randomness otherwise
vec = self.emb.get(word)
return vec if vec is not None else np.random.normal(self.emb_mean, self.emb_std, (self.emb_size))
def sent_vector(self, sent, use_rand=True):
'''Generates a single embedding vector.
Args:
sent (list): List of tokenized words to use
use_rand (bool): Defines if unkown words should be filled with random vectors (otherwise only use known vectors)
Returns:
Single normalized Vector to be used as embedding
'''
vec = None
vec_count = 0
for word in sent:
wvec = self.emb.get(word)
if wvec is None and use_rand:
wvec = np.random.normal(self.emb_mean, self.emb_std, (self.emb_size))
if wvec is not None:
if vec is None:
vec = wvec
else:
vec += wvec
vec_count += 1
# normalize the vector
if vec is not None and vec_count > 0:
vec = vec / vec_count
# if no word is found return random vector
return vec if vec is not None else np.random.normal(self.emb_mean, self.emb_std, (self.emb_size))
def sent_matrix(self, sent, max_feat, pad, dedub=False):
'''Generates a Matrix of single embeddings for the item.
Args:
sent (list): List of tokenized words
max_feat (int): Number of maximal features to extract
pad (bool): Defines if the resulting matrix should be zero-padded to max_feat
dedub (bool): Defines if the word list should be de-duplicated
Returns:
2-D Matrix with dimensions [max_feat, embedding_size]
'''
# remove duplicates
if dedub:
sent = list(set(sent))
# setup matrix
nb_words = min(max_feat, len(sent))
embedding_matrix = np.random.normal(self.emb_mean, self.emb_std, (nb_words, self.emb_size))
# iterate through all words
for i, word in enumerate(sent):
if i >= max_feat: continue
vec = self.emb.get(word)
if vec is not None: embedding_matrix[i] = vec
# pad the matrix to max features
if pad and nb_words < max_feat:
            embedding_matrix = np.pad(embedding_matrix, ((0, max_feat - nb_words), (0, 0)), 'constant', constant_values=0)  # zero-pad the missing rows
return embedding_matrix
def centroid_vectors(self, sent, max_feat):
'''Generates a list of `max_feat` vectors to be used as representation.
Args:
sent (list): Tokenized words in the document
max_feat (int): Number of vectors to generate
Returns:
Array of centroid vectors for the given document
'''
# generate list of vectors (use set as order not relevant and to avoid duplicates)
vecs = []
for word in set(sent):
vec = self.emb.get(word)
if vec is not None: vecs.append(vec)
# return random vector if none found
if len(vecs) < max_feat:
return np.array(vecs + [np.random.normal(self.emb_mean, self.emb_std, (self.emb_size)) for i in range(max_feat - len(vecs))])
elif len(vecs) == max_feat:
return np.array(vecs)
# perform clustering
kmeans = KMeans(n_clusters=max_feat).fit(vecs)
# return the centroid vectors
return kmeans.cluster_centers_
class GloVeTransformer(BaseEstimator, TransformerMixin):
'''Transformer for the GloVe Model.'''
def __init__(self, name, dim, type, tokenizer, max_feat=None):
'''Create the Transformer.
Note that the centroid option might be slow.
Args:
name (str): Name of the model
dim (int): Number of dimensions to use
type (str): Type of the transformation (options are: ['word', 'sent', 'sent-matrix', 'centroid'])
tokenizer (fct): Function to tokenize the input data
max_feat (int): Number of maximal feature vectors used per input
'''
# safty checks
if type not in ['word', 'sent', 'sent-matrix', 'centroid']:
raise ValueError("Invalid value for type: ({})".format(type))
if type in ['sent-matrix', 'centroid'] and max_feat is None:
raise ValueError("Required value for max_feat for type ({})".format(type))
# set values
self.glove = GloveEmbeddings(name, dim)
self.type = type
self.tokenizer = tokenizer
self.max_feat = max_feat
def fit(self, x, y=None):
return self
def vectors(self, text):
'''Extracts the specified type of vector for the given input data.'''
# retrieve the vectors
tokens = self.tokenizer(text)
if self.type == 'word':
            return np.concatenate([self.glove.word_vector(tok) for tok in tokens])
elif self.type == 'sent':
return self.glove.sent_vector(tokens)
elif self.type == 'sent-matrix':
# note: use padding to avoid pipeline problems
return self.glove.sent_matrix(tokens, self.max_feat, True).reshape([-1])
elif self.type == 'centroid':
return self.glove.centroid_vectors(tokens, self.max_feat).reshape([-1])
return np.nan
def transform(self, X):
X_tagged = pd.Series(X).apply(lambda x: pd.Series(self.vectors(x)))
df = pd.DataFrame(X_tagged).fillna(0).replace([-np.inf], -1).replace([np.inf], 1)
return df
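# Illustrative usage sketch (an assumption, not part of the original module): it presumes the
# 25-dimensional Twitter vectors are available locally (see download('twitter') above) and that
# plain whitespace tokenization is good enough for the input text.
if __name__ == '__main__':
    # Average the word vectors of a tokenized sentence into a single 25-d embedding.
    embeddings = GloveEmbeddings('twitter', dim=25)
    sentence_vector = embeddings.sent_vector('a quick brown fox'.split())
    print(sentence_vector.shape)
    # The sklearn-compatible transformer wraps the same model for use inside a Pipeline.
    transformer = GloVeTransformer('twitter', 25, 'sent', lambda text: text.split())
    print(transformer.transform(['a quick brown fox', 'hello world']).shape)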
| 2.875 | 3 |
medium/103-Binary Tree Zigzag Level Order Traversal.py | Davidxswang/leetcode | 2 | 12787755 | """
https://leetcode.com/problems/binary-tree-zigzag-level-order-traversal/
Given a binary tree, return the zigzag level order traversal of its nodes' values. (ie, from left to right, then right to left for the next level and alternate between).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its zigzag level order traversal as:
[
[3],
[20,9],
[15,7]
]
"""
# Time complexity: O(n), space complexity: O(n).
# Two stacks are used for clarity: one holds the level that is read from left to right,
# the other holds the level that is read from right to left. The only subtlety is the
# order in which each node's left and right children are pushed onto the opposite stack.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
result = [[]]
stack_toright = [root]
stack_toleft = []
toright = True
size = 1
while stack_toright or stack_toleft:
node = stack_toright.pop() if toright else stack_toleft.pop()
size -= 1
if node:
result[-1].append(node.val)
if toright:
if node.left:
stack_toleft.append(node.left)
if node.right:
stack_toleft.append(node.right)
else:
if node.right:
stack_toright.append(node.right)
if node.left:
stack_toright.append(node.left)
if size == 0 and (stack_toright or stack_toleft):
size = len(stack_toright) + len(stack_toleft)
result.append([])
toright = not toright
if result and not result[-1]:
result.pop()
return result
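# Quick sanity check (illustrative; it relies on the TreeNode definition commented out above
# and on List from the typing module, both of which the LeetCode environment provides):
#
#   root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
#   print(Solution().zigzagLevelOrder(root))   # -> [[3], [20, 9], [15, 7]]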
| 4.0625 | 4 |
simulation-ros/src/turtlebot2i/turtlebot2i_safety/src/test_run.py | EricssonResearch/scott-eu | 19 | 12787756 | <filename>simulation-ros/src/turtlebot2i/turtlebot2i_safety/src/test_run.py
#!/usr/bin/env python
"""
Edited from navigation.py in turtlebot2i_navigation module
"""
import rospy
import actionlib
from nav_msgs.msg import Odometry
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import geometry_msgs.msg
from sensor_msgs.msg import LaserScan
from tf.transformations import quaternion_from_euler
import math
from std_msgs.msg import Float64
from turtlebot2i_safety.msg import SafetyZone
import numpy as np
from kobuki_msgs.msg import BumperEvent
def init_var():
#Here we initialize the global variables.
global time_start, prev_pos, init_pos, pose_cb_count, travelled_distance, sum_distance_to_goal, goal
goal = MoveBaseGoal()
time_start = rospy.get_time()
init_pos = geometry_msgs.msg.Point()
prev_pos = geometry_msgs.msg.Point()
curr_pos = geometry_msgs.msg.Point()
pose_cb_count = 0
travelled_distance = 0.0 #the less the better
sum_distance_to_goal = 0.0 #the less the better
global n_sensors, lidar_cb_count, sum_mean_obs_distance, min_obs_distance, collision_flag, n_collision, collision_distance
n_sensors = 675 #684 #if lidar on top: 684 data, if lidar in front of robot: 675 data
lidar_cb_count = 0
sum_mean_obs_distance = 0.0 #SM1
min_obs_distance = [1000.0] * n_sensors #SM2
collision_flag = False
n_collision = 0
collision_distance = 0.20
global risk_sum, risk_count, risk_max, risk_x_speed_sum, risk_x_speed_max
risk_sum = 0.0
risk_count = 0
risk_max = -1.0
risk_x_speed_sum = 0.0
risk_x_speed_max = -1.0
global r_warning, r_critical, obstacle_zone, prev_obstacle_zone, clear_zone, warning_zone, critical_zone, warning_duration, critical_duration
r_warning = 0.0
r_critical = 0.0
obstacle_zone, prev_obstacle_zone = 0, 0
clear_zone, warning_zone, critical_zone = 0,1,2
warning_duration = 0.0
critical_duration = 0.0
global robot_speed, sum_speed, speed_cb_count
robot_speed = 0.0
sum_speed = 0.0
speed_cb_count = 0
def move_to_goal():
global goal, client
#client = actionlib.SimpleActionClient('move_base',MoveBaseAction)
#client = actionlib.SimpleActionClient('turtlebot2i_0/move_base', MoveBaseAction)
client = actionlib.SimpleActionClient('turtlebot2i/move_base', MoveBaseAction)
client.wait_for_server()
goal.target_pose.header.frame_id = "map"
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose.position.x = -2.0 #2.0
goal.target_pose.pose.position.y = -2.5 #4.0
goal.target_pose.pose.position.z = 0.063 #1.34851861
#goal.target_pose.pose.orientation.w = 1.0
#copied from navi_goal_talker
orientation=geometry_msgs.msg.Quaternion()
yaw = -180*math.pi/180 #unit: from deg. to rad.
orientation=quaternion_from_euler(0,0,yaw)#(roll, pitch,yaw) # return an array
goal.target_pose.pose.orientation.x=0.0
goal.target_pose.pose.orientation.y=0.0
goal.target_pose.pose.orientation.z=orientation[2]
goal.target_pose.pose.orientation.w=orientation[3]
client.send_goal(goal)
print("Goal position is sent! waiting the robot to finish....")
wait = client.wait_for_result(timeout=rospy.Duration(1200.0)) #timeout in seconds
if not wait:
rospy.logerr("Action server not available or timeout!")
rospy.signal_shutdown("Action server not available!")
def move_to_start():
global goal, client
goal.target_pose.header.frame_id = "map"
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose.position.x = -4.5 #2.0 #-4.5,-3.5
goal.target_pose.pose.position.y = -3.5 #4.0
goal.target_pose.pose.position.z = 0.063 #1.34851861
#goal.target_pose.pose.orientation.w = 1.0
#copied from navi_goal_talker
orientation=geometry_msgs.msg.Quaternion()
yaw = -90*math.pi/180 #unit: from deg. to rad.
orientation=quaternion_from_euler(0,0,yaw)#(roll, pitch,yaw) # return an array
goal.target_pose.pose.orientation.x=0.0
goal.target_pose.pose.orientation.y=0.0
goal.target_pose.pose.orientation.z=orientation[2]
goal.target_pose.pose.orientation.w=orientation[3]
client.send_goal(goal)
print("Goal position is sent! waiting the robot to finish....")
wait = client.wait_for_result(timeout=rospy.Duration(1200.0)) #timeout in seconds
if not wait:
rospy.logerr("Action server not available or timeout!")
rospy.signal_shutdown("Action server not available!")
def distance2D(pos1, pos2):
return math.sqrt((pos1.x - pos2.x)**2 + (pos1.y - pos2.y)**2)
def init_subscription():
time_start = rospy.get_time()
rospy.Subscriber('/turtlebot2i/sensors/global_pose', geometry_msgs.msg.PoseStamped, update_pose_callback)
rospy.Subscriber('/turtlebot2i/lidar/scan_transformed', LaserScan, lidar_callback) #this is used with Risk mitigation
rospy.Subscriber('/turtlebot2i/safety/risk_val', Float64, risk_callback)
rospy.Subscriber('/turtlebot2i/safety/safety_zone', SafetyZone, safety_zone_callback)
rospy.Subscriber('/turtlebot2i/odom', Odometry, speed_callback)
rospy.Subscriber('/turtlebot2i/events/bumper', BumperEvent, bumper_callback)
def summarize_running_test():
global warning_duration, critical_duration
rospy.loginfo("Goal reached!")
time_finish = rospy.get_time()
duration = time_finish - time_start
if obstacle_zone == warning_zone:
warning_time_end = rospy.get_time()
warning_duration += (warning_time_end-warning_time_start)
elif obstacle_zone == critical_zone:
critical_time_end = rospy.get_time()
critical_duration += (critical_time_end - critical_time_start)
safe_duration = duration-(warning_duration+critical_duration)
rospy.loginfo("1. Time execution : %1.4f s (less is better)",duration)
rospy.loginfo(" a) Safe (green) zone : %1.4f s (%.1f%%) (more is better)",safe_duration, 100*safe_duration/duration)
rospy.loginfo(" b) Warning (yellow) zone : %1.4f s (%.1f%%) (less is better)",warning_duration, 100*warning_duration/duration)
rospy.loginfo(" c) Critical (red )zone : %1.4f s (%.1f%%) (less is better)",critical_duration, 100*critical_duration/duration)
rospy.loginfo("2. Travelled distance : %1.4f m (less is better)",travelled_distance)
rospy.loginfo("3. Average speed : %1.4f m/s(more is better)",sum_speed/speed_cb_count)
rospy.loginfo("4. Mean distance to goal : %1.4f m (less is better)",sum_distance_to_goal/pose_cb_count)
rospy.loginfo("5. SM1 mean distance to obstacle : %1.4f m (more is better)",sum_mean_obs_distance/lidar_cb_count)
rospy.loginfo("6. SM2 mean minimum distance to obstacle : %1.4f m (more is better)",sum(min_obs_distance)/n_sensors)
rospy.loginfo("7. Minimum distance to obstacle : %1.4f m (more is better)",min(min_obs_distance))
rospy.loginfo("8. Number of collision during operation : %d (less is better)",n_collision)
rospy.loginfo("9. Risk mean(average) value : %1.4f (less is better)",risk_sum/risk_count)
rospy.loginfo("10.Risk maximum value : %1.4f (less is better)",risk_max)
rospy.loginfo("11.Risk x speed mean(average) value : %1.4f (less is better)",risk_x_speed_sum/risk_count)
rospy.loginfo("12.Risk x speed maximum value : %1.4f (less is better)",risk_x_speed_max)
np.savez_compressed('result_x.npz', duration=duration, safe_duration=safe_duration, warning_duration=warning_duration, critical_duration=critical_duration, travelled_distance=travelled_distance,
average_speed=sum_speed/speed_cb_count, mean_distance_to_goal=sum_distance_to_goal/pose_cb_count, sm1=sum_mean_obs_distance/lidar_cb_count, sm2=sum(min_obs_distance)/n_sensors,
min_distance_to_obstacle=min(min_obs_distance), n_collision=n_collision, risk_mean=risk_sum/risk_count, risk_max=risk_max, risk_speed_mean=risk_x_speed_sum/risk_count, risk_speed=risk_x_speed_max)
def update_pose_callback(data):
global prev_pos, init_pos, travelled_distance, sum_distance_to_goal, pose_cb_count
pose_cb_count += 1
if distance2D(data.pose.position, goal.target_pose.pose.position) < 0.2: #check distance to goal
print("goal reached!")
client.cancel_all_goals()
if prev_pos == init_pos:
prev_pos = data.pose.position
sum_distance_to_goal = distance2D(prev_pos, goal.target_pose.pose.position)
else:
curr_pos = data.pose.position
travelled_distance += distance2D(curr_pos, prev_pos)
sum_distance_to_goal += distance2D(curr_pos, goal.target_pose.pose.position)
prev_pos = data.pose.position
def lidar_callback(data):
global n_sensors, lidar_cb_count, sum_mean_obs_distance, min_obs_distance, collision_flag, n_collision, collision_distance
global r_warning, r_critical, obstacle_zone, prev_obstacle_zone, clear_zone, warning_zone, critical_zone
global warning_duration, critical_duration, warning_time_start, critical_time_start
#sensor_reads = data.ranges
#ignore 4 first data and 5 last data
sensor_reads = data.ranges[4:n_sensors+4]
#print(sensor_reads[0], sensor_reads[-1])
#print("Lidar callback", len(sensor_reads), min(sensor_reads), max(sensor_reads))
sum_mean_obs_distance += (sum(sensor_reads)/n_sensors)
lidar_cb_count += 1
for i in range(n_sensors):
min_obs_distance[i] = min(min_obs_distance[i], sensor_reads[i])
min_dist_to_obstacle = min(sensor_reads)
prev_obstacle_zone = obstacle_zone
if min_dist_to_obstacle > r_warning:
obstacle_zone = clear_zone
#print("clear_zone")
elif min_dist_to_obstacle > r_critical:
obstacle_zone = warning_zone
#print("warning_zone")
else:
obstacle_zone = critical_zone
#print("critical_zone")
if obstacle_zone!=prev_obstacle_zone:
#print("prev_obstacle_zone: ", prev_obstacle_zone, "| obstacle_zone:",obstacle_zone)
if obstacle_zone == warning_zone:
warning_time_start = rospy.get_time()
elif obstacle_zone == critical_zone:
critical_time_start = rospy.get_time()
if prev_obstacle_zone == warning_zone:
warning_time_end = rospy.get_time()
warning_duration += (warning_time_end-warning_time_start)
elif prev_obstacle_zone == critical_zone:
critical_time_end = rospy.get_time()
critical_duration += (critical_time_end - critical_time_start)
def bumper_callback(data):
global n_collision, collision_flag
if data.state == 1 and not collision_flag:
print("collision happen!")
collision_flag = True
n_collision += 1
elif data.state == 0 and collision_flag:
collision_flag = False
def risk_callback(risk_value):
global risk_sum, risk_count, risk_max, risk_x_speed_sum, risk_x_speed_max
risk_sum += risk_value.data
risk_count += 1
risk_max = max(risk_max, risk_value.data)
risk_x_speed = risk_value.data*robot_speed
risk_x_speed_sum += risk_x_speed
risk_x_speed_max = max(risk_x_speed_max, risk_x_speed)
def safety_zone_callback(data):
global r_warning, r_critical
r_warning = data.warning_zone_radius
r_critical = data.critical_zone_radius
def speed_callback(data):
global robot_speed, sum_speed, speed_cb_count
robot_speed = math.sqrt(data.twist.twist.linear.x**2 + data.twist.twist.linear.y**2)
sum_speed += robot_speed
speed_cb_count += 1
if __name__ == '__main__':
try:
rospy.init_node('test_run_py')
init_var()
init_subscription()
move_to_goal()
move_to_start()
move_to_goal()
summarize_running_test()
rospy.loginfo("Running test done!")
except rospy.ROSInterruptException:
rospy.loginfo("Navigation test finished.")
| 2.578125 | 3 |
guildwars2/database.py | Maselkov/GW2Bot | 75 | 12787757 | <filename>guildwars2/database.py
import asyncio
import collections
import datetime
import re
import time
import discord
from discord.ext import commands
from discord_slash.context import ComponentContext
from discord_slash.utils.manage_components import (create_actionrow,
create_select,
create_select_option,
wait_for_component)
from pymongo import ReplaceOne
from pymongo.errors import BulkWriteError
from .exceptions import APIError, APIKeyError
class DatabaseMixin:
@commands.group(case_insensitive=True)
@commands.is_owner()
async def database(self, ctx):
"""Commands related to DB management"""
if ctx.invoked_subcommand is None:
await ctx.send_help(ctx.command)
return
@database.command(name="create")
async def db_create(self, ctx):
"""Create a new database
"""
await self.rebuild_database()
async def upgrade_legacy_guildsync(self, guild):
doc = await self.bot.database.get(guild, self)
sync = doc.get("sync")
if not sync:
return False
if not sync.get("setupdone") or not sync.get("on"):
return False
key = sync.get("leader_key", None)
if not key:
return False
sync_doc = {}
ranks_to_role_ids = sync.get("ranks")
purge = sync.get("purge", False)
guild_role_name = sync.get("name")
guild_role_id = None
gid = sync["gid"]
base_ep = f"guild/{gid}"
try:
info = await self.call_api(base_ep)
except APIError:
print("No such guild exists. Skipping.")
return False
try:
await self.call_api(base_ep + "/members", key=key)
except APIError:
print("Invalid key or permissions")
return False
tag_role = None
if guild_role_name:
guild_role_id = ranks_to_role_ids.pop(guild_role_name, None)
if guild_role_id:
tag_role = guild_role_id
sync_doc = {
"guild_id": guild.id,
"enabled": {
"tag": sync.get("guildrole", False) and sync["on"],
"ranks": sync["on"]
},
"gid": gid,
"name": info["name"],
"tag": info["tag"],
"tag_role": tag_role,
"rank_roles": ranks_to_role_ids,
"key": key
}
if await self.can_add_sync(guild, gid):
await self.db.guildsyncs.insert_one(sync_doc)
await self.bot.database.set(guild, {
"guildsync.enabled": True,
"guildsync.purge": purge
}, self)
return True
return False
@database.command(name="update_legacy_guildsync")
async def db_update_guildsync(self, ctx, guild: int = None):
if not guild:
conversions = 0
for guild in self.bot.guilds:
res = await self.upgrade_legacy_guildsync(guild)
if res:
conversions += 1
await asyncio.sleep(0.2)
await ctx.send(f"{conversions} successful")
return
guild = self.bot.get_guild(guild)
if not guild:
return await ctx.send("Nope")
done = await self.upgrade_legacy_guildsync(guild)
if done:
await ctx.send("Successful conversion")
else:
await ctx.send("Encountered error")
pass
@database.command(name="getwvwdata")
async def db_getwvwdata(self, ctx, guild: int = None):
"""Get historical wvw population data. Might not work"""
await self.get_historical_world_pop_data()
await ctx.send("Done")
@database.command(name="statistics")
async def db_stats(self, ctx):
"""Some statistics """
result = await self.bot.database.users.count_documents(
{"cogs.GuildWars2.key": {
"$ne": None
}}, self)
await ctx.send("{} registered users".format(result))
async def get_title(self, title_id):
try:
results = await self.db.titles.find_one({"_id": title_id})
title = results["name"]
except (KeyError, TypeError):
return ""
return title
async def get_world_name(self, wid):
try:
doc = await self.db.worlds.find_one({"_id": wid})
name = doc["name"]
except KeyError:
name = None
return name
async def get_world_id(self, world):
world = re.escape(world)
world = "^" + world + "$"
search = re.compile(world, re.IGNORECASE)
if world is None:
return None
doc = await self.db.worlds.find_one({"name": search})
if not doc:
return None
return doc["_id"]
async def fetch_statname(self, item):
statset = await self.db.itemstats.find_one({"_id": item})
try:
name = statset["name"]
        except (KeyError, TypeError):
name = ""
return name
async def fetch_item(self, item):
return await self.db.items.find_one({"_id": item})
async def fetch_key(self, user, scopes=None):
doc = await self.bot.database.get_user(user, self)
if not doc or "key" not in doc or not doc["key"]:
raise APIKeyError(
"No API key associated with {.mention}. "
"Add your key using `$key add` command. If you don't know "
"how, the command includes a tutorial.".format(user))
if scopes:
missing = []
for scope in scopes:
if scope not in doc["key"]["permissions"]:
missing.append(scope)
if missing:
missing = ", ".join(missing)
raise APIKeyError(
"{.mention}, your API key is missing the following "
"permissions to use this command: `{}`\nConsider adding "
"a new key with those permissions "
"checked".format(user, missing))
return doc["key"]
async def cache_dailies(self, *, tomorrow=False):
if not tomorrow:
try:
await self.cache_endpoint("achievements")
except Exception:
pass
try:
ep = "achievements/daily"
if tomorrow:
ep += "/tomorrow"
results = await self.call_api(
ep, schema_string="2021-07-15T13:00:00.000Z")
doc = {}
for category, dailies in results.items():
daily_list = []
for daily in dailies:
if not daily["level"]["max"] == 80:
continue
required_access = daily.get("required_access", {})
if required_access.get("condition", "") == "NoAccess":
continue
daily_doc = await self.db.achievements.find_one(
{"_id": daily["id"]})
if not daily_doc:
continue
name = daily_doc["name"]
if category == "fractals":
if name.startswith(
"Daily Tier"
) and not name.startswith("Daily Tier 4"):
continue
daily_list.append(name)
daily_list.sort()
if category == "pve":
daily_list.extend(self.get_lw_dailies(tomorrow=tomorrow))
doc[category] = daily_list
offset = 0
if tomorrow:
offset = 1
doc["psna"] = [self.get_psna(offset_days=offset)]
doc["psna_later"] = [self.get_psna(offset_days=1 + offset)]
key = "cache.dailies"
if tomorrow:
key += "_tomorrow"
await self.bot.database.set_cog_config(self, {key: doc})
except Exception as e:
self.log.exception("Exception caching dailies: ", exc_info=e)
if not tomorrow:
await self.cache_dailies(tomorrow=True)
async def cache_raids(self):
raids = []
raids_index = await self.call_api("raids")
for raid in raids_index:
raids.append(await self.call_api("raids/" + raid))
await self.bot.database.set_cog_config(self, {"cache.raids": raids})
async def cache_pois(self):
async def bulk_write(group):
requests = []
for item in group:
item["_id"] = item.pop("id")
requests.append(
ReplaceOne({"_id": item["_id"]}, item, upsert=True))
try:
await self.db.pois.bulk_write(requests)
except BulkWriteError as e:
self.log.exception("BWE while caching continents")
continents = await self.call_api("continents?ids=all")
pois = []
for continent in continents:
floors = await self.call_api(
f"continents/{continent['id']}/floors?ids=all")
for floor in floors:
for region in floor["regions"].values():
for game_map in region["maps"].values():
for poi in game_map["points_of_interest"].values():
del poi["chat_link"]
poi["continent_id"] = continent["id"]
pois.append(poi)
if len(pois) > 200:
await bulk_write(pois)
pois = []
print("Continents done")
async def get_raids(self):
config = await self.bot.database.get_cog_config(self)
return config["cache"].get("raids")
async def cache_endpoint(self, endpoint, all_at_once=False):
        async def bulk_write(item_group):
            requests = []
            for item in item_group:
item["_id"] = item.pop("id")
requests.append(
ReplaceOne({"_id": item["_id"]}, item, upsert=True))
try:
await self.db[endpoint.replace("/", "_")].bulk_write(requests)
except BulkWriteError as e:
self.log.exception("BWE while caching {}".format(endpoint),
exc_info=e)
items = await self.call_api(endpoint,
schema_string="2021-07-15T13:00:00.000Z")
if not all_at_once:
counter = 0
total = len(items)
while True:
percentage = (counter / total) * 100
print("Progress: {0:.1f}%".format(percentage))
ids = ",".join(str(x) for x in items[counter:counter + 200])
if not ids:
print("{} done".format(endpoint))
break
itemgroup = await self.call_api(
f"{endpoint}?ids={ids}",
schema_string="2021-07-15T13:00:00.000Z")
await bulk_write(itemgroup)
counter += 200
else:
itemgroup = await self.call_api(
"{}?ids=all".format(endpoint),
schema_string="2021-07-15T13:00:00.000Z")
await bulk_write(itemgroup)
async def rebuild_database(self):
start = time.time()
self.bot.available = False
await self.bot.change_presence(
activity=discord.Game(name="Rebuilding API cache"),
status=discord.Status.dnd)
endpoints = [["items"], ["achievements"], ["itemstats", True],
["titles", True], ["recipes"], ["skins"],
["currencies", True], ["skills", True],
["specializations", True], ["traits", True],
["worlds", True], ["minis", True], ["pvp/amulets", True],
["professions", True], ["legends", True], ["pets", True],
["outfits", True], ["colors", True]]
for e in endpoints:
try:
await self.cache_endpoint(*e)
except:
msg = "Caching {} failed".format(e)
self.log.warn(msg)
owner = self.bot.get_user(self.bot.owner_id)
await owner.send(msg)
await self.db.items.create_index("name")
await self.db.achievements.create_index("name")
await self.db.titles.create_index("name")
await self.db.recipes.create_index("output_item_id")
await self.db.skins.create_index("name")
await self.db.currencies.create_index("name")
await self.db.skills.create_index("name")
await self.db.worlds.create_index("name")
await self.cache_raids()
await self.cache_pois()
end = time.time()
await self.bot.change_presence()
self.bot.available = True
print("Done")
self.log.info("Database done! Time elapsed: {} seconds".format(end -
start))
async def itemname_to_id(self,
ctx,
item,
*,
flags=[],
filters={},
database="items",
group_duplicates=False,
prompt_user=False,
component_context=None,
limit=125,
hidden=False,
placeholder="Select the item you want..."
): # TODO cleanup this monstrosity
def consolidate_duplicates(items):
unique_items = collections.OrderedDict()
for item in items:
item_tuple = item["name"], item["rarity"], item["type"]
if item_tuple not in unique_items:
unique_items[item_tuple] = []
unique_items[item_tuple].append(item["_id"])
unique_list = []
for k, v in unique_items.items():
unique_list.append({
"name": k[0],
"rarity": k[1],
"ids": v,
"type": k[2]
})
return unique_list
item_sanitized = re.escape(item)
search = re.compile(item_sanitized + ".*", re.IGNORECASE)
query = {"name": search, "flags": {"$nin": flags}, **filters}
number = await self.db[database].count_documents(query)
if not number:
await ctx.send(
"Your search gave me no results, sorry. Check for "
"typos.\nAlways use singular forms, e.g. Legendary Insight")
return None
cursor = self.db[database].find(query)
if number > limit: # TODO multiple selections for 125 items.
await ctx.send("Your search gave me {} item results. "
"Please be more specific".format(number))
return None
items = []
async for item in cursor:
items.append(item)
items.sort(key=lambda i: i["name"])
if group_duplicates:
distinct_items = consolidate_duplicates(items)
else:
for item in items:
item["ids"] = [item["_id"]]
distinct_items = items
if len(distinct_items) == 1:
if not prompt_user:
return distinct_items
return distinct_items, None
if not prompt_user:
return distinct_items
rows = []
options = []
for i, item in enumerate(
sorted(distinct_items, key=lambda c: c["name"]), 1):
if not i % limit:
rows.append(options)
options = []
emoji = self.get_emoji(ctx, item["type"], return_obj=True)
options.append(
create_select_option(item["name"],
i - 1,
description=item["rarity"],
emoji=emoji or None))
rows.append(options)
action_rows = []
for row in rows:
ph = placeholder
if len(rows) > 1:
first_letter = row[0]["label"][0]
last_letter = row[-1]["label"][0]
if first_letter != last_letter:
ph += f" [{first_letter}-{last_letter}]"
else:
ph += f" [{first_letter}]"
action_rows.append(
create_actionrow(
create_select(row,
min_values=1,
max_values=1,
placeholder=ph)))
if len(rows) > 1:
content = "Due to Discord limitations, your selection had been split into several."
else:
content = "** **"
if component_context:
await component_context.edit_origin(content=content,
components=action_rows)
else:
msg = await ctx.send(content,
components=action_rows,
hidden=hidden)
def tell_off(answer):
self.bot.loop.create_task(
answer.send("Only the command owner may do that.",
hidden=True))
try:
while True:
answer = await wait_for_component(self.bot,
components=action_rows,
timeout=120)
if answer.author != ctx.author:
tell_off(answer)
continue
index = int(answer.selected_options[0])
return distinct_items[index], answer
except asyncio.TimeoutError:
if component_context:
await component_context.edit_origin(content="Timed out.",
components=None)
else:
await msg.edit(content="Timed out.", components=None)
return None, None
# for item in items:
# if item["_id"] in choice["ids"]:
# if item["type"] == "UpgradeComponent":
# choice["is_upgrade"] = True
# return choice
async def selection_menu(self,
ctx,
cursor,
number,
*,
filter_callable=None):
# TODO implement fields
def check(m):
return m.channel == ctx.channel and m.author == ctx.author
if not number:
await ctx.send(
"Your search gave me no results, sorry. Check for "
"typos.\nAlways use singular forms, e.g. Legendary Insight")
return None
if number > 25:
await ctx.send("Your search gave me {} item results. "
"Please be more specific".format(number))
return None
items = []
async for item in cursor:
items.append(item)
key = "name"
if filter_callable:
items = filter_callable(items)
items.sort(key=lambda i: i[key])
options = []
if len(items) != 1:
for i, item in enumerate(items):
options.append(create_select_option(item[key], value=i))
select = create_select(min_values=1,
max_values=1,
options=options,
placeholder="Select the item you want")
components = [create_actionrow(select)]
msg = await ctx.send("** **", components=components)
try:
answer: ComponentContext = await self.bot.wait_for(
"component",
timeout=120,
check=lambda context: context.author == ctx.author and
context.origin_message.id == msg.id)
choice = items[int(answer.selected_options[0])]
await answer.defer(edit_origin=True)
return (choice, answer)
except asyncio.TimeoutError:
await msg.edit(content="No response in time", components=None)
return None
else:
choice = items[0]
return choice
async def get_historical_world_pop_data(self):
# This might break in the future, but oh well
url = "https://pop.apfelcreme.net/serverinfo.php?id={}"
cursor = self.db.worlds.find({})
async for world in cursor:
try:
world_id = world["_id"]
async with self.session.get(url.format(world_id)) as r:
data = await r.json()
for entry in data:
pop = self.population_to_int(entry["population"])
if not entry["time_stamp"]:
continue
date = datetime.datetime.fromtimestamp(
entry["time_stamp"] / 1000)
doc = {
"population": pop,
"world_id": world_id,
"date": date
}
await self.db.worldpopulation.replace_one(
{
"world_id": world_id,
"date": date
},
doc,
upsert=True)
print("added " + world["name"] + ": " + str(pop))
except Exception as e:
print(f"Unable to get data for world: {world['name']}\n{e}")
| 2.21875 | 2 |
syn_net/data_generation/_mp_make.py | lilleswing/SynNet | 14 | 12787758 | """
This file contains a function to generate a single synthetic tree, prepared for
multiprocessing.
"""
import pandas as pd
import numpy as np
# import dill as pickle
# import gzip
from syn_net.data_generation.make_dataset import synthetic_tree_generator
from syn_net.utils.data_utils import ReactionSet
path_reaction_file = '/pool001/whgao/data/synth_net/st_pis/reactions_pis.json.gz'
path_to_building_blocks = '/pool001/whgao/data/synth_net/st_pis/enamine_us_matched.csv.gz'
building_blocks = pd.read_csv(path_to_building_blocks, compression='gzip')['SMILES'].tolist()
r_set = ReactionSet()
r_set.load(path_reaction_file)
rxns = r_set.rxns
# with gzip.open(path_reaction_file, 'rb') as f:
# rxns = pickle.load(f)
print('Finish reading the templates and building blocks list!')
def func(_):
np.random.seed(_)
tree, action = synthetic_tree_generator(building_blocks, rxns, max_step=15)
return tree, action
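# A minimal driver sketch (not part of the original module) showing how `func`
# might be fanned out with multiprocessing; the pool size and the tree count
# (64) are illustrative assumptions, not values taken from the repo.
if __name__ == "__main__":
    from multiprocessing import Pool

    with Pool(processes=4) as pool:
        # each worker seeds numpy from its index and builds one synthetic tree
        results = pool.map(func, range(64))
    trees = [tree for tree, action in results]
    print("Generated {} synthetic trees".format(len(trees)))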
| 2.953125 | 3 |
moya/testprojects/expose/site/py/exposed.py | moyaproject/moya | 129 | 12787759 | from __future__ import unicode_literals
from moya.expose import View
class TestView(View):
name = "hello"
def get(self, context):
return "Hello, World"
| 1.882813 | 2 |
src/auth.py | bplusv/ufs-casting-agency | 0 | 12787760 | <filename>src/auth.py
import os
from functools import wraps
import json
import enum
from flask import request, _request_ctx_stack, abort
from urllib.request import urlopen
from jose import jwt
class UserRole(enum.Enum):
CASTING_ASSISTANT = 1
CASTING_DIRECTOR = 2
EXECUTIVE_PRODUCER = 3
class Auth:
@staticmethod
def get_token_auth_header():
auth = request.headers.get('Authorization', None)
if not auth:
abort(401, description='Unauthorized: Authorization '
'header is expected')
parts = auth.split()
if parts[0].lower() != 'bearer':
abort(401, description='Unauthorized: Authorization header '
'must start with bearer')
elif len(parts) == 1:
abort(401, description='Unauthorized: Token not found')
elif len(parts) > 2:
abort(401, description='Unauthorized: Authorization header '
'must be bearer token')
token = parts[1]
return token
@staticmethod
def check_permissions(permission, payload):
if 'permissions' not in payload:
abort(403, description='Forbidden: Permissions payload missing')
if permission not in payload['permissions']:
abort(403, description='Forbidden: Not enough permissions')
return True
@staticmethod
def verify_decode_jwt(token):
AUTH0_DOMAIN = os.environ.get('AUTH0_DOMAIN')
API_AUDIENCE = os.environ.get('API_AUDIENCE')
ALGORITHMS = os.environ.get('ALGORITHMS')
try:
jsonurl = urlopen(f'https://{AUTH0_DOMAIN}/.well-known/jwks.json')
jwks = json.loads(jsonurl.read())
unverified_header = jwt.get_unverified_header(token)
rsa_key = {}
for key in jwks['keys']:
if key['kid'] == unverified_header['kid']:
rsa_key = {
'kty': key['kty'],
'kid': key['kid'],
'use': key['use'],
'n': key['n'],
'e': key['e']
}
if rsa_key:
try:
payload = jwt.decode(
token,
rsa_key,
algorithms=ALGORITHMS,
audience=API_AUDIENCE,
issuer=f'https://{AUTH0_DOMAIN}/'
)
return payload
except jwt.ExpiredSignatureError:
abort(401, description='Unauthorized: JWT Token expired')
except jwt.JWTClaimsError:
abort(401, description='Unauthorized: JWT Incorrect '
'claims, please check')
except Exception:
abort(401, description='Unauthorized: JWT cannot parse token')
abort(401, description='Unauthorized: JWT unable to find kid key')
def requires_auth(permission=''):
def requires_auth_decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
token = Auth.get_token_auth_header()
payload = Auth.verify_decode_jwt(token)
if Auth.check_permissions(permission, payload):
_request_ctx_stack.top.current_user = payload
return f(*args, **kwargs)
return wrapper
return requires_auth_decorator
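# Illustrative use of the decorator on a Flask route (the endpoint and the
# permission string are assumptions for this sketch, not part of this module):
#
#   @app.route('/movies')
#   @requires_auth('get:movies')
#   def get_movies():
#       ...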
| 2.5625 | 3 |
CLIStubs/SwitchBigCLI.py | vikin91/sdn-switches-benchmarking-framework | 0 | 12787761 | <gh_stars>0
import logging
from SSHConnection import SSHConnection
class SwitchBigCLI(SSHConnection):
def __init__(self, switch_description):
log_namespace = "Monitor.Switch"+switch_description["model"]+'CLI'
self.logger = logging.getLogger(log_namespace)
self.log_id = "Switch"+switch_description["model"]+":"+switch_description["ip"]+": "
SSHConnection.__init__(self,switch_description["ip"],switch_description["user"],
switch_description["password"],switch_description["cli_name"],log_namespace)
self.OF_INSTANCE = switch_description["openflow_instance_name"]
self.connect()
self.COMMAND_SHOW_CPU = "display cpu-usage"
self.COMMAND_RESET_OPENFLOW = ["openflow instance "+self.OF_INSTANCE, "undo active instance", "active instance", "quit"]
self.COMMAND_GET_FLOWS_COUNT = "display openflow instance "+str(self.OF_INSTANCE)
self.ENABLE_EXTENSIBILITY_TABLE = ["openflow instance "+self.OF_INSTANCE, "undo active instance", "flow-table extensibility 20", "active instance", "quit"]
self.ENABLE_MACIP_TABLE = ["openflow instance "+self.OF_INSTANCE, "undo active instance", "flow-table mac-ip 10", "active instance", "quit"]
self.executeCommand(["system-view"])
def resetOpenflowInstance(self):
if self.isConnected==False:
self.logger.error(self.log_id+" Not connected!")
return
command = self.COMMAND_RESET_OPENFLOW
if self.executeCommand(command):
self.logger.debug(self.log_id+"OpenFlow has been reset!")
else:
self.logger.error(self.log_id+" Error resetting OpenFlow. Connection error!")
def enableExtensibilityTable(self):
if self.isConnected==False:
self.logger.error(self.log_id+" Not connected!")
return
command = self.ENABLE_EXTENSIBILITY_TABLE
if self.executeCommand(command):
self.logger.debug(self.log_id+"Extensibility tables has been enabled, ID = 20!")
else:
self.logger.error(self.log_id+" Error changing OpenFlow table. Connection error!")
def enableMacIPTable(self):
if self.isConnected==False:
self.logger.error(self.log_id+" Not connected!")
return
command = self.ENABLE_MACIP_TABLE
if self.executeCommand(command):
self.logger.debug(self.log_id+"MAC-IP table has been enabled, ID = 10!")
else:
self.logger.error(self.log_id+" Error changing OpenFlow table. Connection error!")
def getFlowsCountOverCLI(self):
"""
:rtype: [hardware_flows_count]
"""
if(self.isConnected==False):
self.logger.error(self.log_id+"Not connected!")
return [0,0]
self.connection.sendline(self.COMMAND_GET_FLOWS_COUNT)
self.connection.expect('Flow-entry max-limit:')
s=self.connection.before
self.connection.sendline('q')
self.connection.expect(']')
try:
table_size = self.parseFlowsCountOutput(s)
return [table_size,0]
        except Exception:
self.logger.warning(self.log_id+"Unable to parse SHOW OPENFLOW output!")
return [0,0]
#
# def updateCPU(self):
# if(self.isConnected==False):
# self.logger.error("Not connected!")
# return 0
# if self.executeCommand([self.COMMAND_SHOW_CPU]):
# try:
# s = self.connection.before
# return self.parseCPUOutput(s)
# except:
# self.logger.warning("Unable to parse SHOW CPU output!")
# return 0
# else:
# self.logger.info(" Unable to obtain CPU load. Connection error!")
# return 0
def updateCPU(self):
self.connection.sendline(self.COMMAND_SHOW_CPU)
self.connection.expect('minutes')
s = self.connection.before
try:
cpu_load = self.parseCPUOutput(s)
return cpu_load
        except Exception:
self.logger.warning(self.log_id+"Unable to parse DISPLAY CPU-USAGE output!")
def executeCommand(self,command):
try:
for item in (command):
self.connection.sendline(item)
self.connection.expect(']')
return True
#except self.connection.TIMEOUT,self.connection.EOF:
except self.connection.EOF:
self.logger.error(self.log_id+"Switch CLI doesn't respond.")
self.logger.debug(self.log_id+"Reconnecting to the Switch CLI...")
self.connect()
return False
def parseCPUOutput(self,s):
lines = s.split()
if lines.__contains__("seconds"):
i = lines.index('seconds')
obj = lines[i-4]
obj = obj.split('%')
cpu_load = obj[0]
return int(cpu_load)
return 0
def parseFlowsCountOutput(self,s):
lines = s.split()
try:
return int(lines[len(lines)-1])
        except Exception:
self.logger.error(self.log_id+" Unable to parse SHOW OPENFLOW output")
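# Illustrative construction (all values are placeholders; the required keys are
# the ones read in __init__ above):
#   cli = SwitchBigCLI({"model": "5920", "ip": "192.0.2.10", "user": "admin",
#                       "password": "secret", "cli_name": "hp5920",
#                       "openflow_instance_name": "1"})
#   cli.resetOpenflowInstance()
#   flows, _ = cli.getFlowsCountOverCLI()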
| 2.40625 | 2 |
main.py | schwarz/reddit-vip-flairs | 0 | 12787762 | <filename>main.py
"""
Automatically flair submissions with comments by interesting people.
"""
from dotenv import load_dotenv, find_dotenv
import logging
import praw
import os
def main():
logging.basicConfig(format='{asctime} - {name} - [{levelname}] {message}', style='{')
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
load_dotenv(find_dotenv())
client_id = os.getenv('VIP_CLIENT_ID')
client_secret = os.getenv('VIP_CLIENT_SECRET')
flair_text = os.getenv('VIP_FLAIR_TEXT')
flair_text_sep = os.getenv('VIP_FLAIR_TEXT_SEP', ' | ')
username = os.getenv('VIP_USERNAME')
password = os.getenv('VIP_PASSWORD')
vips = os.getenv('VIP_VIPS').split(',')
subreddit = os.getenv('VIP_SUBREDDIT')
reddit = praw.Reddit(client_id=client_id, client_secret=client_secret,
password=password, user_agent='reddit-vip-flairs',
username=username)
for comment in reddit.subreddit(subreddit).stream.comments():
submission = comment.submission
if (comment.author in vips and
(not submission.link_flair_text or flair_text not in submission.link_flair_text)):
log.debug('found comment {} by u/{} on {}'.format(comment.id, comment.author, submission.id))
new_text = flair_text
# Append if flair text already set
if submission.link_flair_text:
new_text = "{}{}{}".format(
submission.link_flair_text,
flair_text_sep, flair_text)
try:
data = {'link': submission.fullname, 'text': new_text,
'css_class': submission.link_flair_css_class}
reddit.post('/r/{}/api/flair'.format(subreddit), data)
log.info('assigned flair to submission {} triggered by comment {}'.format(comment.link_id, comment.id))
            except Exception:
log.error('flair update triggered by comment {} failed'.format(comment.id))
if __name__ == "__main__":
main()
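# Example .env consumed by main() (the variable names come from the os.getenv
# calls above; every value below is a placeholder):
#   VIP_CLIENT_ID=...
#   VIP_CLIENT_SECRET=...
#   VIP_FLAIR_TEXT=VIP replied
#   VIP_FLAIR_TEXT_SEP=" | "
#   VIP_USERNAME=...
#   VIP_PASSWORD=...
#   VIP_VIPS=user1,user2
#   VIP_SUBREDDIT=example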
| 3.03125 | 3 |
nbexchange/plugin/list.py | jgwerner/nbexchange | 7 | 12787763 | import glob
import json
import os
import re
import sys
from urllib.parse import quote, quote_plus
import nbgrader.exchange.abc as abc
from dateutil import parser
from traitlets import Bool, Unicode
from .exchange import Exchange
# "outbound" is files released by instructors (.... but there may be local copies!)
# "inbound" is files submitted by students (on external service)
# "cached" is files submitted by students & collected by instructors (so on local disk)
class ExchangeList(abc.ExchangeList, Exchange):
def do_copy(self, src, dest):
pass
fetched_root = Unicode("", help="Root location for files to be fetched into")
# the list of assignments the exchange knows about
assignments = []
# for filtering on-disk items from exchange items
# (eg removed 'released' items if the 'fetched' item is on disk)
seen_assignments = {"fetched": [], "collected": []}
def query_exchange(self):
"""
This queries the database for all the assignments for a course
if self.inbound or self.cached are true, it returns all the 'submitted'
items, else it returns all the 'released' ones.
(it doesn't care about feedback or collected actions)
"""
if self.course_id:
"""List assignments for specific course"""
r = self.api_request(f"assignments?course_id={quote_plus(self.course_id)}")
else:
"""List assignments for all courses"""
r = self.api_request(f"assignments")
self.log.debug(f"Got back {r} when listing assignments")
try:
assignments = r.json()
except json.decoder.JSONDecodeError:
self.log.error(f"Got back an invalid response when listing assignments")
return []
return assignments["value"]
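    # Each record in the returned list is a plain dict; the keys relied on
    # elsewhere in this class include "assignment_id", "status" (for example
    # "released" or "submitted"), "timestamp", "course_id", "student_id" and,
    # for submissions, "notebooks".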
def init_src(self):
pass
# sets self.assignments to be the list of assignment records that match the
# released/submitted/cached criteria configured
def init_dest(self):
course_id = self.course_id if self.course_id else "*"
assignment_id = (
self.coursedir.assignment_id if self.coursedir.assignment_id else "*"
)
self.assignments = []
exchange_listed_assignments = self.query_exchange()
self.log.debug(
f"ExternalExchange.list.init_dest collected {exchange_listed_assignments}"
)
# if "inbound", looking for inbound (submitted) records
# elif 'cached', looking for already downloaded files
# else, looking for outbound (released) files
if self.inbound or self.cached:
for assignment in exchange_listed_assignments:
if assignment.get("status") == "submitted":
self.assignments.append(assignment)
else:
            self.assignments = list(
                filter(
                    lambda x: x.get("status") == "released",
                    exchange_listed_assignments,
                )
            )
def copy_files(self):
pass
# Add the path for notebooks on disk, and add the blank parameters
# Feedback details is listed in "submitted" records
def parse_assignment(self, assignment): # , on_disk_assignments=None):
# If the assignment was found on disk, we need to expand the metadata
if assignment.get("status") == "fetched":
# get the individual notebook details
assignment_dir = os.path.join(
self.assignment_dir, assignment.get("assignment_id")
)
if self.path_includes_course:
assignment_dir = os.path.join(
self.assignment_dir, self.course_id, assignment.get("assignment_id")
)
assignment["notebooks"] = []
# Find the ipynb files
for notebook in sorted(glob.glob(os.path.join(assignment_dir, "*.ipynb"))):
notebook_id = os.path.splitext(os.path.split(notebook)[1])[0]
assignment["notebooks"].append(
{
"path": notebook,
"notebook_id": notebook_id,
"has_local_feedback": False,
"has_exchange_feedback": False,
"local_feedback_path": None,
"feedback_updated": False,
}
)
return assignment
def parse_assignments(self):
# Set up some general variables
self.assignments = []
held_assignments = {"fetched": {}, "released": {}}
assignment_dir = os.path.join(self.assignment_dir)
if self.path_includes_course:
assignment_dir = os.path.join(self.assignment_dir, self.course_id)
course_id = self.course_id if self.course_id and self.course_id != "*" else None
assignment_id = (
self.coursedir.assignment_id
if self.coursedir.assignment_id and self.coursedir.assignment_id != "*"
else None
)
student_id = (
self.coursedir.student_id
if self.coursedir.student_id and self.coursedir.student_id != "*"
else None
)
# Get a list of everything from the exchange
exchange_listed_assignments = self.query_exchange()
# if "inbound" or "cached" are true, we're looking for inbound
# (submitted) records else we're looking for outbound (released)
# records
# (everything else is irrelevant for this method)
if self.inbound or self.cached:
for assignment in exchange_listed_assignments:
if assignment.get("status") == "submitted":
self.assignments.append(assignment)
else:
for assignment in exchange_listed_assignments:
if assignment.get("status") == "released":
self.assignments.append(assignment)
# We want to check the local disk for "fetched" items, not what the external server
# says we should have
interim_assignments = []
found_fetched = set([])
for assignment in self.assignments:
assignment_directory = (
self.fetched_root + "/" + assignment.get("assignment_id")
)
if assignment["status"] == "released":
# Has this release already been found on disk?
if assignment["assignment_id"] in found_fetched:
continue
# Check to see if the 'released' assignment is on disk
if os.path.isdir(assignment_directory):
assignment["status"] = "fetched"
# lets just take a note of having found this assignment
found_fetched.add(assignment["assignment_id"])
interim_assignments.append(self.parse_assignment(assignment))
self.log.debug(
f"parse_assignment singular assignment returned: {assignment}"
)
# now we build two sub-lists:
# - the last "released" per assignment_id - but only if they've not been "fetched"
#
my_assignments = []
for assignment in interim_assignments:
# Skip those not being seen
if assignment is None:
continue
assignment_directory = (
self.fetched_root + "/" + assignment.get("assignment_id")
)
# Hang onto the fetched assignment, if there is one
# Note, we'll only have a note of the _first_ one - but that's fine
# as the timestamp is irrelevant... we just need to know if we
# need to look to the local disk
if assignment.get("status") == "fetched":
held_assignments["fetched"][
assignment.get("assignment_id")
] = assignment
continue
# filter out all the released items:
if assignment.get("status") == "released":
# This is complicated:
# - If the user has "fetched" the assignment, don't keep it
# - otherwise keep the latest one
if assignment.get("assignment_id") in held_assignments["fetched"]:
continue
else:
latest = held_assignments["released"].get(
assignment.get("assignment_id"),
{"timestamp": "1990-01-01 00:00:00"},
)
if assignment.get("timestamp") > latest.get("timestamp"):
held_assignments["released"][
assignment.get("assignment_id")
] = assignment
continue
# "Submitted" assignments [may] have feedback
# If they do, we need to promote details of local [on disk] feedback
# to the "assignment" level. It would have been nice to match
            # submission times to feedback directories.
# Note that the UI displays the "submitted" time in the table, but
# will provide a link to a folder that is the "feedback" time
# ("feedback-time" for all notebooks in one 'release' is the same)
if assignment.get("status") == "submitted":
assignment_dir = os.path.join(
assignment.get("assignment_id"), "feedback"
)
if self.path_includes_course:
assignment_dir = os.path.join(
self.course_id, assignment.get("assignment_id"), "feedback"
)
local_feedback_dir = None
local_feedback_path = None
has_local_feedback = False
has_exchange_feedback = False
feedback_updated = False
for notebook in assignment["notebooks"]:
nb_timestamp = notebook["feedback_timestamp"]
# This has to match timestamp in fetch_feedback.download
if nb_timestamp:
# get the individual notebook details
if os.path.isdir(
os.path.join(
assignment_dir,
nb_timestamp,
)
):
local_feedback_path = os.path.join(
assignment_dir,
quote(nb_timestamp),
f"{notebook['notebook_id']}.html",
)
has_local_feedback = os.path.isfile(
os.path.join(
assignment_dir,
nb_timestamp,
f"{notebook['notebook_id']}.html",
)
)
notebook["has_local_feedback"] = has_local_feedback
notebook["local_feedback_path"] = local_feedback_path
# Set assignment-level variables is any not the individual notebooks
# have them
if assignment["notebooks"]:
has_local_feedback = any(
[nb["has_local_feedback"] for nb in assignment["notebooks"]]
)
has_exchange_feedback = any(
[nb["has_exchange_feedback"] for nb in assignment["notebooks"]]
)
feedback_updated = any(
[nb["feedback_updated"] for nb in assignment["notebooks"]]
)
else:
has_local_feedback = False
has_exchange_feedback = False
feedback_updated = False
assignment["has_local_feedback"] = has_local_feedback
assignment["has_exchange_feedback"] = has_exchange_feedback
assignment["feedback_updated"] = feedback_updated
if has_local_feedback:
assignment["local_feedback_path"] = os.path.join(
assignment_dir,
quote(nb_timestamp),
)
else:
assignment["local_feedback_path"] = None
# We keep everything we've not filtered out
my_assignments.append(assignment)
        # concatenate the "released" and "fetched" sublists to my_assignments
for assignment_type in ("released", "fetched"):
if held_assignments[assignment_type].items():
for assignment_id in held_assignments[assignment_type]:
my_assignments.append(
held_assignments[assignment_type][assignment_id]
)
if self.inbound or self.cached:
_get_key = lambda info: (
info["course_id"],
info["student_id"],
info["assignment_id"],
)
_match_key = lambda info, key: (
info["course_id"] == key[0]
and info["student_id"] == key[1]
and info["assignment_id"] == key[2]
)
assignment_keys = sorted(
list(set([_get_key(info) for info in my_assignments]))
)
assignment_submissions = []
for key in assignment_keys:
submissions = [x for x in my_assignments if _match_key(x, key)]
submissions = sorted(submissions, key=lambda x: x["timestamp"])
info = {
"course_id": key[0],
"student_id": key[1],
"assignment_id": key[2],
"status": submissions[0]["status"],
"submissions": submissions,
}
assignment_submissions.append(info)
my_assignments = assignment_submissions
else:
my_assignments = [
x for x in my_assignments if x.get("status") != "submitted"
]
return my_assignments
def list_files(self):
"""List files"""
self.log.debug(f"ExchaneList.list_file starting")
assignments = self.parse_assignments()
return assignments
def remove_files(self):
if self.course_id:
"""Delete assignment"""
url = f"assignment?course_id={quote_plus(self.course_id)}&assignment_id={quote_plus(self.coursedir.assignment_id)}"
r = self.api_request(url, method="DELETE")
self.log.debug(f"Got back {r.status_code} after assignment unrelease")
def start(self):
if self.path_includes_course:
self.coursedir.submitted_directory = os.path.join(
self.course_id, "collected"
)
r = self.course_id
else:
self.coursedir.submitted_directory = "collected"
r = "."
self.fetched_root = os.path.abspath(os.path.join("", r))
if self.remove:
return self.remove_files()
else:
return self.list_files()
| 2.5625 | 3 |
src/ansys/mapdl/core/xpl.py | Miiicah/pymapdl | 1 | 12787764 | """Contains the ansXpl class."""
import json
import pathlib
import random
import string
import weakref
from ansys.api.mapdl.v0 import mapdl_pb2
import numpy as np
from .common_grpc import ANSYS_VALUE_TYPE
from .errors import MapdlRuntimeError
def id_generator(size=6, chars=string.ascii_uppercase):
"""Generate a random string using only uppercase letters."""
return "".join(random.choice(chars) for _ in range(size))
MYCTYPE = {
np.int32: "I",
np.int64: "L",
np.single: "F",
np.double: "D",
np.complex64: "C",
np.complex128: "Z",
}
class ansXpl:
"""
ANSYS database explorer.
Examples
--------
>>> from ansys.mapdl.core import launch_mapdl
>>> mapdl = launch_mapdl()
>>> xpl = mapdl.xpl
Open a mode file and extract a vector.
>>> xpl.open('file.mode')
>>> vec = xpl.read('MASS')
>>> vec.asarray()
array([ 4, 7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40, 43,
46, 49, 52, 55, 58, 1], dtype=int32)
"""
def __init__(self, mapdl):
"""Initialize the class."""
from ansys.mapdl.core.mapdl_grpc import MapdlGrpc
if not isinstance(mapdl, MapdlGrpc): # pragma: no cover
raise TypeError("Must be initialized using MapdlGrpc class")
self._mapdl_weakref = weakref.ref(mapdl)
self._filename = None
self._open = False
@property
def _mapdl(self):
"""Return the weakly referenced instance of mapdl."""
return self._mapdl_weakref()
def open(self, filename, option=""):
"""
Open an MAPDL file to explore.
Parameters
----------
filename : str
Name of the file to open.
Returns
-------
str
Response from MAPDL.
Examples
--------
>>> xpl.open('file.mode')
===============================================
===== ANSYS File Xplorer ======
===============================================
Opening the file.mode ANSYS File
"""
self._filename = filename
out = self._mapdl.run(f"*XPL,OPEN,{filename},,{option}")
self._open = True
return out
def close(self):
"""
Close the MAPDL file after opening.
Returns
-------
str
Response from MAPDL.
Examples
--------
>>> xpl.open("file.mode")
>>> xpl.close()
===== ANSYS File Xplorer : Close the file.mode ANSYS File
"""
response = self._mapdl.run("*XPL,CLOSE")
self._check_ignored(response)
self._open = False
return response
def list(self, nlev=1):
"""
List the records at the current level.
Parameters
----------
nlev: int
Number of levels to recursively explore.
Returns
-------
str
Listing of records from the current level.
Examples
--------
Open a full file and list the current records.
>>> xpl.open("file.full")
>>> xpl.list()
===== ANSYS File Xplorer : List Blocks in File file.full
::FULL::HEADER Size = 652 B Total Size = 180.297 KB
::FULL::DOFSBYNOD Size = 24 B
::FULL::BACK Size = 336 B
::FULL::STIFF::HEADER Size = 117.316 KB
::FULL::RHS Size = 1.910 KB
::FULL::DIAGK Size = 1.910 KB
::FULL::SCLK Size = 1.910 KB
::FULL::MRK Size = 984 B
::FULL::NODEEXT Size = 336 B
::FULL::PCGDOFS Size = 984 B
::FULL::BCDOFS Size = 984 B
::FULL::BCVALUES Size = 12 B
::FULL::MASS::HEADER Size = 50.801 KB
::FULL::DIAGM Size = 1.910 KB
::FULL::NGPH Size = 336 B
"""
response = self._mapdl.run(f"*XPL,LIST,{nlev}")
self._check_ignored(response)
return response
def _check_ignored(self, response):
"""Check for ignored in response."""
if "ignored" in response:
raise MapdlRuntimeError(response)
def help(self):
"""
XPL help message.
Examples
--------
>>> print(xpl.help())
"""
return self._mapdl.run("*XPL,HELP")
def step(self, where):
"""
Go down in the tree of records
Parameters
----------
where : str
Path to follow. This path can be composed of several
levels, for example ``"BRANCH1::SUBBRANCH2::.."``
Returns
-------
str
Response from MAPDL.
Examples
--------
>>> xpl.step('MASS')
>>> print(xpl.where())
===== ANSYS File Xplorer : Display Current Location
Current Location : FULL::MASS
File Location : 7644
"""
response = self._mapdl.run(f"*XPL,STEP,{where}")
if "Not Found" in response:
raise RuntimeError(response.strip())
return response
def info(self, recname, option=""):
"""
Gives details on a specific record, or all records (using ``"*"``)
Parameters
----------
recname : str
Record of interest
option : str
Options string.
Returns
-------
str
Response from MAPDL.
Examples
--------
>>> xpl.open('file.full')
>>> print(xpl.info('NGPH'))
===== ANSYS File Xplorer : Information about Block NGPH
::NGPH Size = 6.289 KB
- Record Size : 81
- Data type : integer values
"""
return self._mapdl.run(f"*XPL,INFO,{recname},{option}")
def print(self, recname):
"""
Print values of a given records, or all records (using ``"*"``).
Parameters
----------
recname : str
Record of interest
Returns
-------
str
Response from MAPDL.
Examples
--------
>>> xpl.open('file.full')
>>> print(xpl.print('DOFSBYNOD'))
===== ANSYS File Xplorer : Print Block DOFSBYNOD
DOFSBYNOD :
Size : 3
1 2 3
"""
return self._mapdl.run(f"*XPL,PRINT,{recname}")
def json(self):
"""
Return a JSON representation of the tree or records.
Examples
--------
>>> xpl.json()
{'name': 'FULL',
'children': [{'name': 'DOFSBYNOD', 'size': 24},
{'name': 'BACK', 'size': 336},
{'name': 'STIFF', 'size': 120132},
{'name': 'RHS', 'size': 1956},
{'name': 'DIAGK', 'size': 1956},
{'name': 'SCLK', 'size': 36},
{'name': 'NODEEXT', 'size': 32},
{'name': 'PCGDOFS', 'size': 984},
{'name': 'BCDOFS', 'size': 984},
{'name': 'BCVALUES', 'size': 20},
{'name': 'MASS', 'size': 52020},
{'name': 'DIAGM', 'size': 1236},
{'name': 'NGPH', 'size': 6440}]}
"""
self._mapdl.run("*XPL,JSON,_mylocal_.json")
text = self._mapdl._download_as_raw("_mylocal_.json").decode()
return json.loads(text)
def where(self):
"""
Returns the current location in the MAPDL file.
Returns
-------
str
String containing the current location.
Examples
--------
>>> print(xpl.where())
===== ANSYS File Xplorer : Display Current Location
Current Location : FULL
File Location : 412
"""
return self._mapdl.run("*XPL,WHERE")
def up(self, nlev=1):
"""
Go up in the tree.
nlev : int
Number of levels to recursively go up, or TOP
Examples
--------
>>> print(xpl.up())
===== ANSYS File Xplorer : Go up to 1 level(s)
-> Already at the top level. Command is ignored
"""
if str(nlev).upper().strip() == "TOP":
return self._mapdl.run("*XPL,UP,TOP")
return self._mapdl.run(f"*XPL,UP,{nlev}")
def goto(self, path):
"""
Go directly to a new location in the file.
Parameters
----------
path : str
Absolute path to the new location.
Examples
--------
>>> print(xpl.goto('MASS'))
===== ANSYS File Xplorer : Go up to top level(s)
===== ANSYS File Xplorer : Step into Block MASS
"""
return self._mapdl.run(f"*XPL,GOTO,{path}")
def copy(self, newfile, option=""):
"""
Copy the current opened as a new file.
Parameters
----------
newfile : str
Name of the new file to create
option: str
Option.
Examples
--------
>>> xpl.copy('tmpfile.full')
===== ANSYS File Xplorer : Copy file.full ANSYS file to file tmpfile.full
>> Remove existing output file tmpfile.full
"""
return self._mapdl.run(f"*XPL,COPY,{newfile},{option}")
def save(self):
"""Save the current file, ignoring the marked records."""
response = self._mapdl.run("*XPL,SAVE").strip()
self._check_ignored(response)
return response
def extract(self, recordname, sets="ALL", asarray=False): # pragma: no cover
"""
Import a Matrix/Vector from a MAPDL result file.
At the moment, this only supports reading the displacement vectors from
a result file.
Parameters
----------
recordname : str
Record name. Currently only supports the ``"NSL"`` record,
displacement vectors.
sets : str or int
Number of sets. Can be ``"ALL"`` or the number of sets to load.
asarray : bool, optional
Return a :class:`numpy.ndarray` rather than a :class:`AnsMat
<ansys.mapdl.core.math.AnsMat>`. Default ``False``.
Returns
-------
numpy.ndarray or ansys.mapdl.core.math.AnsMat
A :class:`numpy.ndarray` or :class:`AnsMat
<ansys.mapdl.core.math.AnsMat>` of the displacement vectors,
depending on the value of ``asarray``.
Notes
-----
This only works on the ``"NSL"`` record of MAPDL result files.
Examples
--------
First, open a result file and extract the displacement vectors for all
sets.
>>> xpl.open("file.rst")
>>> mat = xpl.extract("NSL")
>>> mat
Dense APDLMath Matrix (243, 10)
Convert to a dense numpy array
>>> arr = mat.asarray()
>>> arr
array([[-9.30806802e-03, -2.39600770e-02, -5.37856729e-03, ...,
-5.61188243e-03, -7.17686067e-11, 3.71893252e-03],
[-1.60960014e-02, 2.00410618e-02, 8.05822565e-03, ...,
-1.26917511e-02, -5.14133724e-11, -1.38783485e-03],
[ 2.54040694e-02, 3.91901513e-03, -2.67965796e-03, ...,
-1.46365178e-02, 8.31735188e-11, -2.33109771e-03],
...,
[-2.80679551e-03, -1.45686692e-02, 8.05466291e-03, ...,
5.88196684e-03, 1.72211103e-02, 6.10079082e-03],
[-7.06675717e-03, 1.30455037e-02, -6.31685295e-03, ...,
1.08619340e-02, -1.72211102e-02, 2.52199472e-03],
[ 2.29726170e-02, 3.54392176e-03, -1.87020162e-03, ...,
1.20642736e-02, 2.58299321e-11, 9.14504940e-04]])
"""
if recordname.upper() != "NSL":
raise ValueError("Currently, the only supported recordname is 'NSL'")
rand_name = id_generator()
self._mapdl._log.info(
"Calling MAPDL to extract the %s matrix from %s", recordname, self._filename
)
num_first = 1
num_last = 1
if sets == "ALL":
num_last = -1
dtype = np.double
file_extension = pathlib.Path(self._filename).suffix[1:]
if file_extension.lower() != "rst":
raise RuntimeError(
"This method only supports extracting records from result files"
)
self._mapdl.run(
f"*DMAT,{rand_name},{MYCTYPE[dtype]},IMPORT,{file_extension},{self._filename},"
f"{num_first},{num_last},{recordname}",
mute=False,
)
return self._mapdl.math.mat(dtype=dtype, name=rand_name)
def read(self, recordname):
"""
Read a record and return either an APDL math matrix or an APDL math vector.
Returns
-------
ansys.mapdl.AnsMat or ansys.mapdl.AnsVec
A handle to the APDLMath object.
Examples
--------
>>> vec = xpl.read('MASS')
>>> vec.asarray()
array([ 4, 7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40, 43,
46, 49, 52, 55, 58, 1], dtype=int32)
"""
rand_name = id_generator()
response = self._mapdl.run(f"*XPL,READ,{recordname},{rand_name}")
self._check_ignored(response)
data_info = self._mapdl._data_info(rand_name)
dtype = ANSYS_VALUE_TYPE[data_info.stype]
if dtype is None: # pragma: no cover
raise ValueError("Unknown MAPDL data type")
# return either vector or matrix type
if data_info.objtype == mapdl_pb2.DataType.VEC:
return self._mapdl.math.vec(dtype=dtype, name=rand_name)
elif data_info.objtype in [mapdl_pb2.DataType.DMAT, mapdl_pb2.DataType.SMAT]:
return self._mapdl.math.mat(dtype=dtype, name=rand_name)
else: # pragma: no cover
raise ValueError(f"Unhandled MAPDL matrix object type {data_info.objtype}")
def write(self, recordname, vecname):
"""
Write a given record back to an MAPDL file.
Use the write function at your own risk, you may corrupt an existing
file by changing the size of a record in the file. This method must be
used only on a non-compressed file.
Parameters
----------
recordname : str
Name of the record you want to overwrite. Your position
in the file must be set accordingly to this record location
(same as if you want to read it).
vecname : str
Name of the APDLMath vector you want to write in the MAPDL
file. Its size must be consistent with the existing record.
Returns
-------
str
Response from MAPDL.
Examples
--------
>>> xpl.write('MASS', vecname)
"""
response = self._mapdl.run(f"*XPL,WRITE,{recordname},{vecname}")
self._check_ignored(response)
return response
def __repr__(self):
txt = "MAPDL File Explorer\n"
if self._open:
txt += "\tOpen file:%s" % self._filename
txt += "\n".join(self.where().splitlines()[1:])
else:
txt += "\tNo open file"
return txt
| 2.40625 | 2 |
rnnt/args.py | lahiruts/Online-Speech-Recognition | 201 | 12787765 | <reponame>lahiruts/Online-Speech-Recognition<gh_stars>100-1000
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('name', 'rnn-t-v5', help='session name')
flags.DEFINE_enum('mode', 'train', ['train', 'resume', 'eval'], help='mode')
flags.DEFINE_integer('resume_step', None, help='model step')
# dataset
flags.DEFINE_string('LibriSpeech_train_100',
"../librispeech/LibriSpeech/train-clean-100",
help='LibriSpeech train')
flags.DEFINE_string('LibriSpeech_train_360',
"../librispeech/LibriSpeech/train-clean-360",
help='LibriSpeech train')
flags.DEFINE_string('LibriSpeech_train_500',
"../librispeech/LibriSpeech/train-other-500",
help='LibriSpeech train')
flags.DEFINE_string('LibriSpeech_test',
"../librispeech/LibriSpeech/test-clean",
help='LibriSpeech test')
flags.DEFINE_string('LibriSpeech_dev',
"../librispeech/LibriSpeech/dev-clean",
help='LibriSpeech dev')
flags.DEFINE_string('TEDLIUM_train',
"../speech_data/TEDLIUM/TEDLIUM_release1/train",
help='TEDLIUM 1 train')
flags.DEFINE_string('TEDLIUM_test',
"../speech_data/TEDLIUM/TEDLIUM_release1/test",
help='TEDLIUM 1 test')
flags.DEFINE_string('CommonVoice', "../speech_data/common_voice",
help='common voice')
flags.DEFINE_string('YT_bloomberg2', "../speech_data/common_voice",
help='common voice')
flags.DEFINE_string('YT_life', "../speech_data/common_voice",
help='common voice')
flags.DEFINE_integer('num_workers', 4, help='dataloader workers')
# learning
flags.DEFINE_bool('use_pretrained', default=False, help='Use pretrained encoder')
flags.DEFINE_enum('optim', "adam", ['adam', 'sgd', 'sm3'], help='optimizer')
flags.DEFINE_float('lr', 1e-4, help='initial lr')
flags.DEFINE_bool('sched', True, help='reduce lr on plateau')
flags.DEFINE_integer('sched_patience', 1, help='patience before reducing lr on plateau')
flags.DEFINE_float('sched_factor', 0.5, help='lr reduction factor on plateau')
flags.DEFINE_float('sched_min_lr', 1e-6, help='minimum lr for the plateau scheduler')
flags.DEFINE_integer('warmup_step', 10000, help='linearly warmup lr')
flags.DEFINE_integer('epochs', 30, help='epoch')
flags.DEFINE_integer('batch_size', 8, help='batch size')
flags.DEFINE_integer('sub_batch_size', 8, help='accumulate batch size')
flags.DEFINE_integer('eval_batch_size', 4, help='evaluation batch size')
flags.DEFINE_float('gradclip', None, help='clip norm value')
# encoder
flags.DEFINE_string('enc_type', 'LSTM', help='encoder rnn type')
flags.DEFINE_integer('enc_hidden_size', 600, help='encoder hidden dimension')
flags.DEFINE_integer('enc_layers', 4, help='encoder layers')
flags.DEFINE_integer('enc_proj_size', 600, help='encoder layers')
flags.DEFINE_float('enc_dropout', 0, help='encoder dropout')
# decoder
flags.DEFINE_integer('dec_hidden_size', 150, help='decoder hidden dimension')
flags.DEFINE_integer('dec_layers', 2, help='decoder layers')
flags.DEFINE_integer('dec_proj_size', 150, help='encoder layers')
flags.DEFINE_float('dec_dropout', 0., help='decoder dropout')
# joint
flags.DEFINE_integer('joint_size', 512, help='Joint hidden dimension')
# tokenizer
flags.DEFINE_enum('tokenizer', 'char', ['char', 'bpe'], help='tokenizer')
flags.DEFINE_integer('bpe_size', 256, help='BPE vocabulary size')
flags.DEFINE_integer('vocab_embed_size', 16, help='vocabulary embedding size')
# data preprocess
flags.DEFINE_float('audio_max_length', 14, help='max length in seconds')
flags.DEFINE_enum('feature', 'mfcc', ['mfcc', 'melspec', 'logfbank'],
help='audio feature')
flags.DEFINE_integer('feature_size', 80, help='mel_bins')
flags.DEFINE_integer('n_fft', 400, help='spectrogram')
flags.DEFINE_integer('win_length', 400, help='spectrogram')
flags.DEFINE_integer('hop_length', 200, help='spectrogram')
flags.DEFINE_bool('delta', False, help='concat delta and detal of dealt')
flags.DEFINE_bool('cmvn', False, help='normalize spectrogram')
flags.DEFINE_integer('downsample', 3, help='downsample audio feature')
flags.DEFINE_integer('T_mask', 50, help='max width of time masks (spec augmentation)')
flags.DEFINE_integer('T_num_mask', 2, help='number of time masks (spec augmentation)')
flags.DEFINE_integer('F_mask', 5, help='max width of frequency masks (spec augmentation)')
flags.DEFINE_integer('F_num_mask', 1, help='number of frequency masks (spec augmentation)')
# apex
flags.DEFINE_bool('apex', default=True, help='fp16 training')
flags.DEFINE_string('opt_level', 'O1', help='use mix precision')
# parallel
flags.DEFINE_bool('multi_gpu', False, help='DataParallel')
# log
flags.DEFINE_integer('loss_step', 5, help='frequency to show loss in pbar')
flags.DEFINE_integer('save_step', 10000, help='frequency to save model')
flags.DEFINE_integer('eval_step', 10000, help='frequency to save model')
flags.DEFINE_integer('sample_size', 20, help='size of visualized examples')
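# Illustrative invocation (the entry-point name and paths are assumptions; only
# the flag names are taken from the definitions above):
#   python train.py --name=rnn-t-v5 --batch_size=8 --lr=1e-4 \
#       --LibriSpeech_train_100=/data/LibriSpeech/train-clean-100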
| 1.804688 | 2 |
nodel/framework/common.py | ary4n/nodel | 0 | 12787766 | import os
import random
import string
def create_init_file(base_dir):
open(os.path.join(base_dir, '__init__.py'), 'a').close()
def create_file(base_dir, name, other):
with open(os.path.join(base_dir, name), 'w') as f:
with open(other) as o:
f.write(o.read())
def create_git_ignore(base_dir):
path = os.path.join(base_dir, '.gitignore')
if not os.path.exists(path):
with open(path, 'w') as f:
f.write(
"*.py[co]\n*.egg*\nbuild\ncache\n.script\nconfig.json\n*.db\n*.log\n.project\n.pydevproject\n.settings\n*~\n\#*\#\n/.emacs.desktop\n/.emacs.desktop.lock\n.elc\nauto-save-list\ntramp\n.\#*\n*.swp\n*.swo\n.DS_Store\n._*\nThumbs.db\nDesktop.ini\n.idea\nnode_modules\n.env\nstatic")
def generate_key():
return ''.join([random.SystemRandom().choice("{}{}{}".format(string.ascii_letters, string.digits, "!#$%&'()*+,-./:;<>?@[]^_{|}~")) for i in range(50)])
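# Illustrative usage (the target directory is a placeholder):
#   create_init_file('/tmp/myapp')
#   create_git_ignore('/tmp/myapp')
#   secret_key = generate_key()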
| 2.34375 | 2 |
inventory/test_inventory.py | detrout/htsworkflow | 0 | 12787767 | <reponame>detrout/htsworkflow<gh_stars>0
from __future__ import absolute_import, print_function
from django.test import TestCase
from django.test.utils import setup_test_environment, \
teardown_test_environment
from django.db import connection
from django.conf import settings
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.encoding import smart_text
from rdflib import Graph, Literal, URIRef
from .models import Item, Vendor
from .inventory_factory import ItemFactory, LongTermStorageFactory
from samples.samples_factory import HTSUserFactory, LibraryFactory
from experiments.experiments_factory import FlowCellFactory
from encoded_client.rdfns import inventoryOntology, libraryOntology
def localhostNode(url):
return URIRef('http://localhost%s' % (url,))
class InventoryTestCase(TestCase):
def setUp(self):
self.password = '<PASSWORD>'
self.user = HTSUserFactory.create()
self.user.set_password(self.password)
self.user.save()
def test_item(self):
item = ItemFactory()
self.assertTrue(len(item.uuid), 32)
url = '/inventory/{}/'.format(item.uuid)
self.assertTrue(self.client.login(username=self.user.username,
password=self.password))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
content = smart_text(response.content)
model = Graph()
model.parse(data=content, format="rdfa", media_type="text/html", publicID=url)
itemNode = URIRef(url)
items = list(model.objects(itemNode, inventoryOntology['item_type']))
item_type = items[0].toPython()
self.assertEqual(item_type, item.item_type.name)
def test_itemindex(self):
item = ItemFactory()
fc1 = FlowCellFactory()
lib1 = LibraryFactory()
lts = LongTermStorageFactory(flowcell=fc1,
libraries=[lib1,],
storage_devices=[item,],)
url = reverse('itemtype_index',
kwargs={'name': item.item_type.name})
disk_url = reverse('item_summary_by_uuid',
kwargs={'uuid': item.uuid})
indexNode = localhostNode(url)
diskNode = localhostNode(disk_url)
self.assertTrue(self.client.login(username=self.user.username,
password=self.password))
flowcells = self.get_flowcells_from_content(url, indexNode, diskNode)
self.assertEqual(len(flowcells), 1)
flowcell_url = reverse('flowcell_detail',
kwargs={'flowcell_id': fc1.flowcell_id})
self.assertTrue(flowcells[0].endswith(flowcell_url))
def test_add_disk(self):
item = ItemFactory()
url = reverse('itemtype_index',
kwargs={'name': item.item_type.name})
disk_url = reverse('item_summary_by_uuid',
kwargs={'uuid': item.uuid})
indexNode = localhostNode(url)
diskNode = localhostNode(disk_url)
self.assertTrue(self.client.login(username=self.user.username,
password=self.password))
flowcells = self.get_flowcells_from_content(url, indexNode, diskNode)
self.assertEqual(len(flowcells), 0)
# step two link the flowcell
flowcell = FlowCellFactory(flowcell_id='22TWOAAXX')
link_url = reverse('link_flowcell_and_device',
args=(flowcell.flowcell_id,
item.barcode_id))
link_response = self.client.get(link_url)
self.assertEqual(link_response.status_code, 200)
flowcells = self.get_flowcells_from_content(url, indexNode, diskNode)
flowcell_url = reverse('flowcell_detail',
kwargs={'flowcell_id': flowcell.flowcell_id})
self.assertEqual(len(flowcells), 1)
self.assertTrue(flowcells[0].endswith(flowcell_url))
def test_add_disk_failed_flowcell(self):
item = ItemFactory()
url = reverse('itemtype_index', kwargs={'name': item.item_type.name})
disk_url = reverse('item_summary_by_uuid', kwargs={'uuid': item.uuid})
indexNode = localhostNode(url)
diskNode = localhostNode(disk_url)
self.assertTrue(self.client.login(username=self.user.username,
password=self.password))
flowcells = self.get_flowcells_from_content(url, indexNode, diskNode)
self.assertEqual(len(flowcells), 0)
# step two link the flowcell
flowcell_id = '33THRAAXX'
flowcell = FlowCellFactory(flowcell_id=flowcell_id +' (failed)')
link_url = reverse('link_flowcell_and_device',
args=(flowcell.flowcell_id, item.barcode_id))
link_response = self.client.get(link_url)
self.assertEqual(link_response.status_code, 200)
flowcells = self.get_flowcells_from_content(url, indexNode, diskNode)
self.assertEqual(len(flowcells), 1)
flowcell_url = reverse('flowcell_detail',
kwargs={'flowcell_id': flowcell_id})
self.assertTrue(flowcells[0].endswith(flowcell_url))
def get_flowcells_from_content(self, url, rootNode, diskNode):
model = Graph()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
content = smart_text(response.content)
model.parse(data=content, format="rdfa", media_type="text/html", publicID=rootNode)
targets = model.objects(diskNode, libraryOntology['flowcell_id'])
flowcells = [ str(x) for x in targets]
return flowcells
def suite():
from unittest import TestSuite, defaultTestLoader
suite = TestSuite()
suite.addTests(defaultTestLoader.loadTestsFromTestCase(InventoryTestCase))
return suite
if __name__ == "__main__":
from unittest import main
main(defaultTest="suite")
| 2.15625 | 2 |
apps/leaflet_ts/leaflet_ts/views/main.py | earthobservatory/displacement-ts-server | 0 | 12787768 | <gh_stars>0
from datetime import datetime
from subprocess import check_output
import hashlib
from flask import render_template, Blueprint, g, redirect, session, request, url_for, flash, abort
from flask_login import login_required, login_user, logout_user, current_user
from flask import request
from leaflet_ts import app, db, lm
from leaflet_ts.models.user import User
from leaflet_ts.lib.forms import LoginForm
from leaflet_ts.lib.ldap import ldap_user_verified
import re
mod = Blueprint('views/main', __name__)
# get public ip
PUBLIC_IP = check_output(
['dig', '@resolver1.opendns.com', '+short', 'myip.opendns.com']).strip()
@lm.user_loader
def load_user(username):
return User.query.get(username)
@app.before_request
def before_request():
g.user = current_user
@app.errorhandler(404)
def page_not_found(e):
error_msg = """Error code 404: Page doesn't exist. Please check the URL.
If you feel there is an issue with our application,
please contact geraldjohn.m.manipon__at__jpl.nasa.gov."""
return render_template(
'error.html',
title='HySDS Resource Management: Encountered Error',
current_year=datetime.now().year,
error_msg=error_msg), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template(
'error.html',
title='HySDS Resource Management: Encountered Error',
current_year=datetime.now().year,
error_msg="Error code 500: " + str(e)), 500
@app.errorhandler(501)
def unimplemented(e):
return render_template(
'error.html',
title='HySDS Resource Management: Encountered Error',
current_year=datetime.now().year,
error_msg="Error code 501: " + str(e)), 501
REG = re.compile(r":\d*")
@app.after_request
def add_cors_header(response):
'''
Adds a CORs response header to the response
'''
host = request.headers.get("Host")
host = REG.sub("", host)
response.headers['Access-Control-Allow-Origin'] = host
return response
@mod.route('/login', methods=['GET', 'POST'])
def login():
if g.user is not None and g.user.is_authenticated:
return redirect(url_for('views/main.index'))
form = LoginForm()
if form.validate_on_submit():
#session['remember_me'] = form.remember_me.data
username = form.username.data
        password = form.password.data
# authenticate ops user account
if username == app.config['OPS_USER']:
ops_passwd_hex = hashlib.sha224(password).hexdigest()
if app.config['OPS_PASSWORD_HASH'] == ops_passwd_hex:
ldap_info = {}
else:
ldap_info = None
else:
# for everyone else authenticate via LDAP
ldap_info = ldap_user_verified(username, password)
if ldap_info is not None:
user = load_user(username)
#app.logger.info('user loaded: %s' % user)
if user is None:
user = User()
user.id = form.username.data
user.ldap_info = ldap_info
db.session.add(user)
db.session.commit()
#app.logger.info('user: %s' % user)
login_user(user)
flash("Successfully authenticated.")
return redirect(
request.args.get('next') or url_for('views/main.index'))
flash("Error trying to authenticate.")
else:
for error in form.errors:
flash('%s: %s' % (error, '; '.join(form.errors[error])))
return render_template(
'login.html',
title='ARIA Time-Series',
form=form,
current_year=datetime.now().year)
@mod.route('/logout')
def logout():
logout_user()
flash("Successfully logged out.")
return redirect(url_for('views/main.index'))
@mod.route('/')
def index():
#app.logger.debug("Got here")
now = datetime.utcnow()
return render_template(
'index.html',
title='ARIA Time-Series',
last_updated=now.isoformat() + 'Z',
current_year=now.year)
| 2.234375 | 2 |
setup.py | bopopescu/railguns | 0 | 12787769 | <reponame>bopopescu/railguns<gh_stars>0
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='railguns',
version='0.1.9',
install_requires=[
'Django',
'djangorestframework<3.10',
'djangorestframework-jwt',
'djangorestframework-xml',
'django-rest-swagger',
#
'django-ckeditor',
'django-filter',
'django-htmlmin',
'itunes-iap',
'mysqlclient',
'pylibmc',
'redis',
#
'djangorestframework-camel-case'
],
packages=find_packages(),
include_package_data=True,
license='MIT',
description='Only My Railgun',
long_description=README,
url='https://github.com/larryhq/railguns',
author='larryhq',
author_email='<EMAIL>',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.10',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
)
| 1.53125 | 2 |
problemsets/Codeforces/Python/A691.py | juarezpaulino/coderemite | 0 | 12787770 | """
*
* Author: <NAME>(coderemite)
* Email: <EMAIL>
*
"""
n=int(input())
a=input()
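# 'NYOE S' interleaves "NO " (even indices) and "YES" (odd indices); the
# condition below evaluates to 0/False or 1/True, so [condition::2] selects
# the right answer.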
print('NYOE S'[(n<2 and a.count('1')) or (n>1 and a.count('0')==1)::2]) | 3.046875 | 3 |
Python/Exercicio013.py | BarbaraGomes97/Desafios.cpp | 0 | 12787771 | # Conversor de temperatura de C° para F°
import colorama
colorama.init()
print('\033[32;1mTemperature converter\033[m')
temp = float(input('Enter the temperature in °C: '))
print(f'{temp} °C is equivalent to {(9*temp/5)+32} °F')
| 3.53125 | 4 |
making_decisions/python/bmi_calculator.py | kssim/EFP | 1 | 12787772 | <filename>making_decisions/python/bmi_calculator.py
# Pratice 19. BMI Calculator
# Output:
# Your BMI is 19.5.
# You are within the ideal weight range.
# Or
# Your BMI is 32.5.
# You are overweight. You should see your doctor.
# Formula:
# bmi = (weight / (height x height)) x 703
# Standard:
#   BMI 18.5 ~ 25 is normal weight.
# Constraint:
# - Ensure your program takes only numeric data.
# Don't let the user continue unless the data is valid.
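# Worked example (illustrative): 150 lb at 65 in gives
#   (150 / (65 * 65)) * 703 ~= 24.96, i.e. within the normal range.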
#!/usr/bin/env python
from __future__ import division
import sys
def input_process(in_question):
return input(in_question) if sys.version_info >= (3,0) else raw_input(in_question)
if __name__ == '__main__':
try:
weight = int(input_process('What is your weight(pound)? '))
height = int(input_process('What is your height(inch)? '))
    except ValueError:
print ('You must input only numbers.')
else:
bmi_convert_value = 703
bmi_raw_data = float(weight / (height * height))
bmi = bmi_raw_data * bmi_convert_value
print ('Your BMI is %s' % bmi)
if bmi < 18.5:
print ('You are within the ideal weight range.')
elif bmi > 25:
print ('You are overweight. You should see your doctor.')
else:
            print ('You are normal weight.')
| 4.5 | 4 |
measures/querysets.py | uktrade/tamato | 14 | 12787773 | <reponame>uktrade/tamato<gh_stars>10-100
from django.contrib.postgres.aggregates import StringAgg
from django.db.models import Case
from django.db.models import CharField
from django.db.models import F
from django.db.models import Func
from django.db.models import Q
from django.db.models import QuerySet
from django.db.models import Value
from django.db.models import When
from django.db.models.functions import Concat
from django.db.models.functions.text import Trim
from common.fields import TaricDateRangeField
from common.models.records import TrackedModelQuerySet
class DutySentenceMixin(QuerySet):
def with_duty_sentence(self) -> QuerySet:
"""
Annotates the query set with a human-readable string that represents the
aggregation of all of the linked components into a single duty sentence.
This operation relies on the `prefix` and `abbreviation` fields being
filled in on duty expressions and units, which are not supplied by the
TARIC3 XML by default.
Strings output by this annotation should be valid input to the
:class:`~measures.parsers.DutySentenceParser`.
The annotated field will be generated using the below SQL:
.. code:: SQL
STRING_AGG(
TRIM(
CONCAT(
CASE
WHEN (
"measures_dutyexpression"."prefix" IS NULL
OR "measures_dutyexpression"."prefix" = ''
                        ) THEN ''
                        ELSE CONCAT("measures_dutyexpression"."prefix",' ')
END,
CONCAT(
"measures_measureconditioncomponent"."duty_amount",
CONCAT(
CASE
WHEN (
"measures_measureconditioncomponent"."duty_amount" IS NOT NULL
AND "measures_measureconditioncomponent"."monetary_unit_id" IS NULL
) THEN '%'
WHEN "measures_measureconditioncomponent"."duty_amount" IS NULL THEN ''
ELSE CONCAT(' ', "measures_monetaryunit"."code")
END,
CONCAT(
CASE
WHEN "measures_measurementunit"."abbreviation" IS NULL THEN ''
WHEN "measures_measureconditioncomponent"."monetary_unit_id" IS NULL THEN "measures_measurementunit"."abbreviation"
ELSE CONCAT(' / ', "measures_measurementunit"."abbreviation")
END,
CASE
WHEN "measures_measurementunitqualifier"."abbreviation" IS NULL THEN
ELSE CONCAT(
' / ',
"measures_measurementunitqualifier"."abbreviation"
)
END
)
)
)
)
            ),
            ' '
            ) AS "duty_sentence"
"""
return self.annotate(
duty_sentence=StringAgg(
expression=Trim(
Concat(
Case(
When(
Q(components__duty_expression__prefix__isnull=True)
| Q(components__duty_expression__prefix=""),
then=Value(""),
),
default=Concat(
F("components__duty_expression__prefix"),
Value(" "),
),
),
"components__duty_amount",
Case(
When(
components__monetary_unit=None,
components__duty_amount__isnull=False,
then=Value("%"),
),
When(
components__duty_amount__isnull=True,
then=Value(""),
),
default=Concat(
Value(" "),
F("components__monetary_unit__code"),
),
),
Case(
When(
Q(components__component_measurement=None)
| Q(
components__component_measurement__measurement_unit=None,
)
| Q(
components__component_measurement__measurement_unit__abbreviation=None,
),
then=Value(""),
),
When(
components__monetary_unit__isnull=True,
then=F(
"components__component_measurement__measurement_unit__abbreviation",
),
),
default=Concat(
Value(" / "),
F(
"components__component_measurement__measurement_unit__abbreviation",
),
),
),
Case(
When(
components__component_measurement__measurement_unit_qualifier__abbreviation=None,
then=Value(""),
),
default=Concat(
Value(" / "),
F(
"components__component_measurement__measurement_unit_qualifier__abbreviation",
),
),
),
output_field=CharField(),
),
),
delimiter=" ",
ordering="components__duty_expression__sid",
),
)
class MeasuresQuerySet(TrackedModelQuerySet, DutySentenceMixin):
def with_effective_valid_between(self):
"""
In many cases the measures regulation effective_end_date overrides the
measures validity range.
Annotate the queryset with the db_effective_valid_between based on the regulations and measure.
Generates the following SQL:
.. code:: SQL
SELECT *,
CASE
WHEN (
"regulations_regulation"."effective_end_date" IS NOT NULL AND
"measures_measure"."valid_between" @> "regulations_regulation"."effective_end_date"::timestamp WITH time zone AND
NOT Upper_inf("measures_measure"."valid_between")
) THEN Daterange(Lower("measures_measure"."valid_between"), "regulations_regulation"."effective_end_date", [])
WHEN (
"regulations_regulation"."effective_end_date" IS NOT NULL AND
Upper_inf("measures_measure"."valid_between")
) THEN "measures_measure"."valid_between"
WHEN (
"measures_measure"."terminating_regulation_id" IS NOT NULL AND
NOT Upper_inf("measures_measure"."valid_between")
) THEN "measures_measure"."valid_between"
WHEN "measures_measure"."generating_regulation_id" IS NOT NULL THEN Daterange(Lower("measures_measure"."valid_between"), "regulations_regulation"."effective_end_date", [])
ELSE "measures_measure"."valid_between"
END AS "db_effective_valid_between"
FROM "measures_measure"
INNER JOIN "regulations_regulation"
ON "measures_measure"."generating_regulation_id" = "regulations_regulation"."trackedmodel_ptr_id"
INNER JOIN "common_trackedmodel"
ON "measures_measure"."trackedmodel_ptr_id" = "common_trackedmodel"."id"
"""
return self.annotate(
db_effective_valid_between=Case(
When(
valid_between__upper_inf=False,
generating_regulation__effective_end_date__isnull=False,
valid_between__contains=F(
"generating_regulation__effective_end_date",
),
then=Func(
Func(F("valid_between"), function="LOWER"),
F("generating_regulation__effective_end_date"),
Value("[]"),
function="DATERANGE",
),
),
When(
valid_between__upper_inf=False,
generating_regulation__effective_end_date__isnull=False,
then=F("valid_between"),
),
When(
valid_between__upper_inf=False,
terminating_regulation__isnull=False,
then=F("valid_between"),
),
When(
generating_regulation__isnull=False,
then=Func(
Func(F("valid_between"), function="LOWER"),
F("generating_regulation__effective_end_date"),
Value("[]"),
function="DATERANGE",
),
),
default=F("valid_between"),
output_field=TaricDateRangeField(),
),
)
class MeasureConditionQuerySet(TrackedModelQuerySet, DutySentenceMixin):
def with_reference_price_string(self):
return self.annotate(
reference_price_string=Case(
When(
duty_amount__isnull=True,
then=Value(""),
),
default=Concat(
"duty_amount",
Case(
When(
monetary_unit__isnull=True,
then=Value(""),
),
default=Concat(
Value(" "),
F("monetary_unit__code"),
),
),
Case(
When(
condition_measurement__measurement_unit__code__isnull=True,
then=Value(""),
),
default=Concat(
Value(" "),
F("condition_measurement__measurement_unit__code"),
),
),
Case(
When(
condition_measurement__measurement_unit_qualifier__code__isnull=True,
then=Value(""),
),
default=Concat(
Value(" "),
F(
"condition_measurement__measurement_unit_qualifier__code",
),
),
),
output_field=CharField(),
),
),
)
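# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal example of how the annotations above are typically consumed,
# assuming a Django model (called ``Measure`` here purely for illustration)
# whose default manager is built from ``MeasuresQuerySet.as_manager()`` in a
# configured Django project.
def example_duty_sentences(measure_model):
    """Return (db_effective_valid_between, duty_sentence) pairs for every measure."""
    queryset = measure_model.objects.with_duty_sentence().with_effective_valid_between()
    return [(m.db_effective_valid_between, m.duty_sentence) for m in queryset]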
| 2.171875 | 2 |
test_utils.py | jodietrich/wgan_domain_adaptation | 4 | 12787774 | import logging
import numpy as np
import tensorflow as tf
from collections import OrderedDict
import utils
from clf_model_multitask import predict
def get_latest_checkpoint_and_log(logdir, filename):
init_checkpoint_path = utils.get_latest_model_checkpoint_path(logdir, filename)
logging.info('Checkpoint path: %s' % init_checkpoint_path)
last_step = int(init_checkpoint_path.split('/')[-1].split('-')[-1])
logging.info('Latest step was: %d' % last_step)
return init_checkpoint_path
def evaluate_scores(true_labels, prediction, measures_dict):
scores_one_exp = OrderedDict()
for measure_name, measure in measures_dict.items():
logging.info('evaluating ' + measure_name)
logging.info(measure)
scores_one_exp[measure_name] = measure(y_true = np.asarray(true_labels), y_pred = np.asarray(prediction))
return scores_one_exp
def map_labels_to_list(labels, label_list):
# label_list is a python list with the labels
# map labels in range(len(label_list)) to the labels in label_list
# E.g. [0,0,1,1] becomes [0,0,2,2] (if 1 doesnt exist in the data)
# label gets mapped to label_list[label]
label_lookup = tf.constant(np.array(label_list))
return tf.gather(label_lookup, labels)
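# Editor's illustrative sketch (not part of the original file): demonstrates
# the label re-mapping performed by map_labels_to_list. Evaluating the
# returned tensor (in a tf.Session, since this module uses graph mode)
# yields [0, 0, 2, 2] for the example inputs below.
def _demo_map_labels_to_list():
    labels = [0, 0, 1, 1]
    label_list = [0, 2]  # label value 1 does not exist in the data
    mapped = map_labels_to_list(labels, label_list)
    with tf.Session() as sess:
        return sess.run(mapped)  # -> array([0, 0, 2, 2])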
def build_clf_graph(img_tensor_shape, clf_config, joint=False):
graph_classifier = tf.Graph()
with graph_classifier.as_default():
# image (batch size = 1)
x_clf_pl = tf.placeholder(tf.float32, img_tensor_shape, name='z')
# classification of the real source image and the fake target image
predicted_labels, softmax, age_softmaxs = predict(x_clf_pl, clf_config)
# scope = tf.get_variable_scope()
# scope.reuse_variables()
# map labels in range(len(label_list)) to the labels in label_list
        # E.g. [0,0,1,1] becomes [0,0,2,2] (if 1 doesn't exist in the data)
predicted_labels_mapped = map_labels_to_list(predicted_labels, clf_config.label_list)
# Add the variable initializer Op.
init = tf.global_variables_initializer()
# Create a savers for writing training checkpoints.
saver = tf.train.Saver() # disc loss is scaled negative EM distance
predictions = {'label': predicted_labels_mapped, 'diag_softmax': softmax, 'age_softmaxs': age_softmaxs}
return graph_classifier, x_clf_pl, predictions, init, saver
def build_gen_graph(img_tensor_shape, gan_config):
# noise_shape
generator = gan_config.generator
graph_generator = tf.Graph()
with graph_generator.as_default():
# source image (batch size = 1)
xs_pl = tf.placeholder(tf.float32, img_tensor_shape, name='xs_pl')
if gan_config.use_generator_input_noise:
noise_shape = gan_config.generator_input_noise_shape.copy()
# adjust batch size
noise_shape[0] = img_tensor_shape[0]
noise_in_gen_pl = tf.random_uniform(shape=noise_shape, minval=-1, maxval=1)
else:
noise_in_gen_pl = None
# generated fake image batch
xf = generator(xs=xs_pl, z_noise=noise_in_gen_pl, training=False)
# Add the variable initializer Op.
init = tf.global_variables_initializer()
# Create a savers for writing training checkpoints.
saver = tf.train.Saver()
return graph_generator, xs_pl, xf, init, saver | 2.296875 | 2 |
RF/controllers/schedule.py | JaronrH/RF | 2 | 12787775 | <filename>RF/controllers/schedule.py<gh_stars>1-10
from flask.ext.classy import FlaskView, route
from components import featureBroker
from flask import render_template, request
from datetime import datetime
class ScheduleController(FlaskView):
route_base = '/schedule/'
interface = featureBroker.RequiredFeature('entityInterfaces', featureBroker.HasMethods('getCommand'))
scheduledCommands = featureBroker.RequiredFeature('scheduledCommands', featureBroker.HasMethods('scheduleSingleCommand'))
@route('/button/<button_ref>/<command_ref>', methods = ['POST'])
def scheduleCommand(self, button_ref, command_ref):
"""Processes a button request."""
# Get Command
command = self.interface.getCommand(button_name=button_ref, command_name=command_ref)
if (command == None):
try:
command = self.interface.getCommand(button_id=int(button_ref), command_name=command_ref)
except:
command = None
if (command == None):
try:
command = self.interface.getCommand(button_id=int(button_ref), command_id=int(command_ref))
except:
command = None
if (command == None):
try:
command = self.interface.getCommand(button_name=button_ref, command_id=int(command_ref))
except:
command = None
if (command == None):
return abort(400)
# Get JSON data from post
jsonData = request.get_json(force=True)
# Get Hours/Min from time in body
time = jsonData['time'].split(':')
if (len(time) != 2):
return abort(400)
hour = time[0]
min = time[1]
# Schedule Command
self.scheduledCommands.scheduleSingleCommand(command, 'cron', jsonData['deleteWhenDone'], hour=hour, minute=min)
return "{}", 200
featureBroker.features.Provide('controller', ScheduleController) | 2.40625 | 2 |
examples/classify_text.py | Hironsan/google-natural-language-sampler | 12 | 12787776 | <reponame>Hironsan/google-natural-language-sampler<gh_stars>10-100
import argparse
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
def main(text):
client = language.LanguageServiceClient()
document = types.Document(
content=text,
type=enums.Document.Type.PLAIN_TEXT)
response = client.classify_text(document=document)
for category in response.categories:
print(u'=' * 20)
print(u'{:<16}: {}'.format('name', category.name))
print(u'{:<16}: {}'.format('confidence', category.confidence))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('text', help='The text you\'d like to classify text.')
args = parser.parse_args()
main(args.text)
| 2.765625 | 3 |
intellectmoney/helpers.py | ZvonokComGroup/django-intellectmoney | 0 | 12787777 | <gh_stars>0
import hashlib
from intellectmoney import settings
def checkHashOnReceiveResult(data):
return getHashOnReceiveResult(data) == data.get('hash')
def getHashOnReceiveResult(data):
secretKey = settings.SECRETKEY
serviceName = data.get('serviceName', '')
eshopId = data.get('eshopId', '')
orderId = data.get('orderId', '')
eshopAccount = data.get('eshopAccount')
recipientAmount = data.get('recipientAmount', '')
recipientCurrency = data.get('recipientCurrency', '')
paymentStatus = data.get('paymentStatus', '')
userName = data.get('userName', '')
userEmail = data.get('userEmail', '')
paymentData = data.get('paymentData')
key = '%s::%s::%s::%s::%s::%s::%s::%s::%s::%s::%s' % (
eshopId, orderId, serviceName, eshopAccount, recipientAmount,
recipientCurrency, paymentStatus, userName, userEmail, paymentData,
secretKey,
)
key = key.encode('windows-1251', errors='ignore')
return hashlib.md5(key).hexdigest()
def getHashOnRequest(data):
secretKey = settings.SECRETKEY
serviceName = data.get('serviceName', '')
eshopId = data.get('eshopId')
orderId = data.get('orderId')
purchaseAmount = data.get('recipientAmount')
currency = data.get('recipientCurrency')
key = '%s::%s::%s::%s::%s::%s' % (
eshopId, orderId, serviceName, purchaseAmount, currency, secretKey,
)
key = key.encode('windows-1251', errors='ignore')
return hashlib.md5(key).hexdigest()
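# Editor's illustrative sketch (not part of the original module): the shape of
# the payload expected by getHashOnRequest. All field values below are made-up
# placeholders, and settings.SECRETKEY must be configured for a real run.
def _example_request_hash():
    payload = {
        'serviceName': 'IntellectMoney',
        'eshopId': '12345',
        'orderId': 'order-1',
        'recipientAmount': '100.00',
        'recipientCurrency': 'RUB',
    }
    return getHashOnRequest(payload)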
| 2.109375 | 2 |
selenium_toolbox/buster_captcha_solver/buster_captcha_solver.py | JingerTea/undetetable_selenium | 0 | 12787778 | import requests
import os
import zipfile
def buster_captcha_solver(dir, unzip = False):
url = "https://api.github.com/repos/dessant/buster/releases/latest"
r = requests.get(url)
# Chrome
name = r.json()["assets"][0]["name"]
dl_url = r.json()["assets"][0]["browser_download_url"]
path = dir + "//" + name
if not os.path.exists(path):
r = requests.get(dl_url, stream=True)
with open(path, 'wb') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
if unzip == True:
folder = os.path.splitext(path)[0]
with zipfile.ZipFile(path, 'r') as zip_ref:
zip_ref.extractall(folder)
path = os.path.abspath(folder)
else:
path = os.path.abspath(path)
return path
if __name__ == "__main__":
foo = buster_captcha_solver("..//chrome_extension")
print(foo)
| 2.875 | 3 |
users-backend/users/internal_api/tests/queries/test_user_by_email.py | pauloxnet/pycon | 2 | 12787779 | <gh_stars>1-10
from ward import test
from users.tests.api import internalapi_graphql_client
from users.tests.factories import user_factory
from users.tests.session import db
@test("correctly gets the user when sending a valid email")
async def _(
internalapi_graphql_client=internalapi_graphql_client,
db=db,
user_factory=user_factory,
):
internalapi_graphql_client.force_service_login(issuer="association-backend")
user_1 = await user_factory(
email="<EMAIL>", fullname="Name", is_staff=False
)
await user_factory(email="<EMAIL>", fullname="Another", is_staff=False)
await user_factory(email="<EMAIL>", fullname="Name", is_staff=False)
query = """query($email: String!) {
userByEmail(email: $email) {
id
}
}"""
response = await internalapi_graphql_client.query(
query, variables={"email": user_1.email}
)
assert not response.errors
assert response.data["userByEmail"]["id"] == str(user_1.id)
@test("returns None when the email doesn't exist")
async def _(
internalapi_graphql_client=internalapi_graphql_client,
db=db,
user_factory=user_factory,
):
internalapi_graphql_client.force_service_login(issuer="association-backend")
await user_factory(email="<EMAIL>", fullname="Name", is_staff=False)
await user_factory(email="<EMAIL>", fullname="Another", is_staff=False)
await user_factory(email="<EMAIL>", fullname="Name", is_staff=False)
query = """query($email: String!) {
userByEmail(email: $email) {
id
}
}"""
response = await internalapi_graphql_client.query(
query, variables={"email": "<EMAIL>"}
)
assert not response.errors
assert response.data["userByEmail"] is None
@test("returns None with invalid email")
async def _(
internalapi_graphql_client=internalapi_graphql_client,
db=db,
user_factory=user_factory,
):
internalapi_graphql_client.force_service_login(issuer="association-backend")
await user_factory(email="<EMAIL>", fullname="Name", is_staff=False)
await user_factory(email="<EMAIL>", fullname="Another", is_staff=False)
await user_factory(email="<EMAIL>", fullname="Name", is_staff=False)
query = """query($email: String!) {
userByEmail(email: $email) {
id
}
}"""
response = await internalapi_graphql_client.query(
query, variables={"email": "testemail"}
)
assert not response.errors
assert response.data["userByEmail"] is None
@test("requires authentication")
async def _(
internalapi_graphql_client=internalapi_graphql_client,
db=db,
user_factory=user_factory,
):
await user_factory(email="<EMAIL>", fullname="Name", is_staff=False)
await user_factory(email="<EMAIL>", fullname="Another", is_staff=False)
await user_factory(email="<EMAIL>", fullname="Name", is_staff=False)
query = """query($email: String!) {
userByEmail(email: $email) {
id
}
}"""
response = await internalapi_graphql_client.query(
query, variables={"email": "testemail"}
)
assert response.errors[0]["message"] == "Forbidden"
assert response.data["userByEmail"] is None
| 2.421875 | 2 |
src/climsoft_api/api/observationfinal/router.py | faysal-ishtiaq/climsoft-api | 0 | 12787780 | <filename>src/climsoft_api/api/observationfinal/router.py<gh_stars>0
import climsoft_api.api.observationfinal.schema as observationfinal_schema
import fastapi
from climsoft_api.api import deps
from climsoft_api.services import observationfinal_service
from climsoft_api.utils.response import get_success_response, \
get_error_response, get_success_response_for_query
from fastapi import APIRouter, Depends
from sqlalchemy.orm.session import Session
from climsoft_api.utils.response import translate_schema
import logging
from climsoft_api.utils.exception import handle_exceptions
router = APIRouter()
logger = logging.getLogger(__file__)
logging.basicConfig(level=logging.INFO)
@router.get(
"/observation-finals"
)
@handle_exceptions
def get_observation_finals(
recorded_from: str = None,
described_by: int = None,
obs_datetime: str = None,
qc_status: int = None,
acquisition_type: int = None,
obs_level: int = None,
obs_value: float = None,
flag: str = None,
period: int = None,
qc_type_log: str = None,
data_form: str = None,
captured_by: str = None,
mark: bool = None,
temperature_units: str = None,
precipitation_units: str = None,
cloud_height_units: str = None,
vis_units: str = None,
data_source_timezone: int = None,
limit: int = 25,
offset: int = 0,
db_session: Session = Depends(deps.get_session),
):
total, observation_finals = observationfinal_service.query(
db_session=db_session,
recorded_from=recorded_from,
obs_datetime=obs_datetime,
qc_status=qc_status,
described_by=described_by,
acquisition_type=acquisition_type,
obs_value=obs_value,
obs_level=obs_level,
flag=flag,
period=period,
qc_type_log=qc_type_log,
data_form=data_form,
captured_by=captured_by,
mark=mark,
temperature_units=temperature_units,
precipitation_units=precipitation_units,
cloud_height_units=cloud_height_units,
vis_units=vis_units,
data_source_timezone=data_source_timezone,
limit=limit,
offset=offset,
)
return get_success_response_for_query(
limit=limit,
total=total,
offset=offset,
result=observation_finals,
message=_("Successfully fetched observation finals."),
schema=translate_schema(
_,
observationfinal_schema.ObservationFinalQueryResponse.schema()
)
)
@router.get(
"/observation-finals/{recorded_from}/{described_by}/{obs_datetime}"
)
@handle_exceptions
def get_observation_final_by_id(
recorded_from: str,
described_by: int,
obs_datetime: str,
db_session: Session = Depends(deps.get_session),
):
return get_success_response(
result=[
observationfinal_service.get(
db_session=db_session,
recorded_from=recorded_from,
described_by=described_by,
obs_datetime=obs_datetime,
)
],
message=_("Successfully fetched observation final."),
schema=translate_schema(
_,
observationfinal_schema.ObservationFinalWithChildrenResponse.schema()
)
)
@router.post(
"/observation-finals",
)
@handle_exceptions
def create_observation_final(
data: observationfinal_schema.CreateObservationFinal,
db_session: Session = Depends(deps.get_session),
):
return get_success_response(
result=[observationfinal_service.create(
db_session=db_session,
data=data
)],
message=_("Successfully created observation final."),
schema=translate_schema(
_,
observationfinal_schema.ObservationFinalResponse.schema()
)
)
@router.put(
"/observation-finals/{recorded_from}/{described_by}/{obs_datetime}"
)
@handle_exceptions
def update_observation_final(
recorded_from: str,
described_by: int,
obs_datetime: str,
data: observationfinal_schema.UpdateObservationFinal,
db_session: Session = Depends(deps.get_session),
):
return get_success_response(
result=[
observationfinal_service.update(
db_session=db_session,
recorded_from=recorded_from,
described_by=described_by,
obs_datetime=obs_datetime,
updates=data,
)
],
message=_("Successfully updated observation final."),
schema=translate_schema(
_,
observationfinal_schema.ObservationFinalResponse.schema()
)
)
@router.delete(
"/observation-finals/{recorded_from}/{described_by}/{obs_datetime}"
)
@handle_exceptions
def delete_observation_final(
recorded_from: str,
described_by: int,
obs_datetime: str,
db_session: Session = Depends(deps.get_session),
):
observationfinal_service.delete(
db_session=db_session,
recorded_from=recorded_from,
described_by=described_by,
obs_datetime=obs_datetime,
)
return get_success_response(
result=[],
message=_("Successfully deleted observation final."),
schema=translate_schema(
_,
observationfinal_schema.ObservationFinalResponse.schema()
)
)
| 2.03125 | 2 |
AlgorithmsAndDataStructures/mod3/Backpack.py | BootyAss/bmstu | 0 | 12787781 | <gh_stars>0
import math
from functools import reduce
class BackpackSolver:
def __init__(self, maxWeight):
if maxWeight < 0:
raise(Exception)
self.maxWeight = maxWeight;
self.stuff = []
def add(self, weight, value):
if weight < 0 or value < 0:
raise(Exception)
self.stuff.append([weight,value])
def calcGCD(self):
weightArr = [self.maxWeight]
for i in self.stuff:
weightArr.append(i[0])
self.gcd = reduce(math.gcd, weightArr)
def reduce_weights(self):
self.calcGCD()
self.maxWeight = int(self.maxWeight / self.gcd)
for i in self.stuff:
i[0] = int(i[0] / self.gcd)
def calcCost(self, i, j):
woutCurrent = self.matrix[i - 1][j]
cost = self.stuff[i - 1][1]
weight = j - self.stuff[i - 1][0]
if weight < 0:
return woutCurrent
wCurrent = cost + self.matrix[i - 1][weight]
return max(wCurrent, woutCurrent)
def algorithm(self):
self.reduce_weights()
rows = len(self.stuff) + 1
cols = self.maxWeight + 1
self.matrix = []
for i in range(rows):
self.matrix.append([0] * cols)
for i in range(1, rows):
for j in range(cols):
self.matrix[i][j] = self.calcCost(i, j)
chosenItems = []
totalWeight = 0
totalValue = 0
i = rows - 1
j = cols - 1
while j >= 0 and i > 0:
if self.matrix[i][j] != self.matrix[i - 1][j]:
current = self.stuff[i - 1]
j -= current[0]
chosenItems.append(i)
totalWeight += current[0]
totalValue += current[1]
i -= 1
return chosenItems, totalWeight*self.gcd, totalValue
def emptyLine(line):
    line = line.replace(' ', '')
if line:
return False
return True
cycle = True
backpack = None
while cycle:
try:
line = input()
if emptyLine(line):
continue
cmd = line.split(' ', 1)
try:
if not backpack:
if len(cmd) == 1:
maxWeight = int(cmd[0])
backpack = BackpackSolver(maxWeight)
else:
raise(Exception)
else:
if len(cmd) == 2:
weight = int(cmd[0])
value = int(cmd[1])
backpack.add(weight, value)
else:
raise(Exception)
except Exception:
print('error')
continue
except Exception:
cycle = False
if backpack:
items, weigth, value = backpack.algorithm()
print(weigth, value)
for i in range(len(items) - 1, -1, -1):
print(items[i])
| 2.875 | 3 |
backend/providers.py | sshaman1101/what-about-blank | 0 | 12787782 | <filename>backend/providers.py<gh_stars>0
import json
import asyncio
import threading
from datetime import datetime
from urllib.parse import urlparse
from aiohttp import ClientSession
from backend import config, storage, const
class BaseJSONProvider:
def __init__(self, url, prov_id=None, headers=None):
self._url = urlparse(url)
self._id = prov_id if prov_id else self._url.netloc
self._headers = headers if headers else {'content-type': 'application/json'}
def __str__(self) -> str:
return self._id
def _wrap(self, data) -> dict:
"""wraps fetched data into storage-friendly dict"""
return dict(
id=self._id,
data=data,
timestamp=datetime.now().isoformat(),
)
async def _fetch_json(self, url, session) -> dict:
"""asynchronously fetches data by given url, then parses JSON response into dict"""
async with session.get(url, headers=self._headers) as response:
data = await response.read()
return json.loads(data)
def process(self, data) -> dict:
"""override this method if any processing of fetched data is required"""
return self._wrap(data)
async def collect(self) -> dict:
"""the entrypoint of the Provider class, does all work of data-gathering"""
async with ClientSession() as session:
t = asyncio.ensure_future(self._fetch_json(self._url.geturl(), session))
data = await asyncio.gather(t)
return self.process(data)
class GithubPullRequestsProvider(BaseJSONProvider):
def process(self, data):
"""override process method to extract only meaningful data
from Github's PR list response.
"""
processed = list()
for pull in data[0]:
processed.append(dict(
id=pull['number'],
author=pull['user']['login'],
created=pull['created_at'],
title=pull['title'],
))
return self._wrap(processed)
async def collect_updates(cfg: config.Config, db: storage.Storage):
# TODO: this should run forever
#
# TODO: wanna to look at previous update timestamp
# TODO: and decide starting related provider or not.
tasks = [p.collect() for p in init_providers(cfg)]
for fut in asyncio.as_completed(tasks):
result = await fut
pid = result['id']
print('saving "%s" provider data' % pid)
db.save_key(pid, result)
def init_providers(cfg: config.Config):
return [
GithubPullRequestsProvider(
url=cfg.github_repo_path,
prov_id=const.GITHUB_PULLS_PROVIDER_ID,
headers={'accept': 'application/vnd.github.mercy-preview+json'}
),
]
def threaded_main(loop: asyncio.AbstractEventLoop, cfg: config.Config, db: storage.Storage):
# bing given event loop to thread
asyncio.set_event_loop(loop)
# run async tasks bound to separate thread
loop.run_until_complete(collect_updates(cfg, db))
def start_background_updates(cfg: config.Config, db: storage.Storage) -> threading.Thread:
"""start background processing bound to another thread,
returns thread handle to be able to gracefully stop it
on application shutdown."""
# FIXME: need to find proper logging config and replace any `print`s
print("starting background processing thread ...")
loop = asyncio.get_event_loop()
t = threading.Thread(target=threaded_main, args=(loop, cfg, db))
t.start()
return t
| 2.84375 | 3 |
session-5/tests/test_5.py | jasoriya/CADL_Kadenze | 1 | 12787783 | <reponame>jasoriya/CADL_Kadenze
import matplotlib
matplotlib.use('Agg')
import tensorflow as tf
import numpy as np
from libs import utils
from libs import dataset_utils
from libs import charrnn
from libs import vaegan
from libs import celeb_vaegan
def test_alice():
charrnn.test_alice()
def test_trump():
charrnn.test_trump()
def test_vaegan_training():
utils.get_celeb_files()
vaegan.test_celeb(n_epochs=1,
crop_shape=[32, 32, 3],
n_filters=[10],
filter_sizes=[3])
def test_celeb_vaegan():
g = tf.Graph()
with tf.Session(graph=g) as sess:
net = celeb_vaegan.get_celeb_vaegan_model()
tf.import_graph_def(
net['graph_def'],
name='net',
input_map={
'encoder/variational/random_normal:0':
np.zeros(512, dtype=np.float32)
}
)
names = [op.name for op in g.get_operations()]
print(names)
| 2.15625 | 2 |
Pacote Dawload/Projeto progamas Python/ex1095 Sequencia I J1 For.py | wagnersistemalima/Exercicios-Python-URI-Online-Judge-Problems---Contests | 1 | 12787784 | # ex1095 Sequencia i j1
numero = 1
for c in range(60, -1, -5):  # for each number between 0 and 60, count down from 60, stepping by 5
print('I={} J={}'.format(numero, c))
    numero = numero + 3  # numero starts at 1 and jumps 3 by 3 until the countdown reaches 0 | 3.46875 | 3 |
src/reuters.py | Ekkehard/DL-Benchmarks | 1 | 12787785 | <filename>src/reuters.py
# Python Implementation: Reuters benchmark
# -*- coding: utf-8 -*-
##
# @file reuters.py
#
# @version 1.0.1
#
# @par Purpose
# Run a Reuters newswires classification task using keras.
#
# @par Comments
# This is the third experiment of Chollet's book featuring a text
# classification task with 46 categories.
#
# This is Python 3 code!
# Known Bugs: none
#
# @author <NAME> <<EMAIL>> (C) 2019-2021
#
# @copyright See COPYING file that comes with this distribution
#
# File history:
#
# Date | Author | Modification
# -----------------+----------------+------------------------------------------
# Sat Jul 06 2019 | <NAME> | converted from Chollet's book
# Thu Jul 01 2021 | <NAME> | omitted pickle-fix on Mac
# | |
from sys import platform
import time
import numpy as np
from keras import models
from keras import layers
from keras.utils import to_categorical
from keras.datasets import reuters
def vectorizeSequences( sequences, dtype, dimension=10000 ):
results = np.zeros( (len( sequences ), dimension), dtype=dtype )
for i, sequence in enumerate( sequences ):
results[i, sequence] = 1.
return results
def testRun( dtype ):
if platform != "darwin":
# save np.load on everything but Mac, which takes care of that in their
# own TensorFlow
npLoadOld = np.load
# modify the default parameters of np.load
np.load = lambda *a,**k: npLoadOld( *a, allow_pickle=True, **k )
# call load_data with allow_pickle implicitly set to true
(trainData, trainLabels), (testData, testLabels) = \
reuters.load_data( num_words=10000 )
if platform != "darwin":
# restore np.load for future normal usage
np.load = npLoadOld
xTrain = vectorizeSequences( trainData, dtype )
xTest = vectorizeSequences( testData, dtype )
trainLabels = to_categorical( trainLabels ).astype( dtype )
testLabels = to_categorical( testLabels ).astype( dtype )
network = models.Sequential()
network.add( layers.Dense( 64, activation="relu", input_shape=(10000,) ) )
network.add( layers.Dense( 64, activation="relu" ) )
network.add( layers.Dense( 46, activation="softmax" ) )
network.compile( optimizer="rmsprop",
loss="categorical_crossentropy",
metrics=["accuracy"] )
start = time.time()
network.fit( xTrain, trainLabels, epochs=9, batch_size=512 )
trainingTime = time.time() - start
start = time.time()
# loss is e.g. least squares error, accuracy is after non-linear decision
testLoss, testAccuracy = network.evaluate( xTest, testLabels )
testTime = time.time() - start
return (len( xTrain ), len( xTest ),
trainingTime, testTime, testAccuracy, network)
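# Editor's illustrative sketch (not part of the original file): invoking
# testRun directly. Guarded behind __main__ because it downloads the Reuters
# data set and trains for several epochs.
if __name__ == "__main__":
    nTrain, nTest, trainTime, testTime, accuracy, _ = testRun(np.float32)
    print("train=%d test=%d trainTime=%.1fs testTime=%.1fs accuracy=%.3f"
          % (nTrain, nTest, trainTime, testTime, accuracy))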
| 2.78125 | 3 |
word_embedding_train.py | Astromis/PhraseSegmentation | 0 | 12787786 | # -*- coding: utf-8 -*-
from __future__ import division
import gensim
import nltk
import smart_open
import json
from sentence_extracor import segment_sentences_tok
from gensim.models import TfidfModel
from gensim.corpora import Dictionary
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
def read_corpus(fname, tokens_only=False):
with smart_open.smart_open(fname) as f: #encoding="iso-8859-1"
for i, line in enumerate(f):
if tokens_only:
yield gensim.utils.simple_preprocess(line)
else:
# For training data, add tags
yield gensim.models.doc2vec.TaggedDocument(gensim.utils.simple_preprocess(line), [i])
def read_list_corpus(list_corp, tokens_only=False):
for i, paragraph in enumerate(list_corp):
if tokens_only:
yield gensim.utils.simple_preprocess(paragraph[0])
else:
yield gensim.models.doc2vec.TaggedDocument(gensim.utils.simple_preprocess(paragraph[0]), [i])
'''
#this block is for building sentence embeddings
#f = open("dataset.txt")
#data = f.read().decode("utf8")
#f.close()
#tokens = nltk.word_tokenize(data)
#sents = segment_sentences_tok(tokens)
'''
'''
dataset = json.load(open("./datasets/dataset_paragraphs.json"))
model = gensim.models.Word2Vec(size=256, window=15, min_count=2, workers=4) #window=10 window=20
model.build_vocab(sents)
print("String training word2vec model...")
model.train(sents, total_examples=model.corpus_count, epochs=50)
model.save("./word2vec_size-100_window-5_min-count-1_workers-4.model")
model = gensim.models.FastText(size=256, window=15, min_count=2, workers=4) #window=10 window=20
model.build_vocab(sents)
print("String training fasttext model...")
model.train(sents, total_examples=model.corpus_count, epochs= 50)
model.save("./fasttext")
#train_corpus = list(read_corpus('sents_file.txt'))
train_corpus = list(read_list_corpus(dataset))
model = gensim.models.doc2vec.Doc2Vec(vector_size=256, min_count=2, workers=4)
model.build_vocab(train_corpus)
print("Starting training doc2vec model...")
model.train(train_corpus, total_examples=model.corpus_count, epochs=50)
model.save('./my_model.doc2vec')
#dataset = list(read_corpus('sents_file.txt', tokens_only=True))
dataset = list(read_list_corpus(dataset, tokens_only=True))
dct = Dictionary(dataset)
corpus = [dct.doc2bow(line) for line in dataset]
model = TfidfModel(corpus)
matrix = model[corpus]
print dir(matrix)
#model.save("./tfidf")'''
| 2.5 | 2 |
src/block/celery/address.py | Andyye-jx/block_onchain | 0 | 12787787 | import re
import json
import inject
import logging
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from celery import Celery
from block.celery import APITask
from block.config import RedisCache, Config
from block.libs.dingding import DingDing
logger = logging.getLogger(__name__)
current_app = inject.instance(Celery)
DEFAULT_OPTS = {
"bind": True,
"exchange": "block",
"base": APITask,
}
@current_app.task(name="address.query_address", **DEFAULT_OPTS)
def query_address_by_etherscan(self, address, code):
del self
    # get the currencies currently stored in redis
monitor_cache = inject.instance(RedisCache)
currencys = monitor_cache.hget(code, address)
if not currencys:
return
currency_list = json.loads(currencys.decode())
ua = UserAgent()
headers = {"user-agent": ua.chrome}
url = Config.scan_url.get(code)
resp = requests.get(url + address, headers=headers)
result = resp.text
soup = BeautifulSoup(result, 'lxml')
data = soup.select("ul.list-unstyled > li.list-custom > a.link-hover")
new_list, need_push = [], False
regex1 = re.compile(r"\/token\/(.+)\?")
for i in data:
url = regex1.findall(i.get("href"))[0]
coin = (i.select("span.list-amount")[0].string).split(" ", 1)[-1]
if coin.upper() not in currency_list:
currency_list.append(coin.upper())
new_list.append((coin, url))
need_push = True
if need_push:
monitor_cache.hset(code, address, json.dumps(currency_list))
dingding = DingDing(Config.ding_url)
dingding.send_message(address, code, new_list)
return
| 2.1875 | 2 |
notebooks/fpqNodeMap.py | Leguark/map2loop | 0 | 12787788 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 14:29:18 2020
fpqNodeMap
plots a map of nodes from an input file of x,y nodes
input:
name of ascii text file of x,y nodes
options:
colour of nodes
grid, on/off
@author: davidhealy
"""
import fracpaq as fpq
import matplotlib.pylab as plt
import sys
# defaults
sColour = 'b'
bGrid = True
# process command line arguments; filename is mandatory
bFilename = False
for sArg in sys.argv[1:]:
# input data file
if "-I" in sArg:
fn = sArg[2:]
bFilename = True
# line colour for traces
if "-C" in sArg:
sColour = sArg[2:]
# grid on
if "-G" in sArg:
bGrid = True
# filename is mandatory
if not bFilename:
sys.exit('Usage: python3 fpqNodeMap.py -I<inputfilename>')
# get the x,y nodes from the file
nodes = fpq.getNodes(fn)
nodexlist = nodes[0]
nodeylist = nodes[1]
# get some stats
nNodes = int(sum(len(x) for x in nodexlist))
# get plot limits
nodelist = nodexlist.copy()
nodelist.extend(nodeylist.copy())
xmin, xmax, ymin, ymax = fpq.getPlotLimits(nodelist)
# plot the traces
plt.figure(figsize=(6,6))
for node in range(0, len(nodexlist)):
plt.plot(nodexlist[node], nodeylist[node], sColour+'o')
plt.xlabel('X, pixels')
plt.ylabel('Y, pixels')
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.gca().set_aspect('equal')
plt.grid(bGrid)
plt.title('Node map, n=%i' % nNodes)
plt.savefig("fpqNodeMap.png", dpi=300)
print('Plotted %5i nodes' % nNodes) | 2.921875 | 3 |
PS/this_is_coding_test.py | tkxkd0159/dsalgo | 0 | 12787789 | # This is coding test
class Greedy:
def __init__(self):
pass
@staticmethod
def change(n):
count = 0
coin_types = [500, 100, 50, 10]
for c in coin_types:
count += n // c
n %= c
return count
@staticmethod
def max_plus(case):
n, m, k = map(int, input().split())
data = list(map(int, input().split()))
data.sort(reverse=True)
first = data[0]
second = data[1]
res = 0
if case == 1:
for i in range(1, m+1):
if i % (k+1) == 0:
res += second
print(i)
else:
res += first
elif case == 2:
count = int(m / (k + 1)) * k
count += m % (k + 1)
res += count * first
res += (m - count) * second
return res
@staticmethod
def num_cardgame():
h, w = map(int, input().split())
res = []
for _ in range(h):
res.append(min(list(map(int, input().split()))))
return max(res)
@staticmethod
def to_one(n, k, case):
cnt = 0
if case == 1:
            # naive version: subtract 1 or divide by k, mirroring case 2 below
            while n != 1:
                if n % k == 0:
                    n //= k
                    cnt += 1
                else:
                    n -= 1
                    cnt += 1
if case == 2:
while True:
target = (n // k) * k
cnt += (n - target)
n = target
if n < k:
break
cnt += 1
n //= k
cnt += (n - 1)
return cnt
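# Editor's illustrative sketch (not part of the original file): driving the
# greedy coin-change solution above with a hard-coded amount instead of stdin.
def _demo_greedy_change():
    # 1260 = 2*500 + 2*100 + 1*50 + 1*10 -> 6 coins
    return Greedy.change(1260)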
class Implementations:
def __init__(self) -> None:
pass
@staticmethod
def udlr(n, path, case):
"""
Args:
n (int): the size of map
path (list): sequences of moving
"""
limit = [1, n]
coord = [1, 1]
if case == 'mine':
for do_ in path:
if do_ == 'D':
coord[0] += 1
if coord[0] > limit[1]:
coord[0] -= 1
elif do_ == 'U':
coord[0] -= 1
if coord[0] < limit[0]:
coord[0] += 1
elif do_ == 'L':
coord[1] -= 1
if coord[1] < limit[0]:
coord[1] += 1
elif do_ == 'R':
coord[1] += 1
if coord[1] > limit[1]:
coord[1] -= 1
elif case == 'solution':
dx = [0, 0, -1, 1]
dy = [-1, 1, 0, 0]
move_types = ['L', 'R', 'U', 'D']
for do_ in path:
for i in range(len(move_types)):
if do_ == move_types[i]:
nx = coord[0] + dx[i]
ny = coord[1] + dy[i]
if nx < limit[0] or ny < limit[0] or nx > limit[1] or ny > limit[1]:
continue
coord[0], coord[1] = nx, ny
return coord
def clock(h, case):
"""Find time who include '3' its time
Args:
n (int): 00:00:00 to N:59:59
"""
if case == 1:
incl3_hour = [3, 13, 23]
incl_3 = 5 * 1 + 10
not_3 = 60 - incl_3
under_min_case = incl_3 * not_3 * 2 + incl_3 * incl_3
res = 0
for i in range(h + 1):
if i in incl3_hour:
res += 3600
else:
res += under_min_case
elif case == 2:
res = 0
for i in range(h + 1):
for j in range(60):
for k in range(60):
if '3' in str(i) + str(j) + str(k):
res += 1
return res
def knight_of_royal(pos): # c2 : 6, a1 : 2
conv_char = {"a":1, "b":2, "c":3, "d":4, "e":5, "f":6, "g":7, "h":8}
res = 0
x, y = conv_char[pos[0]], int(pos[1])
limit = [1, 8]
move_type = [(-2, -1), (-2, 1), (2, -1), (2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2)]
for dx, dy in move_type:
nx = x + dx
ny = y + dy
if nx < limit[0] or ny < limit[0] or nx > limit[1] or ny > limit[1]:
continue
else:
res += 1
return res
def grid_game(map_size, character, map):
# test case
# Implementations.grid_game([4, 4], [1, 1, 0], [[1,1,1,1],[1,0,0,1],[1,1,0,1],[1,1,1,1]])
y, x = character[0], character[1]
direction = character[2]
mymap = map
mymap[y][x] = 2
move_type = [(-1, 0), (0, 1), (1, 0), (0, -1)]
cnt = 0
move_sig = False
dir_seq = [0, 3, 2, 1]
while True:
for i in range(1, 5):
nx = x + move_type[-i+direction][1]
ny = y + move_type[-i+direction][0]
idx = dir_seq.index(direction)
if mymap[ny][nx] != 1 and mymap[ny][nx] != 2:
x = nx
y = ny
direction = dir_seq[(idx + i) % 4]
move_sig = True
mymap[ny][nx] = 2
cnt += 1
break
if not move_sig:
idx = dir_seq.index(direction)
print(type(idx))
nx = x + move_type[(idx + 2) % 4][1]
ny = y + move_type[(idx + 2) % 4][0]
if mymap[ny][nx] == 1:
break
else:
x = nx
y = ny
cnt += 1
move_sig = False
return cnt
| 3.421875 | 3 |
homeassistant/components/rituals_perfume_genie/const.py | RubenKelevra/core | 1 | 12787790 | """Constants for the Rituals Perfume Genie integration."""
DOMAIN = "rituals_perfume_genie"
COORDINATORS = "coordinators"
DEVICES = "devices"
ACCOUNT_HASH = "account_hash"
HUBLOT = "hublot"
SENSORS = "sensors"
| 0.882813 | 1 |
corenlp_client/__corenlp_client.py | Jason3900/corenlp_client | 12 | 12787791 | # -*- coding:UTF-8 -*-
import requests
import warnings
import os
import re
from nltk import Tree
from subprocess import Popen
import subprocess
import time
import shlex
import multiprocessing
from urllib import parse
class CoreNLP:
def __init__(self, url=None, lang="en", annotators=None, corenlp_dir=None, local_port=9000, max_mem=4, threads=multiprocessing.cpu_count(), timeout=150000):
if url:
self.url = url.rstrip("/")
self.annotators_list = ["tokenize","ssplit","pos","ner","parse","depparse","openie"]
self.lang = lang
self.corenlp_subprocess = None
self.timeout = timeout
if annotators and self._check_annotators_format(annotators):
self.annotators = annotators
else:
self.annotators = ",".join(self.annotators_list)
if corenlp_dir:
            if not os.path.exists(corenlp_dir):
                raise OSError("please check corenlp local path is correct! ")
if self._launch_local_server(corenlp_dir, local_port, max_mem, threads):
self.url = f"http://127.0.0.1:{local_port}"
self._request_corenlp(data="", annotators=self.annotators)
def __enter__(self):
return self
def __exit__(self, type, value, trace):
if self.corenlp_subprocess:
self.corenlp_subprocess.kill()
self.corenlp_subprocess.wait()
# os.killpg(os.getpgid(self.corenlp_subprocess.pid), 9)
def __del__(self):
if self.corenlp_subprocess:
self.corenlp_subprocess.kill()
self.corenlp_subprocess.wait()
def _check_annotators_format(self, annotators):
annotators = annotators.split(",")
for i in annotators:
if i not in self.annotators_list:
return False
return True
def _check_server_status(self):
if requests.get(self.url, verify=False).status_code != 200:
raise ConnectionError("please check your network connection, or the corenlp server is started before launching!")
@staticmethod
def _deal_path_suffix(path):
if "\\" in path:
path = path.rstrip("\\") + "\\"
else:
path = path.rstrip("/") + "/"
return path
def _launch_local_server(self, corenlp_dir, port, max_mem, threads):
corenlp_dir = self._deal_path_suffix(os.path.abspath(corenlp_dir))
tmp_dir = "tmp"
if not os.path.exists("tmp"):
os.mkdir(tmp_dir)
        if os.system("java -version") != 0:
            raise AssertionError("Java is required to launch corenlp server! ")
cmd = f'java -Djava.io.tmpdir={tmp_dir} -mx{max_mem}g ' + \
f'-cp "{corenlp_dir}*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer ' + \
f'-threads {threads} -port {port} -timeout 150000 -lazy false'
print(cmd)
cmd = shlex.split(cmd)
self.corenlp_subprocess = Popen(cmd)
time.sleep(1)
return True
def _request_corenlp(self, data, annotators):
params = {"properties": '{"annotators": "%s"}' % annotators, "pipelineLanguage": self.lang}
res = requests.post(url=self.url, params=params, data=parse.quote(data), timeout=self.timeout, verify=False)
ann_result = res.json()
return ann_result
def annotate(self, data):
ann_result = self._request_corenlp(data, self.annotators)
annotation = Annotation(ann_result)
return annotation
def tokenize(self, data, ssplit=True):
if ssplit:
annotators = "tokenize,ssplit"
else:
annotators = "tokenize"
ann_result = self._request_corenlp(data, annotators)
if ssplit:
annotation = [[token["word"] for token in sent["tokens"]] for sent in ann_result["sentences"]]
else:
annotation = [token["word"] for token in ann_result["tokens"]]
return annotation
def pos_tag(self, data):
annotators = "tokenize,ssplit,pos"
ann_result = self._request_corenlp(data, annotators)
annotation = [[token["pos"] for token in sent["tokens"]] for sent in ann_result["sentences"]]
return annotation
def ner(self, data):
annotators = "tokenize,ssplit,pos,ner"
ann_result = self._request_corenlp(data, annotators)
annotation = []
for sent in ann_result["sentences"]:
sent_ner = []
if "entitymentions" in sent:
for entity in sent["entitymentions"]:
span = (entity["characterOffsetBegin"], entity["characterOffsetEnd"])
ner = entity["ner"]
ner_entity = entity["text"]
sent_ner.append({(ner_entity,span): ner})
annotation.append(sent_ner)
return annotation
@staticmethod
def pretty_print_tree(tree):
Tree.fromstring(tree).pretty_print()
def close(self):
if self.corenlp_subprocess:
self.corenlp_subprocess.kill()
self.corenlp_subprocess.wait()
class Annotation():
def __init__(self, ann_result):
self.ann_result = ann_result
self.tokens=[]
self.parse_tree=[]
self.bi_parse_tree=[]
self.basic_dep=[]
self.enhanced_dep=[]
self.enhanced_pp_dep=[]
self.entities = []
self.openie = []
self._extract_ann()
def _extract_ann(self):
ann_dict = dict()
if "sentences" in self.ann_result:
for ann_sent in self.ann_result["sentences"]:
self.tokens.append(ann_sent["tokens"])
if "parse" in ann_sent:
self.parse_tree.append(re.sub(r"\s+", " ", ann_sent["parse"]))
if "binaryParse" in ann_sent:
self.bi_parse_tree.append(re.sub(r"\s+", " ", ann_sent["binaryParse"]))
if "basicDependencies" in ann_sent:
self.basic_dep.append(ann_sent["basicDependencies"])
if "enhancedDependencies" in ann_sent:
self.enhanced_dep.append(ann_sent["enhancedDependencies"])
if "enhancedPlusPlusDependencies" in ann_sent:
self.enhanced_pp_dep.append(ann_sent["enhancedPlusPlusDependencies"])
if "entitymentions" in ann_sent:
self.entities.append(ann_sent["entitymentions"])
if "openie" in ann_sent:
self.openie.append(ann_sent["openie"])
else:
self.tokens = self.ann_result["tokens"]
return ann_dict
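# Editor's illustrative sketch (not part of the original module): typical use
# against an already-running CoreNLP server. The localhost URL is an
# assumption; point it at your own server, or pass corenlp_dir= to launch one.
if __name__ == "__main__":
    with CoreNLP(url="http://localhost:9000", annotators="tokenize,ssplit,pos") as nlp:
        print(nlp.tokenize("CoreNLP is a handy toolkit. It works well."))
        print(nlp.pos_tag("CoreNLP is a handy toolkit."))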
| 2.484375 | 2 |
ai4good/webapp/apps.py | titorenko/compartmental-model | 0 | 12787792 | import dash
import dash_bootstrap_components as dbc
from flask import Flask
from ai4good.runner.facade import Facade
from ai4good.webapp.model_runner import ModelRunner
flask_app = Flask(__name__)
dash_app = dash.Dash(
__name__,
server=flask_app,
routes_pathname_prefix='/sim/',
suppress_callback_exceptions=True,
external_stylesheets=[dbc.themes.BOOTSTRAP]
)
facade = Facade.simple()
model_runner = ModelRunner(facade)
| 1.929688 | 2 |
__init__.py | SsnL/amcmc | 0 | 12787793 | __all__ = ['structure', 'inference']
| 1.015625 | 1 |
freeze.py | ayushb1126/memoji_fer2013 | 0 | 12787794 | <gh_stars>0
# recursive function to find hcf of two numbers.
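# Editor's sketch of what the line above describes; the original file contains
# only that prose line and no implementation.
def hcf(a, b):
    """Recursive highest common factor (Euclid's algorithm)."""
    if b == 0:
        return a
    return hcf(b, a % b)
if __name__ == "__main__":
    print(hcf(54, 24))  # -> 6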
| 1.289063 | 1 |
helper/gtrans.py | shivaroast/AidenBot | 0 | 12787795 | <gh_stars>0
'''
Google Translate helper module
(c) 2018 - laymonage
'''
from googletrans import Translator
def translate(text):
'''
Translate astr from src language to dest.
'''
text = text.split(maxsplit=2)
gtrans = Translator()
try:
result = gtrans.translate(text[2], src=text[0], dest=text[1])
except IndexError:
return "Wrong format."
if text[0] == 'auto':
result.origin = '({}) {}'.format(gtrans.detect(text[2]).lang,
result.origin)
return result.origin + ' -> ' + result.text
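# Editor's illustrative sketch (not part of the original module): the helper
# expects "<src> <dest> <text>" and accepts "auto" as the source language.
# Requires network access to the Google Translate endpoint.
if __name__ == "__main__":
    print(translate("auto en Hallo Welt"))
    print(translate("en id good morning"))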
| 3 | 3 |
flaskblog/forms.py | jianwang0212/wp4 | 0 | 12787796 | <gh_stars>0
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField, SelectField, RadioField, SelectMultipleField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError, Required
from flaskblog.models import User, Category
from wtforms.widgets import ListWidget, CheckboxInput
from flaskblog import app, db, bcrypt
class MultiCheckboxField(SelectMultipleField):
widget = ListWidget(prefix_label=False)
option_widget = CheckboxInput()
class RegistrationForm(FlaskForm):
username = StringField('Username',
validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email',
validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('<PASSWORD>',
validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Sign Up')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError(
'That username is taken. Please choose a different one.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError(
'That email is taken. Please choose a different one.')
class LoginForm(FlaskForm):
email = StringField('Email',
validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField('Remember Me')
submit = SubmitField('Login')
class QuestionsForm(FlaskForm):
answer_1 = MultiCheckboxField('Which label best applies?', choices=[])
submit = SubmitField('submit answers')
class LabelDeleteForm(FlaskForm):
answer_1 = MultiCheckboxField(
'Which label do you want to delete?', choices=[])
submit = SubmitField('submit answers')
class CategoryForm(FlaskForm):
category = StringField(
'What category do you want to add into the ML annotation?')
submit = SubmitField('submit answers')
class CategoryDeleteForm(FlaskForm):
category_delete = MultiCheckboxField(
'Which category do you want to delete? ', choices=[])
submit = SubmitField('submit answers')
class KeywordForm(FlaskForm):
keyword = StringField('What keyword do you want to add?')
submit = SubmitField('submit keyword')
class KeywordDeleteForm(FlaskForm):
keyword_delete = MultiCheckboxField(
'Which keyword do you want to delete? ', choices=[])
submit = SubmitField('submit answers')
| 2.8125 | 3 |
mysite/myapp/admin.py | pl3nny/csc648-team05_pl3nny | 0 | 12787797 | from django.contrib import admin
from .models import UserData
from .models import Posts
from .models import HazardType
from .models import Message
from .models import Comments
from .models import PostImageCollection
# Register your models here.
admin.site.register(HazardType)
admin.site.register(UserData)
admin.site.register(Posts)
admin.site.register(Comments)
admin.site.register(Message)
admin.site.register(PostImageCollection)
| 1.445313 | 1 |
example.py | zlgenuine/python_xmind_2 | 32 | 12787798 | #-*- coding: utf-8 -*-
import xmind
from xmind.core.const import TOPIC_DETACHED
from xmind.core.markerref import MarkerId
w = xmind.load("test.xmind") # load an existing file or create a new workbook if nothing is found
s1=w.getPrimarySheet() # get the first sheet
s1.setTitle("first sheet") # set its title
r1=s1.getRootTopic() # get the root topic of this sheet
r1.setTitle("we don't care of this sheet") # set its title
s2=w.createSheet() # create a new sheet
s2.setTitle("second sheet")
r2=s2.getRootTopic()
r2.setTitle("root node")
# Empty topics are created from the root element and then filled.
# Examples:
# Create a topic with a link to the first sheet given by s1.getID()
t1 = r2.addSubTopic()
t1.setTopicHyperlink(s1.getID())
t1.setTitle("redirection to the first sheet") # set its title
# Create a topic with a hyperlink
t2 = r2.addSubTopic()
t2.setTitle("second node")
t2.setURLHyperlink("https://xmind.net")
# Create a topic with notes
t3 = r2.addSubTopic()
t3.setTitle("third node")
t3.setPlainNotes("notes for this topic")
t3.setTitle("topic with \n notes")
# Create a topic with a file hyperlink
t4 = r2.addSubTopic()
t4.setFileHyperlink("logo.jpeg")
t4.setTitle("topic with a file")
# Create topic that is a subtopic of another topic
t41 = t4.addSubTopic()
t41.setTitle("a subtopic")
# create a detached topic whose (invisible) parent is the root
d1 = r2.addSubTopic(topics_type = TOPIC_DETACHED)
d1.setTitle("detached topic")
d1.setPosition(0,20)
# loop on the (attached) subTopics
topics=r2.getSubTopics()
# Demonstrate creating a marker
for topic in topics:
topic.addMarker(MarkerId.starBlue)
# create a relationship
rel=s2.createRelationship(t1.getID(),t2.getID(),"test")
# and we save
xmind.save(w,"test2.xmind")
| 2.6875 | 3 |
setup.py | yukihiko-shinoda/asynccpu | 3 | 12787799 | <gh_stars>1-10
#!/usr/bin/env python
"""The setup script."""
from setuptools import find_packages, setup # type: ignore
with open("README.md", encoding="utf-8") as readme_file:
readme = readme_file.read()
# Since new process created by multiprocessing calls setuptools.setup method
# when the test is executed via setup.py and this affects something.
# see:
# - Answer: How to get `setup.py test` working with `multiprocessing` on Windows?
# https://stackoverflow.com/a/50402381/12721873
if __name__ == "__main__":
setup(
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: AsyncIO",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Typing :: Typed",
],
dependency_links=[],
description="Supports async / await pattern for CPU-bound operations.", # noqa: E501 pylint: disable=line-too-long
exclude_package_data={"": ["__pycache__", "*.py[co]", ".pytest_cache"]},
include_package_data=True,
install_requires=["psutil"],
keywords="asynccpu",
long_description=readme,
long_description_content_type="text/markdown",
name="asynccpu",
packages=find_packages(include=["asynccpu", "asynccpu.*"]),
package_data={"asynccpu": ["py.typed"]},
python_requires=">=3.8",
test_suite="tests",
tests_require=["pytest>=3"],
url="https://github.com/yukihiko-shinoda/asynccpu",
version="1.2.2",
zip_safe=False,
)
| 1.617188 | 2 |
gsm_layer3_protocol/sms_protocol/sms_submit.py | matan1008/gsm-layer3-protocol | 0 | 12787800 | <reponame>matan1008/gsm-layer3-protocol<filename>gsm_layer3_protocol/sms_protocol/sms_submit.py
from construct import *
import gsm_layer3_protocol.sms_protocol.tpdu_parameters as tpdu_parameters
from gsm_layer3_protocol.enums import tp_mti as tp_mti_enum
from gsm_layer3_protocol.sms_protocol.tp_user_data import tp_ud_struct, TpUserData
class SmsSubmit(Container):
def __init__(self, tp_rp, tp_srr, tp_rd, tp_mr, tp_da, tp_pid, tp_dcs, tp_vp=None, tp_ud=None):
tp_vpf = tpdu_parameters.tp_vpf.NOT_PRESENT if tp_vp is None else tp_vp._tp_vpf
if isinstance(tp_ud, bytes):
tp_ud = TpUserData(tp_ud)
elif tp_ud is None:
tp_ud = TpUserData(b"")
tp_udhi = tp_ud.user_data_header is not None
super().__init__(tp_mti=tp_mti_enum.SMS_SUBMIT_OR_REPORT, tp_rp=tp_rp, tp_udhi=tp_udhi, tp_srr=tp_srr,
tp_vpf=tp_vpf, tp_rd=tp_rd, tp_mr=tp_mr, tp_da=tp_da, tp_pid=tp_pid, tp_dcs=tp_dcs,
tp_vp=tp_vp, tp_ud=tp_ud)
sms_submit_tpdu_struct = BitStruct(
"tp_rp" / tpdu_parameters.tp_rp,
"tp_udhi" / tpdu_parameters.tp_udhi,
"tp_srr" / tpdu_parameters.tp_srr,
"tp_vpf" / tpdu_parameters.tp_vpf,
"tp_rd" / tpdu_parameters.tp_rd,
"tp_mti" / Const(tp_mti_enum.SMS_SUBMIT_OR_REPORT, tpdu_parameters.tp_mti),
"tp_mr" / tpdu_parameters.tp_mr,
"tp_da" / Bytewise(tpdu_parameters.tp_da),
"tp_pid" / tpdu_parameters.tp_pid,
"tp_dcs" / tpdu_parameters.tp_dcs,
"tp_vp" / Switch(
this.tp_vpf,
{
tpdu_parameters.tp_vpf.ENHANCED_FORMAT: Bytewise(tpdu_parameters.tp_vp_enhanced),
tpdu_parameters.tp_vpf.RELATIVE_FORMAT: tpdu_parameters.tp_vp_relative,
tpdu_parameters.tp_vpf.ABSOLUTE_FORMAT: tpdu_parameters.tp_vp_absolute
}
),
"tp_ud" / Bytewise(tp_ud_struct)
)
| 2.140625 | 2 |
src/Acts/orchestrator.py | sumantp89/SelfielessActs | 0 | 12787801 | <filename>src/Acts/orchestrator.py
from flask import Flask, request
import requests
from time import sleep
import os
import json
from threading import Thread, Lock
BASE_URL = '/api/v1/'
MIGRATIONS_FOLDER = os.path.abspath('db_migrations')
open_ports = []
curr_port_index = 0
requests_count = 0
port_lock = Lock()
requests_count_lock = Lock()
app = Flask(__name__)
# Load Balancer
def log_requests(url, method):
print('URL called: {}, Method: {}'.format(url, method))
def make_request(api_url, method, payload = None, log_requests = False):
global port_lock, curr_port_index
port_lock.acquire()
if curr_port_index >= len(open_ports):
curr_port_index = 0
port_lock.release()
port = open_ports[curr_port_index]
url = 'http://localhost:' + str(port) + BASE_URL + api_url
if method == 'GET':
resp = requests.get(url)
if log_requests:
log_requests(url, method)
elif method == 'POST':
resp = requests.post(url = url, json = payload)
if log_requests:
log_requests(url, method)
else:
resp = requests.delete(url)
if log_requests:
log_requests(url, method)
if resp.status_code == 204:
json_resp = {}
else:
if resp.status_code >= 400:
json_resp = {}
else:
json_resp = resp.json()
port_lock.acquire()
curr_port_index = (curr_port_index + 1) % len(open_ports)
port_lock.release()
return json_resp, resp.status_code
@app.route(BASE_URL + '<path:api>', methods = ["GET", "POST", "DELETE"])
def load_balancer(api):
global requests_count
requests_count_lock.acquire()
requests_count += 1
requests_count_lock.release()
payload = None
if request.method == 'POST':
payload = request.get_json(force = True)
resp, status = make_request(api, request.method, payload)
return json.dumps(resp), status
# Fault Tolerance
def fault_tolerance():
global open_ports
sleep(5)
try:
while True:
port_lock.acquire()
len_ports = len(open_ports)
port_lock.release()
i = 0
while i < len_ports:
port = open_ports[i]
url = 'http://localhost:' + str(port) + BASE_URL + '_health'
resp = requests.get(url)
# Restart Container if it is crashed
if resp.status_code == 500:
print("Container crashed on port : ", port)
port_lock.acquire()
del open_ports[i]
port_lock.release()
delete_command = 'docker rm -f acts-' + str(port)
print(delete_command)
os.system(delete_command)
run_command = 'docker run -d --name acts-{} -p {}:5000 -v {}:/db_migrations acts'.format(port, port, MIGRATIONS_FOLDER)
print(run_command)
os.system(run_command)
port_lock.acquire()
open_ports.append(port)
open_ports.sort()
port_lock.release()
else:
print("Health Check: Container {} fine".format(port))
port_lock.acquire()
len_ports = len(open_ports)
port_lock.release()
i += 1
if i>= len_ports:
break
print()
i = 0
sleep(5)
except Exception as e:
print(e)
# Scaling
def auto_scaling():
global requests_count
while requests_count == 0:
pass
while True:
sleep(15)
curr_count = requests_count
print("Count in last 15 secs: ", curr_count)
num_diff = (curr_count // 20) - len(open_ports) + 1
print("Difference: ", num_diff)
if num_diff < 0:
num_diff = abs(num_diff)
for i in range(num_diff):
port_lock.acquire()
port_to_delete = open_ports.pop()
port_lock.release()
command = 'docker rm -f acts-' + str(port_to_delete)
print("Deleting: ", command)
os.system(command)
print()
else:
for i in range(num_diff):
port_lock.acquire()
new_port = open_ports[-1] + 1
run_command = 'docker run -d --name acts-{} -p {}:5000 -v {}:/db_migrations acts'.format(new_port, new_port, MIGRATIONS_FOLDER)
print("Creating: ", run_command)
os.system(run_command)
open_ports.append(new_port)
port_lock.release()
print()
requests_count_lock.acquire()
requests_count = 0
requests_count_lock.release()
def init():
command = 'docker run -d -p 8000:5000 --name {} -v {}:/db_migrations acts'.format('acts-8000', MIGRATIONS_FOLDER)
os.system(command)
command = 'docker run -d -p 8001:5000 --name {} -v {}:/db_migrations acts'.format('acts-8001', MIGRATIONS_FOLDER)
os.system(command)
open_ports.append(8000)
open_ports.append(8001)
def run_app():
app.run(host = '0.0.0.0', port = 5000)
if __name__ == '__main__':
init()
load_balancer_th = Thread(target = run_app)
load_balancer_th.start()
fault_tolerance_th = Thread(target = fault_tolerance)
fault_tolerance_th.start()
auto_scaling_th = Thread(target = auto_scaling)
auto_scaling_th.start()
| 2.28125 | 2 |
ctrace/runner.py | gzli929/ContactTracing | 4 | 12787802 | import concurrent.futures
import csv
from ctrace.utils import max_neighbors
import functools
import itertools
import logging
import time
from collections import namedtuple
from typing import Dict, Callable, List, Any, NamedTuple
import traceback
import shortuuid
import tracemalloc
from tqdm import tqdm
from ctrace import PROJECT_ROOT
DEBUG = False
def debug_memory(logger, label=""):
snapshot = tracemalloc.take_snapshot()
top_stats = snapshot.statistics('lineno')
logger.debug(f"[{label}]: {top_stats[:5]}")
class GridExecutor():
"""
Usage: Create a new GridExecutor with config, in_schema, out_schema and func.
GridExecutor is an abstract class for running a cartesian product of lists of arguments.
Input and output arguments specified by schemas are assumed to have pretty __str__.
"""
def __init__(self, config: Dict, in_schema: List[str], out_schema: List[str], func: Callable[..., NamedTuple]):
"""
Parameters
----------
config
            A dictionary mapping parameter names to lists of candidate values.
            The cartesian product of these lists defines the runs to execute.
        in_schema
            A list naming the input attributes to print, in the order they should appear.
        out_schema
            A list naming the output attributes to print, in the order they should appear.
func
A function to execute in parallel. Input arguments must match config keys.
Output arguments must be a namedtuple. namedtuple must encompass all attributes in out_schema
"""
self.compact_config = config.copy()
# Schemas need to be consistent with input_param_formatter and output_param_formatter
self.in_schema = in_schema.copy()
self.out_schema = out_schema.copy()
self.func = func
self.init_output_directory()
print(f"Logging Directory Initialized: {self.output_directory}")
# Expand configurations
self.expanded_config = list(GridExecutor.cartesian_product(self.compact_config))
# TODO: Hack Fix
self._track_duration = False
# TODO: Change post initialization method?
@classmethod
def init_multiple(cls, config: Dict[str, Any], in_schema: List[str],
out_schema: List[str], func: Callable, trials: int):
"""
        Runs each configuration `trials` times; each trial is indexed by a "trial_id".
"""
compact_config = config.copy()
compact_config["trial_id"] = list(range(trials))
in_schema.append("trial_id")
return cls(compact_config, in_schema, out_schema, func)
# TODO: Find a workaround for decorations???
# <================== Problem ====================>
def track_duration(self):
"""Adds a wrapper to runner to track duration, and another column to out_schema for run_duration"""
# raise NotImplementedError
self.out_schema.append("run_duration")
self._track_duration = True
# self.runner = GridExecutor.timer(self.runner)
@staticmethod
def timer(func):
"""A decorator that adds an duration attribute to output of a runner"""
@functools.wraps(func)
def wrapper_timer(*args, **kwargs):
start_time = time.perf_counter() # 1
formatted_param, formatted_output = func(*args, **kwargs)
end_time = time.perf_counter() # 2
formatted_output["run_duration"] = str(end_time - start_time)
return formatted_param, formatted_output
return wrapper_timer
# <================== Problem ====================>
@staticmethod
def cartesian_product(dicts):
"""Expands an dictionary of lists into a list of dictionaries through a cartesian product"""
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))
def input_param_formatter(self, in_param):
"""Uses in_schema and __str__ to return a formatted dict"""
filtered = {}
for key in self.in_schema:
if key == "G":
filtered[key] = in_param[key].NAME
elif key == "agent":
filtered[key] = in_param[key].__name__
else:
filtered[key] = str(in_param[key])
return filtered
def output_param_formatter(self, out_param):
"""Uses out_schema and __str__ to return a formatted dict"""
filtered = {}
for key in self.out_schema:
filtered[key] = str(out_param[key])
return filtered
def init_output_directory(self):
# Initialize Output
self.run_id = shortuuid.uuid()[:5]
# Setup output directories
self.output_directory = PROJECT_ROOT / "output" / f'run_{self.run_id}'
self.output_directory.mkdir(parents=True, exist_ok=True)
self.result_path = self.output_directory / 'results.csv'
self.logging_path = self.output_directory / 'run.log'
def init_logger(self):
# Setup up Parallel Log Channel
self.logger = logging.getLogger("Executor")
self.logger.setLevel(logging.DEBUG)
# Set LOGGING_FILE as output
fh = logging.FileHandler(self.logging_path)
fh.setLevel(logging.DEBUG)
self.logger.addHandler(fh)
# TODO: Encapsulate writer and its file into one object
# TODO: Find a way to move it to the constructor (use file open and close?)
def init_writer(self, result_file):
raise NotImplementedError
# TODO: provide a single method write result and flush to file
def write_result(self, in_param, out_param):
raise NotImplementedError
def _runner(self, param: Dict[str, Any]):
"""A runner method that returns a tuple (formatted_param, formatted_output)"""
formatted_param = self.input_param_formatter(param)
self.logger.info(f"Launching => {formatted_param}")
try:
out = self.func(**param)._asdict()
except Exception as e:
# Find a way to export culprit data?
self.logger.error(traceback.format_exc())
out = {x: None for x in self.out_schema}
# TODO: Added as a hack to allow output_param_formatter not to crash
if self._track_duration:
out["run_duration"] = None
# output_param_formatter assumes out to be consistent with out_schema
formatted_output = self.output_param_formatter(out)
return formatted_param, formatted_output
def runner(self, param):
"""TODO: Temporary workaround because of multiprocessing issues with decorators and lambdas"""
if self._track_duration:
return GridExecutor.timer(self._runner)(param)
else:
return self._runner(param)
def exec(self):
raise NotImplementedError
class GridExecutorParallel(GridExecutor):
# Override the exec
def exec(self, max_workers=20):
with concurrent.futures.ProcessPoolExecutor(max_workers) as executor, \
open(self.result_path, "w+") as result_file: # TODO: Encapsulate "csv file"
self.init_logger()
            # TODO: Encapsulate "initialize csv writer" - perhaps use a context manager
row_names = self.in_schema + self.out_schema
writer = csv.DictWriter(result_file, fieldnames=row_names)
writer.writeheader()
results = [executor.submit(self.runner, arg) for arg in self.expanded_config]
for finished_task in tqdm(concurrent.futures.as_completed(results), total=len(self.expanded_config)):
(in_param, out_param) = finished_task.result()
# TODO: Encapsulate "writer"
writer.writerow({**in_param, **out_param})
result_file.flush()
self.logger.info(f"Finished => {in_param}")
# debug_memory(self.logger, "run")
class GridExecutorLinear(GridExecutor):
# Override the exec
def exec(self):
with open(self.result_path, "w") as result_file: # TODO: Encapsulate "csv file"
self.init_logger()
            # TODO: Encapsulate "initialize csv writer" - perhaps use a context manager
writer = csv.DictWriter(result_file, fieldnames=self.in_schema + self.out_schema)
writer.writeheader()
for arg in tqdm(self.expanded_config):
(in_param, out_param) = self.runner(arg)
# TODO: Encapsulate "writer"
writer.writerow({**in_param, **out_param})
result_file.flush()
self.logger.info(f"Finished => {in_param}") | 2.390625 | 2 |
acmicpc.net/problem/2606.py | x86chi/problem-solving | 1 | 12787803 | <gh_stars>1-10
from typing import List
def solution(graph: List[List[int]]):
length = len(graph)
check = [False] * length
count = [0]
def dfs(x=0):
check[x] = True
for y in graph[x]:
if not check[y]:
count[0] += 1
dfs(y)
dfs()
return count[0]
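# An equivalent iterative sketch (avoids Python's recursion limit on deep
# graphs); it is not used by main() below and is shown only for illustration.
def solution_iterative(graph: List[List[int]]) -> int:
    visited = [False] * len(graph)
    visited[0] = True
    stack = [0]
    count = 0
    while stack:
        x = stack.pop()
        for y in graph[x]:
            if not visited[y]:
                visited[y] = True
                count += 1
                stack.append(y)
    return count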
def main():
N = int(input())
M = int(input())
graph = [[] for __ in range(N)]
for __ in range(M):
A, B = map(int, input().split())
graph[A-1].append(B-1)
graph[B-1].append(A-1)
return solution(graph)
def test_main():
import io
import sys
sys.stdin = io.StringIO("""7
6
1 2
2 3
1 5
5 2
5 6
4 7
""")
assert main() == 4
if __name__ == '__main__':
print(main())
| 3.28125 | 3 |
apps/jetpack/tests/test_views.py | mozilla/FlightDeck | 6 | 12787804 | <filename>apps/jetpack/tests/test_views.py<gh_stars>1-10
import os
import commonware
import json
from jinja2 import UndefinedError
from nose.tools import eq_
from nose import SkipTest
from mock import patch, Mock
from test_utils import TestCase
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models.signals import post_save
from jetpack.models import Package, PackageRevision, Module, save_first_revision
from base.helpers import hashtag
log = commonware.log.getLogger('f.test')
def next_revision(revision):
number = revision.revision_number
return (PackageRevision.objects.filter(revision_number__gt=number,
package=revision.package)
.order_by('-revision_number')[:1])[0]
class TestPackage(TestCase):
fixtures = ('mozilla_user', 'core_sdk', 'users', 'packages')
def setUp(self):
self.hashtag = hashtag()
self.check_download_url = reverse('jp_check_download_xpi',
args=[self.hashtag])
@patch('os.path.isfile')
def test_package_check_download(self, isfile):
isfile.return_value = False
r = self.client.get(self.check_download_url)
eq_(r.status_code, 200)
eq_(r.content, '{"ready": false}')
isfile.return_value = True
r = self.client.get(self.check_download_url)
eq_(r.status_code, 200)
eq_(r.content, '{"ready": true}')
def test_package_browser_no_user(self):
"""If user does not exist raise 404
"""
r = self.client.get(
reverse('jp_browser_user_addons', args=['not_a-user']))
eq_(r.status_code, 404)
def test_author_can_edit_package(self):
user = User.objects.get(username='jan')
user.set_password('<PASSWORD>')
user.save()
addon = Package.objects.create(author=user, type='a')
# not logged in
response = self.client.get(addon.get_absolute_url())
assert 'save_url' not in response.content
# after log in
self.client.login(username=user.username, password='<PASSWORD>')
response = self.client.get(addon.get_absolute_url())
assert 'save_url' in response.content
# after setting the addon to private
response = self.client.get(reverse('jp_package_disable',
args=[addon.pk]))
self.client.login(username=user.username, password='<PASSWORD>')
response = self.client.get(addon.get_absolute_url())
assert 'save_url' in response.content
def test_display_deleted_package(self):
author = User.objects.get(username='john')
author.set_password('<PASSWORD>')
author.save()
user = User.objects.get(username='jan')
user.set_password('<PASSWORD>')
user.save()
addon = Package.objects.create(author=user, type='a')
lib = Package.objects.create(author=author, type='l')
addon.latest.dependency_add(lib.latest)
# logging in the author
self.client.login(username=author.username, password='<PASSWORD>')
# deleting lib
response = self.client.get(reverse('jp_package_delete', args=[lib.pk]))
eq_(response.status_code, 200)
response = self.client.get(lib.get_absolute_url())
# lib deleted - shouldn't be visible by author
eq_(response.status_code, 404)
# logging in the addon owner
self.client.login(username=user.username, password='<PASSWORD>')
# addon used lib - its author should be able to see it
response = self.client.get(lib.get_absolute_url())
eq_(response.status_code, 200)
def test_display_disabled_package(self):
author = User.objects.get(username='john')
author.set_password('<PASSWORD>')
author.save()
user = User.objects.get(username='jan')
user.set_password('secure')
user.save()
lib = Package.objects.create(author=author, type='l')
# logging in the author
self.client.login(username=author.username, password='<PASSWORD>')
# private on
response = self.client.get(reverse('jp_package_disable',
args=[lib.pk]))
eq_(response.status_code, 200)
response = self.client.get(lib.get_absolute_url())
# lib private - should be visible by author
eq_(response.status_code, 200)
# logging in the other user
self.client.login(username=user.username, password='<PASSWORD>')
# lib private - shouldn't be visible by others
response = self.client.get(lib.get_absolute_url())
eq_(response.status_code, 404)
def test_display_disabled_library_in_addon(self):
author = User.objects.get(username='john')
author.set_password('<PASSWORD>')
author.save()
user = User.objects.get(username='jan')
user.set_password('<PASSWORD>')
user.save()
lib = Package.objects.create(author=author, type='l')
addon = Package.objects.create(author=user, type='a')
addon.latest.dependency_add(lib.latest)
# logging in the author
self.client.login(username=author.username, password='<PASSWORD>')
# private on
response = self.client.get(reverse('jp_package_disable',
args=[lib.pk]))
eq_(response.status_code, 200)
# logging in the user
self.client.login(username=user.username, password='<PASSWORD>')
        # addon depends on lib, so it should be visible
response = self.client.get(lib.get_absolute_url())
eq_(response.status_code, 200)
def test_ability_to_see_revisions_list(self):
user = User.objects.get(username='jan')
user.set_password('<PASSWORD>')
user.save()
# Public add-on
addon = Package.objects.create(
full_name='Public Add-on', author=user, type='a')
response = self.client.get(reverse('jp_revisions_list_html',
args=[addon.latest.pk,]))
eq_(response.status_code, 200)
# Private add-on
addon = Package.objects.create(
full_name='Priv Add-on', author=user, type='a', active=False)
# not logged in
response = self.client.get(reverse('jp_revisions_list_html',
args=[addon.latest.pk,]))
eq_(response.status_code, 404)
# authenticated
self.client.login(username=user.username, password='<PASSWORD>')
response = self.client.get(reverse('jp_revisions_list_html',
args=[addon.latest.pk,]))
eq_(response.status_code, 200)
def test_urls(self):
user = User.objects.get(username='jan')
addon = Package.objects.create(author=user, type='a')
revision = addon.latest
log.debug(revision.get_absolute_url())
eq_(revision.get_absolute_url(),
'/package/%d/' % revision.package.pk)
revision.save()
eq_(revision.get_absolute_url(),
'/package/%d/revision/%d/' % (revision.package.pk,
revision.revision_number))
revision.set_version('test')
version = PackageRevision.objects.get(pk=revision.pk)
version_pk = version.pk
eq_(revision.get_absolute_url(),
'/package/%d/' % revision.package.pk)
revision.save()
eq_(version.pk, version_pk)
eq_(revision.get_absolute_url(),
'/package/%d/revision/%s/' % (revision.package.pk,
revision.revision_number))
revision.set_version('test2')
eq_(revision.get_absolute_url(),
'/package/%d/' % revision.package.pk)
eq_(version.get_absolute_url(),
'/package/%d/version/%s/' % (version.package.pk,
version.version_name))
class TestEmptyDirs(TestCase):
fixtures = ['mozilla_user', 'users', 'core_sdk', 'packages']
def setUp(self):
if not os.path.exists(settings.UPLOAD_DIR):
os.makedirs(settings.UPLOAD_DIR)
self.author = User.objects.get(username='john')
self.author.set_password('password')
self.author.save()
self.package = self.author.packages_originated.addons()[0:1].get()
self.revision = self.package.revisions.all()[0]
self.client.login(username=self.author.username, password='password')
def post(self, url, data):
return self.client.post(url, data)
def add_one(self, name='tester', root_dir='l'):
self.post(self.get_add_url(self.revision.revision_number),
{'name': name, 'root_dir': root_dir})
self.revision = next_revision(self.revision)
return self.revision
def get_add_url(self, revision_number):
revision = self.package.revisions.get(revision_number=revision_number)
return reverse('jp_package_revision_add_folder', args=[revision.pk])
def get_delete_url(self, revision_number):
revision = self.package.revisions.get(revision_number=revision_number)
return reverse('jp_package_revision_remove_folder', args=[revision.pk])
def test_add_folder(self):
res = self.post(self.get_add_url(self.revision.revision_number),
{'name': 'tester', 'root_dir': 'l'})
eq_(res.status_code, 200)
json.loads(res.content)
revision = next_revision(self.revision)
folder = revision.folders.all()[0]
eq_(folder.name, 'tester')
def test_remove_folder(self):
self.add_one()
res = self.post(self.get_delete_url(self.revision.revision_number),
{'name': 'tester', 'root_dir': 'l'})
eq_(res.status_code, 200)
json.loads(res.content)
revision = next_revision(self.revision)
eq_(revision.folders.count(), 0)
def test_folder_sanitization(self):
revision = self.add_one(name='A"> <script src="google.com">/m@l!c!ous')
eq_(revision.folders.all()[0].name,
'A-script-src-googlecom-/m-l-c-ous')
revision.folder_remove(revision.folders.all()[0])
revision = self.add_one(name='/absolute///and/triple/')
eq_(revision.folders.all()[0].name, 'absolute/and/triple')
class TestEditing(TestCase):
fixtures = ('mozilla_user', 'core_sdk', 'users', 'packages')
def setUp(self):
self.hashtag = hashtag()
def _login(self):
self.author = User.objects.get(username='jan')
self.author.set_password('<PASSWORD>')
self.author.save()
self.client.login(username=self.author.username, password='<PASSWORD>')
return self.author
def test_revision_list_contains_added_modules(self):
author = User.objects.get(username='john')
addon = Package(author=author, type='a')
addon.save()
mod = Module.objects.create(
filename='test_filename',
author=author,
code='// test')
rev = addon.latest
rev.module_add(mod)
r = self.client.get(
reverse('jp_revisions_list_html', args=[addon.latest.pk]))
assert 'test_filename' in r.content
def test_package_name_change(self):
author = self._login()
addon1 = Package(author=author, type='a')
addon1.save()
rev1 = addon1.latest
log.debug(addon1.latest.get_save_url())
response = self.client.post(addon1.latest.get_save_url(), {
'full_name': 'FULL NAME'})
eq_(response.status_code, 200)
addon2 = Package.objects.get(pk=addon1.pk)
eq_(len(addon2.revisions.all()), 2)
eq_(addon2.full_name, addon2.latest.full_name)
assert rev1.name != addon2.latest.name
def test_package_jid_change(self):
jid = 'somejid'
author = self._login()
addon1 = Package(author=author, type='a')
addon1.save()
response = self.client.post(addon1.latest.get_save_url(), {
'jid': jid})
eq_(response.status_code, 200)
addon2 = Package.objects.get(pk=addon1.pk)
# no change in revision
eq_(len(addon2.revisions.all()), 1)
eq_(addon2.jid, jid)
# check adding an existing JID
addon3 = Package(author=author, type='a')
addon3.save()
response = self.client.post(addon1.latest.get_save_url(), {
'jid': jid})
eq_(response.status_code, 403)
def test_package_extra_json_change(self):
author = self._login()
addon = Package(author=author, type='a')
addon.save()
pk = addon.pk
homepage = 'https://builder.addons.mozilla.org'
extra_json = '{"homepage": "%s"}' % homepage
response = self.client.post(addon.latest.get_save_url(), {
'package_extra_json': extra_json})
addon = Package.objects.get(pk=pk) # old one is cached
eq_(addon.latest.extra_json, extra_json)
def test_package_remove_extra_json(self):
author = self._login()
addon = Package(author=author, type='a')
addon.save()
pk = addon.pk
homepage = 'https://builder.addons.mozilla.org'
extra_json = '{"homepage": "%s"}' % homepage
addon.latest.extra_json = extra_json
addon.latest.save()
response = self.client.post(addon.latest.get_save_url(), {
'package_extra_json': ''})
        addon = Package.objects.get(pk=pk)  # old one is cached
eq_(addon.latest.extra_json, '')
def test_package_invalid_extra_json(self):
author = self._login()
addon = Package(author=author, type='a')
addon.save()
extra_json = '{ foo: bar }'
response = self.client.post(addon.latest.get_save_url(), {
'package_extra_json': extra_json})
eq_(response.status_code, 400)
assert 'invalid JSON' in response.content
class TestRevision(TestCase):
fixtures = ('mozilla_user', 'core_sdk', 'users', 'packages')
def setUp(self):
self.hashtag = hashtag()
self.xpi_file = os.path.join(settings.XPI_TARGETDIR,
"%s.xpi" % self.hashtag)
self.zip_file = os.path.join(settings.XPI_TARGETDIR,
"%s.zip" % self.hashtag)
def tearDown(self):
if os.path.exists(self.xpi_file):
os.remove(self.xpi_file)
if os.path.exists(self.zip_file):
os.remove(self.zip_file)
def test_copy_revision(self):
author = User.objects.get(username='john')
addon = Package(author=author, type='a')
addon.save()
# unauthenticated
response = self.client.get(addon.latest.get_copy_url())
eq_(response.status_code, 302)
# authenticated
author.set_password('<PASSWORD>')
author.save()
self.client.login(username=author.username, password='<PASSWORD>')
log.debug(addon.latest.get_copy_url())
response = self.client.get(addon.latest.get_copy_url())
eq_(response.status_code, 200)
assert 'Add-on' in response.content
assert 'copied' in response.content
def test_dashboard_with_broken_package(self):
# fixable add-on - no latest given
author = User.objects.get(username='john')
addon = Package(
full_name='NOLATEST',
author=author, type='a')
addon.save()
# adding a new version
addon.latest.save()
eq_(addon.revisions.count(), 2)
addon.latest.set_version('1.0')
latest = addon.latest
# removing addon.latest
addon.latest = None
addon.version = None
addon.save()
assert not addon.latest
self.assertRaises(UndefinedError,
self.client.get, author.get_profile().get_profile_url())
# fix latest will assign last revision to latest
addon.fix_latest()
response = self.client.get(author.get_profile().get_profile_url())
eq_(response.status_code, 200)
addon = Package.objects.get(full_name='NOLATEST')
assert addon.latest
eq_(addon.latest, latest)
self.assertRaises(AttributeError, addon.latest.get_absolute_url)
# fix version will assign revision with a highest version_name to
# version
addon.fix_version()
eq_(response.status_code, 200)
addon = Package.objects.get(full_name='NOLATEST')
assert addon.version
eq_(addon.version.version_name, '1.0')
# package with no version at all
post_save.disconnect(save_first_revision, sender=Package)
addon = Package(
full_name='NOREVISION',
name='broken',
author=author, type='a')
addon.save()
post_save.connect(save_first_revision, sender=Package)
assert not addon.latest
self.assertRaises(UndefinedError,
self.client.get, author.get_profile().get_profile_url())
# fix latest (it will remove the package)
addon.fix_latest()
response = self.client.get(author.get_profile().get_profile_url())
eq_(response.status_code, 200)
self.assertRaises(Package.DoesNotExist,
Package.objects.get, full_name='NOREVISION')
def test_non_unique_fixable_packages(self):
# multiple fixable packages with the same name
# no idea how to create them in database
# duplicate packages are denied on MySQL level
if True:
# hide "Unreachable code" pylint warning
raise SkipTest()
# this is how the test would run if no IntegrityError would be raised
author = User.objects.get(username='john')
addon = Package.objects.create(
full_name='Integrity Error',
author=author, type='a')
# addon has 2 revisions
addon.latest.save()
latest = addon.latest
backup = Package.full_clean
Package.full_clean = Mock()
addon2 = Package.objects.create(
full_name='Integrity Error',
author=author, type='a')
addon2.latest = None
addon2.save()
Package.full_clean = backup
# requesting author's profile
self.assertRaises(Package.MultipleObjectsReturned,
self.client.get, author.get_profile().get_profile_url())
# fix uniqueness (it will rename addon2 as it has less revisions)
addon.fix_uniqueness()
response = self.client.get(author.get_profile().get_profile_url())
eq_(response.status_code, 200)
addon = Package.objects.get(full_name='Integrity Error')
# displaying the broken addon should fix it
assert addon.latest
eq_(addon.latest, latest)
# there should be other package with the name created from FIXABLE
eq_(Package.objects.filter(
author=author, full_name__contains='Integrity Error').count(), 2)
def test_prepare_zip_file(self):
author = User.objects.get(username='john')
addon = Package(author=author, type='a')
addon.save()
prepare_url = addon.latest.get_prepare_zip_url()
response = self.client.post(prepare_url, {'hashtag': self.hashtag})
eq_(response.status_code, 200)
eq_(response.content, '{"delayed": true}')
def test_check_zip_file(self):
author = User.objects.get(username='john')
addon = Package(author=author, type='a')
addon.save()
check_url = reverse('jp_revision_check_zip', args=[self.hashtag,])
response = self.client.get(check_url)
eq_(response.content, '{"ready": false}')
addon.latest.zip_source(hashtag=self.hashtag)
response = self.client.get(check_url)
eq_(response.status_code, 200)
eq_(response.content, '{"ready": true}')
def test_download_zip_file(self):
author = User.objects.get(username='john')
addon = Package(author=author, type='a')
addon.save()
addon.latest.zip_source(hashtag=self.hashtag)
download_url = reverse('jp_revision_download_zip', args=[self.hashtag, 'x'])
response = self.client.get(download_url)
eq_(response.status_code, 200)
eq_(response['Content-Disposition'], 'attachment; filename="x.zip"')
| 2.0625 | 2 |
tests/rl/test_logger.py | SunsetWolf/qlib | 1 | 12787805 | <filename>tests/rl/test_logger.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from random import randint, choice
from pathlib import Path
import re
import gym
import numpy as np
import pandas as pd
from gym import spaces
from tianshou.data import Collector, Batch
from tianshou.policy import BasePolicy
from qlib.log import set_log_with_config
from qlib.config import C
from qlib.constant import INF
from qlib.rl.interpreter import StateInterpreter, ActionInterpreter
from qlib.rl.simulator import Simulator
from qlib.rl.utils.data_queue import DataQueue
from qlib.rl.utils.env_wrapper import InfoDict, EnvWrapper
from qlib.rl.utils.log import LogLevel, LogCollector, CsvWriter, ConsoleWriter
from qlib.rl.utils.finite_env import vectorize_env
class SimpleEnv(gym.Env[int, int]):
def __init__(self):
self.logger = LogCollector()
self.observation_space = gym.spaces.Discrete(2)
self.action_space = gym.spaces.Discrete(2)
def reset(self):
self.step_count = 0
return 0
def step(self, action: int):
self.logger.reset()
self.logger.add_scalar("reward", 42.0)
self.logger.add_scalar("a", randint(1, 10))
self.logger.add_array("b", pd.DataFrame({"a": [1, 2], "b": [3, 4]}))
if self.step_count >= 3:
done = choice([False, True])
else:
done = False
if 2 <= self.step_count <= 3:
self.logger.add_scalar("c", randint(11, 20))
self.step_count += 1
return 1, 42.0, done, InfoDict(log=self.logger.logs(), aux_info={})
class AnyPolicy(BasePolicy):
def forward(self, batch, state=None):
return Batch(act=np.stack([1] * len(batch)))
def learn(self, batch):
pass
def test_simple_env_logger(caplog):
set_log_with_config(C.logging_config)
for venv_cls_name in ["dummy", "shmem", "subproc"]:
writer = ConsoleWriter()
csv_writer = CsvWriter(Path(__file__).parent / ".output")
venv = vectorize_env(lambda: SimpleEnv(), venv_cls_name, 4, [writer, csv_writer])
with venv.collector_guard():
collector = Collector(AnyPolicy(), venv)
collector.collect(n_episode=30)
output_file = pd.read_csv(Path(__file__).parent / ".output" / "result.csv")
assert output_file.columns.tolist() == ["reward", "a", "c"]
assert len(output_file) >= 30
line_counter = 0
for line in caplog.text.splitlines():
line = line.strip()
if line:
line_counter += 1
assert re.match(r".*reward 42\.0000 \(42.0000\) a .* \((4|5|6)\.\d+\) c .* \((14|15|16)\.\d+\)", line)
assert line_counter >= 3
class SimpleSimulator(Simulator[int, float, float]):
def __init__(self, initial: int, **kwargs) -> None:
self.initial = float(initial)
def step(self, action: float) -> None:
import torch
self.initial += action
self.env.logger.add_scalar("test_a", torch.tensor(233.0))
self.env.logger.add_scalar("test_b", np.array(200))
def get_state(self) -> float:
return self.initial
def done(self) -> bool:
return self.initial % 1 > 0.5
class DummyStateInterpreter(StateInterpreter[float, float]):
def interpret(self, state: float) -> float:
return state
@property
def observation_space(self) -> spaces.Box:
return spaces.Box(0, np.inf, shape=(), dtype=np.float32)
class DummyActionInterpreter(ActionInterpreter[float, int, float]):
def interpret(self, state: float, action: int) -> float:
return action / 100
@property
def action_space(self) -> spaces.Box:
return spaces.Discrete(5)
class RandomFivePolicy(BasePolicy):
def forward(self, batch, state=None):
return Batch(act=np.random.randint(5, size=len(batch)))
def learn(self, batch):
pass
def test_logger_with_env_wrapper():
with DataQueue(list(range(20)), shuffle=False) as data_iterator:
env_wrapper_factory = lambda: EnvWrapper(
SimpleSimulator,
DummyStateInterpreter(),
DummyActionInterpreter(),
data_iterator,
logger=LogCollector(LogLevel.DEBUG),
)
# loglevel can be debug here because metrics can all dump into csv
# otherwise, csv writer might crash
csv_writer = CsvWriter(Path(__file__).parent / ".output", loglevel=LogLevel.DEBUG)
venv = vectorize_env(env_wrapper_factory, "shmem", 4, csv_writer)
with venv.collector_guard():
collector = Collector(RandomFivePolicy(), venv)
collector.collect(n_episode=INF * len(venv))
output_df = pd.read_csv(Path(__file__).parent / ".output" / "result.csv")
assert len(output_df) == 20
        # obs has an increasing trend
assert output_df["obs"].to_numpy()[:10].sum() < output_df["obs"].to_numpy()[10:].sum()
assert (output_df["test_a"] == 233).all()
assert (output_df["test_b"] == 200).all()
assert "steps_per_episode" in output_df and "reward" in output_df
| 2.265625 | 2 |
tests/config/test_environment.py | henry1jin/alohamora | 5 | 12787806 | <filename>tests/config/test_environment.py
import tempfile
from blaze.config.environment import PushGroup, Resource, ResourceType, EnvironmentConfig
from tests.mocks.config import get_env_config
def create_resource(url):
return Resource(url=url, size=1024, order=1, group_id=0, source_id=0, type=ResourceType.HTML)
class TestResourceType:
def test_has_int_type(self):
for val in list(ResourceType):
assert isinstance(val, int)
class TestResource:
def test_compiles(self):
r = create_resource("http://example.com")
assert isinstance(r, Resource)
def test_equality(self):
r_1 = create_resource("http://example.com")
r_2 = create_resource("http://example.com")
r_3 = create_resource("http://example.com/test")
assert r_1 is not r_2
assert r_1 is not r_3
assert r_1 == r_2
assert r_1 != r_3
assert len(set([r_1, r_2, r_3])) == 2
class TestPushGroup:
def test_compiles(self):
p = PushGroup(id=0, name="example.com", resources=[])
assert isinstance(p, PushGroup)
class TestEnvironmentConfig:
def test_compiles(self):
c = EnvironmentConfig(replay_dir="/replay/dir", request_url="http://example.com", push_groups=[])
assert isinstance(c, EnvironmentConfig)
def test_trainable_push_groups(self):
c = get_env_config()
assert all(group.trainable for group in c.trainable_push_groups)
assert all(group in c.push_groups for group in c.trainable_push_groups)
assert all(group not in c.trainable_push_groups for group in c.push_groups if not group.trainable)
def test_pickle(self):
c = get_env_config()
with tempfile.NamedTemporaryFile() as tmp_file:
c.save_file(tmp_file.name)
loaded_c = EnvironmentConfig.load_file(tmp_file.name)
assert c.request_url == loaded_c.request_url
assert c.replay_dir == loaded_c.replay_dir
assert len(c.push_groups) == len(loaded_c.push_groups)
for i, group in enumerate(c.push_groups):
assert loaded_c.push_groups[i].name == group.name
assert len(loaded_c.push_groups[i].resources) == len(group.resources)
for j, res in enumerate(group.resources):
assert loaded_c.push_groups[i].resources[j] == res
| 2.359375 | 2 |
metamodels/lstm.py | Jackil1993/metainventory | 3 | 12787807 | from simulations import simulation, simulation2
from pandas import DataFrame
from pandas import Series
from pandas import concat
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Bidirectional
from keras.layers import LSTM
from math import sqrt
from matplotlib import pyplot
import numpy
# frame a sequence as a supervised learning problem
def timeseries_to_supervised(data, lag=1):
df = DataFrame(data)
columns = [df.shift(i) for i in range(1, lag + 1)]
columns.append(df)
df = concat(columns, axis=1)
df.fillna(0, inplace=True)
return df
# create a differenced series
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return Series(diff)
# invert differenced value
def inverse_difference(history, yhat, interval=1):
return yhat + history[-interval]
# scale train and test data to [-1, 1]
def scale(train, test):
# fit scaler
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(train)
# transform train
train = train.reshape(train.shape[0], train.shape[1])
train_scaled = scaler.transform(train)
# transform test
test = test.reshape(test.shape[0], test.shape[1])
test_scaled = scaler.transform(test)
return scaler, train_scaled, test_scaled
# inverse scaling for a forecasted value
def invert_scale(scaler, X, value):
new_row = [x for x in X] + [value]
array = numpy.array(new_row)
array = array.reshape(1, len(array))
inverted = scaler.inverse_transform(array)
return inverted[0, -1]
# fit an LSTM network to training data
def fit_lstm(train, batch_size, nb_epoch):
X, y = train[:, 0:-1], train[:, -1]
X = X.reshape(X.shape[0], 1, X.shape[1])
model = Sequential()
model.add(Bidirectional(LSTM(50, activation='relu'), batch_input_shape=(batch_size, X.shape[1], X.shape[2])))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
for i in range(nb_epoch):
model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
model.reset_states()
print('Epoch {}'.format(i))
return model
# make a one-step forecast
def forecast_lstm(model, batch_size, X):
X = X.reshape(1, 1, len(X))
yhat = model.predict(X, batch_size=batch_size)
return yhat[0, 0]
# load dataset
sim = simulation2.Simulator(50)
sim.simulate()
s = simulation.Simulation([[1, 1]], # to_plot, to_report
[[0.1, [0.2, 0.1], [15, 2], [30, 2]]], # interarrivals, demand, replenishment_lead, expiry
[[70.0, 110.0, 5.0, 30.0, 100.0, 100.0]], # purchase price, sales price, handling, backorder, overflow, recycle
[[50, 35]]) # storage, reorder point
s.simulate()
#raw_values = sim.stats.inventory_vector
raw_values = s.w.products[0].stats.storage
raw_values = raw_values[0::30]
print(len(raw_values))
diff_values = difference(raw_values, 1)
# transform data to be supervised learning
supervised = timeseries_to_supervised(diff_values, 1)
supervised_values = supervised.values
# split data into train and test-sets
train, test = supervised_values[0:-30], supervised_values[-30:]
# transform the scale of the data
scaler, train_scaled, test_scaled = scale(train, test)
# fit the model
lstm_model = fit_lstm(train_scaled, 1, 10)
# forecast the entire training dataset to build up state for forecasting
train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
#lstm_model.predict(train_reshaped, batch_size=1)
# walk-forward validation on the test data
predictions = list()
for i in range(len(test_scaled)):
# make one-step forecast
X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
yhat = forecast_lstm(lstm_model, 1, X)
# invert scaling
yhat = invert_scale(scaler, X, yhat)
# invert differencing
yhat = inverse_difference(raw_values, yhat, len(test_scaled) + 1 - i)
# store forecast
predictions.append(yhat)
expected = raw_values[len(train) + i + 1]
print('Time=%d, Predicted=%f, Expected=%f' % (i + 1, yhat, expected))
# report performance
mse = mean_squared_error(raw_values[-30:-2], predictions[1:-1])
rmse = sqrt(mse)
ape = []
real_values = raw_values[-30:-2]
raw_value = raw_values[-30:-2]
predictions = predictions[1:-1]
for i in range(len(predictions)):
value = abs(predictions[i]-real_values[i])/real_values[i]
if value < 1:
ape.append(value)
mape = sum(ape)/len(ape)*100
print('Test RMSE: %.3f' % rmse)
print('Test MSE: %.3f' % mse)
print('Mean absolute percentage error: ', round(mape,2), "%")
# plot
pyplot.plot(raw_values[-30:-2], label='simulation')
pyplot.plot(predictions[1:-1], label='predicted by LSTM neural network')
pyplot.xlabel('time')
pyplot.ylabel('inventory level')
pyplot.grid()
pyplot.legend()
pyplot.show() | 2.9375 | 3 |
app/database.py | faylau/microblog | 0 | 12787808 | #coding=utf-8
"""
1. SQLAlchemy-migrate is now a project maintained by the OpenStack community. It is
   mainly used to create, version-control and migrate SQLAlchemy-backed databases;
   it has specific SQLAlchemy version requirements and is not required for most projects.
2. The db_create, db_migrate, db_upgrade and db_downgrade functions below are all
   implemented with SQLAlchemy-migrate.
3. If database version management and migration are not needed, SQLAlchemy-migrate
   can be omitted.
"""
import os.path
# from migrate.versioning import api
import imp
# from config import SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO
from app import db, app
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'], convert_unicode=True, echo=True)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
def init_db():
import models
Base.metadata.create_all(bind=engine)
init_db()
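# A typical companion pattern (sketch only; whether this project's models module
# actually subclasses Base, and how the session is torn down, are assumptions):
#
#     class User(Base):
#         __tablename__ = 'users'
#         id = Column(Integer, primary_key=True)
#
#     @app.teardown_appcontext
#     def shutdown_session(exception=None):
#         db_session.remove()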
# def db_create():
# """
#     :summary: Create the database and manage its schema versions with SQLAlchemy-migrate.
# """
# db.create_all()
# if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
# api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
# api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
# else:
# api.version_control(SQLALCHEMY_DATABASE_URI,
# SQLALCHEMY_MIGRATE_REPO,
# api.version(SQLALCHEMY_MIGRATE_REPO))
#
#
# def db_migrate():
#     :summary: SQLAlchemy-migrate works by comparing the database (obtained from
#         app.db in this example) with the structure of our models (obtained from
#         app/models.py); the differences are recorded as a migration script stored
#         in the migration repository. A migration script knows how to apply or undo
#         its changes, so it can always be used to upgrade or downgrade the database.
# 或降级一个数据库。
# """
# migration = SQLALCHEMY_MIGRATE_REPO\
# + '/versions/%03d_migration.py' \
# % (api.db_version(SQLALCHEMY_DATABASE_URI,
# SQLALCHEMY_MIGRATE_REPO) + 1)
# tmp_module = imp.new_module('old_model')
# old_model = api.create_model(SQLALCHEMY_DATABASE_URI,
# SQLALCHEMY_MIGRATE_REPO)
# exec old_model in tmp_module.__dict__
# script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI,
# SQLALCHEMY_MIGRATE_REPO,
# tmp_module.meta,
# db.metadata)
# open(migration, 'wt').write(script)
# api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
# print 'New migration saved as {0}.'.format(migration)
# print 'Current database version: {0}'.format(
# str(api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO))
# )
#
#
# def db_upgrade():
#     :summary: Upgrade the database, implemented with SQLAlchemy-migrate.
# :summary: 数据库升级,使用SQLAlchemy-migration实现。
# """
# api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
# print 'Current database version: ' + str(
# api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
# )
#
#
# def db_downgrade():
#     :summary: Downgrade the database, implemented with SQLAlchemy-migrate.
# :summary: 数据库降级,使用SQLAlchemy-migration实现。
# """
# v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
# api.downgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, v - 1)
# print 'Current database version: ' + str(
# api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
# )
if __name__ == '__main__':
init_db()
| 2.453125 | 2 |
tools/todos.py | mrjrty/rpaframework | 518 | 12787809 | <gh_stars>100-1000
#!/usr/bin/env python3
import argparse
import json
import os
import re
import sys
from collections import defaultdict
from contextlib import contextmanager
from io import StringIO
from pathlib import Path
from pylint.lint import Run
TODO_PATTERN = re.compile(r"(todo|fixme|xxx)[\:\.]?\s*(.+)", re.IGNORECASE)
@contextmanager
def redirect():
stdout = sys.stdout
sys.stdout = StringIO()
try:
yield sys.stdout
finally:
sys.stdout.close()
sys.stdout = stdout
def todo_msg(msg):
match = TODO_PATTERN.match(msg)
if match:
return match.group(2)
else:
return msg
def main():
parser = argparse.ArgumentParser(description="Write all todo items as rst")
parser.add_argument("input", help="Path to source files")
parser.add_argument("output", help="Path to output rst file")
args = parser.parse_args()
cmd = [
"pylint",
"--disable=all",
"--enable=fixme",
"--exit-zero",
"-f",
"json",
Path(args.input).name,
]
cwd = os.getcwd()
os.chdir(Path(args.input).parent)
try:
with redirect() as stdout:
Run(cmd, exit=False)
result = json.loads(stdout.getvalue())
finally:
os.chdir(cwd)
todos = defaultdict(list)
for item in result:
# Remove given search path from module path
name = ".".join(item["module"].split(".")[1:])
message = todo_msg(item["message"])
        todos[name].append({"message": message, "line": item["line"]})
output = ["****", "TODO", "****", ""]
for module, items in sorted(todos.items()):
items.sort(key=lambda item: item["line"])
output.append(f"{module}:")
output.append("=" * (len(module) + 1))
output.append("")
output.append(".. csv-table::")
output.append(" :header: \"Line\", \"Message\"")
output.append(" :widths: 10, 40")
output.append("")
for item in items:
output.append(" \"{line}\", \"{message}\"".format(**item))
output.append("")
with open(args.output, "w") as outfile:
outfile.write("\n".join(output))
if __name__ == "__main__":
main()
| 2.390625 | 2 |
accounts/tests/tests_logout.py | oratosquilla-oratoria/django-blog | 0 | 12787810 | <filename>accounts/tests/tests_logout.py
from django.contrib.auth import get_user
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import resolve, reverse
from .. import views
class LogOutTest(TestCase):
def setUp(self):
username = 'Vasyan'
password = '<PASSWORD>'
User.objects.create_user(username, '<EMAIL>', password)
self.client.login(username=username, password=password)
self.url_page = reverse('page', kwargs={'page': 1})
self.url_logout = reverse('logout')
def test_logout_view(self):
"""Tests if the logout link maps LogoutView"""
view = resolve('/accounts/logout/')
self.assertEqual(view.func.view_class, views.MyLogoutView)
def test_after_logout_redirect_home(self):
"""Tests the redirection after logging out"""
response = self.client.get(self.url_logout)
self.assertRedirects(response, self.url_page)
def test_user_is_not_authenticated(self):
"""Tests if the user is not authenticated"""
self.client.get(self.url_logout)
user = get_user(self.client)
self.assertFalse(user.is_authenticated)
| 2.59375 | 3 |
viruses/phage_num/marine_deep_subsurface/marine_deep_subusrface_phage_num.py | milo-lab/biomass_distribution | 21 | 12787811 | <reponame>milo-lab/biomass_distribution
# coding: utf-8
# In[1]:
# Load dependencies
import pandas as pd
import numpy as np
from scipy.stats import gmean
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
import sys
sys.path.insert(0, '../../../statistics_helper')
from CI_helper import *
# # Estimating the number of phages in subseafloor sediments
# To estimate the total number of phages in subseafloor sediments, we rely on two recent papers which measured the ratio between the number of prokaryotes in subseafloor sediments and the number of phage-like particles ([Engelhardt et al.](http://dx.doi.org/10.1038/ismej.2013.245) and [Middelboe et al.](https://doi.org/10.3354/ame01485)). We extracted the data from figure 3 of Engelhardt et al. and from figure 2 of Middelboe et al.:
# In[2]:
# Load data extracted from Engelhardt et al.
data = pd.read_excel('marine_deep_subsurface_phage_data.xlsx',skiprows=1)
# Load data extracted from Middelboe et al.
middelboe = pd.read_excel('marine_deep_subsurface_phage_data.xlsx','Middelboe',skiprows=1,index_col=0)
# Plot the data
plt.loglog(data['Cells concentration [cells cm^-3]'],data['Phage concentration [virions cm^-3]'],'.',label='Engelhardt data')
plt.loglog(middelboe['Prokaryote abundance [cm^-3]'],middelboe['Viral abundance [cm^-3]'],'.',label='Middelboe data')
# Plot the fit Engelhardt et al. used for the data
fit_xdata = 10**np.linspace(np.log10(data['Cells concentration [cells cm^-3]'].min()),np.log10(data['Cells concentration [cells cm^-3]'].max()),100)
plt.loglog(fit_xdata,271.8*fit_xdata**0.768,label='Engelhardt et al. fit')
plt.xlabel(r'Cell concentration [cells cm$^{-3}$]')
plt.ylabel(r'Phage-like particle concentration [particles cm$^{-3}$]')
plt.legend()
# As the data from the two studies correspond well to the same fit used in Engelhardt et al., we combined the data from both studies and calculated the geometric mean of the ratios between phage-like particles and prokaryotes across all measurements:
# In[3]:
# Merge data from Engelhardt et al. and Middelboe et al.
merged_data = pd.concat([(data['Phage concentration [virions cm^-3]']/data['Cells concentration [cells cm^-3]']),(middelboe['Viral abundance [cm^-3]']/middelboe['Prokaryote abundance [cm^-3]'])])
geo_mean_ratio = gmean(merged_data)
print('Our best estimate for the ratio between the concentration of phage-like particles and cells in subseafloor sediments is ≈%.0f.' %geo_mean_ratio)
# To calculate the total number of phages in subseafloor sediments, we multiply the ratio of phage-like particles to prokaryotes by our estimate for the total number of prokaryotes in subseafloor sediments.
# In[4]:
prokaryote_estimate = pd.read_excel('../../../bacteria_archaea/marine_deep_subsurface/marine_deep_subsurface_prok_biomass_estimate.xlsx')
best_estimate = prokaryote_estimate.loc[0]['Value']*geo_mean_ratio
print('Our best estimate for the total number of phages in subseafloor sediments is ≈%.0e' %best_estimate)
old_results = pd.read_excel('../phage_num_estimate.xlsx')
result = old_results.copy()
result.loc[1] = pd.Series({
'Parameter': 'Total number of phages in the marine deep subsurface',
'Value': best_estimate,
'Units': 'Number of individuals',
'Uncertainty': np.nan
})
result.to_excel('../phage_num_estimate.xlsx',index=False)
| 2.453125 | 2 |
src/lambda_tests.py | erikj23/lambda-manager | 1 | 12787812 |
import boto3
import json
def get_client():
    # boto3.client() returns a low-level Lambda client, not a boto3.Session.
    return boto3.client("lambda")
def external_lambda_tests() -> None:
basic_call()
def basic_call() -> None:
lambda_client = get_client()
response = lambda_client.list_functions(
MaxItems=10
)
pretty_print(response)
def pretty_print(response: dict) -> None:
print(json.dumps(response, indent=4, sort_keys=True))
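# A hedged sketch (not exercised by external_lambda_tests above) showing how one
# might list every function with boto3's built-in paginator instead of a single
# capped list_functions call.
def paginated_call() -> None:
    lambda_client = get_client()
    paginator = lambda_client.get_paginator("list_functions")
    for page in paginator.paginate():
        # Each page carries a "Functions" list of function-metadata dicts.
        for function in page.get("Functions", []):
            print(function["FunctionName"])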
if __name__ == "__main__":
external_lambda_tests()
| 2.171875 | 2 |
probe.py | moisesbenzan/python-sdk | 0 | 12787813 | import re
import os
import sys
import time
import atexit
import platform
import traceback
import logging
import base64
import random
from contextlib import contextmanager
from blackfire import profiler, VERSION, agent, generate_config, DEFAULT_CONFIG_FILE
from blackfire.utils import IS_PY3, get_home_dir, ConfigParser, \
urlparse, urljoin, urlencode, get_load_avg, get_logger, quote, \
parse_qsl, Request, urlopen, json_prettify, get_probed_runtime
from blackfire.exceptions import BlackfireApiException
from blackfire import BlackfireConfiguration
log = get_logger(__name__)
# globals
_config = None
_probe = None
_DEFAULT_OMIT_SYS_PATH = True
_DEFAULT_PROFILE_TITLE = 'unnamed profile'
__all__ = [
'get_traces', 'clear_traces', 'is_enabled', 'enable', 'end', 'reset',
'disable', 'run', 'initialize'
]
class Probe(object):
def __init__(self, config):
self._config = config
self._agent_conn = None
self._enabled = False
def is_enabled(self):
return self._enabled
def get_agent_prolog_response(self):
'''Returns the first response of the Agent in prolog dialogue'''
assert self._agent_conn is not None
return self._agent_conn.agent_response
def enable(self):
if self._enabled:
raise BlackfireApiException('Another probe is already profiling')
self._enabled = True
# connect agent
if not self._agent_conn:
try:
self._agent_conn = agent.Connection(
self._config.agent_socket, self._config.agent_timeout
)
self._agent_conn.connect(config=self._config)
except Exception as e:
self._enabled = False
self._agent_conn = None
raise e
self._req_start = time.time()
# pass start options from _config.args, set defaults as necessary
builtins = not bool(int(self._config.args.get('flag_no_builtins', '0')))
profile_cpu = bool(int(self._config.args.get('flag_cpu', '0')))
profile_memory = bool(int(self._config.args.get('flag_memory', '0')))
fn_args_enabled = bool(int(self._config.args.get('flag_fn_args', '0')))
# only enable timespan if this is the last profile of multiple sample profiles.
# we look at 'continue': 'false' from the agent response
profile_timespan = False
timespan_threshold = profiler.MAX_TIMESPAN_THRESHOLD # not probable number
if self._agent_conn.agent_response.status_val_dict.get(
'first_sample'
) == 'true':
profile_timespan = bool(
int(self._config.args.get('flag_timespan', '0'))
)
timespan_threshold = int(
self._config.args.get('timespan_threshold', 10)
)
# timespan_selectors is a dict of set of prefix/equal regex selectors.
timespan_selectors = {'^': set(), '=': set()}
if profile_timespan:
ts_selectors = self._agent_conn.agent_response.args.get(
'Blackfire-Timespan', []
)
for ts_sel in ts_selectors:
if ts_sel[0] not in ['^', '=']:
log.warning(
"Ignoring invalid timespan selector '%s'.", ts_sel
)
continue
timespan_selectors[ts_sel[0]].add(ts_sel[1:])
# instrumented_funcs is a dict of {func_name:[list of argument IDs]}
instrumented_funcs = {}
if fn_args_enabled:
# convert the fn-args string to dict for faster lookups on C side
fn_args = self._agent_conn.agent_response.args.get(
'Blackfire-Fn-Args', []
)
for fn_arg in fn_args:
fn_name, arg_ids_s = fn_arg.split()
fn_name = fn_name.strip()
if fn_name in instrumented_funcs:
log.warning(
"Function '%s' is already instrumented. Ignoring fn-args directive %s.",
fn_name, fn_arg
)
continue
arg_ids = []
for arg_id in arg_ids_s.strip().split(','):
if arg_id.isdigit():
arg_ids.append(int(arg_id))
else:
arg_ids.append(arg_id)
instrumented_funcs[fn_name] = arg_ids
profiler.start(
builtins=builtins,
profile_cpu=profile_cpu,
profile_memory=profile_memory,
profile_timespan=profile_timespan,
instrumented_funcs=instrumented_funcs,
timespan_selectors=timespan_selectors,
timespan_threshold=timespan_threshold,
)
# TODO: 'Blackfire-Error: 103 Samples quota is out'
log.debug(
"profiler started. [instrumented_funcs:%s, timespan_selectors:%s]",
json_prettify(instrumented_funcs),
json_prettify(timespan_selectors),
)
def disable(self):
self._enabled = False
profiler.stop()
def clear_traces(self):
profiler.clear_traces()
def end(self, headers={}, omit_sys_path_dirs=_DEFAULT_OMIT_SYS_PATH):
if not self._agent_conn:
return
log.debug("probe.end() called.")
self.disable()
traces = get_traces(omit_sys_path_dirs=omit_sys_path_dirs)
self.clear_traces()
# write main prolog
profile_title = self._config.args.get(
'profile_title', _DEFAULT_PROFILE_TITLE
)
end_headers = {
'file-format': 'BlackfireProbe',
'Probed-Runtime': get_probed_runtime(),
'Probed-Language': 'python',
'Probed-Os': platform.platform(),
'Probe-version': VERSION,
'Probed-Features': self._config.args_raw,
'Request-Start': self._req_start,
'Request-End': time.time(),
'Profile-Title': profile_title,
}
load_avg = get_load_avg()
if load_avg:
end_headers['Request-Sys-Load-Avg'] = load_avg
end_headers.update(headers)
context_dict = {'script': sys.executable, 'argv[]': sys.argv}
# middlewares populate the Context dict?
if 'Context' in end_headers:
context_dict.update(end_headers['Context'])
end_headers['Context'] = urlencode(context_dict, doseq=True)
profile_data_req = agent.BlackfireRequest(
headers=end_headers, data=traces
)
self._agent_conn.send(profile_data_req.to_bytes())
self._agent_conn.close()
self._agent_conn = None
return traces
def get_traces(self, omit_sys_path_dirs=_DEFAULT_OMIT_SYS_PATH):
return profiler.get_traces(omit_sys_path_dirs=omit_sys_path_dirs)
def get_traces(omit_sys_path_dirs=_DEFAULT_OMIT_SYS_PATH):
return profiler.get_traces(omit_sys_path_dirs=omit_sys_path_dirs)
def clear_traces():
profiler.clear_traces()
# used from testing to set Probe state to a consistent state
def reset():
global _config, _probe
_config = None
_probe = None
def add_marker(label=''):
pass
def generate_subprofile_query():
global _config
if not _config:
raise BlackfireApiException(
'Unable to create a subprofile query as profiling is not enabled.'
)
args_copy = _config.args.copy()
parent_sid = ''
if 'sub_profile' in args_copy:
parent_sid = args_copy['sub_profile'].split(':')[1]
args_copy.pop('aggreg_samples')
s = ''.join(chr(random.randint(0, 255)) for _ in range(7))
if IS_PY3:
s = bytes(s, agent.Protocol.ENCODING)
sid = base64.b64encode(s)
sid = sid.decode("ascii")
sid = sid.rstrip('=')
sid = sid.replace('+', 'A')
sid = sid.replace('/', 'B')
sid = sid[:9]
args_copy['sub_profile'] = '%s:%s' % (parent_sid, sid)
result = "%s&signature=%s&%s" % (
_config.challenge,
_config.signature,
urlencode(args_copy),
)
return result
def initialize(
query=None,
client_id=None,
client_token=None,
agent_socket=None,
agent_timeout=None,
endpoint=None,
log_file=None,
log_level=None,
config_file=DEFAULT_CONFIG_FILE,
_method="manual",
):
global _config, log, _probe
if log_file or log_level:
log = get_logger(__name__, log_file=log_file, log_level=log_level)
log.warning(
"DeprecationWarning: 'LOG_FILE' and 'LOG_LEVEL' params are no longer used from 'probe.initialize' API. "
"Please use 'BLACKFIRE_LOG_FILE'/'BLACKFIRE_LOG_LEVEL' environment variables."
"These settings will be removed in the next version."
)
log.debug("probe.initialize called. [method:'%s']", _method)
_config = generate_config(
query,
client_id,
client_token,
agent_socket,
agent_timeout,
endpoint,
log_file,
log_level,
config_file,
)
log.debug(
"Probe Configuration initialized. [%s]",
json_prettify(_config.__dict__)
)
_probe = Probe(_config)
def is_enabled():
global _probe
if not _probe:
return False
return _probe.is_enabled()
def enable(end_at_exit=False):
global _config, _probe
if not _config:
raise BlackfireApiException(
'No configuration set. initialize should be called first.'
)
log.debug("probe.enable() called.")
_probe.enable()
if end_at_exit: # used for profiling CLI scripts
# patch sys module to get the exit code/stdout/stderr output lengths
from blackfire.hooks.sys.patch import patch
from blackfire.hooks.sys import SysHooks
patch()
def _deinitialize():
headers = {}
headers['Response-Code'] = SysHooks.exit_code
            headers['Response-Bytes'] = SysHooks.stdout_len + SysHooks.stderr_len
try:
end(headers=headers)
except:
# we do not need to return if any error happens inside end()
# but it would be nice to see the traceback
                log.warning(traceback.format_exc())
logging.shutdown()
# Note: The functions registered via this module are not called when the
# program is killed by a signal not handled by Python, when a Python fatal
# internal error is detected, or when os._exit() is called.
atexit.register(_deinitialize)
def disable():
global _probe
if not _probe:
return
_probe.disable()
log.debug("probe.disable() called.")
def end(headers={}, omit_sys_path_dirs=_DEFAULT_OMIT_SYS_PATH):
'''
headers: additional headers to send along with the final profile data.
'''
global _probe
if not _probe:
return
log.debug("probe.end() called.")
return _probe.end()
@contextmanager
def run(call_end=True):
enable()
try:
yield
finally:
disable()
if call_end:
end()
| 1.835938 | 2 |
basta/apps.py | lorenzosp93/basta_app | 1 | 12787814 | <gh_stars>1-10
from django.apps import AppConfig
class BastaConfig(AppConfig):
name = 'basta'
| 1.15625 | 1 |
src/price_scraper.py | AndPerCast/DeepPantry | 1 | 12787815 | <reponame>AndPerCast/DeepPantry<gh_stars>1-10
"""Real-time web scraper for product prices.
This module aims at easing product price
information gathering from a certain website.
Author:
<NAME>
"""
from bs4 import BeautifulSoup
import requests
from typing import Tuple, List
SOURCE_URL: str = "https://www.trolley.co.uk"
"""Base website url to scrape data from."""
def scrape_prices(product_names: List[str],
parser: str = "lxml") -> List[Tuple[str, str, float, str]]:
"""Retrieves price information for given products.
Args:
product_names: Common names for products.
parser: HTML parser used by the scraper.
Returns:
        A list of (product name, purchase link, price, currency) tuples,
        one per requested product. Default values are returned when the
        gathering process fails for a product, as shown in the example.
Example::
>>> scrape_prices(["soda"])
        [('soda', 'https://www.trolley.co.uk/product/vive-soda-water/FTB465', 0.25, '£')]
>>> scrape_prices(["?"])
        [('?', '', 0.0, '')]
"""
result: List[Tuple[str, str, float, str]] = []
with requests.Session() as s:
for name in product_names:
# Make a complete url to fetch data for current product.
url: str = f"{SOURCE_URL}/search/?q={name.lower()}"
try:
response: requests.Response = s.get(url)
response.raise_for_status()
soup = BeautifulSoup(response.text, parser)
except (requests.ConnectionError, requests.Timeout, requests.HTTPError):
result.append((name, "", 0.0, ""))
continue
product_data: List[Tuple[str, str, float, str]] = []
# Find all product entries and extract data from their child tags.
for product in soup.find_all("div", class_="product-listing"):
link: str = SOURCE_URL + product.a["href"]
# Ignore extra price data from descendent tags.
price_str = str(product.a.find("div", class_="_price").contents[0])
price = float(price_str[1:])
currency = price_str[0]
product_data.append((name, link, price, currency))
# Find product with lowest price.
result.append(min(product_data, key=lambda data: data[2],
default=(name, "", 0.0, "")))
return result
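# Minimal usage sketch; the product names below are arbitrary examples and the
# call requires network access to SOURCE_URL at run time.
if __name__ == "__main__":
    for entry in scrape_prices(["soda", "rice"]):
        print(entry)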
| 3.234375 | 3 |
dsketch/experiments/classifiers/models.py | yumaloop/DifferentiableSketching | 0 | 12787816 | <reponame>yumaloop/DifferentiableSketching<filename>dsketch/experiments/classifiers/models.py<gh_stars>0
import importlib
import torch.nn as nn
import torch.nn.functional as F
from dsketch.experiments.shared.utils import list_class_names
class MnistCNN(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 30, (5, 5), padding=0, stride=1)
self.conv2 = nn.Conv2d(30, 15, (5, 5), padding=0, stride=1)
self.fc1 = nn.Linear(6000, 128)
self.fc2 = nn.Linear(128, 50)
self.fc3 = nn.Linear(50, 10)
def lock_features(self, lock):
self.conv1.requires_grad_(not lock)
self.conv2.requires_grad_(not lock)
self.fc1.requires_grad_(not lock)
self.fc2.requires_grad_(not lock)
def get_feature(self, x):
out = self.conv1(x)
out = F.relu(out)
out = self.conv2(out)
out = F.relu(out)
out = out.view(out.shape[0], -1)
out = self.fc1(out)
out = F.relu(out)
out = self.fc2(out)
return out
def forward(self, x):
out = self.get_feature(x)
out = F.relu(out)
out = self.fc3(out)
return out
class ScaledMnistCNN(MnistCNN):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 30, (5, 5), padding=0, stride=2)
self.conv2 = nn.Conv2d(30, 15, (5, 5), padding=0, stride=2)
self.fc1 = nn.Linear(55815, 128)
class OmniglotCNN(nn.Module):
"""
Omniglot DCN as described in the sup. mat. of the paper.
This is the "larger" variant for the full 30 alphabet pretraining on 28x28 images. I've guessed there was no zero
padding and the dropout probability was 0.5.
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 120, (5, 5), padding=0, stride=1)
self.conv2 = nn.Conv2d(120, 300, (5, 5), padding=0, stride=1)
self.mp = nn.MaxPool2d((2, 2), stride=(2, 2))
self.fc1 = nn.Linear(30000, 3000)
self.drop = nn.Dropout(p=0.5)
self.fc2 = nn.Linear(3000, 964)
def lock_features(self, lock):
self.conv1.requires_grad_(not lock)
self.conv2.requires_grad_(not lock)
self.fc1.requires_grad_(not lock)
def get_feature(self, x):
out = self.conv1(x)
out = F.relu(out)
out = self.conv2(out)
out = F.relu(out)
out = self.mp(out)
out = out.view(out.shape[0], -1)
out = self.fc1(out)
out = F.relu(out)
return out
def forward(self, x):
out = self.get_feature(x)
out = self.drop(out)
out = self.fc2(out)
return out
class LakeThesisCNN(nn.Module):
"""
    Omniglot CNN as described in Lake's thesis
This is for the full 30 alphabet pretraining on 28x28 images. I've guessed there was no zero padding and the dropout
probability was 0.5.
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 200, (10, 10), padding=0, stride=1)
self.mp = nn.MaxPool2d((2, 2), stride=(2, 2))
self.fc1 = nn.Linear(16200, 400)
self.drop = nn.Dropout(p=0.5)
self.fc2 = nn.Linear(400, 964)
def lock_features(self, lock):
self.conv1.requires_grad_(not lock)
self.fc1.requires_grad_(not lock)
def get_feature(self, x):
out = self.conv1(x)
out = F.relu(out)
out = self.mp(out)
out = out.view(out.shape[0], -1)
out = self.fc1(out)
out = F.relu(out)
return out
def forward(self, x):
out = self.get_feature(x)
out = self.drop(out)
out = self.fc2(out)
return out
class _BetterCNN(nn.Module):
def __init__(self, nclasses):
super().__init__()
self.cnn = nn.Sequential(
nn.Conv2d(1, 64, (3, 3), padding=1, stride=1),
nn.BatchNorm2d(64, affine=True),
nn.ReLU(),
nn.Conv2d(64, 64, (3, 3), padding=1, stride=1),
nn.BatchNorm2d(64, affine=True),
nn.ReLU(),
nn.Conv2d(64, 64, (3, 3), padding=1, stride=1),
nn.BatchNorm2d(64, affine=True),
nn.ReLU(),
nn.Conv2d(64, 64, (3, 3), padding=1, stride=1),
nn.BatchNorm2d(64, affine=True),
nn.ReLU(),
nn.AdaptiveAvgPool2d((8, 8)),
)
self.fc1 = nn.Linear(8 * 8 * 64, 1024)
self.fc2 = nn.Linear(1024, nclasses)
def lock_features(self, lock):
self.cnn.requires_grad_(not lock)
self.fc1.requires_grad_(not lock)
def get_feature(self, x):
x = self.cnn(x)
x = x.view(x.shape[0], -1)
x = self.fc1(x)
return x
def forward(self, x):
x = self.get_feature(x)
x = F.relu(x)
x = self.fc2(x)
return x
class MNISTBetterCNN(_BetterCNN):
def __init__(self):
super().__init__(10)
class OmniglotBetterCNN(_BetterCNN):
def __init__(self):
super().__init__(964)
def get_model(name):
# load a model class by name
module = importlib.import_module(__package__ + '.models')
return getattr(module, name)
def model_choices():
return list(filter(lambda x: not x.startswith('_'), list_class_names(nn.Module, __package__ + '.models')))
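

if __name__ == "__main__":
    # Quick shape check (added for illustration): MNIST models expect
    # 1-channel 28x28 inputs, as implied by the layer sizes above.
    import torch

    model = MnistCNN()
    dummy = torch.zeros(1, 1, 28, 28)
    print(model(dummy).shape)  # expected: torch.Size([1, 10])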
| 2.25 | 2 |
third_party/a2c_ppo_acktr/main.py | jyf588/SimGAN | 30 | 12787817 | # MIT License
#
# Copyright (c) 2017 <NAME> and (c) 2020 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import time
from collections import deque
import gym
import numpy as np
import torch
from third_party.a2c_ppo_acktr import algo, utils
from third_party.a2c_ppo_acktr.arguments import get_args
from third_party.a2c_ppo_acktr.envs import make_vec_envs
from third_party.a2c_ppo_acktr.model import Policy
from third_party.a2c_ppo_acktr.storage import RolloutStorage
from my_pybullet_envs import utils as gan_utils
import logging
import sys
from my_pybullet_envs.laikago import mirror_obs, mirror_action
sys.path.append("third_party")
def main():
args, extra_dict = get_args()
    # this file is for normal PPO training; sim-gan (gail-dyn) training lives in main_gail_dyn_ppo.py
assert not args.gail
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
log_dir = os.path.expanduser(args.log_dir)
eval_log_dir = log_dir + "_eval"
utils.cleanup_log_dir(log_dir)
utils.cleanup_log_dir(eval_log_dir)
torch.set_num_threads(1)
device = torch.device("cuda:0" if args.cuda else "cpu")
Tensor = torch.cuda.FloatTensor if args.cuda else torch.FloatTensor
envs = make_vec_envs(args.env_name, args.seed, args.num_processes,
args.gamma, args.log_dir, device, False, render=False, **extra_dict)
if args.warm_start == '':
actor_critic = Policy(
envs.observation_space.shape,
envs.action_space,
base_kwargs={'recurrent': args.recurrent_policy, 'hidden_size': args.hidden_size})
actor_critic.to(device)
else:
# TODO: assume no state normalize ob_rms
if args.cuda:
actor_critic, _ = torch.load(args.warm_start)
else:
actor_critic, _ = torch.load(args.warm_start, map_location='cpu')
actor_critic.reset_critic(envs.observation_space.shape)
if args.warm_start_logstd is not None:
actor_critic.reset_variance(envs.action_space, args.warm_start_logstd)
actor_critic.to(device)
dummy = gym.make(args.env_name, render=False, **extra_dict)
save_path = os.path.join(args.save_dir, args.algo)
print("SAVE PATH:")
print(save_path)
try:
os.makedirs(save_path)
except FileExistsError:
print("warning: path existed")
# input("warning: path existed")
except OSError:
exit()
pathname = os.path.join(save_path, "source_test.py")
text_file = open(pathname, "w+")
text_file.write(dummy.getSourceCode())
text_file.close()
print("source file stored")
# input("source file stored press enter")
dummy.reset()
# dummy.close()
log_formatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
file_handler = logging.FileHandler("{0}/{1}.log".format(save_path, "console_output"))
file_handler.setFormatter(log_formatter)
root_logger.addHandler(file_handler)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(log_formatter)
root_logger.addHandler(console_handler)
if args.algo == 'a2c':
agent = algo.A2C_ACKTR(
actor_critic,
args.value_loss_coef,
args.entropy_coef,
lr=args.lr,
eps=args.eps,
alpha=args.alpha,
max_grad_norm=args.max_grad_norm)
elif args.algo == 'ppo':
if args.loss_sym > 0.0:
agent = algo.PPO(
actor_critic,
args.clip_param,
args.ppo_epoch,
args.num_mini_batch,
args.value_loss_coef,
args.entropy_coef,
symmetry_coef=args.loss_sym,
lr=args.lr,
eps=args.eps,
max_grad_norm=args.max_grad_norm,
mirror_act=mirror_action,
mirror_obs=mirror_obs
)
else:
agent = algo.PPO(
actor_critic,
args.clip_param,
args.ppo_epoch,
args.num_mini_batch,
args.value_loss_coef,
args.entropy_coef,
lr=args.lr,
eps=args.eps,
max_grad_norm=args.max_grad_norm)
elif args.algo == 'acktr':
agent = algo.A2C_ACKTR(
actor_critic, args.value_loss_coef, args.entropy_coef, acktr=True)
else:
agent = None
feat_select_func = None
obs = envs.reset()
obs_feat = gan_utils.replace_obs_with_feat(obs, args.cuda, feat_select_func, return_tensor=True)
feat_len = obs_feat.size(1) # TODO: multi-dim obs broken
if args.dup_sym:
buffer_np = args.num_processes * 2
else:
buffer_np = args.num_processes
rollouts = RolloutStorage(args.num_steps, buffer_np,
envs.observation_space.shape, envs.action_space,
actor_critic.recurrent_hidden_state_size,
feat_len)
rollouts.to(device)
if args.dup_sym:
obs_s = gan_utils.mirror_obsact_batch(obs, args.cuda, mirror_obs, augment=True)
obs_feat_s = obs_feat.repeat(2, 1)
rollouts.obs[0].copy_(obs_s)
rollouts.obs_feat[0].copy_(obs_feat_s)
else:
rollouts.obs[0].copy_(obs)
rollouts.obs_feat[0].copy_(obs_feat)
episode_rewards = deque(maxlen=10000)
total_num_episodes = 0
j = 0
max_num_episodes = args.num_episodes if args.num_episodes else np.infty
start = time.time()
num_updates = int(
args.num_env_steps) // args.num_steps // args.num_processes
while j < num_updates and total_num_episodes < max_num_episodes:
if args.use_linear_lr_decay:
# decrease learning rate linearly
utils.update_linear_schedule(
agent.optimizer, j, num_updates,
agent.optimizer.lr if args.algo == "acktr" else args.lr)
for step in range(args.num_steps):
# print(args.num_steps) 300*8
# Sample actions
with torch.no_grad():
value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
rollouts.obs[step, :args.num_processes, :],
rollouts.recurrent_hidden_states[step, :args.num_processes, :],
rollouts.masks[step, :args.num_processes, :])
            # Observe reward and next obs
obs, reward, done, infos = envs.step(action)
obs_feat = gan_utils.replace_obs_with_feat(obs, args.cuda, feat_select_func, return_tensor=True)
for info in infos:
if 'episode' in info.keys():
episode_rewards.append(info['episode']['r'])
# If done then clean the history of observations.
masks = Tensor(
[[0.0] if done_ else [1.0] for done_ in done])
bad_masks = Tensor(
[[0.0] if 'bad_transition' in info.keys() else [1.0]
for info in infos])
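            # Note: with --dup_sym every transition is stored twice -- once as
            # collected and once mirrored via mirror_obs/mirror_action -- so
            # the rollout buffer holds 2x num_processes trajectories
            # (symmetry augmentation).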
if args.dup_sym:
obs_s = gan_utils.mirror_obsact_batch(obs, args.cuda, mirror_obs, augment=True)
action_s = gan_utils.mirror_obsact_batch(action, args.cuda, mirror_action, augment=True)
recurrent_hidden_states_s = recurrent_hidden_states.repeat(2, 1)
action_log_prob_s = action_log_prob.repeat(2, 1)
value_s = value.repeat(2, 1)
reward_s = reward.repeat(2, 1)
masks_s = masks.repeat(2, 1)
bad_masks_s = bad_masks.repeat(2, 1)
obs_feat_s = obs_feat.repeat(2, 1)
rollouts.insert(obs_s, recurrent_hidden_states_s, action_s,
action_log_prob_s, value_s, reward_s, masks_s, bad_masks_s, obs_feat_s)
else:
rollouts.insert(obs, recurrent_hidden_states, action,
action_log_prob, value, reward, masks, bad_masks, obs_feat)
with torch.no_grad():
next_value = actor_critic.get_value(
rollouts.obs[-1], rollouts.recurrent_hidden_states[-1],
rollouts.masks[-1]).detach()
rollouts.compute_returns(next_value, args.use_gae, args.gamma,
args.gae_lambda, not args.no_proper_time_limits)
value_loss, action_loss, dist_entropy = agent.update(rollouts)
rollouts.after_update()
# save for every interval-th episode or for the last epoch
if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "":
torch.save([
actor_critic,
getattr(utils.get_vec_normalize(envs), 'ob_rms', None)
], os.path.join(save_path, args.env_name + ".pt"))
torch.save([
actor_critic,
getattr(utils.get_vec_normalize(envs), 'ob_rms', None)
], os.path.join(save_path, args.env_name + "_" + str(j) + ".pt"))
if j % args.log_interval == 0 and len(episode_rewards) > 1:
total_num_steps = (j + 1) * args.num_processes * args.num_steps
end = time.time()
root_logger.info(
("Updates {}, num timesteps {}, FPS {} \n Last {} training episodes:" +
" mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}, " +
"dist en {}, l_pi {}, l_vf {} \n").format(
j, total_num_steps,
int(total_num_steps / (end - start)),
len(episode_rewards), np.mean(episode_rewards),
np.median(episode_rewards), np.min(episode_rewards),
np.max(episode_rewards), dist_entropy, value_loss,
action_loss
)
)
# actor_critic.dist.logstd._bias,
total_num_episodes += len(episode_rewards)
episode_rewards.clear()
j += 1
if __name__ == "__main__":
main()
| 1.367188 | 1 |
test/unit/test_apply.py | asmacdo/openshift-restclient-python | 1 | 12787818 | <reponame>asmacdo/openshift-restclient-python
# Test ConfigMapHash and SecretHash equivalents
# tests based on https://github.com/kubernetes/kubernetes/pull/49961
from openshift.dynamic.apply import merge
tests = [
dict(
last_applied = dict(
kind="ConfigMap",
metadata=dict(name="foo"),
data=dict(one="1", two="2")
),
desired = dict(
kind="ConfigMap",
metadata=dict(name="foo"),
data=dict(one="1", two="2")
),
expected = {}
),
dict(
last_applied = dict(
kind="ConfigMap",
metadata=dict(name="foo"),
data=dict(one="1", two="2")
),
desired = dict(
kind="ConfigMap",
metadata=dict(name="foo"),
data=dict(one="1", two="2", three="3")
),
expected = dict(data=dict(three="3"))
),
dict(
last_applied = dict(
kind="ConfigMap",
metadata=dict(name="foo"),
data=dict(one="1", two="2")
),
desired = dict(
kind="ConfigMap",
metadata=dict(name="foo"),
data=dict(one="1", three="3")
),
expected = dict(data=dict(two=None, three="3"))
),
dict(
last_applied = dict(
kind="Service",
metadata=dict(name="foo"),
spec=dict(ports=[dict(port=8080, name="http")])
),
actual = dict(
kind="Service",
metadata=dict(name="foo"),
spec=dict(ports=[dict(port=8080, protocol='TCP', name="http")])
),
desired = dict(
kind="Service",
metadata=dict(name="foo"),
spec=dict(ports=[dict(port=8080, name="http")])
),
expected = {}
),
dict(
last_applied = dict(
kind="Service",
metadata=dict(name="foo"),
spec=dict(ports=[dict(port=8080, name="http")])
),
actual = dict(
kind="Service",
metadata=dict(name="foo"),
spec=dict(ports=[dict(port=8080, protocol='TCP', name="http")])
),
desired = dict(
kind="Service",
metadata=dict(name="foo"),
spec=dict(ports=[dict(port=8081, name="http")])
),
expected = dict(spec=dict(ports=[dict(port=8081, name="http")]))
),
# This last one is based on a real world case where definition was mostly
# str type and everything else was mostly unicode type (don't ask me how)
dict(
last_applied = {
u'kind': u'ConfigMap',
u'data': {u'one': '1', 'three': '3', 'two': '2'},
u'apiVersion': u'v1',
u'metadata': {u'namespace': u'apply', u'name': u'apply-configmap'}
},
actual = {
u'kind': u'ConfigMap',
u'data': {u'one': '1', 'three': '3', 'two': '2'},
u'apiVersion': u'v1',
u'metadata': {u'namespace': u'apply', u'name': u'apply-configmap',
u'resourceVersion': '1714994',
u'creationTimestamp': u'2019-08-17T05:08:05Z', u'annotations': {},
u'selfLink': u'/api/v1/namespaces/apply/configmaps/apply-configmap',
u'uid': u'fed45fb0-c0ac-11e9-9d95-025000000001'}
},
desired = {
'kind': u'ConfigMap',
'data': {'one': '1', 'three': '3', 'two': '2'},
'apiVersion': 'v1',
'metadata': {'namespace': 'apply', 'name': 'apply-configmap'}
},
expected = dict()
),
]
def test_merges():
for test in tests:
assert(merge(test['last_applied'], test['desired'], test.get('actual', test['last_applied'])) == test['expected'])
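

if __name__ == '__main__':
    # Illustrative direct call (not part of the test suite): merge() takes
    # (last_applied, desired, actual) and returns the patch to apply -- here
    # it adds key "three", as in the second test case above.
    example = tests[1]
    print(merge(example['last_applied'], example['desired'],
                example.get('actual', example['last_applied'])))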
| 2.484375 | 2 |
pgcolorbar/__init__.py | franzhaas/pgcolorbar | 5 | 12787819 | """ PgColorbar. A colorbar to use in PyQtGraph
"""
from .misc import __version__
| 1.132813 | 1 |
Teoria/3/3.py | camilaffonseca/Learning_Python | 1 | 12787820 | <reponame>camilaffonseca/Learning_Python
# coding: utf-8
# Conditional structures
# mensagem = input('Você > ')
# while mensagem != 'sair':
# if mensagem == 'ola':
# print('Robô - Olá também!')
# elif mensagem == 'bom dia':
# print('Robô - Bom dia para você também!')
# elif mensagem == 'tchau':
# print('Robô - Que pena que vai embora :(\nTchau!')
# else:
# print('Robô - Não consigo entender o que você disse :/')
# mensagem = input('Você > ')
# senha = '<PASSWORD>'
# senha_gerente = input('Digite a senha do sistema: ')
# if senha == senha_gerente:
# salario = float(input('Qual o salário do funcionário em reais? '))
# while True:
# print('''
# Selecione a opção do aumento correspondetnte:
# 1 - Aumento de 5%
# 2 - Aumento de 10%
# ''')
# opcao = input('Digite a opção > ')
# if opcao == '1':
# aumento = 5
# salario2 = salario + (salario * 5 / 100)
# print(f'O salário de R${salario:.2f} com aumento de {aumento} porcento ficaria \
# no valor de R${salario2:.2f}')
# break
# elif opcao == '2':
# aumento = 10
# salario2 = salario + (salario * 10 / 100)
# print(f'O salário de R${salario:.2f} com aumento de {aumento} porcento ficaria \
# no valor de R${salario2:.2f}')
# break
# else:
# print('Opção inválida!')
# else:
# print('Senha incorreta! Saia daqui!')
| 4.125 | 4 |
src/perfomance_test/main.py | roman-baldaev/elastic_vs_sphinx_test | 0 | 12787821 | from search_test import SearchTest, SearchTestElastic
if __name__ == "__main__":
test = SearchTestElastic(timeout=50, file_for_save=
'/home/roman/Projects/ElasticMongoTest/test_results_csv/ElasticsearchTest.csv')
# test.search_substrings_or(['Colorado', 'USA', 'President', 'Washington', 'December',
# 'Book', 'Ford', 'million', 'Apple', 'Official',
# 'year', 'Bank', 'Study', 'University', 'blood'],
# )
# test.search_substring(['Washington', 'Russia', 'USA', 'MTV', 'London', 'Crime', 'Science',
# 'good', 'kosdfsd', 'luck'], 'news100gb')
# print(test.size_of_object('news10gb'))
test.size_of_object('news14gb', 'message')
print(test.size)
# test.search_substrings_or(['MTV', 'London'],
# )
# test.show_results()
| 2.625 | 3 |
eight/main.py | yumenetwork/isn-tkinter | 0 | 12787822 | <gh_stars>0
from tkinter import *
root = Tk()
root.geometry("300x300")
# Functions
def quitter():
root.quit()
root.destroy()
def left(event):
x1, y1, x2, y2 = draw.coords(ball)
draw.coords(ball, x1 - 5, y1, x2 - 5, y2)
def right(event):
x1, y1, x2, y2 = draw.coords(ball)
draw.coords(ball, x1 + 5, y1, x2 + 5, y2)
def up(event):
x1, y1, x2, y2 = draw.coords(ball)
draw.coords(ball, x1, y1 - 5, x2, y2 - 5)
def down(event):
x1, y1, x2, y2 = draw.coords(ball)
draw.coords(ball, x1, y1 + 5, x2, y2 + 5)
# Widgets
draw = Canvas(root)
boutonQuitter = Button(root, text='Quitter', command=quitter)
# Display the widgets
draw.pack()
boutonQuitter.pack()
# Main
ball = draw.create_oval(100, 100, 150, 150, fill='red')
root.bind("<Left>", left)
root.bind("<Right>", right)
root.bind("<Up>", up)
root.bind("<Down>", down)
root.mainloop()
| 3.015625 | 3 |
services/web/server/src/simcore_service_webserver/version_control_models_snapshots.py | Surfict/osparc-simcore | 0 | 12787823 | <filename>services/web/server/src/simcore_service_webserver/version_control_models_snapshots.py<gh_stars>0
import warnings
from datetime import datetime
from typing import Any, Callable, Optional, Union
from uuid import UUID, uuid3
from pydantic import (
AnyUrl,
BaseModel,
Field,
PositiveInt,
StrictBool,
StrictFloat,
StrictInt,
)
from pydantic.main import BaseConfig
from yarl import URL
warnings.warn(
"version_control_*_snapshots.py modules are the first generation of vc."
"It is just temporarily kept it functional until it gets fully replaced",
DeprecationWarning,
)
BuiltinTypes = Union[StrictBool, StrictInt, StrictFloat, str]
## Domain models --------
class BaseSnapshot(BaseModel):
class Config(BaseConfig):
orm_mode = True
# parses with alias and/or field name -> can parse from API or db data models
allow_population_by_field_name = True
@classmethod
def as_field(cls, name) -> Any:
return cls.__fields__[name].field_info
class Snapshot(BaseSnapshot):
id: PositiveInt = Field(None, description="Unique snapshot identifier")
label: Optional[str] = Field(
None, description="Unique human readable display name", alias="name"
)
created_at: datetime = Field(
default_factory=datetime.utcnow,
description="Timestamp of the time snapshot was taken from parent."
"Notice that parent might change with time",
)
parent_uuid: UUID = Field(..., description="Parent's project uuid")
project_uuid: UUID = Field(..., description="Current project's uuid")
# TODO: can project_uuid be cached_property??
# SEE BaseCustomSettings.Config and do not forget keep_untouched option!
@staticmethod
def compose_project_uuid(
parent_uuid: Union[UUID, str], snapshot_timestamp: datetime
) -> UUID:
if isinstance(parent_uuid, str):
parent_uuid = UUID(parent_uuid)
return uuid3(parent_uuid, f"snapshot.{snapshot_timestamp}")
## API models ----------
class SnapshotPatch(BaseSnapshot):
label: str = Snapshot.as_field("label")
class SnapshotItem(Snapshot):
"""API model for an array item of snapshots"""
url: AnyUrl
url_parent: AnyUrl
url_project: AnyUrl
@classmethod
def from_snapshot(
cls, snapshot: Snapshot, url_for: Callable[..., URL], prefix: str
) -> "SnapshotItem":
# TODO: is this NOT the right place? requires pre-defined routes
# how to guarantee routes names
return cls(
url=url_for(
f"{prefix}.get_project_snapshot_handler",
project_id=snapshot.parent_uuid,
snapshot_id=snapshot.id,
),
url_parent=url_for("get_project", project_id=snapshot.parent_uuid),
url_project=url_for("get_project", project_id=snapshot.project_uuid),
**snapshot.dict(by_alias=True),
)
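

# Illustrative usage (not part of the module API); the uuid value below is
# made up for the example:
#
#     parent = UUID("12345678-1234-5678-1234-567812345678")
#     taken_at = datetime.utcnow()
#     project_uuid = Snapshot.compose_project_uuid(parent, taken_at)
#     snapshot = Snapshot(id=1, parent_uuid=parent, project_uuid=project_uuid)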
| 2.078125 | 2 |
flamingo/core/errors.py | rohieb/flamingo | 0 | 12787824 | <gh_stars>0
class FlamingoError(Exception):
pass
class DataModelError(FlamingoError):
pass
class MultipleObjectsReturned(DataModelError):
def __init__(self, query, *args, **kwargs):
self.query = query
return super().__init__(*args, **kwargs)
def __str__(self):
return 'multiple objects returned for query {}'.format(self.query)
class ObjectDoesNotExist(DataModelError):
def __init__(self, query, *args, **kwargs):
self.query = query
return super().__init__(*args, **kwargs)
def __str__(self):
return 'no object returned for query {}'.format(self.query)
| 2.796875 | 3 |
tests/exploratory/grad_fns.py | varun19299/FeatherMap | 14 | 12787825 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
V1 = Parameter(torch.randn(3, 3, requires_grad=True))
V2 = Parameter(torch.randn(3, 3, requires_grad=True))
W = torch.randn(2, 2)
bias = torch.zeros(2)
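
# update() writes the entries of V1 @ V2^T into W in place (through flattened
# views), so later uses of W can stay connected to V1/V2 in the autograd
# graph -- the gradient functions this exploratory script is poking at.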
def update(V, W):
V = torch.matmul(V1, V2.transpose(0, 1))
i = 0
V = V.view(-1, 1)
W = W.view(-1, 1)
for j in range(len(W)):
W[j] = V[i]
i += 1
def forward(x, W, bias):
return F.linear(x, W, bias)
print("V {}".format(V))
print("W {}".format(W))
update(V, W)
print("V {}".format(V))
print("W {}".format(W))
x = torch.randn(2)
g = torch.ones(2)
print(x)
print(forward(x, W, bias).norm())  # call norm(); a bare .norm just prints the bound method
y = forward(x, W, bias)
print(y)
print(y.reshape(-1,1))
# cross_entropy expects integer class indices (or a proper probability
# distribution) as the target
loss = F.cross_entropy(y.reshape(1, -1), torch.tensor([1]))
print(loss)
forward(x, W, bias).backward(g)
| 2.96875 | 3 |
opslib/restparser.py | OpenSwitchNOS/openswitch-ops-restd | 0 | 12787826 | #!/usr/bin/env python
# Copyright (C) 2015-2016 Hewlett-Packard Enterprise Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import getopt
import re
import string
import sys
import inflect
from copy import deepcopy
import ovs.daemon
from ovs.db import error, types
import ovs.db.idl
import ovs.db.parser
import ovs.json
import ovs.dirs
import ovs.util
# Global variables
inflect_engine = inflect.engine()
# Schema constants
OVSDB_SCHEMA_CONFIG = 'configuration'
OVSDB_SCHEMA_STATS = 'statistics'
OVSDB_SCHEMA_STATUS = 'status'
OVSDB_SCHEMA_REFERENCE = 'reference'
OVSDB_CATEGORY_PERVALUE = 'per-value'
OVSDB_CATEGORY_FOLLOWS = 'follows'
# Relationship type map
RELATIONSHIP_MAP = {
'1:m': 'child',
'm:1': 'parent',
'reference': 'reference'
}
# On demand fetched tables
FETCH_TYPE_PARTIAL = 0
FETCH_TYPE_FULL = 1
ON_DEMAND_FETCHED_TABLES = {
"BGP_Route": FETCH_TYPE_PARTIAL,
"BGP_Nexthop": FETCH_TYPE_PARTIAL,
"Route": FETCH_TYPE_PARTIAL,
"Nexthop": FETCH_TYPE_PARTIAL
}
# Convert name into all lower case and into plural (default) or singular format
def normalizeName(name, to_plural=True):
lower_case = name.lower()
# Assuming table names use underscore to link words
words = string.split(lower_case, '_')
if to_plural:
words[-1] = inflect_engine.plural_noun(words[-1])
else:
words[-1] = inflect_engine.singular_noun(words[-1])
return(string.join(words, '_'))
class OVSColumn(object):
'''
An instance of OVSColumn represents a column
from the OpenSwitch Extended Schema. Attributes:
- name: the column's name
- category: the column's category
- is_optional: whether the column is required to have a value
- mutable: whether the column is modifiable after creation
- enum: possible values for the column or column's keys
- type: the column's (or key's) base type
- rangeMin: the column's (or key's) data range
- rangeMax: the column's (or key's) data range
- value_type: if a map, the value's base type
- valueRangeMin: if a map, the value's data range
- valueRangeMax: if a map, the value's data range
- is_dict: whether the column is a map/dictionary
- is_list: whether the column is a list
- n_min: the column's minimum number of elements
- n_max: the column's maximum number of elements
- kvs: if a map, this holds each key's value type information
- keyname: name used to identify the reference (if a kv reference)
- desc: the column's documentation/description text
- emptyValue: value assumed for the column, if is_optional=True and empty
'''
def __init__(self, table_name, column_name, ovs_base_type,
is_optional=True, mutable=True, category=None,
emptyValue=None, valueMap=None, keyname=None,
col_doc=None, group=None, loadDescription=False):
key_type = ovs_base_type.key
value_type = ovs_base_type.value
self.name = column_name
self.category = category
self.is_optional = is_optional
self.emptyValue = emptyValue
self.mutable = mutable
self.enum = key_type.enum
self.keyname = keyname
# Process the column's (or key's) base type
self.type, self.rangeMin, self.rangeMax = self.process_type(key_type)
# If a map, process the value's base type
self.value_type = None
if value_type is not None:
self.value_type, self.valueRangeMin, self.valueRangeMax = \
self.process_type(value_type)
# Information regarding the column's nature and number of elements
self.is_dict = self.value_type is not None
self.is_list = (not self.is_dict) and ovs_base_type.n_max > 1
self.n_max = ovs_base_type.n_max
self.n_min = ovs_base_type.n_min
self.kvs = {}
self.process_valuemap(valueMap, loadDescription)
self.desc = col_doc
def process_valuemap(self, valueMap, loadDescription):
'''
Processes information from the valueMap data structure in the
extended schema and fills the kvs dictionary for this column
'''
for key, value in valueMap.iteritems():
self.kvs[key] = {}
# Process the values's type
base_type = types.BaseType.from_json(value['type'])
_type, _min, _max = self.process_type(base_type)
enum = base_type.enum
# Store this key's type information in kvs
self.kvs[key]['type'] = _type
self.kvs[key]['rangeMin'] = _min
self.kvs[key]['rangeMax'] = _max
self.kvs[key]['enum'] = enum
# Setting is_optional per key so that eventually
# it can be set per key from data in the schema,
# REST's validation should already check this.
self.kvs[key]['is_optional'] = self.is_optional
# Process this key's documentation information
self.kvs[key]['desc'] = None
self.kvs[key]['group'] = None
if loadDescription:
if 'doc' in value:
self.kvs[key]['desc'] = ' '.join(value['doc'])
if 'group' in value:
self.kvs[key]['group'] = value['group']
def process_type(self, base):
__type = base.type
rangeMin = None
rangeMax = None
if __type == types.StringType:
if base.min_length is None:
rangeMin = 0
else:
rangeMin = base.min_length
if base.max_length is None:
rangeMax = sys.maxint
else:
rangeMax = base.max_length
elif __type == types.UuidType:
rangeMin = None
rangeMax = None
elif __type != types.BooleanType:
if base.min is None:
rangeMin = 0
else:
rangeMin = base.min
if base.max is None:
rangeMax = sys.maxint
else:
rangeMax = base.max
return (__type, rangeMin, rangeMax)
class OVSReference(OVSColumn):
'''
An instance of OVSReference represents a column from the OpenSwitch
Extended Schema that contains references to other tables. Attributes not
inherited from OVSColumn:
- kv_type: whether this is a kv reference
- kv_key_type: if a kv reference, the type of the key
- ref_table: the table to reference
- relation: relationship type between this column and the referenced table
- is_plural: whether the column is plural
'''
def __init__(self, table_name, column_name, ovs_base_type,
is_optional=True, mutable=True, category=None, valueMap=None,
keyname=None, col_doc=None, group=None,
relation=OVSDB_SCHEMA_REFERENCE, loadDescription=False):
super(OVSReference, self).__init__(table_name, column_name,
ovs_base_type, is_optional, mutable,
category, None, valueMap, keyname,
col_doc, group, loadDescription)
key_type = ovs_base_type.key
# Information of the table being referenced
self.kv_type = False
if key_type.type != types.UuidType:
# referenced table name must be in value part of KV pair
self.kv_type = True
self.kv_key_type = key_type.type
key_type = ovs_base_type.value
self.ref_table = key_type.ref_table_name
# Overwrite parsed type from parent class processing
self.type = key_type
# Relationship of the referenced to the current table
# one of child, parent or reference
if relation not in RELATIONSHIP_MAP.values():
raise error.Error('unknown table relationship %s' % relation)
else:
self.relation = relation
# The number of instances
self.is_plural = (self.n_max != 1)
class OVSColumnCategory(object):
def __init__(self, category):
self.dynamic = False
self.value = None
self.validate(category)
# Process category type
if isinstance(category, dict):
per_value_list = category.get(OVSDB_CATEGORY_PERVALUE,
None)
self.per_value = {}
if per_value_list:
for value_dict in per_value_list:
self.check_category(value_dict['category'])
self.per_value[value_dict['value']] = \
value_dict['category']
self.follows = category.get(OVSDB_CATEGORY_FOLLOWS,
None)
self.value = OVSDB_SCHEMA_CONFIG
self.dynamic = True
elif isinstance(category, (str, unicode)):
self.value = category
def __str__(self):
return self.value
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
elif isinstance(other, (str, unicode)):
return self.value == other
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def validate(self, category):
if category:
if isinstance(category, dict):
if not (OVSDB_CATEGORY_PERVALUE in category or
OVSDB_CATEGORY_FOLLOWS in category):
raise error.Error('Unknown category object '
'attributes')
elif isinstance(category, (str, unicode)):
self.check_category(category)
else:
raise error.Error('Unknown category type %s' % type(category))
def check_category(self, category):
if category not in [OVSDB_SCHEMA_CONFIG,
OVSDB_SCHEMA_STATS,
OVSDB_SCHEMA_STATUS]:
            raise error.Error('Unknown category: %s' % category)
class OVSTable(object):
'''__init__() functions as the class constructor'''
def __init__(self, name, is_root, is_many=True, desc=None,
groupsDesc=None):
self.name = name
self.plural_name = normalizeName(name)
self.is_root = is_root
# List of all column names
self.columns = []
# List of read-only column names
self.readonly_columns = []
# Is the table in plural form?
self.is_many = is_many
# Dictionary of configuration attributes (RW)
# column name to OVSColumn object mapping
self.config = {}
# Copy of configuration attributes
self.default_config = {}
# Dictionary of status attributes (Read-only)
# column name to OVSColumn object mapping
self.status = {}
# Dictionary of statistics attributes (Read-only)
# column name to OVSColumn object mapping
self.stats = {}
# Dictionay with category that is an object type
self.dynamic = {}
# Parent table name
self.parent = None
# Child table list
self.children = []
# List of table referenced
# table name to OVSReference object mapping
self.references = {}
# TODO: index columns are those columns that
# OVSDB uses for indexing rows in a table.
self.index_columns = None
# TODO: indexes was introduced to create unique URIs for
# resources. This is not always equal to index_columns
# and is a source of confusion. This should be removed
# eventually.
self.indexes = None
# Table's documentation strings. Named 'desc' to keep
# consistency with the 'desc' attribute in OVSColumn
self.desc = desc
# Table's documentation strings for group descriptions
self.groupsDesc = groupsDesc
@staticmethod
def from_json(_json, name, loadDescription):
parser = ovs.db.parser.Parser(_json, 'schema of table %s' % name)
columns_json = parser.get('columns', [dict])
mutable = parser.get_optional('mutable', [bool], True)
is_root = parser.get_optional('isRoot', [bool], False)
max_rows = parser.get_optional('maxRows', [int])
indexes_json = parser.get_optional('indexes', [list], [[]])
doc = None
groupsDoc = None
# Though these will not be used if documentation is not
# loaded, they have to be parsed or OVS' Parser will fail
_title = parser.get_optional('title', [str, unicode])
_doc = parser.get_optional('doc', [list, str, unicode])
_groups_doc = parser.get_optional('groupDoc', [dict])
if loadDescription:
doc = []
if _title:
doc = [_title]
if _doc:
doc.extend(_doc)
doc = ' '.join(doc)
if _groups_doc:
groupsDoc = _groups_doc
parser.finish()
if max_rows is None:
max_rows = sys.maxint
elif max_rows <= 0:
raise error.Error('maxRows must be at least 1', _json)
if not columns_json:
raise error.Error('table must have at least one column', _json)
table = OVSTable(name, is_root, max_rows != 1, desc=doc,
groupsDesc=groupsDoc)
table.index_columns = indexes_json[0]
for column_name, column_json in columns_json.iteritems():
            parser = ovs.db.parser.Parser(column_json, 'column %s' % column_name)
# The category can be a str or a object. The object inside can
# have the following keys:
# per-value: matches the possible value with the desired category
# follows: Reference to the column used to determine the column
# category
category = OVSColumnCategory(parser.get_optional('category',
[str, unicode,
dict]))
relationship = parser.get_optional('relationship', [str, unicode])
mutable = parser.get_optional('mutable', [bool], True)
# Ephemereal is not used (yet) in REST, but it's
# parsed so that the parser does not give an error
parser.get_optional('ephemeral', [bool], False)
emptyValue = parser.get_optional('emptyValue',
[int, str, unicode, bool])
keyname = parser.get_optional('keyname', [str, unicode])
# Pre-process type, cleaning it up from OPS modifications
# (e.g. 'valueMap', valueType, adding back 'set' to the
# enum format)
_type = parser.get('type', [dict, str, unicode])
convert_enums(_type)
valueMap = {}
if isinstance(_type, dict):
_type.pop('omitCodeGeneration', None)
valueMap = _type.pop('valueMap', {})
if valueMap:
_type['key'] = 'string'
_type['value'] = _type.pop('valueType', 'string')
# Load OVS type from type dictionary
_type = types.Type.from_json(_type)
# Parse global description for the column
col_doc = None
group = None
# Though these will not be used if documentation is not
# loaded, they have to be parsed or OVS' Parser will fail
_col_doc = parser.get_optional('doc', [list])
_group = parser.get_optional('group', [list, str, unicode])
if loadDescription:
if _col_doc:
col_doc = ' '.join(_col_doc)
group = _group
parser.finish()
is_column_skipped = False
is_readonly_column = False
is_optional = False
if isinstance(column_json['type'], dict):
if ('min' in column_json['type'] and
column_json['type']['min'] == 0):
is_optional = True
# An attribute will be able to get marked with relationship
# and category tags simultaneously. We are utilizing the
# new form of tagging as a second step.
# For now, we are using only one tag.
_mutable = mutable
if relationship is not None:
# A non-configuration OVSDB_SCHEMA_REFERENCE is never mutable,
# otherwise the parsed mutable flag is used
if relationship == OVSDB_SCHEMA_REFERENCE and \
category != OVSDB_SCHEMA_CONFIG:
_mutable = False
_relationship = RELATIONSHIP_MAP[relationship]
table.references[column_name] = OVSReference(table.name,
column_name,
_type,
is_optional,
_mutable,
category,
valueMap,
keyname,
col_doc,
group,
_relationship,
loadDescription)
else:
# Status and statistics columns are always mutable
if category != OVSDB_SCHEMA_CONFIG:
_mutable = True
ovs_column = OVSColumn(table.name, column_name, _type,
is_optional, _mutable, category,
emptyValue, valueMap, keyname,
col_doc, group, loadDescription)
# Save the column in its category group
if category == OVSDB_SCHEMA_CONFIG:
if name in ON_DEMAND_FETCHED_TABLES and \
ON_DEMAND_FETCHED_TABLES[name] == FETCH_TYPE_FULL:
is_readonly_column = True
table.config[column_name] = ovs_column
elif category == OVSDB_SCHEMA_STATUS:
is_readonly_column = True
table.status[column_name] = ovs_column
elif category == OVSDB_SCHEMA_STATS:
is_readonly_column = True
table.stats[column_name] = ovs_column
else:
# Skip columns that do not have a handled relationship or
# category.
is_column_skipped = True
# Add to the array the name of the dynamic column
if category.dynamic:
table.dynamic[column_name] = category
# If the column is readonly, check if it is an index. Indexes
# should not be registered as readonly columns in the case of a
# partial fetching. In full fetch, no columns are subscribed to, so
# consider all columns as readonly columns
if name in ON_DEMAND_FETCHED_TABLES and is_readonly_column:
if ON_DEMAND_FETCHED_TABLES[name] == FETCH_TYPE_PARTIAL and \
column_name in table.index_columns:
pass
else:
table.readonly_columns.append(str(column_name))
if not is_column_skipped:
table.columns.append(str(column_name))
# deepcopy of config attributes to prevent modification
# of config attributes when updating dynamic categories
table.default_config = deepcopy(table.config)
# Validate dynamic categories consistency
for column_name, category in table.dynamic.iteritems():
if category.follows is not None\
and category.follows not in table.columns:
raise error.Error('Follows column "%s"'
'doesn\'t exists at table "%s"'
% (category.follows, name))
# TODO: indexes should be removed eventually
table.indexes = []
if not table.index_columns:
table.indexes = ['uuid']
else:
for item in table.index_columns:
if item in table.references and\
table.references[item].relation == 'parent':
continue
table.indexes.append(item)
return table
class RESTSchema(object):
'''Schema for REST interface from an OVSDB database.'''
def __init__(self, name, version, tables, doc=None):
self.name = name
self.version = version
self.doc = doc
# A dictionary of table name to OVSTable object mappings
self.ovs_tables = tables
# get a table name map for all references
self.reference_map = {}
for table in self.ovs_tables:
for k, v in self.ovs_tables[table].references.iteritems():
if k not in self.reference_map:
self.reference_map[k] = v.ref_table
# tables that has the references to one table
self.references_table_map = {}
for table in self.ovs_tables:
tables_references = get_references_tables(self, table)
self.references_table_map[table] = tables_references
# get a plural name map for all tables
self.plural_name_map = {}
for table in self.ovs_tables.itervalues():
self.plural_name_map[table.plural_name] = table.name
@staticmethod
def from_json(_json, loadDescription):
parser = ovs.db.parser.Parser(_json, 'extended OVSDB schema')
# These are not used (yet), but the parser fails if they are not parsed
parser.get_optional('$schema', [str, unicode])
parser.get_optional('id', [str, unicode])
name = parser.get('name', ['id'])
version = parser.get_optional('version', [str, unicode])
tablesJson = parser.get('tables', [dict])
doc = None
# Though these will not be used if documentation is not
# loaded, they have to be parsed or OVS' Parser will fail
_doc = parser.get_optional('doc', [list])
if loadDescription:
if _doc:
doc = ' '.join(_doc)
parser.finish()
if (version is not None and
not re.match('[0-9]+\.[0-9]+\.[0-9]+$', version)):
raise error.Error('schema version "%s" not in format x.y.z'
% version)
tables = {}
for tableName, tableJson in tablesJson.iteritems():
tables[tableName] = OVSTable.from_json(tableJson, tableName,
loadDescription)
# Backfill the parent/child relationship info, mostly for
# parent pointers which cannot be handled in place.
for tableName, table in tables.iteritems():
for columnName, column in table.references.iteritems():
if column.relation == 'child':
table.children.append(columnName)
if tables[column.ref_table].parent is None:
tables[column.ref_table].parent = tableName
elif column.relation == 'parent':
if tableName not in tables[column.ref_table].children:
tables[column.ref_table].children.append(tableName)
table.parent = column.ref_table
return RESTSchema(name, version, tables, doc)
def convert_enums(_type):
'''
Looks for enums recursively in the dictionary and
converts them from a list of keywords, to an OVS 'set'.
E.g. from 'enum': [<keywords>] to 'enum': ['set', [<keywords>]]
'''
if isinstance(_type, dict):
if 'enum' in _type:
_type['enum'] = ['set', _type['enum']]
else:
for key in _type:
if isinstance(_type[key], dict):
convert_enums(_type[key])
def get_references_tables(schema, ref_table):
table_references = {}
for table in schema.ovs_tables:
columns = []
references = schema.ovs_tables[table].references
for column_name, reference in references.iteritems():
if reference.ref_table == ref_table:
columns.append(column_name)
if columns:
table_references[table] = columns
return table_references
def is_immutable(table, schema):
'''
A table is considered IMMUTABLE if REST API cannot add or
delete a row from it
'''
table_schema = schema.ovs_tables[table]
# ROOT table
if table_schema.is_root:
# CASE 1: if there are no indices, a root table is considered
# IMMUTABLE for REST API
# CASE 2: if there is at least one index of category
# OVSDB_SCHEMA_CONFIG, a root table is considered
# MUTABLE for REST API
# NOTE: an immutable table can still be modified by other daemons
# running on the switch. For example, system daemon can modify
# FAN table although REST cannot
return not _has_config_index(table, schema)
else:
# top level table e.g. Port
if table_schema.parent is None:
return not _has_config_index(table, schema)
else:
# child e.g. Bridge
# check if the reference in 'parent'
# is of category OVSDB_SCHEMA_CONFIG
parent = table_schema.parent
parent_schema = schema.ovs_tables[parent]
children = parent_schema.children
regular_children = []
for item in children:
if item in parent_schema.references:
regular_children.append(item)
ref = None
if table not in parent_schema.children:
for item in regular_children:
if parent_schema.references[item].ref_table == table:
ref = item
break
if parent_schema.references[ref].category == \
OVSDB_SCHEMA_CONFIG:
return False
else:
# back children
return not _has_config_index(table, schema)
return True
def _has_config_index(table, schema):
'''
return True if table has at least one index column of category
configuration
'''
for index in schema.ovs_tables[table].index_columns:
if index in schema.ovs_tables[table].config:
return True
elif index in schema.ovs_tables[table].references:
if schema.ovs_tables[table].references[index].category == \
OVSDB_SCHEMA_CONFIG:
return True
# no indices or no index columns with category configuration
return False
def parseSchema(schemaFile, title=None, version=None, loadDescription=False):
schema = RESTSchema.from_json(ovs.json.from_file(schemaFile),
loadDescription)
if title is None:
title = schema.name
if version is None:
version = 'UNKNOWN'
# add mutable flag to OVSTable
for name, table in schema.ovs_tables.iteritems():
table.mutable = not is_immutable(name, schema)
return schema
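

# Illustrative use of parseSchema() (the schema file name and table below are
# hypothetical):
#
#     schema = parseSchema('vswitch.extschema', loadDescription=True)
#     port = schema.ovs_tables['Port']
#     print(port.mutable, port.config.keys(), port.references.keys())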
def usage():
print '''\
%(argv0)s: REST API meta schema file parser
Parse the meta schema file based on OVSDB schema to obtain category and
relation information for each REST resource.
usage: %(argv0)s [OPTIONS] SCHEMA
where SCHEMA is an extended OVSDB schema in JSON format.
The following options are also available:
--title=TITLE use TITLE as title instead of schema name
--version=VERSION use VERSION to display on document footer
-h, --help display this help message\
''' % {'argv0': sys.argv[0]}
sys.exit(0)
if __name__ == '__main__':
try:
try:
options, args = getopt.gnu_getopt(sys.argv[1:], 'h',
['title=', 'version=', 'help'])
except getopt.GetoptError, geo:
sys.stderr.write('%s: %s\n' % (sys.argv[0], geo.msg))
sys.exit(1)
title = None
version = None
for key, value in options:
if key == '--title':
title = value
elif key == '--version':
version = value
elif key in ['-h', '--help']:
usage()
else:
sys.exit(0)
if len(args) != 1:
sys.stderr.write('Exactly 1 non-option arguments required '
'(use --help for help)\n')
sys.exit(1)
        schema = parseSchema(args[0], title, version, loadDescription=True)
        if schema.doc:
            print('Schema doc: %s' % schema.doc)
for table_name, table in schema.ovs_tables.iteritems():
print('Table %s: ' % table_name)
print('Parent = %s' % table.parent)
print('Description = %s' % table.desc)
print('Configuration attributes: ')
for column_name, column in table.config.iteritems():
print('Col name = %s: %s' % (column_name,
'plural' if column.is_list else 'singular'))
print('n_min = %d: n_max = %d' % (column.n_min, column.n_max))
print('key type = %s: min = %s, max = %s' % (column.type,
column.rangeMin, column.rangeMax))
print('key enum = %s' % column.enum)
print('key emptyValue = %s' % column.emptyValue)
print('key keyname = %s' % column.keyname)
print('key kvs = %s' % column.kvs)
if column.value_type is not None:
print('value type = %s: min = %s, max = %s' %
(column.value_type,
column.valueRangeMin,
column.valueRangeMax))
print('Status attributes: ')
for column_name, column in table.status.iteritems():
print('Col name = %s: %s' % (column_name,
'plural' if column.is_list else 'singular'))
print('n_min = %d: n_max = %d' % (column.n_min, column.n_max))
print('key type = %s: min = %s, max = %s' %
(column.type, column.rangeMin, column.rangeMax))
if column.value_type is not None:
print('value type = %s: min = %s, max = %s' %
(column.value_type,
column.valueRangeMin,
column.valueRangeMax))
print('Stats attributes: ')
for column_name, column in table.stats.iteritems():
print('Col name = %s: %s' % (column_name,
'plural' if column.is_list else 'singular'))
print('n_min = %d: n_max = %d' % (column.n_min, column.n_max))
print('key type = %s: min = %s, max = %s' %
(column.type, column.rangeMin, column.rangeMax))
if column.value_type is not None:
print('value type = %s: min = %s, max = %s' %
(column.value_type,
column.valueRangeMin,
column.valueRangeMax))
print('Subresources: ')
for column_name, column in table.references.iteritems():
print('Col name = %s: %s, %s, keyname=%s' %
(column_name, column.relation,
'plural' if column.is_plural else 'singular',
column.keyname))
print('\n')
except error.Error, e:
sys.stderr.write('%s: %s\n' % (e.msg, e.json))
sys.exit(1)
# Local variables:
# mode: python
# End:
| 1.960938 | 2 |
py_script/myplot.py | zhz03/software_development | 0 | 12787827 | <gh_stars>0
"""
Small plotting helpers built on matplotlib.
"""
import matplotlib.pyplot as plt
class myplot():
def __init__(self):
pass
def plot2y1x(self,x,y1,y2):
"""
        Plot two data series against the same x values in one figure:
        y1 as a scatter plot and y2 as a line plot.
        :param x: shared x values
        :param y1: values drawn as a scatter plot
        :param y2: values drawn as a line plot
        :return: None (the figure is shown with plt.show())
"""
plt.figure()
plt.scatter(x,y1)
plt.plot(x,y2)
plt.show()
if __name__ == '__main__':
x = [0,1,2,3,4,5]
y1 = [0,1,4,9,16,25]
y2 = [0,2,4,6,8,10]
mp = myplot()
mp.plot2y1x(x,y1,y2) | 3.34375 | 3 |
modules/utils.py | inconvergent/axidraw-xy | 29 | 12787828 | <gh_stars>10-100
# -*- coding: utf-8 -*-
from numpy import array
from numpy import column_stack
from numpy import cos
from numpy import linspace
from numpy import pi
from numpy import reshape
from numpy import row_stack
from numpy import sin
from numpy import logical_or
from numpy.random import random
TWOPI = 2.0*pi
def get_bounding_box(xy):
mi = xy.min(axis=0).squeeze()
ma = xy.max(axis=0).squeeze()
xd = ma[0]-mi[0]
yd = ma[1]-mi[1]
return mi, ma, xd, yd
def print_values(mi, ma, xd, yd):
print(('x: min {:0.08f} max {:0.08f} d {:0.08f}'.format(mi[0], ma[0], xd)))
print(('y: min {:0.08f} max {:0.08f} d {:0.08f}'.format(mi[1], ma[1], yd)))
def do_scale(xy):
_, _, xd, yd = get_bounding_box(xy)
xy /= max(xd, yd)
def fit(vertices):
from modules.ddd import get_mid_2d as get_mid
mid = get_mid(vertices)
vertices -= mid
do_scale(vertices)
mid = get_mid(vertices)
vertices[:, :] += array([[0.5]*2])
mid = get_mid(vertices)
def get_paths_from_n_files(
pattern,
skip=0,
steps=1,
stride=1,
spatial_sort=True,
spatial_concat=False,
spatial_concat_eps=1.e-9
):
from glob import glob
from modules.ioOBJ import load_2d as load
from modules.ddd import spatial_sort_2d as sort
from modules.ddd import spatial_concat_2d as concat
vertices = []
lines = []
vnum = 0
files = sorted(glob(pattern))
for fn in files[skip:steps:stride]:
print(fn)
data = load(fn)
v = data['vertices']
l = data['lines']
vn = len(v)
vertices.append(v)
lines.append(array(l, 'int')+vnum)
vnum += vn
vertices = row_stack(vertices)
print('orig size:')
print_values(*get_bounding_box(vertices))
fit(vertices)
print('scaled size:')
print_values(*get_bounding_box(vertices))
paths = [row_stack(vertices[li, :]) for li in lines]
paths = sort(paths) if spatial_sort else paths
paths = concat(paths, spatial_concat_eps) if spatial_concat else paths
return paths
def get_paths_from_file(
fn,
spatial_sort=True,
spatial_concat=False,
spatial_concat_eps=1.e-9
):
from modules.ioOBJ import load_2d as load
from modules.ddd import spatial_sort_2d as sort
from modules.ddd import spatial_concat_2d as concat
data = load(fn)
vertices = data['vertices']
lines = data['lines']
print('orig size:')
print_values(*get_bounding_box(vertices))
fit(vertices)
print('scaled size:')
print_values(*get_bounding_box(vertices))
paths = [row_stack(vertices[l, :]) for l in lines]
paths = sort(paths) if spatial_sort else paths
paths = concat(paths, spatial_concat_eps) if spatial_concat else paths
return paths
def get_tris_from_file(
fn,
spatial_sort=True,
spatial_concat=False,
spatial_concat_eps=1.0e-9
):
from modules.ioOBJ import load_2d as load
from modules.ddd import get_distinct_edges_from_tris
from modules.ddd import spatial_sort_2d as sort
from modules.ddd import spatial_concat_2d as concat
data = load(fn)
vertices = data['vertices']
print('orig size:')
print_values(*get_bounding_box(vertices))
fit(vertices)
print('scaled size:')
print_values(*get_bounding_box(vertices))
edges = get_distinct_edges_from_tris(data['faces'])
paths = [row_stack(p) for p in vertices[edges, :]]
paths = sort(paths) if spatial_sort else paths
paths = concat(paths, spatial_concat_eps) if spatial_concat else paths
return paths
def get_dots_from_file(
fn,
spatial_sort=True,
):
from modules.ioOBJ import load_2d as load
from modules.ddd import spatial_sort_dots_2d as sort
data = load(fn)
vertices = data['vertices']
print('orig size:')
print_values(*get_bounding_box(vertices))
fit(vertices)
print('scaled size:')
print_values(*get_bounding_box(vertices))
vertices = sort(vertices) if spatial_sort else vertices
print('dots: ', len(vertices))
return vertices
def get_edges_from_file(
fn,
spatial_sort=True,
spatial_concat=False,
spatial_concat_eps=1.0e-9
):
from modules.ioOBJ import load_2d as load
from modules.ddd import spatial_sort_2d as sort
from modules.ddd import spatial_concat_2d as concat
data = load(fn)
vertices = data['vertices']
print('orig size:')
print_values(*get_bounding_box(vertices))
fit(vertices)
print('scaled size:')
print_values(*get_bounding_box(vertices))
edges = data['edges']
paths = [row_stack(p) for p in vertices[edges, :]]
paths = sort(paths) if spatial_sort else paths
paths = concat(paths, spatial_concat_eps) if spatial_concat else paths
print('edges: ', len(paths))
return paths
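

# dots_to_circs expands each input vertex into a closed circle path of radius
# rad (about 1000 segments per unit of circumference, with a random start
# angle and winding direction), discarding circles that would leave the unit
# square.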
def dots_to_circs(verts, rad):
paths = []
n = int(rad*TWOPI*1000)
print('number of rad segments: {:d}'.format(n))
discard = 0
for xy in verts:
theta = random()*TWOPI + linspace(0, TWOPI*1.1, n)
if random() < 0.5:
theta[:] = theta[::-1]
c = reshape(xy, (1, 2)) +\
rad * column_stack([
cos(theta),
sin(theta)])
test = logical_or(c > 1.0, c < 0.0).sum(axis=0).sum()
if test == 0:
paths.append(c)
else:
discard += 1
print('discarded circles: {:d}'.format(discard))
return paths
| 2.046875 | 2 |
kwat/vcf/__init__.py | KwatME/ccal | 5 | 12787829 | from .ANN import ANN
from .COLUMN import COLUMN
from .count_variant import count_variant
from .read import read
from .read_row import read_row
| 0.972656 | 1 |
secretupdater/secretupdater/headerclient.py | matthope/k8s-secret-updater | 5 | 12787830 | # Copyright 2019 Nine Entertainment Co.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from secretupdater import app
import requests
class HeaderClient():
"""This is a dummy ConfidantClient that works with header auth."""
def __init__(self, **_kwargs):
super(HeaderClient, self).__init__()
app.logger.debug("Initialising HeaderClient")
def get_service(self, service):
app.logger.debug("DummyClient.get_service")
url = "{}/v1/services/{}".format(
app.config.get('CONFIDANT_SERVER_URL'), service)
headers = {
"X-CONFIDANT-USERNAME": "secretupdater",
"X-CONFIDANT-EMAIL": "<EMAIL>"
}
r = requests.get(url, headers=headers)
return r.json()
| 1.984375 | 2 |
tinykit/__init__.py | iromli/timo | 0 | 12787831 | <reponame>iromli/timo
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__version__ = "0.2-dev"
from .db import Database # noqa
from .models import Model # noqa
| 1.023438 | 1 |
src/quilla/hookspecs/configuration.py | microsoft/quilla | 55 | 12787832 | <filename>src/quilla/hookspecs/configuration.py
'''
Hooks that are related to configuration, such as logger configs, parser additions,
etc.
'''
from enum import Enum
from logging import Logger
from argparse import (
ArgumentParser,
Namespace
)
from typing import (
Type,
TypeVar,
Optional,
)
from quilla.hookspecs._hookspec import hookspec
from quilla.ctx import Context
from quilla.ui_validation import QuillaTest
T = TypeVar('T', bound=Enum)
@hookspec
def quilla_configure_logger(logger: Logger):
'''
A hook called immediately after the plugin manager is created. This is the very
first hook called, and allows plugins to register additional handlers, formatters,
or otherwise modify the logger used throughout Quilla. Note, the Context is not
yet created
To help in debugging, it is recommended that plugins register their own StreamHandler
to the logger with a filter that shows only the messages relevant to the plugin.
Args:
logger: The configured logger instance for Quilla
'''
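
# A minimal plugin-side sketch of the pattern recommended above (the plugin
# name 'myplugin' and the registration mechanism are assumptions; how the
# function gets picked up depends on the plugin loader):
#
#     import logging
#     import sys
#
#     def quilla_configure_logger(logger: Logger):
#         handler = logging.StreamHandler(sys.stderr)
#         handler.addFilter(logging.Filter(name='myplugin'))
#         handler.setFormatter(
#             logging.Formatter('%(name)s %(levelname)s %(message)s'))
#         logger.addHandler(handler)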
@hookspec
def quilla_addopts(parser: ArgumentParser):
'''
A hook to allow plugins to add additional arguments to the argument parser.
This can be used if a plugin requires additional parameters or data in some way.
This is called after the initial argument parser setup
Args:
parser: The argparse Argument Parser instance used by the application
'''
@hookspec
def quilla_configure(ctx: Context, args: Namespace):
'''
A hook to allow plugins to modify the context object, either changing its data
or adding data to it.
This is called after the initial setup of the context object
Args:
ctx: The runtime context for the application
args: Parsed CLI args, in case they are needed
'''
@hookspec
def quilla_prevalidate(validation: QuillaTest):
'''
A hook called immediately before the validations attempt to be resolved
(i.e. before `validations.validate_all()` is called)
Args:
validation: The collected validations from the json passed to
the application
'''
@hookspec(firstresult=True)
def quilla_resolve_enum_from_name(name: str, enum: Type[T]) -> Optional[T]:
'''
A hook called when a value specified by the quilla test should be resolved to an
enum, but no enum has been found. This is to allow plugins to register custom
enum values for quilla, such as new step actions, validation types, validation states,
output sources, etc.
Args:
name: the string value specified in the quilla test
enum: The enum subclass type that is being attempted to be resolved. This
should give an indication as to what is being resolved. For example,
UITestActions is the enum type being resolved for the 'actions' field.
Returns:
The resolved enum, if it can be resolved. None if the plugin can't resolve
the value.
'''
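# Hedged illustration (added; not part of the original module): a plugin would
# typically implement the hooks above via pluggy's HookimplMarker. The project
# name "quilla" passed to HookimplMarker is an assumption based on pluggy
# conventions, and the attribute set on ctx below is hypothetical.
#
#   import pluggy
#   hookimpl = pluggy.HookimplMarker("quilla")
#
#   @hookimpl
#   def quilla_addopts(parser):
#       parser.add_argument("--my-plugin-flag", action="store_true")
#
#   @hookimpl
#   def quilla_configure(ctx, args):
#       ctx.my_plugin_enabled = getattr(args, "my_plugin_flag", False)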
| 2.609375 | 3 |
tests/test_date_timestamp_conformance.py | uk-gov-mirror/moj-analytical-services.mojap-arrow-pd-parser | 0 | 12787833 | import pytest
import datetime
import pandas as pd
import pyarrow as pa
import numpy as np
from arrow_pd_parser.parse import (
pa_read_csv_to_pandas,
pa_read_json_to_pandas,
)
def pd_datetime_series_to_list(s, series_type, date=False):
fmt = "%Y-%m-%d" if date else "%Y-%m-%d %H:%M:%S"
if series_type == "object":
s_ = s.apply(datetime_object_as_str).to_list()
elif series_type == "datetime64":
s_ = s.dt.strftime(fmt).to_list()
elif series_type == "period":
s_ = s.apply(lambda x: None if pd.isna(x) else x.strftime(fmt))
s_ = s_.to_list()
else:
raise ValueError(f"series_type input {series_type} not expected.")
str_dates = [None if pd.isna(x) else x for x in s_]
return str_dates
def datetime_object_as_str(x):
if pd.isna(x):
return np.nan
else:
return str(x)
@pytest.mark.parametrize(
"in_type,pd_timestamp_type,out_type",
[
("timestamp[s]", "datetime_object", "object"),
("timestamp[s]", "pd_timestamp", "datetime64[ns]"),
("timestamp[s]", "pd_period", "period[S]"),
("timestamp[ms]", "datetime_object", "object"),
("timestamp[ms]", "pd_timestamp", "datetime64[ns]"),
("timestamp[ms]", "pd_period", "period[L]"),
("timestamp[us]", "datetime_object", "object"),
("timestamp[us]", "pd_timestamp", "datetime64[ns]"),
("timestamp[us]", "pd_period", "period[U]"),
("timestamp[ns]", "datetime_object", "datetime64[ns]"),
("timestamp[ns]", "pd_timestamp", "datetime64[ns]"),
("timestamp[ns]", "pd_period", "period[N]"),
],
)
def test_datetime(in_type, pd_timestamp_type, out_type):
test_data_path = "tests/data/datetime_type.csv"
test_str_dates = pd.read_csv(test_data_path, dtype=str)["my_datetime"]
test_str_dates = [None if pd.isna(s) else s for s in test_str_dates]
type_dict = {
"timestamp[s]": pa.timestamp("s"),
"timestamp[ms]": pa.timestamp("ms"),
"timestamp[us]": pa.timestamp("us"),
"timestamp[ns]": pa.timestamp("ns"),
}
schema = pa.schema([("my_datetime", type_dict[in_type])])
# datetime_object
df = pa_read_csv_to_pandas(
test_data_path,
schema=schema,
expect_full_schema=False,
pd_timestamp_type=pd_timestamp_type,
)
assert str(df.my_datetime.dtype) == out_type
if out_type == "object":
assert isinstance(df.my_datetime[0], datetime.datetime)
actual_str_dates = pd_datetime_series_to_list(
df.my_datetime, out_type.split("[")[0], date=False
)
assert test_str_dates == actual_str_dates
@pytest.mark.parametrize(
"in_type,pd_date_type,out_type",
[
("date32", "datetime_object", "object"),
("date32", "pd_timestamp", "datetime64[ns]"),
("date32", "pd_period", "object"),
("date64", "datetime_object", "object"),
("date64", "pd_timestamp", "datetime64[ns]"),
("date64", "pd_period", "period[L]"),
],
)
def test_date(in_type, pd_date_type, out_type):
test_data_path = "tests/data/date_type.csv"
test_str_dates = pd.read_csv(test_data_path, dtype=str)["my_date"]
test_str_dates = [None if pd.isna(s) else s for s in test_str_dates]
schema = pa.schema([("my_date", getattr(pa, in_type)())])
# datetime_object
if in_type == "date32" and pd_date_type == "pd_period":
with pytest.warns(UserWarning):
df = pa_read_csv_to_pandas(
test_data_path,
schema,
expect_full_schema=False,
pd_date_type=pd_date_type,
)
else:
df = pa_read_csv_to_pandas(
test_data_path, schema, expect_full_schema=False, pd_date_type=pd_date_type
)
assert str(df.my_date.dtype) == out_type
if out_type == "object":
assert isinstance(df.my_date[0], datetime.date)
actual_str_dates = pd_datetime_series_to_list(
df.my_date, out_type.split("[")[0], date=True
)
assert test_str_dates == actual_str_dates
@pytest.mark.skip(
reason=(
"This currently fails (see issue #43), but adding in "
"test boilerplate for future fix."
)
)
def test_timestamps_as_strs():
test_data_path = "tests/data/datetime_type.csv"
test_str_dates = pd.read_csv(test_data_path, dtype="string")["my_datetime"]
schema = pa.schema([("my_datetime", pa.string())])
df = pa_read_csv_to_pandas(test_data_path, schema, expect_full_schema=False)
assert df["my_datetime"].to_list() == test_str_dates.to_list()
df = pa_read_json_to_pandas(
test_data_path.replace(".csv", ".jsonl"), schema, expect_full_schema=False
)
assert df["my_datetime"].to_list() == test_str_dates.to_list()
| 2.796875 | 3 |
Module 1 - Functional programming/3. Loops/solutions/5. factorial.py | codific/Python-course-BFU | 1 | 12787834 | number = int(input('Enter a number: '))
# Classical approach using for loop
fac = 1
for n in range(1, number + 1):
fac *= n
# Using the math module
# from math import factorial
# fac = factorial(number)
print(f'{number}! = {fac}') | 4.09375 | 4 |
ruml/utils.py | irumata/ruml | 3 | 12787835 | import sklearn
from sklearn.linear_model import LinearRegression
import catboost
import pandas as pd
import copy
import lightgbm as lgb
import xgboost as xgb
from sklearn.model_selection import train_test_split, KFold, cross_val_score, StratifiedKFold, GridSearchCV
from sklearn.metrics import mean_absolute_error, r2_score
import inspect
import numpy as np
import skopt
import datetime
import os
from sklearn.externals import joblib
jl_compress = 3
def hello_world():
print("HW!")
# Bayes Search EXample
# opt = skopt.BayesSearchCV(lgb.LGBMRegressor(n_estimators=3000, observation="mae"),
# search_spaces= ruml.utils.SKOPT_BO["lgb"], verbose=True,
# n_iter=3000,n_jobs=5,cv=folds, scoring="neg_mean_absolute_error",
# fit_params ={"early_stopping_rounds":200,"eval_set":[(X_early_stop,y_early_stop)]} )
# opt.fit(X=X_train,y=y_train, callback=[ruml.utils.Print_Callback(), skopt.callbacks.DeadlineStopper(total_time=36000)])
DEFAULT_VALUES = {
"lgb": {
"regr": lgb.LGBMRegressor,
"model_params": {
"n_estimators":2000
},
"fit_params":
{
"eval_metric":"mae",
"early_stopping_rounds":150,
"verbose":False
}
},
"metrics":{
"mae": mean_absolute_error,
"r2": r2_score
}
}
def conv_pred(preds):
if (isinstance(preds,np.ndarray) and isinstance(preds[0],np.ndarray)) or (
isinstance(preds,pd.Series) and isinstance(preds.iloc[0],np.ndarray)):
preds = preds[:,0]
return preds
def add_def_params(model_name, model_params, fit_params, def_param = DEFAULT_VALUES):
if model_name in DEFAULT_VALUES.keys():
if "model_params" in DEFAULT_VALUES[model_name]:
new_p = copy.deepcopy(DEFAULT_VALUES[model_name]["model_params"])
new_p.update(model_params)
model_params = new_p
if "fit_params" in DEFAULT_VALUES[model_name]:
new_p = copy.deepcopy(DEFAULT_VALUES[model_name]["fit_params"])
new_p.update(fit_params)
fit_params = new_p
return model_params, fit_params
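# Worked illustration (added note): add_def_params("lgb", {"learning_rate": 0.05}, {})
# returns model params {"n_estimators": 2000, "learning_rate": 0.05} plus the default
# fit params from DEFAULT_VALUES ("eval_metric": "mae", early stopping, verbose=False).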
# `model` can be:
#   - a string key into DEFAULT_VALUES (e.g. "lgb"),
#   - an estimator instance, whose own parameters are merged with model_params, or
#   - an estimator class, in which case only model_params are used.
def cv(model=LinearRegression, X = pd.DataFrame([]), y = pd.Series([]), folds = 5,
model_params = {},
fit_params = {},
task = "regr",
metrics=["mae"]):
model_name = None
if isinstance(model,str):
model_name = model
model_params, fit_params = add_def_params(model, model_params,fit_params)
model = DEFAULT_VALUES[model_name][task]
if not isinstance(model, type):
model_params.update(model.get_params())
model = type(model)
predictions_cv = pd.Series([0]*len(X), index = X.index)
predictions_cv_best = pd.Series([0]*len(X), index = X.index)
scores = list()
scores_best = list()
models = list()
best_iterations = list()
if folds == 0:
model_instance = model(**model_params)
if "early_stopping_rounds" in fit_params.keys():
fit_params = {k:v for k,v in fit_params.items() if k != "early_stopping_rounds"}
model_instance = model_instance.fit( X, y,
**fit_params)
return {"models":[model_instance], "scores":[], "predictions_cv":None, "score_cv":None, "best_iterations": None}
if isinstance(folds,int):
folds = KFold(n_splits=folds, shuffle=True, random_state=42)
for fold_n, (train_index, valid_index) in enumerate(folds.split(X, y)):
X_train, X_valid = X.loc[train_index], X.loc[valid_index]
y_train, y_valid = y.loc[train_index], y.loc[valid_index]
model_instance = model(**model_params)
if "eval_set" in inspect.getfullargspec( model_instance.fit).args:
fit_params['eval_set'] = [(X_valid,y_valid)]
model_instance.fit( X_train, y_train,
**fit_params)
train_predict = conv_pred(model_instance.predict(X_train))
train_score = list()
for metric in metrics:
if isinstance(metric,str):
metric = DEFAULT_VALUES["metrics"][metric]
train_score.append(metric(y_train,train_predict))
valid_predict = conv_pred(model_instance.predict(X_valid))
predictions_cv[valid_index] = pd.Series(valid_predict)
score = list()
for metric in metrics:
if isinstance(metric,str):
metric = DEFAULT_VALUES["metrics"][metric]
score.append(metric(y_valid,valid_predict))
scores.append(score)
models.append(model_instance)
if hasattr(model_instance, "best_iteration_" ):
best_iterations.append(model_instance.best_iteration_)
print("Fold ", fold_n, "score ", scores[-1], "train_score", train_score)
if hasattr(model_instance,"predict_best"):
valid_predict_best = model_instance.predict_best(X_valid, y_valid)
valid_predict_best = conv_pred(valid_predict_best)
predictions_cv_best[valid_index] = pd.Series(valid_predict_best)
score_best = list()
for metric in metrics:
if isinstance(metric,str):
metric = DEFAULT_VALUES["metrics"][metric]
score_best.append(metric(y_valid,valid_predict_best))
scores_best.append(score_best)
print("score best ", scores_best[-1], "\n")
if hasattr(model_instance,"get_cluster_models"):
clust_models = model_instance.get_cluster_models()
for i, model_cluster in clust_models.items():
valid_predict_model = conv_pred(model_cluster.predict(X_valid))
score_model = list()
for metric in metrics:
if isinstance(metric,str):
metric = DEFAULT_VALUES["metrics"][metric]
score_model.append(metric(y_valid,valid_predict_model))
print("score best for model ", i, " ", score_model, "\n")
print("#"*30)
score = list()
for metric in metrics:
if isinstance(metric,str):
metric = DEFAULT_VALUES["metrics"][metric]
score.append(metric(y,predictions_cv))
print("Final scores: ", score)
score_best = list()
if len(scores_best)>0:
for metric in metrics:
if isinstance(metric,str):
metric = DEFAULT_VALUES["metrics"][metric]
score_best.append(metric(y,predictions_cv_best))
print("Final scores best: ", score_best)
return {"models":models, "scores":scores, "predictions_cv":predictions_cv, "score_cv":score,"score_cv_best":score_best, "best_iterations": best_iterations,
"scores_best":scores_best, "model":model, "model_params":model_params, "fit_params":fit_params}
def blend_models(models,X):
res = pd.Series([0]*len(X), index = X.index)
for m in models:
preds = m.predict(X)
preds = conv_pred(preds)
res+=preds
res/=len(models)
return res
#test
#ruml.utils.cv(X = pd.DataFrame({1:[i for i in range(10)],2:[2*i for i in range(10)]}),
# y = pd.Series(i*i for i in range(10)),
# )
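# Additional hedged sketch (added, not from the original source): the "lgb" shortcut
# pulls in LGBMRegressor plus the defaults defined in DEFAULT_VALUES above.
# X_train, y_train and X_test below are placeholders.
#   res = cv(model="lgb", X=X_train, y=y_train, folds=5, metrics=["mae", "r2"])
#   test_preds = blend_models(res["models"], X_test)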
lgbm_bo = {
'num_leaves': (6, 1024),
# 'max_depth': (4, 20),
'learning_rate': (0.00001, 0.1),
'bagging_fraction': (0.1, 1.0),
'feature_fraction': (0.1, 1.0),
'min_data_in_leaf': (6, 200),
'bagging_freq': (0, 10),
'reg_alpha': (0,100),
'reg_lambda': (0,100),
}
# space.Integer(6, 30, name='num_leaves'),
# space.Integer(50, 200, name='min_child_samples'),
# space.Real(1, 400, name='scale_pos_weight'),
# space.Real(0.6, 0.9, name='subsample'),
# space.Real(0.6, 0.9, name='colsample_bytree')
#objectives
# regression_l2, L2 loss, aliases: regression, mean_squared_error, mse, l2_root, root_mean_squared_error, rmse
# regression_l1, L1 loss, aliases: mean_absolute_error, mae
# huber, Huber loss
# fair, Fair loss
# poisson, Poisson regression
# quantile, Quantile regression
# mape, MAPE loss, aliases: mean_absolute_percentage_error
# gamma, Gamma regression with log-link. It might be useful, e.g., for modeling insurance claims severity, or for any target that might be gamma-distributed
# tweedie
SKOPT_BO = {
"lgb" : {
'num_leaves': skopt.space.Integer(6, 512),
'min_child_samples': skopt.space.Integer(10, 200), #min_data_in_leaf
'scale_pos_weight': skopt.space.Integer(1,400),
'subsample':skopt.space.Real(0.1,1.0), #bagging_fraction
'colsample_bytree':skopt.space.Real(0.1,1.0), #feature_fraction
'reg_alpha': skopt.space.Integer(0,100),
'reg_lambda': skopt.space.Integer(0,100),
'learning_rate': skopt.space.Real(0.00001, 0.1)
}
}
lcb_bo = {
'num_leaves': (15, 1024),
'l2_leaf_reg': [2, 18],
# 'max_depth': (4, 20),
'learning_rate': (0.005, 0.1),
'bagging_fraction': (0.1, 1.0),
'feature_fraction': (0.1, 1.0),
'min_data_in_leaf': (6, 200),
'bagging_freq': (0, 10),
'reg_alpha': (0,100),
'reg_lambda': (0,100),
}
BO_SPACES = {
sklearn.linear_model.Ridge.__name__: {
'alpha': (0.001, 1000),
},
lgb.LGBMRegressor.__name__: lgbm_bo,
lgb.LGBMClassifier.__name__: lgbm_bo,
catboost.CatBoostRegressor.__name__: {
'max_depth': [4, 12],
'learning_rate': [0.001],
}
}
def subm_res(res_dic, x_text , comm = "comment",
competition = "LANL-Earthquake-Prediction"):
res_dic['prediction'] = blend_models(res_dic["models"],x_text)
sub = pd.read_csv('../input/sample_submission.csv', index_col='seg_id')
sub['time_to_failure'] = res_dic['prediction']
filename = 'submission_'+str(res_dic["score_cv"][0])+'.csv'
sub.to_csv(filename)
command = 'kaggle competitions submit '+ competition + ' -f '+filename+' -m\"' + comm + '\"'
print(sub.head())
print('\n\n')
print(command, '\n\n')
pickle_filename = res_dic["model"].__name__[:20]+"_"+str(res_dic["score_cv"][0])+".model"+".jbl"
joblib.dump(res_dic,filename=pickle_filename,compress=jl_compress)
return res_dic['prediction']
def list_models(dir="."):
f_list = os.listdir(dir)
res = [f for f in f_list if ".model" in f]
return res
def stack_models(file_list, X, X_test):
for f in file_list:
model_dict = joblib.load(f)
X[f] = model_dict["predictions_cv"]
X_test[f] = ['prediction']
return X, X_test
class Print_Callback:
def __init__(self):
pass
# self.best_index = -1
def __call__(self, x):
if min(x.func_vals) == x.func_vals[-1]:
print(datetime.datetime.now().time().strftime(format="%HH:%MM:%SS"), "new best ", x.func_vals[-1], " iter ", len(x.func_vals))
BO_RUN = {
"lgbr": {
"model":
{
"estimator" : lgb.LGBMRegressor(n_estimators=2000, observation="mae"),
"search_spaces": SKOPT_BO["lgb"],
"verbose": True,
"n_iter":3000,
"n_jobs":5,
"cv":KFold(5, shuffle=True, random_state=42),
"scoring":"neg_mean_absolute_error",
},
"fit_params" :{"early_stopping_rounds":200}
}
}
def bo(X, y, estimator = "lgbr",
search_spaces= {},
verbose=True,
n_iter=3000,
n_jobs=5,
cv=KFold(5, shuffle=True, random_state=42),
scoring="neg_mean_absolute_error",
fit_params ={},
callbacks = [Print_Callback()],
max_time = 7200,
eval_set_ratio = 0.15
):
if eval_set_ratio is not None and eval_set_ratio>0:
X_train, X_early_stop, y_train, y_early_stop = train_test_split(X, y, test_size=eval_set_ratio, random_state=42)
fit_params["eval_set"] = [(X_early_stop,y_early_stop)]
else:
        X_train = X
y_train = y
if max_time is not None and max_time>0:
callbacks.append(skopt.callbacks.DeadlineStopper(total_time=max_time))
if isinstance(estimator, str):
fit_params.update(BO_RUN[estimator]["fit_params"])
params = BO_RUN[estimator]["model"]
if search_spaces is not None and len(search_spaces)>0: params["search_spaces"] = search_spaces
if n_iter is not None: params["n_iter"] = n_iter
if n_jobs is not None: params["n_jobs"] = n_jobs
if verbose is not None: params["verbose"] = verbose
if cv is not None: params["cv"] = cv
if scoring is not None: params["scoring"] = scoring
opt = skopt.BayesSearchCV(fit_params=fit_params,**params)
else:
opt = skopt.BayesSearchCV(estimator,
search_spaces= search_spaces,
n_iter=n_iter,n_jobs=n_jobs,cv=cv, scoring=scoring,
fit_params =fit_params )
opt.fit(X=X_train,y=y_train, callback=callbacks)
print(opt.best_iteration_)
print(opt.best_score_, opt.best_params_)
print("Byes opt res "+ str(opt.best_score_) + " " + str(opt.best_params_), file=open("output.txt", "a"))
return opt | 2.234375 | 2 |
configs/network_configs.py | t-zhong/WaPIRL | 7 | 12787836 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Configurations for CNN architectures.
"""
ALEXNET_BACKBONE_CONFIGS = ALEXNET_ENCODER_CONFIGS = dict(
batch_norm='bn',
local_response_norm='lrn',
)
VGGNET_BACKBONE_CONFIGS = VGGNET_ENCODER_CONFIGS = {
'16': {
'channels': [[64, 64, 'M'], [128, 128, 'M'], [256, 256, 256, 'M'], [512, 512, 512, 'M'], [512, 512, 512, 'M']],
'batch_norm': False,
},
'16.bn': {
'channels': [[64, 64, 'M'], [128, 128, 'M'], [256, 256, 256, 'M'], [512, 512, 512, 'M'], [512, 512, 512, 'M']],
'batch_norm': True,
}
}
RESNET_BACKBONE_CONFIGS = RESNET_ENCODER_CONFIGS = {
'18': {
'block_type': 'basic',
'channels': [64] * 2 + [128] * 2 + [256] * 2 + [512] * 2,
'strides': [1, 1] + [2, 1] + [2, 1] + [2, 1]
},
'50': {
'block_type': 'bottleneck',
'channels': [64] * 3 + [128] * 4 + [256] * 6 + [512] * 3,
'strides': [1, 1, 1] + [2, 1, 1, 1] + [2, 1, 1, 1, 1, 1] + [2, 1, 1]
},
}
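# Example lookup (illustrative, added; not part of the original configs):
# cfg = RESNET_BACKBONE_CONFIGS['50']
# assert len(cfg['channels']) == len(cfg['strides']) == 16  # 3 + 4 + 6 + 3 bottleneck blocks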
| 1.507813 | 2 |
blog-server/tests/repositories/test_post_repository.py | rob-blackbourn/blog-engine | 1 | 12787837 | import asyncio
import pytest
from motor.motor_asyncio import AsyncIOMotorClient
from blog.repositories import PostRepository
@pytest.mark.asyncio
async def test_create_blog(db):
post_repository = PostRepository()
collection = db['posts']
result = await collection.insert_one({'name': 'Rob'})
assert result.inserted_id is not None
| 2.078125 | 2 |
crawler/factory.py | bmwant/chemister | 0 | 12787838 | <gh_stars>0
"""
Create `Grabber` instances for the list of resources we need to grab
information from.
"""
import importlib
import yaml
import settings
from utils import get_logger
from crawler.models.resource import Resource
from crawler.scheduled_task import ScheduledTask
from crawler.proxy import Proxy
from crawler.cache import Cache
from crawler.db import get_engine
# what?
from trader.shift_trader import ShiftTrader_v0
class Factory(object):
def __init__(self, resources=None):
self.resources = resources or []
self.cache = None
self.logger = get_logger(self.__class__.__name__.lower())
def load_resources(self):
self.logger.debug('Loading resources..')
with open(settings.RESOURCES_FILEPATH) as f:
resources = yaml.load(f.read())
self.resources = [Resource(**r) for r in resources]
return self.resources
async def init_cache(self):
"""
        A single shared cache instance is used here, though it could also be
        created per grabber in the same way as the database engine.
"""
self.logger.debug('Initializing cache...')
self.cache = Cache()
await self.cache._create_pool()
async def init(self):
self.load_resources()
await self.init_cache()
async def cleanup(self):
self.logger.debug('Closing factory resources...')
await self.cache.close()
def _load_cls_from_module(self, subpackage, module_name):
"""
Load class from module name which follows our naming conventions.
"""
full_module_name = f'{__package__}.{subpackage}.{module_name}'
try:
module = importlib.import_module(full_module_name)
except ModuleNotFoundError:
raise ValueError(
f'No such {subpackage}: {full_module_name}. '
f'Check resources file syntax.'
)
class_name = f'{module_name}_{subpackage}'.title().replace('_', '')
cls_obj = getattr(module, class_name, None)
if cls_obj is None:
raise ValueError(
f'No such class {class_name} '
f'within module {full_module_name}.'
)
return cls_obj
def get_parser(self, parser_name):
parser_cls = self._load_cls_from_module('parser', parser_name)
return parser_cls()
def get_fetcher(self, resource):
fetcher_cfg = resource.fetcher
proxy_cfg = resource.proxy
fetcher_name = fetcher_cfg.instance
driver_name = fetcher_cfg.driver
proxy = None
if proxy_cfg.use:
proxy = Proxy(ip=resource.proxy.ip, port=resource.proxy.port)
driver_cls = None
if driver_name:
driver_cls = self._load_cls_from_module('driver', driver_name)
fetcher_cls = self._load_cls_from_module('fetcher', fetcher_name)
return fetcher_cls(
base_url=None,
proxy=proxy,
driver_cls=driver_cls,
)
def get_grabber(self, resource, *, fetcher, parser, cache, engine):
grabber_name = resource.grabber
grabber_cls = self._load_cls_from_module('grabber', grabber_name)
return grabber_cls(
resource=resource,
fetcher=fetcher,
parser=parser,
cache=cache,
engine=engine,
)
async def create_grabbers(self):
grabbers = []
# Each grabber is responsible for closing resources within itself
for res in self.resources:
fetcher = self.get_fetcher(res)
parser = self.get_parser(res.parser)
engine = await get_engine()
grabber = self.get_grabber(
resource=res,
fetcher=fetcher,
parser=parser,
cache=self.cache,
engine=engine,
)
grabbers.append(grabber)
return grabbers
async def create_traders(self):
# Create multiple traders for different algorithms
# todo: reuse cache from here
trader = ShiftTrader_v0(
starting_amount=10000,
)
await trader.init()
return [
ScheduledTask(
task=trader.daily,
scheduled_time='08:00',
)
]
async def create_daily(self):
return []
| 2.515625 | 3 |
odk_aggregation_tool/aggregation/readers.py | lindsay-stevens/odk_aggregation_tool | 0 | 12787839 | import os
import xlrd
from xlrd import XLRDError
from xlrd.book import Book
from xlrd.sheet import Sheet
from collections import OrderedDict
from typing import Iterable, List, Dict, Tuple
import logging
import traceback
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def read_xml_files(root_dir: str) -> Iterable[Tuple[str, str]]:
"""Read instance XML files found recursively in root_dir."""
for entry in os.scandir(path=root_dir):
if entry.is_dir():
yield from read_xml_files(root_dir=entry.path)
elif entry.name.endswith(".xml"):
with open(entry.path, mode='r', encoding="UTF-8") as f:
xml_file = f.read()
yield xml_file, entry.path
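# Illustrative call (added; the directory path is a placeholder):
#   for xml_text, path in read_xml_files("/data/odk/instances"):
#       print(path, len(xml_text))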
def read_xlsform_definitions(root_dir: str) -> Iterable[OrderedDict]:
"""Read XLSX files found recursively in root_dir"""
error_text = "Encountered an error while trying to read the XLSX file " \
"at the following path, and did not read from it: {0}.\n" \
"Error message was: {1}\n"
for entry in os.scandir(path=root_dir):
if entry.is_dir():
yield from read_xlsform_definitions(root_dir=entry.path)
elif entry.name.endswith(".xlsx"):
try:
workbook = xlrd.open_workbook(filename=entry.path)
form_def = read_xlsform_data(workbook=workbook)
except XLRDError as xle:
logger.info(error_text.format(entry.path, "{0}\n\n{1}".format(
str(xle), ''.join(traceback.format_exc()))))
continue
except ValueError as ve:
logger.info(error_text.format(entry.path, "{0}\n\n{1}".format(
str(ve), ''.join(traceback.format_exc()))))
continue
else:
yield form_def
def read_xlsform_data(workbook: Book) -> OrderedDict:
"""Return XLSForm definition data read from an XLRD Workbook."""
sheets = {x.name for x in workbook.sheets()}
required = {"survey", "choices", "settings"}
if not required.issubset(sheets):
raise ValueError(
"The required sheets for an XLSForm definition ({0}) were not "
"found in the workbook sheets ({1}).".format(required, sheets))
survey = xlrd_sheet_to_list_of_dict(
workbook.sheet_by_name(sheet_name='survey'))
choices = xlrd_sheet_to_list_of_dict(
workbook.sheet_by_name(sheet_name='choices'))
settings = xlrd_sheet_to_list_of_dict(
workbook.sheet_by_name(sheet_name='settings'))
form_def = OrderedDict()
form_def['@settings'] = settings[0]
for item in survey:
if item['type'].startswith('select'):
select_type, choice_name = item['type'].split(' ')
choice_list = [x for x in choices
if x['list_name'] == choice_name]
item['choices'] = choice_list
form_def[item['name']] = item
return form_def
def xlrd_sheet_to_list_of_dict(sheet: Sheet) -> List[Dict]:
"""Convert an xlrd sheet into a list of dicts."""
keys = [sheet.cell(0, col_index).value for col_index in range(sheet.ncols)]
dict_list = []
for row_index in range(1, sheet.nrows):
d = {keys[col_index]: sheet.cell(row_index, col_index).value
for col_index in range(sheet.ncols)}
dict_list.append(d)
return dict_list
def flatten_dict_leaf_nodes(dict_in: OrderedDict,
dict_out: OrderedDict = None) -> OrderedDict:
"""Flatten nested leaves of and/or a list of OrderedDict into one level."""
if dict_out is None:
dict_out = OrderedDict()
for k, v in dict_in.items():
if isinstance(v, OrderedDict):
if "#text" in v.keys():
dict_out[k] = v["#text"]
else:
flatten_dict_leaf_nodes(v, dict_out)
elif isinstance(v, list):
for i in v:
flatten_dict_leaf_nodes(i, dict_out)
else:
dict_out[k] = v
return dict_out
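# Behaviour sketch (added for illustration, not from the original file):
#   flatten_dict_leaf_nodes(OrderedDict([("a", OrderedDict([("#text", "1")])), ("b", "2")]))
#   returns OrderedDict([("a", "1"), ("b", "2")])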
| 2.640625 | 3 |
src/clikit/api/resolver/command_resolver.py | finswimmer/clikit | 1 | 12787840 | from clikit.api.args import RawArgs
from .resolved_command import ResolvedCommand
class CommandResolver(object):
"""
Returns the command to execute for the given console arguments.
"""
def resolve(
self, args, application
): # type: (RawArgs, Application) -> ResolvedCommand
raise NotImplementedError()
| 2.265625 | 2 |
decorators/has_state_decorator/_add_state_methods/_add_from_pointer.py | ozgen92/classier | 0 | 12787841 | from classier.decorators.has_state_decorator.options import ATTRIBUTE_OPTIONS
from classier.decorators.has_state_decorator.options import METHOD_OPTIONS
from classier.objects import ClassMarker
from classier.decorators import _MARK_ATTRIBUTE_NAME
from classier.decorators.has_state_decorator import _MARK_TYPE_NAME
import classier.utils as utils
import json
def _get_from_pointer(options):
state_transformer = METHOD_OPTIONS.METHOD_STATE_TRANSFORMER.get_option(options)
pointer_exists = METHOD_OPTIONS.METHOD_POINTER_EXISTS.get_option(options) # TODO: remove?
saver = METHOD_OPTIONS.METHOD_SAVER.get_option(options)
state_attribute_name = ATTRIBUTE_OPTIONS.ATTRIBUTE_NAME_STATE.get_option(options)
index = METHOD_OPTIONS.METHOD_INDEX.get_option(options)
index_path = METHOD_OPTIONS.PATH_INDEX.get_option(options)
from_pointer_default = METHOD_OPTIONS.METHOD_POINTER_DEFAULT.get_option(options)
def from_pointer(self, pointer, default=None):
if isinstance(pointer, type(self)):
setattr(self, state_attribute_name, getattr(pointer, state_attribute_name))
return pointer
setattr(self, state_attribute_name, None)
default = utils.convenience.set_default(default, from_pointer_default)
index_information = None
if index is not None:
index_information = index(pointer, type(self), index_path)
state = None
if isinstance(pointer, dict):
state = pointer
# TODO: add debug logs here
if state is None and isinstance(pointer, str):
# pointer could be json.dumps
state = utils.convenience.optional(lambda: json.loads(pointer))
if state is None and isinstance(pointer, str) and index_information is not None:
# pointer could be something saver knows
state = utils.convenience.call(lambda: saver.get(pointer, index_information))
if state is None and default is not None:
state = default(pointer)
if state is None:
raise ValueError(f"Could not initialize from {pointer} of type {type(pointer)}")
if state_transformer is not None:
state = state_transformer(state)
setattr(self, state_attribute_name, state)
return self
return from_pointer
def _add_from_pointer(some_class, options):
method_name_from_pointer = METHOD_OPTIONS.METHOD_NAME_FROM_POINTER.get_option(options)
if not ClassMarker.does_mark_exist(some_class, _MARK_ATTRIBUTE_NAME, _MARK_TYPE_NAME, method_name_from_pointer):
ClassMarker.add_mark_to_class(some_class, _MARK_ATTRIBUTE_NAME, _MARK_TYPE_NAME, method_name_from_pointer)
some_class = utils.convenience.add_mixin(some_class, _get_from_pointer(options), method_name_from_pointer)
return some_class
| 2.203125 | 2 |
gamelib/scenes/map.py | CTPUG/suspended_sentence | 2 | 12787842 | """Neurally implanted schematic for moving around on the ship.
It is illegal for prisoners in transit to activate such an
implant. Failure to comply carries a minimum sentence of
six months.
Many parts of the ship are derelict and inaccessible.
"""
from pyntnclick.i18n import _
from pyntnclick.state import Scene, Thing, Result
from pyntnclick.scenewidgets import (
InteractRectUnion, InteractUnion, InteractText, InteractNoImage)
from gamelib.scenes.game_constants import PLAYER_ID
from gamelib.scenes.game_widgets import make_jim_dialog, make_sentence_dialog
class Map(Scene):
FOLDER = "map"
BACKGROUND = 'map.png'
INITIAL_DATA = {
'implant': True,
}
def setup(self):
self.add_thing(ToCryo())
self.add_thing(ToBridge())
self.add_thing(ToMess())
self.add_thing(ToEngine())
self.add_thing(ToMachine())
self.add_thing(ToCrew())
self.add_thing(InaccessibleArea())
self.add_thing(HydroponicsArea())
def enter(self):
if self.get_data('implant'):
self.set_data('implant', False)
ai1 = make_jim_dialog(
_("Under the terms of the emergency conscription "
"act, I have downloaded the ship's schematics to your "
"neural implant to help you navigate around the ship."),
self.game)
if ai1:
self.state.increase_sentence(3)
return ai1, make_jim_dialog(
_("Prisoner %s, you are a "
"class 1 felon. Obtaining access to the ship's "
"schematics constitutes a level 2 offence and carries a "
"minimal penalty of an additional 3 years on your "
"sentence.") % PLAYER_ID,
self.game), make_sentence_dialog(PLAYER_ID, self.game)
class DoorThing(Thing):
# name of destination
DEST = None
def interact(self, _item):
"""Go to destination."""
if self.DEST in self.game.scenes:
self.game.change_scene(self.DEST)
class ToCryo(DoorThing):
"Way to cryo room."
NAME = "map.tocryo"
DEST = "cryo"
INTERACTS = {
'door': InteractUnion((
InteractNoImage(515, 158, 56, 68),
InteractText(
361, 512, 245, 33, _("Prisoner cryo chambers"),
'white', 20, 'Monospace.ttf'),
))
}
INITIAL = 'door'
class ToBridge(DoorThing):
"Way to bridge room."
NAME = "map.tobridge"
DEST = "bridge"
INTERACTS = {
'door': InteractUnion((
InteractNoImage(36, 260, 60, 83),
InteractText(
26, 170, 71, 33, _("Bridge"), 'white', 20,
'Monospace.ttf'),
))
}
INITIAL = 'door'
class ToMess(DoorThing):
"Way to cryo room."
NAME = "map.tomess"
DEST = "mess"
INTERACTS = {
'door': InteractUnion((
InteractNoImage(395, 262, 64, 80),
InteractText(
341, 430, 110, 33, _("Mess hall"), 'white', 20,
'Monospace.ttf'),
))
}
INITIAL = 'door'
class ToEngine(DoorThing):
"Way to engine room."
NAME = "map.toengine"
DEST = "engine"
INTERACTS = {
'door': InteractUnion((
InteractNoImage(691, 279, 76, 54),
InteractText(
662, 496, 128, 33, _("Engine room"), 'white', 20,
'Monospace.ttf'),
))
}
INITIAL = 'door'
def interact(self, item):
if not self.game.is_in_inventory('helmet:'):
return Result(
_('The airlock refuses to open. The automated'
' voice says: "Hull breach beyond this door. Personnel'
' must be equipped for vacuum before entry."'))
else:
return super(ToEngine, self).interact(item)
class ToMachine(DoorThing):
"Way to machine room."
NAME = "map.tomachine"
DEST = "machine"
INTERACTS = {
'door': InteractUnion((
InteractNoImage(608, 156, 57, 72),
InteractText(
578, 83, 140, 33, _("Machine room"), 'white', 20,
'Monospace.ttf'),
))
}
INITIAL = 'door'
class ToCrew(DoorThing):
"Way to crew quarters."
NAME = "map.tocrew_quarters"
DEST = "crew_quarters"
INTERACTS = {
'door': InteractUnion((
InteractNoImage(210, 321, 37, 64),
InteractText(
69, 460, 160, 33, _("Crew quarters"), 'white', 20,
'Monospace.ttf'),
))
}
INITIAL = 'door'
class InaccessibleArea(Thing):
NAME = 'map.inaccessible'
INTERACTS = {
'areas': InteractRectUnion((
(207, 227, 39, 63),
(256, 225, 35, 64),
(259, 322, 34, 64),
(514, 380, 58, 66),
(607, 377, 60, 70),
))
}
INITIAL = 'areas'
def interact(self, _item):
return Result(_("You look in the door, but just see empty space: "
"that room appears to have been obliterated by "
"meteors."))
class HydroponicsArea(Thing):
NAME = 'map.hydroponics'
INTERACTS = {
'areas': InteractUnion((
InteractNoImage(314, 263, 73, 81),
InteractText(
313, 132, 140, 33, _("Hydroponics"), 'white', 20,
'Monospace.ttf'),
))
}
INITIAL = 'areas'
def interact(self, _item):
return Result(_("Peering in through the window, you see that the "
"entire chamber is overgrown with giant broccoli. "
"It would take you years to cut a path through that."))
SCENES = [Map]
| 3.140625 | 3 |
tests/test_ayab_image.py | shiluka/knitlib | 0 | 12787843 | # -*- coding: utf-8 -*-
# This file is part of Knitlib. It is based on AYAB.
#
# Knitlib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Knitlib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Knitlib. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2015 <NAME>, <NAME>
import pytest
import unittest
import os
from knitlib.plugins.ayab_plugin.ayab_image import ayabImage
from PIL import Image
class TestImage(unittest.TestCase):
def setUp(self):
self.script_dir = os.path.dirname(os.path.abspath(__file__))
self.filename_text = u"mushroom.png"
self.conf = {}
self.conf["filename"] = self.filename_text
self.pil_image = Image.open(os.path.join(self.script_dir, self.conf["filename"]))
self.ayab_image = ayabImage(self.pil_image, 2)
def test_knitStartNeedle(self):
assert self.ayab_image.knitStartNeedle() == 0
def test_knitStopNeedle(self):
assert self.ayab_image.knitStopNeedle() == 199
def test_imgPosition(self):
assert self.ayab_image.imgPosition() == 'center'
def test_startLine(self):
assert self.ayab_image.startLine() == 0
def test_numColors(self):
assert self.ayab_image.numColors() == 2
def test_setStartLine(self):
self.startLine = 0
self.ayab_image.setStartLine(self.startLine)
assert self.ayab_image.startLine() == 0
| 2 | 2 |
moneymour/api_client.py | moneymour/api-client-python | 0 | 12787844 | import requests
import json
from moneymour import environments
from moneymour.crypto_utils import Signature
API_BASE_URL = 'https://api.moneymour.com'
API_SANDBOX_BASE_URL = 'https://api.sandbox.moneymour.com'
API_STAGE_BASE_URL = 'https://api.stage.moneymour.com'
API_DEVELOPMENT_BASE_URL = 'http://localhost:3000'
ENDPOINT_MERCHANT_REQUEST = '/merchant-request'
class ApiClient:
def __init__(self, merchant_id, merchant_secret, environment=environments.ENVIRONMENT_SANDBOX):
environments.validate_environment(environment)
self.merchant_id = merchant_id
self.merchant_secret = merchant_secret
self.environment = environment
def request(self, private_key, body):
"""
Request a loan.
:param private_key: Your personal private key
:param body: The body to be sent in the POST request
:return: JSON decoded object
"""
# Add identification fields to the request
body['merchantId'] = self.merchant_id
body['secret'] = self.merchant_secret
expires_at = Signature.generate_expires_at_header_value()
signature = Signature.build(private_key, expires_at, body)
headers = {
'Content-Type': 'application/json',
'Expires-at': expires_at,
'Signature': signature.decode("utf-8")
}
body = json.dumps(body, separators=(',', ':'))
# Perform the request
r = requests.post(ApiClient.get_api_base_url(self.environment) + ENDPOINT_MERCHANT_REQUEST, headers=headers,
data=body)
return json.loads(r.text)
@staticmethod
def get_api_base_url(environment):
environments.validate_environment(environment)
if environment == environments.ENVIRONMENT_PRODUCTION:
return API_BASE_URL
elif environment == environments.ENVIRONMENT_SANDBOX:
return API_SANDBOX_BASE_URL
elif environment == environments.ENVIRONMENT_STAGE:
return API_STAGE_BASE_URL
else:
return API_DEVELOPMENT_BASE_URL
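# Hedged usage sketch (added; the body fields shown are assumptions, not the
# documented request schema):
#   client = ApiClient("merchant-id", "merchant-secret", environments.ENVIRONMENT_SANDBOX)
#   response = client.request(private_key=my_private_key, body={"amount": 1500})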
| 2.59375 | 3 |
stellar-csv-creator/main.py | usertxt/stellar-csv-creator | 2 | 12787845 | <gh_stars>1-10
import csv
import json
import logging
import sys
import os
import re
from datetime import datetime
import requests
import requests_cache
from PySide2 import QtGui, QtCore, QtWidgets, QtSql
from gui.main_window import Ui_MainWindow
from gui.styles import dark
from utils.about_dialog import AboutDialog
from utils.message_box import MessageBox
from utils.version import version
from utils.util import (date_format, open_path, user_dir, make_dir, setup_config, isfloat, exit_app)
class CSVCreator(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
super(CSVCreator, self).__init__(parent)
self.version = version
self.log_file = os.path.join(user_dir(), "stellar-csv-creator.log")
sys.excepthook = self.error_handler
self.setupUi(self)
# Set config
self.config_path = os.path.join(user_dir(), "config.json")
self.config = json.load(open(self.config_path))
self.csv_config = self.config["CSV"]
self.app_config = self.config["APP"]
self.theme = self.app_config["THEME"]
# Configure GUI
self.setWindowTitle(f"Stellar CSV Creator v{self.version}")
self.window_icon = QtGui.QIcon()
if self.theme == "dark":
self.dark_theme()
self.window_icon_file = "gui/icons/stellar_dark.ico"
self.link_color = "#007bff"
self.error_color = "#ff4f4f"
else:
self.window_icon_file = "gui/icons/stellar_default.ico"
self.link_color = "#0000ff"
self.error_color = "red"
# Set icons
self.window_icon.addPixmap(QtGui.QPixmap(self.window_icon_file), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.setWindowIcon(self.window_icon)
# Set utils instances
self.mb = MessageBox(self.theme)
self.date_format = date_format
self.about_window = None
self.exit_app = exit_app
# Create address book DB
self.db = QtSql.QSqlDatabase.addDatabase("QSQLITE")
self.db.setDatabaseName(os.path.join(user_dir(), "addresses.db"))
if not self.db.open():
self.mb.message_box("Unable to establish a database connection.\n"
"This example needs SQLite support. Please read "
"the Qt SQL driver documentation for information "
"how to build it.\n\n", critical=True)
self.query = QtSql.QSqlQuery()
self.query.exec_("""
CREATE TABLE IF NOT EXISTS addresses (id integer primary key autoincrement,
Nickname VARCHAR(20),
Address VARCHAR(20))
""")
# Load address book table
self.view = self.tableAddresses
self.model = QtSql.QSqlTableModel()
self.model.setTable("addresses")
self.load_addresses()
# Address book context menu
self.useAction = QtWidgets.QAction("Use", self)
self.editAction = QtWidgets.QAction("Edit", self)
self.deleteAction = QtWidgets.QAction("Delete", self)
self.tableAddresses.addAction(self.useAction)
self.tableAddresses.addAction(self.editAction)
self.tableAddresses.addAction(self.deleteAction)
# Configure address book table
self.tableAddresses.installEventFilter(self)
self.tableAddresses.verticalHeader().setVisible(False)
self.tableAddresses.setColumnHidden(0, True)
self.tableAddresses.setColumnWidth(1, 90)
self.tableAddresses.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tableAddresses.horizontalHeader().setSectionResizeMode(2, QtWidgets.QHeaderView.Stretch)
# Backend functions
self.get_config()
self.make_links()
def load_addresses(self):
self.model.select()
self.view.setModel(self.model)
def add_address(self):
address = self.ABAddress.text()
nickname = self.ABNickname.text()
self.query.exec_(f"INSERT INTO addresses (Nickname, Address) values('{nickname}', '{address}')")
self.ABAddress.clear()
self.ABNickname.clear()
self.load_addresses()
def edit_address(self):
index = self.tableAddresses.selectionModel().currentIndex()
self.tableAddresses.edit(index)
def use_address(self):
index = self.tableAddresses.selectionModel().currentIndex()
value = index.sibling(index.row(), 2).data()
self.Address.setText(value)
def delete_address(self):
index = self.tableAddresses.selectionModel().currentIndex()
self.model.removeRow(index.row())
self.load_addresses()
def eventFilter(self, source, event):
if event.type() == QtCore.QEvent.KeyPress:
if event.key() == QtCore.Qt.Key_Delete:
index = self.tableAddresses.selectionModel().currentIndex()
self.model.removeRow(index.row())
self.load_addresses()
return True
return False
def about_dialog(self):
if self.about_window is None:
self.about_window = AboutDialog(self.version, self.theme, self.link_color, self.window_icon_file)
self.about_window.show()
self.about_window.activateWindow()
def dark_theme(self):
file = QtCore.QFile(":/qdarkstyle/style.qss")
file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text)
stream = QtCore.QTextStream(file)
self.setStyleSheet(stream.readAll())
def folder_dialog(self):
folder = QtWidgets.QFileDialog.getExistingDirectory()
if folder != "":
self.CSVOutputDest.setText(folder)
def get_config(self):
rb_light = self.radioButtonLightMode
rb_dark = self.radioButtonDarkMode
self.Source.setText(self.csv_config["SOURCE"])
self.Memo.setText(self.csv_config["MEMO"])
self.MinThresh.setText(self.csv_config["MIN_THRESH"])
self.MaxThresh.setText(self.csv_config["MAX_THRESH"])
self.CSVOutputDest.setText(self.csv_config["DESTINATION"])
if self.theme == "default":
rb_light.click()
rb_light.setDisabled(True)
if self.theme == "dark":
rb_dark.click()
rb_dark.setDisabled(True)
if rb_dark.isChecked() and self.theme == "default":
rb_light.setEnabled(True)
rb_light.click()
rb_light.setDisabled(True)
if rb_light.isChecked() and self.theme == "dark":
rb_dark.setEnabled(True)
rb_dark.click()
rb_dark.setDisabled(True)
def make_links(self):
# Menu bar
self.actionSave_Settings.triggered.connect(self.save_settings)
self.actionExit.triggered.connect(self.exit_app)
self.actionAbout.triggered.connect(self.about_dialog)
self.actionOpen_Log.triggered.connect(lambda: open_path(self.log_file))
self.actionCheck_for_updates.triggered.connect(self.check_for_updates)
# Main tab
self.CreateCSV.clicked.connect(self.create_csv)
self.GetBalance.clicked.connect(self.get_balance)
self.Address.textChanged.connect(self.enable_buttons)
self.Address.returnPressed.connect(self.CreateCSV.click)
self.Address.textChanged['QString'].connect(self.enable_buttons)
self.StartDate.textChanged.connect(self.enable_buttons)
self.StartDate.returnPressed.connect(self.CreateCSV.click)
self.EndDate.returnPressed.connect(self.CreateCSV.click)
self.output.anchorClicked.connect(lambda: open_path(self.csv_config["DESTINATION"]))
# Addresses tab
self.ABAddress.returnPressed.connect(self.addAddress.click)
self.ABNickname.returnPressed.connect(self.addAddress.click)
self.ABAddress.textChanged.connect(self.enable_buttons)
self.ABAddress.textChanged['QString'].connect(self.enable_buttons)
self.addAddress.clicked.connect(self.add_address)
self.useAction.triggered.connect(self.use_address)
self.editAction.triggered.connect(self.edit_address)
self.deleteAction.triggered.connect(self.delete_address)
self.tableAddresses.doubleClicked.connect(self.use_address)
# Settings tab
self.SaveSettings.clicked.connect(self.save_settings)
self.resetButton.clicked.connect(self.get_config)
self.folderButton.clicked.connect(self.folder_dialog)
self.openFolderButton.clicked.connect(lambda: open_path(self.csv_config["DESTINATION"]))
def enable_buttons(self):
if self.Address.text():
self.GetBalance.setEnabled(True)
else:
self.GetBalance.setEnabled(False)
if self.Address.text() and self.StartDate.text():
self.CreateCSV.setEnabled(True)
else:
self.CreateCSV.setEnabled(False)
if self.ABAddress.text():
self.addAddress.setEnabled(True)
else:
self.addAddress.setEnabled(False)
def check_for_updates(self):
session = requests_cache.CachedSession(cache_name=os.path.join(user_dir(), "update_cache"), expire_after=3600,
extension=".db")
with session:
response = session.get("https://api.github.com/repos/usertxt/stellar-csv-creator/releases")
response = response.json()
new_version = response[0]["tag_name"].replace("v", "")
if new_version > self.version:
self.mb.message_box("<a href=\"https://github.com/usertxt/stellar-csv-creator/releases/latest\">"
f"<span style=\"text-decoration: underlined; color: {self.link_color}\">Version "
f"{new_version} is available</span></a>", info=True)
else:
self.mb.message_box("You are using the latest release", info=True)
def create_csv(self):
url = f"https://horizon.stellar.org/accounts/{self.Address.text()}/effects?cursor=&limit=100&order=desc"
main_response = requests.get(url).json()
main_response = main_response["_embedded"]["records"]
start_date = self.date_format(self.StartDate.text(), date_object=True)
if self.EndDate.text():
end_date = self.date_format(self.EndDate.text(), date_object=True)
else:
end_date = datetime.utcnow()
start_date_console = self.date_format(start_date, str_object=True)
end_date_console = self.date_format(end_date, str_object=True)
threshold_min = float(self.csv_config["MIN_THRESH"])
threshold_max = float(self.csv_config["MAX_THRESH"])
csv_file = f"{self.csv_config['DESTINATION']}/{self.Address.text()}.csv"
top_row = ("Date", "Action", "Volume", "Symbol", "Source", "Memo")
action = "INCOME"
symbol = "XLM"
source = self.csv_config["SOURCE"]
memo = self.csv_config["MEMO"]
path = self.csv_config["DESTINATION"]
filtered_rows = []
try:
self.console(f"Searching for transactions from {start_date_console} to {end_date_console}<br>"
f"Thresholds are set to {self.csv_config['MIN_THRESH']} (MIN) and "
f"{self.csv_config['MAX_THRESH']} (MAX)<p>", log=True)
for tx in main_response:
created_at = tx["created_at"]
amount = tx.get("amount", 0)
rows = (created_at, action, amount, symbol, source, memo)
dates = datetime.strptime(created_at, "%Y-%m-%dT%H:%M:%SZ").strftime("%Y-%m-%d")
dates_formatted = self.date_format(dates, date_object=True)
if float(amount) > threshold_max or float(amount) < threshold_min:
pass
elif start_date <= dates_formatted <= end_date:
filtered_rows.append(rows)
if filtered_rows:
with open(csv_file, "w", newline="") as file:
writer = csv.writer(file)
writer.writerow(top_row)
self.console(f"Creating CSV", log=True)
self.console(str(top_row), log=True, append=True)
for filtered_tx in filtered_rows:
with open(csv_file, "a", newline="") as file:
writer = csv.writer(file)
writer.writerow(filtered_tx)
self.console(str(filtered_tx), log=True, append=True)
self.console(f"End of transactions from {start_date_console} to {end_date_console}<p>")
self.console(f"Successfully created<br>{self.Address.text()}.csv<br>"
f"in folder <a href='{path}'><font color='{self.link_color}'>{path}</font></a><p>",
log=True)
self.statusbar.showMessage("CSV created", timeout=3000)
else:
self.console("No transactions found<p>", error=True, log=True)
except Exception as e:
e = getattr(e, "message", repr(e))
self.console(e, error=True, log=True)
def get_balance(self):
try:
url = f"https://horizon.stellar.org/accounts/{self.Address.text()}"
response = requests.get(url).json()
balance_response = response["balances"][0]["balance"]
if self.theme == "dark":
text_color = self.link_color
else:
text_color = "black"
self.console(f"<p><font color=\"{text_color}\">Balance: {balance_response} XLM</font><p>")
except Exception as e:
e = getattr(e, "message", repr(e))
self.console("Unable to retrieve balance. Check the accuracy of your address.", error=True, log=True)
self.console(e, error=True, log=True)
def save_settings(self):
try:
if not (self.MinThresh.text().isdigit() and self.MaxThresh.text().isdigit() or
isfloat(self.MinThresh.text()) and isfloat(self.MaxThresh.text())):
self.statusbar.showMessage("Settings not saved", timeout=3000)
self.labelMinThresh.setStyleSheet(f"color: {self.error_color};")
self.labelMaxThresh.setStyleSheet(f"color: {self.error_color};")
self.mb.message_box("Minimum Threshold and Maximum Threshold must be numbers only", warning=True)
            elif float(self.MinThresh.text()) > float(self.MaxThresh.text()):
self.statusbar.showMessage("Settings not saved", timeout=3000)
self.labelMinThresh.setStyleSheet(f"color: {self.error_color};")
self.labelMaxThresh.setStyleSheet(f"color: {self.error_color};")
self.mb.message_box("Minimum Threshold must be less than Maximum Threshold", warning=True)
else:
self.labelMinThresh.setStyleSheet("")
self.labelMaxThresh.setStyleSheet("")
with open(self.config_path, "w") as updated_config:
if self.radioButtonLightMode.isChecked():
self.app_config["THEME"] = "default"
else:
self.app_config["THEME"] = "dark"
self.csv_config["MIN_THRESH"] = self.MinThresh.text()
self.csv_config["MAX_THRESH"] = self.MaxThresh.text()
self.csv_config["SOURCE"] = self.Source.text()
self.csv_config["MEMO"] = self.Memo.text()
self.csv_config["DESTINATION"] = self.CSVOutputDest.text()
json.dump(self.config, updated_config, indent=2, sort_keys=False, ensure_ascii=True)
self.statusbar.showMessage("Settings saved", timeout=3000)
if self.radioButtonLightMode.isChecked() and self.theme == "dark":
self.mb.theme_change_msgbox()
elif self.radioButtonDarkMode.isChecked() and self.theme == "default":
self.mb.theme_change_msgbox()
except Exception as e:
e = getattr(e, "message", repr(e))
self.statusbar.showMessage("Unable to save settings", timeout=3000)
self.console(e, error=True, log=True)
def console(self, info, error=False, log=False, append=False):
console_append = f"<b>{info}</b>"
if log:
info = re.sub("<[^>]*>", " ", info)
logging.info(info)
if error:
console_append = f"<font color={self.error_color}><b>{info}</b></font>"
if append:
console_append = info
return self.output.append(console_append)
def error_handler(self, exc_type, exc_value, exc_traceback):
logging.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
self.console("CRITICAL ERROR: Check log file for full details", error=True)
def closeEvent(self, event: QtGui.QCloseEvent):
logging.info("App shutting down...")
if __name__ == "__main__":
make_dir(user_dir())
make_dir(os.path.join(user_dir(), "CSV Files"))
log_file = os.path.join(user_dir(), "stellar-csv-creator.log")
logging.basicConfig(filename=log_file, format=f"%(asctime)s:%(levelname)s:%(message)s",
datefmt="%Y-%m-%dT%H:%M:%SZ", level=logging.INFO)
logging.info("App started...")
setup_config()
app = QtWidgets.QApplication(sys.argv)
ui = CSVCreator()
ui.show()
sys.exit(app.exec_())
| 2.15625 | 2 |
scripts/adbconnect.py | acmerobotics/relic-recovery | 32 | 12787846 | <filename>scripts/adbconnect.py
from subprocess import run, Popen, PIPE, DEVNULL
from time import sleep
import sys, os
RC_PACKAGE = 'com.qualcomm.ftcrobotcontroller'
RC_ACTIVITY = 'org.firstinspires.ftc.robotcontroller.internal.FtcRobotControllerActivity'
ADB_PORT = 5555
PROFILE_FILENAME = 'temp.xml'
WIFI_DIRECT_PREFIX = 'DIRECT-xx-'
PROFILE_XML = '''<?xml version="1.0"?>
<WLANProfile xmlns="http://www.microsoft.com/networking/WLAN/profile/v1">
<name>{ssid}</name>
<SSIDConfig>
<SSID>
<name>{ssid}</name>
</SSID>
</SSIDConfig>
<connectionType>ESS</connectionType>
<connectionMode>auto</connectionMode>
<MSM>
<security>
<authEncryption>
<authentication>WPA2PSK</authentication>
<encryption>AES</encryption>
<useOneX>false</useOneX>
</authEncryption>
<sharedKey>
<keyType>passPhrase</keyType>
<protected>false</protected>
<keyMaterial>{passphrase}</keyMaterial>
</sharedKey>
</security>
</MSM>
<MacRandomization xmlns="http://www.microsoft.com/networking/WLAN/profile/v3">
<enableRandomization>false</enableRandomization>
</MacRandomization>
</WLANProfile>'''
def list_devices():
output = run(['adb', 'devices', '-l'], stdout=PIPE).stdout.decode('ascii').split('\r\n')
devices = []
for line in output[1:]:
if line == '':
continue
# TODO add more information about the device
devices.append(line.split()[0])
return devices
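# Added note: `adb devices -l` prints a header line followed by one line per
# device, with the serial as the first whitespace-separated token; only that
# serial is kept here.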
def list_wifi_networks():
if os.name == 'nt':
output = run(['netsh', 'wlan', 'show', 'networks'], stdout=PIPE).stdout.decode('ascii').split('\r\n')
networks = []
for line in output:
if line.startswith('SSID'):
networks.append(line.split()[-1])
return networks
else:
print('Failure: wifi network listing not support on {}'.format(os.name))
def connect_to_wifi_network(network, passphrase):
if os.name == 'nt':
# create and add profile for the network
print('Creating profile')
with open(PROFILE_FILENAME, mode='w') as fh:
fh.write(PROFILE_XML.format(ssid=network, passphrase=passphrase))
print('Loading profile')
run(['netsh', 'wlan', 'add', 'profile', 'filename="{}"'.format(PROFILE_FILENAME)], stdout=DEVNULL)
os.remove('temp.xml')
# update the network as necessary
i = 0
while True:
output_lines = run(['netsh', 'wlan', 'show', 'interfaces'], stdout=PIPE).stdout.decode('ascii').split('\r\n')
fields = {(line.split(':')[0].strip()): (line.split(':')[-1].strip()) for line in output_lines if ' :' in line}
if fields['State'] == 'connected' and fields['SSID'] == network:
i += 1
if i >= 5:
print('Connected to {}'.format(fields['SSID']))
return
elif (fields['State'] == 'disconnected') or (fields['State'] == 'connected' and fields['SSID'] != network):
i = 0
run(['netsh', 'wlan', 'connect', network], stdout=DEVNULL)
print('Attempting to connect')
else:
i = 0
sleep(0.25)
else:
print('Failure: wifi network listing not support on {}'.format(os.name))
if __name__ == '__main__':
print('Connecting to device')
while True:
devices = list_devices()
print('Devices:')
for i, device in enumerate(devices):
print('{}: {}'.format(i, device))
device_input = input('Select device: ')
if device_input != '':
os.environ['ANDROID_SERIAL'] = devices[int(device_input)]
break
print('Restarting robot controller')
if run(['adb', 'shell', 'am', 'force-stop', RC_PACKAGE], stdout=DEVNULL).returncode != 0:
print('Failure: unable to restart robot controller')
sys.exit(-1)
if run(['adb', 'shell', 'am', 'start', '-n', '{}/{}'.format(RC_PACKAGE, RC_ACTIVITY)],
stdout=DEVNULL).returncode != 0:
print('Failure: unable to start robot controller')
sys.exit(-1)
print('Scanning logcat for passphrase')
passphrase, wifi_name = None, None
proc = Popen(['adb', 'logcat'], stdout=PIPE, universal_newlines=True, encoding='utf-8')
for line in iter(proc.stdout.readline, ''):
if 'passphrase' in line.lower():
passphrase = line.split()[-1]
if 'device information' in line.lower():
wifi_name = line.split()[-2]
if not (passphrase is None or wifi_name is None):
break
proc.kill()
print('Got WiFi passphrase: {}'.format(passphrase))
print('Got WiFi direct name: {}'.format(wifi_name))
searching = True
while searching:
networks = list_wifi_networks()
for network in networks:
if network[len(WIFI_DIRECT_PREFIX):] == wifi_name:
connect_to_wifi_network(network, passphrase)
searching = False
print('Connecting over wireless ADB')
run(['adb', 'tcpip', str(ADB_PORT)], stdout=DEVNULL)
run(['adb', 'connect', '192.168.49.1:{}'.format(ADB_PORT)], stdout=DEVNULL)
print('You may disconnect the device now')
| 2.5625 | 3 |
tests/test_zscii.py | swilcox/yazm-py | 0 | 12787847 | <reponame>swilcox/yazm-py<filename>tests/test_zscii.py
from zscii import zscii_to_ascii
def test_zscii_to_ascii():
pass
| 1.421875 | 1 |
maxt/members/tests.py | flynnguy/maxt_project | 0 | 12787848 | <reponame>flynnguy/maxt_project<filename>maxt/members/tests.py<gh_stars>0
from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse
from . templatetags.member_filters import active_page_class
from . models import Member
class ModelTests(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='testuser',
email='<EMAIL>',
password='<PASSWORD>')
self.member = Member.objects.create(user=self.user)
def test_member(self):
self.assertTrue(isinstance(self.member, Member))
self.assertEqual(self.member.__str__(), self.user.username)
class ViewsTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='testuser',
email='<EMAIL>',
password='<PASSWORD>')
self.member = Member.objects.create(user=self.user)
self.inactive_user = User.objects.create_user(username='bad_testuser',
email='<EMAIL>',
is_active=False,
password='<PASSWORD>')
self.inactive_member = Member.objects.create(user=self.inactive_user)
def test_index(self):
url = reverse("index")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn('Maxt Member Management', str(resp.content))
def test_userinfo(self):
url = reverse("userinfo", args=["testuser"])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_rfid(self):
url = reverse("rfid")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
class FilterTests(TestCase):
def test_active_page_class(self):
active_page = 'class="active"'
inactive_page = ''
testIO = (
{
'page': "/",
'request_url': "/",
'expected': active_page,
},
{
'page': "/tools/",
'request_url': "/tools/",
'expected': active_page,
},
{
'page': "/user/",
'request_url': "/user/foobar/",
'expected': active_page,
},
{
'page': "/",
'request_url': "/tools/",
'expected': inactive_page,
},
{
'page': "/tools/",
'request_url': "/users/",
'expected': inactive_page,
},
)
for t in testIO:
val = active_page_class(t['request_url'], t['page'])
self.assertIn(t['expected'], val)
| 2.40625 | 2 |
kubernetes_typed/client/models/v1alpha1_policy.py | sobolevn/kubernetes-typed | 22 | 12787849 | <filename>kubernetes_typed/client/models/v1alpha1_policy.py<gh_stars>10-100
# Code generated by `typeddictgen`. DO NOT EDIT.
"""V1alpha1PolicyDict generated type."""
from typing import TypedDict, List
V1alpha1PolicyDict = TypedDict(
"V1alpha1PolicyDict",
{
"level": str,
"stages": List[str],
},
total=False,
)
| 1.671875 | 2 |
test/test_paver.py | oneconcern/stompy | 17 | 12787850 | import os
import logging
logging.basicConfig(level=logging.INFO)
import numpy as np
import matplotlib.pyplot as plt
from stompy.grid import paver
from stompy.spatial.linestring_utils import upsample_linearring,resample_linearring
from stompy.spatial import field,constrained_delaunay,wkb2shp
##
from stompy.grid import exact_delaunay
from stompy.grid import live_dt
from stompy.grid import paver
try:
    from importlib import reload  # Python 3: reload() is no longer a builtin
except ImportError:
    pass  # Python 2 provides reload() as a builtin
reload(exact_delaunay)
reload(live_dt)
reload(paver)
##
def test_basic():
# Define a polygon
boundary=np.array([[0,0],[1000,0],[1000,1000],[0,1000]])
island =np.array([[200,200],[600,200],[200,600]])
rings=[boundary,island]
# And the scale:
scale=field.ConstantField(50)
p=paver.Paving(rings=rings,density=scale)
p.pave_all()
##
def test_basic_apollo():
# Define a polygon
boundary=np.array([[0,0],[1000,0],[1000,1000],[0,1000]])
island =np.array([[200,200],[600,200],[200,600]])
rings=[boundary,island]
# And the scale:
scale=field.PyApolloniusField()
scale.insert([50,50],20)
p=paver.Paving(rings=rings,density=scale)
p.pave_all()
return p
##
# A circle - r = 100, C=628, n_points = 628
def test_circle():
r = 100
thetas = np.linspace(0,2*np.pi,200)[:-1]
circle = np.zeros((len(thetas),2),np.float64)
circle[:,0] = r*np.cos(thetas)
circle[:,1] = r*np.sin(thetas)
class CircleDensityField(field.Field):
# horizontally varying, from 5 to 20
def value(self,X):
X = np.array(X)
return 5 + 15 * (X[...,0] + 100) / 200.0
density = CircleDensityField()
p=paver.Paving(circle,density,label='circle')
p.pave_all()
def test_long_channel():
l = 2000
w = 50
long_channel = np.array([[0,0],
[l,0],
[l,w],
[0,w]], np.float64 )
density = field.ConstantField( 19.245 )
p=paver.Paving(long_channel,density)
p.pave_all()
def test_long_channel_rigid():
l = 2000
w = 50
long_channel = np.array([[0,0],
[l,0],
[l,w],
[0,w]], np.float64 )
density = field.ConstantField( 19.245 )
p=paver.Paving(long_channel,density,initial_node_status=paver.Paving.RIGID)
p.pave_all()
def test_narrow_channel():
l = 1000
w = 50
long_channel = np.array([[0,0],
[l,0.375*w],
[l,0.625*w],
[0,w]], np.float64 )
density = field.ConstantField( w/np.sin(60*np.pi/180.) / 4 )
p=paver.Paving(long_channel,density)
p.pave_all()
def test_small_island():
l = 100
square = np.array([[0,0],
[l,0],
[l,l],
[0,l]], np.float64 )
r=10
theta = np.linspace(0,2*np.pi,30)
circle = r/np.sqrt(2) * np.swapaxes( np.array([np.cos(theta), np.sin(theta)]), 0,1)
island1 = circle + np.array([45,45])
island2 = circle + np.array([65,65])
island3 = circle + np.array([20,80])
rings = [square,island1,island2,island3]
density = field.ConstantField( 10 )
p=paver.Paving(rings,density)
p.pave_all()
def test_tight_peanut():
r = 100
thetas = np.linspace(0,2*np.pi,300)
peanut = np.zeros( (len(thetas),2), np.float64)
x = r*np.cos(thetas)
y = r*np.sin(thetas) * (0.9/10000 * x*x + 0.05)
peanut[:,0] = x
peanut[:,1] = y
density = field.ConstantField( 6.0 )
p=paver.Paving(peanut,density,label='tight_peanut')
p.pave_all()
def test_tight_with_island():
# build a peanut first:
r = 100
thetas = np.linspace(0,2*np.pi,250)
peanut = np.zeros( (len(thetas),2), np.float64)
x = r*np.cos(thetas)
y = r*np.sin(thetas) * (0.9/10000 * x*x + 0.05)
peanut[:,0] = x
peanut[:,1] = y
# put two holes into it
thetas = np.linspace(0,2*np.pi,30)
hole1 = np.zeros( (len(thetas),2), np.float64)
hole1[:,0] = 10*np.cos(thetas) - 75
hole1[:,1] = 10*np.sin(thetas)
hole2 = np.zeros( (len(thetas),2), np.float64)
hole2[:,0] = 20*np.cos(thetas) + 75
hole2[:,1] = 20*np.sin(thetas)
rings = [peanut,hole1,hole2]
density = field.ConstantField( 6.0 )
p=paver.Paving(rings,density,label='tight_with_island')
p.pave_all()
def test_peninsula():
r = 100
thetas = np.linspace(0,2*np.pi,1000)
pen = np.zeros( (len(thetas),2), np.float64)
pen[:,0] = r*(0.2+ np.abs(np.sin(2*thetas))**0.2)*np.cos(thetas)
pen[:,1] = r*(0.2+ np.abs(np.sin(2*thetas))**0.2)*np.sin(thetas)
density = field.ConstantField( 10.0 )
pen2 = upsample_linearring(pen,density)
p=paver.Paving(pen2,density,label='peninsula')
p.pave_all()
def test_peanut():
# like a figure 8, or a peanut
r = 100
thetas = np.linspace(0,2*np.pi,1000)
peanut = np.zeros( (len(thetas),2), np.float64)
peanut[:,0] = r*(0.5+0.3*np.cos(2*thetas))*np.cos(thetas)
peanut[:,1] = r*(0.5+0.3*np.cos(2*thetas))*np.sin(thetas)
min_pnt = peanut.min(axis=0)
max_pnt = peanut.max(axis=0)
d_data = np.array([ [min_pnt[0],min_pnt[1], 1.5],
[min_pnt[0],max_pnt[1], 1.5],
[max_pnt[0],min_pnt[1], 8],
[max_pnt[0],max_pnt[1], 8]])
density = field.XYZField(X=d_data[:,:2],F=d_data[:,2])
p=paver.Paving(peanut,density)
p.pave_all()
def test_cul_de_sac():
r=5
theta = np.linspace(-np.pi/2,np.pi/2,20)
cap = r * np.swapaxes( np.array([np.cos(theta), np.sin(theta)]), 0,1)
box = np.array([ [-3*r,r],
[-4*r,-r] ])
ring = np.concatenate((box,cap))
density = field.ConstantField(2*r/(np.sqrt(3)/2))
p=paver.Paving(ring,density,label='cul_de_sac')
p.pave_all()
def test_bow():
x = np.linspace(-100,100,50)
# with /1000 it seems to do okay
# with /500 it still looks okay
y = x**2 / 250.0
bow = np.swapaxes( np.concatenate( (x[None,:],y[None,:]) ), 0,1)
height = np.array([0,20])
ring = np.concatenate( (bow+height,bow[::-1]-height) )
density = field.ConstantField(2)
p=paver.Paving(ring,density,label='bow')
p.pave_all()
def test_ngon(nsides=7):
# hexagon works ok, though a bit of perturbation
# septagon starts to show expansion issues, but never pronounced
# octagon - works fine.
theta = np.linspace(0,2*np.pi,nsides+1)[:-1]
r=100
x = r*np.cos(theta)
y = r*np.sin(theta)
poly = np.swapaxes( np.concatenate( (x[None,:],y[None,:]) ), 0,1)
density = field.ConstantField(6)
p=paver.Paving(poly,density,label='ngon%02d'%nsides)
p.pave_all()
def test_expansion():
# 40: too close to a 120deg angle - always bisect on centerline
# 30: rows alternate with wall and bisect seams
# 35: starts to diverge, but recovers.
# 37: too close to 120.
d = 36
pnts = np.array([[0.,0.],
[100,-d],
[200,0],
[200,100],
[100,100+d],
[0,100]])
density = field.ConstantField(6)
p=paver.Paving([pnts],density,label='expansion')
p.pave_all()
def test_embedded_channel():
# trying out degenerate internal lines - the trick may be mostly in
# how to specify them.
# make a large rectangle, with a sinuous channel in the middle
L = 500.0
W = 300.0
rect = np.array([[0,0],
[L,0],
[L,W],
[0,W]])
x = np.linspace(0.1*L,0.9*L,50)
y = W/2 + 0.1*W*np.cos(4*np.pi*x/L)
shore = np.swapaxes( np.concatenate( (x[None,:],y[None,:]) ), 0,1)
density = field.ConstantField(10)
# this will probably get moved into Paver itself.
# Note closed_ring=0 !
shore = resample_linearring(shore,density,closed_ring=0)
south_shore = shore - np.array([0,0.1*W])
north_shore = shore + np.array([0,0.1*W])
p=paver.Paving([rect],density,degenerates=[north_shore,south_shore])
p.pave_all()
# dumbarton...
def test_dumbarton():
shp=os.path.join( os.path.dirname(__file__), 'data','dumbarton.shp')
features=wkb2shp.shp2geom(shp)
geom = features['geom'][0]
dumbarton = np.array(geom.exterior)
density = field.ConstantField(250.0)
p=paver.Paving(dumbarton, density,label='dumbarton')
p.pave_all()
# #def log_spiral_channel():
# t = linspace(1.0,12*pi,200)
# a = 1 ; b = 0.1
# x = a*exp(b*t)*cos(t)
# y = a*exp(b*t)*sin(t)
# # each 2*pi, the radius gets bigger by exp(2pi*b)
# x2 = a*exp(b*t-b*pi)*cos(t)
# y2 = a*exp(b*t-b*pi)*sin(t)
# cla(); plot(x,y,'b',x2,y2,'r')
##
# This is going to require a fair bit of porting --
# hmm - maybe better just to have a sinusoid channel, then perturb it
# and put some islands in there. having a wide range of scales looks
# nice but isn't going to be a great test.
def gen_sine_sine():
t = np.linspace(1.0,12*np.pi,400)
x1 = 100*t
y1 = 200*np.sin(t)
# each 2*pi, the radius gets bigger by exp(2pi*b)
x2 = x1
y2 = y1+50
# now perturb both sides, but keep amplitude < 20
y1 = y1 + 20*np.sin(10*t)
y2 = y2 + 10*np.cos(5*t)
x = np.concatenate( (x1,x2[::-1]) )
y = np.concatenate( (y1,y2[::-1]) )
shore = np.swapaxes( np.concatenate( (x[None,:],y[None,:]) ), 0,1)
rings = [shore]
# and make some islands:
north_island_shore = 0.4*y1 + 0.6*y2
south_island_shore = 0.6*y1 + 0.4*y2
Nislands = 20
# islands same length as space between islands, so divide
# island shorelines into 2*Nislands blocks
for i in range(Nislands):
i_start = int( (2*i+0.5)*len(t)/(2*Nislands) )
i_stop = int( (2*i+1.5)*len(t)/(2*Nislands) )
north_y = north_island_shore[i_start:i_stop]
south_y = south_island_shore[i_start:i_stop]
north_x = x1[i_start:i_stop]
south_x = x2[i_start:i_stop]
x = np.concatenate( (north_x,south_x[::-1]) )
y = np.concatenate( (north_y,south_y[::-1]) )
island = np.swapaxes( np.concatenate( (x[None,:],y[None,:]) ), 0,1)
rings.append(island)
density = field.ConstantField(25.0)
min_density = field.ConstantField(2.0)
p = paver.Paving(rings,density=density,min_density=min_density)
print("Smoothing to nominal 1.0m")
# mostly just to make sure that long segments are
# sampled well relative to the local feature scale.
p.smooth()
print("Adjusting other densities to local feature size")
p.telescope_rate=1.1
p.adjust_density_by_apollonius()
return p
def test_sine_sine():
p=gen_sine_sine()
p.pave_all()
if 0:
# debugging the issue with sine_sine()
# fails deep inside here, step 512
# lots of crap coming from this one, too.
# at some point, dt_incident_constraints reports only 1 constraint,
# but it should have two, which happens because of a bad slide.
# Tricky to guard against -
# Several avenues to fix this:
# 1. Make the resample_neighbors code (which I'm pretty sure is the culprit)
# more cautious and willing to accept a local maximum in distance instead
# of shoving a node far away. This is a nice but incomplete solution.
# 2. The resample code, which I think is responsible for adding the new node
# that screwed it all up, should check for self-intersections
# this is probably the appropriate thing to do.
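    # A minimal sketch of avenue 2 (illustrative only -- not the actual
    # resample code, and assuming shapely is available): reject a resampled
    # ring that crosses itself.  Here pnts is assumed to be an (N,2) array
    # of ring node coordinates in order.
    from shapely.geometry import LineString
    def ring_self_intersects(pnts):
        closed = np.concatenate([pnts, pnts[:1]], axis=0)
        # A closed LineString is "simple" iff it has no self-crossings.
        return not LineString(closed).is_simple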
# test_sine_sine()
p=gen_sine_sine()
p.pave_all(n_steps=512)
    # (scratch/debug cells below are kept inside the "if 0:" guard so that
    #  importing this module, e.g. for pytest collection, does not execute them)
    ##
    p.verbose=3
    p.pave_all(n_steps=513)
    ##
    zoom=plt.axis()
    plt.figure(1).clf()
    p.plot()
    p.plot_boundary()
    plt.axis('equal')
    plt.axis(zoom)
    ##
    # Step 510 really takes the end off an island
    # yep.
    p.pave_all(n_steps=512)
    ##
    # node is 3626
    # to_remove: an edge with nodes 5374, 3626
    # pnt2edges: [3626, 5915]
    # part of the problem is that there is some sliding around
    # at the beginning of step 512 that really wreaks havoc on
    # what was already a dicey node.
    p.plot_nodes([3626,5374])
| 2.15625 | 2 |