# Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys
import apiutil
apiutil.CopyrightC()
print """/* DO NOT EDIT! THIS CODE IS AUTOGENERATED BY unpack.py */
#include "unpacker.h"
#include "cr_opcodes.h"
#include "cr_error.h"
#include "cr_mem.h"
#include "cr_spu.h"
#include "unpack_extend.h"
#include <stdio.h>
#include <memory.h>
#include <iprt/cdefs.h>
DECLEXPORT(const unsigned char *) cr_unpackData = NULL;
SPUDispatchTable cr_unpackDispatch;
static void crUnpackExtend(void);
static void crUnpackExtendDbg(void);
#if 0 //def DEBUG_misha
//# define CR_UNPACK_DEBUG_OPCODES
# define CR_UNPACK_DEBUG_LAST_OPCODES
# define CR_UNPACK_DEBUG_PREV_OPCODES
#endif
#ifdef CR_UNPACK_DEBUG_PREV_OPCODES
static GLenum g_VBoxDbgCrPrevOpcode = 0;
static GLenum g_VBoxDbgCrPrevExtendOpcode = 0;
#endif
"""
nodebug_opcodes = [
"CR_MULTITEXCOORD2FARB_OPCODE",
"CR_VERTEX3F_OPCODE",
"CR_NORMAL3F_OPCODE",
"CR_COLOR4UB_OPCODE",
"CR_LOADIDENTITY_OPCODE",
"CR_MATRIXMODE_OPCODE",
"CR_LOADMATRIXF_OPCODE",
"CR_DISABLE_OPCODE",
"CR_COLOR4F_OPCODE",
"CR_ENABLE_OPCODE",
"CR_BEGIN_OPCODE",
"CR_END_OPCODE",
"CR_SECONDARYCOLOR3FEXT_OPCODE"
]
nodebug_extopcodes = [
"CR_ACTIVETEXTUREARB_EXTEND_OPCODE"
]
#
# Useful functions
#
def ReadData( offset, arg_type ):
"""Emit a READ_DOUBLE or READ_DATA call for pulling a GL function
argument out of the buffer's operand area."""
if arg_type == "GLdouble" or arg_type == "GLclampd":
retval = "READ_DOUBLE( %d )" % offset
else:
retval = "READ_DATA( %d, %s )" % (offset, arg_type)
return retval
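# Illustrative only (hypothetical arguments): for a packed GLdouble at offset 0
# followed by a GLint, the emitted reads would be
#   READ_DOUBLE( 0 )       for the GLdouble
#   READ_DATA( 8, GLint )  for the GLint
# assuming apiutil.sizeof() reports 8 bytes for GLdouble when MakeNormalCall
# below advances the offset.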
def FindReturnPointer( return_type, params ):
"""For GL functions that return values (either as the return value or
through a pointer parameter) emit a SET_RETURN_PTR call."""
arg_len = apiutil.PacketLength( params )
if (return_type != 'void'):
print '\tSET_RETURN_PTR( %d );' % (arg_len + 8) # extended opcode plus packet length
else:
paramList = [ ('foo', 'void *', 0) ]
print '\tSET_RETURN_PTR( %d );' % (arg_len + 8 - apiutil.PacketLength(paramList))
def FindWritebackPointer( return_type, params ):
"""Emit a SET_WRITEBACK_PTR call."""
arg_len = apiutil.PacketLength( params )
if return_type != 'void':
paramList = [ ('foo', 'void *', 0) ]
arg_len += apiutil.PacketLength( paramList )
print '\tSET_WRITEBACK_PTR( %d );' % (arg_len + 8) # extended opcode plus packet length
def MakeNormalCall( return_type, func_name, params, counter_init = 0 ):
counter = counter_init
copy_of_params = params[:]
for i in range( 0, len(params) ):
(name, type, vecSize) = params[i]
if apiutil.IsPointer(copy_of_params[i][1]):
params[i] = ('NULL', type, vecSize)
copy_of_params[i] = (copy_of_params[i][0], 'void', 0)
if not "get" in apiutil.Properties(func_name):
print '\tcrError( "%s needs to be special cased!" );' % func_name
else:
print "\t%s %s = %s;" % ( copy_of_params[i][1], name, ReadData( counter, copy_of_params[i][1] ) )
counter += apiutil.sizeof(copy_of_params[i][1])
if ("get" in apiutil.Properties(func_name)):
FindReturnPointer( return_type, params )
FindWritebackPointer( return_type, params )
if return_type != "void":
print "\t(void)",
else:
print "\t",
print "cr_unpackDispatch.%s( %s );" % (func_name, apiutil.MakeCallString(params))
def MakeVectorCall( return_type, func_name, arg_type ):
"""Convert a call like glVertex3f to glVertex3fv."""
vec_func = apiutil.VectorFunction(func_name)
params = apiutil.Parameters(vec_func)
assert len(params) == 1
(arg_name, vecType, vecSize) = params[0]
if arg_type == "GLdouble" or arg_type == "GLclampd":
print "#ifdef CR_UNALIGNED_ACCESS_OKAY"
print "\tcr_unpackDispatch.%s((%s) cr_unpackData);" % (vec_func, vecType)
print "#else"
for index in range(0, vecSize):
print "\tGLdouble v" + `index` + " = READ_DOUBLE(", `index * 8`, ");"
if return_type != "void":
print "\t(void) cr_unpackDispatch.%s(" % func_name,
else:
print "\tcr_unpackDispatch.%s(" % func_name,
for index in range(0, vecSize):
print "v" + `index`,
if index != vecSize - 1:
print ",",
print ");"
print "#endif"
else:
print "\tcr_unpackDispatch.%s((%s) cr_unpackData);" % (vec_func, vecType)
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")
#
# Generate unpack functions for all the simple functions.
#
for func_name in keys:
if (not "pack" in apiutil.ChromiumProps(func_name) or
apiutil.FindSpecial( "unpacker", func_name )):
continue
params = apiutil.Parameters(func_name)
return_type = apiutil.ReturnType(func_name)
print "static void crUnpack%s(void)" % func_name
print "{"
vector_func = apiutil.VectorFunction(func_name)
if (vector_func and len(apiutil.Parameters(vector_func)) == 1):
MakeVectorCall( return_type, func_name, params[0][1] )
else:
MakeNormalCall( return_type, func_name, params )
packet_length = apiutil.PacketLength( params )
if packet_length == 0:
print "\tINCR_DATA_PTR_NO_ARGS( );"
else:
print "\tINCR_DATA_PTR( %d );" % packet_length
print "}\n"
#
# Emit some code
#
print """
typedef struct __dispatchNode {
const unsigned char *unpackData;
struct __dispatchNode *next;
} DispatchNode;
static DispatchNode *unpackStack = NULL;
static SPUDispatchTable *cr_lastDispatch = NULL;
void crUnpackPush(void)
{
DispatchNode *node = (DispatchNode*)crAlloc( sizeof( *node ) );
node->next = unpackStack;
unpackStack = node;
node->unpackData = cr_unpackData;
}
void crUnpackPop(void)
{
DispatchNode *node = unpackStack;
if (!node)
{
crError( "crUnpackPop called with an empty stack!" );
}
unpackStack = node->next;
cr_unpackData = node->unpackData;
crFree( node );
}
CR_UNPACK_BUFFER_TYPE crUnpackGetBufferType(const void *opcodes, unsigned int num_opcodes)
{
const uint8_t *pu8Codes = (const uint8_t *)opcodes;
uint8_t first;
uint8_t last;
if (!num_opcodes)
return CR_UNPACK_BUFFER_TYPE_GENERIC;
first = pu8Codes[0];
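    /* Opcodes are stored in decreasing memory order (the unpack loop below
     * steps through them with unpack_opcodes--), so the final opcode of the
     * buffer sits at offset 1 - num_opcodes from the first one. */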
last = pu8Codes[1-(int)num_opcodes];
switch (last)
{
case CR_CMDBLOCKFLUSH_OPCODE:
return CR_UNPACK_BUFFER_TYPE_CMDBLOCK_FLUSH;
case CR_CMDBLOCKEND_OPCODE:
return (first == CR_CMDBLOCKBEGIN_OPCODE) ? CR_UNPACK_BUFFER_TYPE_GENERIC : CR_UNPACK_BUFFER_TYPE_CMDBLOCK_END;
default:
return (first != CR_CMDBLOCKBEGIN_OPCODE) ? CR_UNPACK_BUFFER_TYPE_GENERIC : CR_UNPACK_BUFFER_TYPE_CMDBLOCK_BEGIN;
}
}
void crUnpack( const void *data, const void *opcodes,
unsigned int num_opcodes, SPUDispatchTable *table )
{
unsigned int i;
const unsigned char *unpack_opcodes;
if (table != cr_lastDispatch)
{
crSPUCopyDispatchTable( &cr_unpackDispatch, table );
cr_lastDispatch = table;
}
unpack_opcodes = (const unsigned char *)opcodes;
cr_unpackData = (const unsigned char *)data;
#if defined(CR_UNPACK_DEBUG_OPCODES) || defined(CR_UNPACK_DEBUG_LAST_OPCODES)
crDebug("crUnpack: %d opcodes", num_opcodes);
#endif
for (i = 0 ; i < num_opcodes ; i++)
{
CRDBGPTR_CHECKZ(writeback_ptr);
CRDBGPTR_CHECKZ(return_ptr);
/*crDebug(\"Unpacking opcode \%d\", *unpack_opcodes);*/
#ifdef CR_UNPACK_DEBUG_PREV_OPCODES
g_VBoxDbgCrPrevOpcode = *unpack_opcodes;
#endif
switch( *unpack_opcodes )
{"""
#
# Emit switch cases for all unextended opcodes
#
for func_name in keys:
if "pack" in apiutil.ChromiumProps(func_name):
print '\t\t\tcase %s:' % apiutil.OpcodeName( func_name )
if not apiutil.OpcodeName(func_name) in nodebug_opcodes:
print """
#ifdef CR_UNPACK_DEBUG_LAST_OPCODES
if (i==(num_opcodes-1))
#endif
#if defined(CR_UNPACK_DEBUG_OPCODES) || defined(CR_UNPACK_DEBUG_LAST_OPCODES)
crDebug("Unpack: %s");
#endif """ % apiutil.OpcodeName(func_name)
print '\t\t\t\tcrUnpack%s(); \n\t\t\t\tbreak;' % func_name
print """
case CR_EXTEND_OPCODE:
#ifdef CR_UNPACK_DEBUG_OPCODES
crUnpackExtendDbg();
#else
# ifdef CR_UNPACK_DEBUG_LAST_OPCODES
if (i==(num_opcodes-1)) crUnpackExtendDbg();
else
# endif
crUnpackExtend();
#endif
break;
case CR_CMDBLOCKBEGIN_OPCODE:
case CR_CMDBLOCKEND_OPCODE:
case CR_CMDBLOCKFLUSH_OPCODE:
case CR_NOP_OPCODE:
INCR_DATA_PTR_NO_ARGS( );
break;
default:
crError( "Unknown opcode: %d", *unpack_opcodes );
break;
}
CRDBGPTR_CHECKZ(writeback_ptr);
CRDBGPTR_CHECKZ(return_ptr);
unpack_opcodes--;
}
}"""
#
# Emit unpack functions for extended opcodes, non-special functions only.
#
for func_name in keys:
if ("extpack" in apiutil.ChromiumProps(func_name)
and not apiutil.FindSpecial("unpacker", func_name)):
return_type = apiutil.ReturnType(func_name)
params = apiutil.Parameters(func_name)
print 'static void crUnpackExtend%s(void)' % func_name
print '{'
MakeNormalCall( return_type, func_name, params, 8 )
print '}\n'
print 'static void crUnpackExtend(void)'
print '{'
print '\tGLenum extend_opcode = %s;' % ReadData( 4, 'GLenum' );
print ''
print '#ifdef CR_UNPACK_DEBUG_PREV_OPCODES'
print '\tg_VBoxDbgCrPrevExtendOpcode = extend_opcode;'
print '#endif'
print ''
print '\t/*crDebug(\"Unpacking extended opcode \%d", extend_opcode);*/'
print '\tswitch( extend_opcode )'
print '\t{'
#
# Emit switch statement for extended opcodes
#
for func_name in keys:
if "extpack" in apiutil.ChromiumProps(func_name):
print '\t\tcase %s:' % apiutil.ExtendedOpcodeName( func_name )
# print '\t\t\t\tcrDebug("Unpack: %s");' % apiutil.ExtendedOpcodeName( func_name )
print '\t\t\tcrUnpackExtend%s( );' % func_name
print '\t\t\tbreak;'
print """ default:
crError( "Unknown extended opcode: %d", (int) extend_opcode );
break;
}
INCR_VAR_PTR();
}"""
print 'static void crUnpackExtendDbg(void)'
print '{'
print '\tGLenum extend_opcode = %s;' % ReadData( 4, 'GLenum' );
print ''
print '#ifdef CR_UNPACK_DEBUG_PREV_OPCODES'
print '\tg_VBoxDbgCrPrevExtendOpcode = extend_opcode;'
print '#endif'
print ''
print '\t/*crDebug(\"Unpacking extended opcode \%d", extend_opcode);*/'
print '\tswitch( extend_opcode )'
print '\t{'
#
# Emit switch statement for extended opcodes
#
for func_name in keys:
if "extpack" in apiutil.ChromiumProps(func_name):
print '\t\tcase %s:' % apiutil.ExtendedOpcodeName( func_name )
if not apiutil.ExtendedOpcodeName(func_name) in nodebug_extopcodes:
print '\t\t\tcrDebug("Unpack: %s");' % apiutil.ExtendedOpcodeName( func_name )
print '\t\t\tcrUnpackExtend%s( );' % func_name
print '\t\t\tbreak;'
print """ default:
crError( "Unknown extended opcode: %d", (int) extend_opcode );
break;
}
INCR_VAR_PTR();
}"""
| {
"content_hash": "698cccca1e19167d67b05602f789d14d",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 125,
"avg_line_length": 30.020460358056265,
"alnum_prop": 0.6164593627534504,
"repo_name": "egraba/vbox_openbsd",
"id": "7329bb29905f0ad16101234046628bf999cebbec",
"size": "11738",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "VirtualBox-5.0.0/src/VBox/HostServices/SharedOpenGL/unpacker/unpack.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "88714"
},
{
"name": "Assembly",
"bytes": "4303680"
},
{
"name": "AutoIt",
"bytes": "2187"
},
{
"name": "Batchfile",
"bytes": "95534"
},
{
"name": "C",
"bytes": "192632221"
},
{
"name": "C#",
"bytes": "64255"
},
{
"name": "C++",
"bytes": "83842667"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "6041"
},
{
"name": "CSS",
"bytes": "26756"
},
{
"name": "D",
"bytes": "41844"
},
{
"name": "DIGITAL Command Language",
"bytes": "56579"
},
{
"name": "DTrace",
"bytes": "1466646"
},
{
"name": "GAP",
"bytes": "350327"
},
{
"name": "Groff",
"bytes": "298540"
},
{
"name": "HTML",
"bytes": "467691"
},
{
"name": "IDL",
"bytes": "106734"
},
{
"name": "Java",
"bytes": "261605"
},
{
"name": "JavaScript",
"bytes": "80927"
},
{
"name": "Lex",
"bytes": "25122"
},
{
"name": "Logos",
"bytes": "4941"
},
{
"name": "Makefile",
"bytes": "426902"
},
{
"name": "Module Management System",
"bytes": "2707"
},
{
"name": "NSIS",
"bytes": "177212"
},
{
"name": "Objective-C",
"bytes": "5619792"
},
{
"name": "Objective-C++",
"bytes": "81554"
},
{
"name": "PHP",
"bytes": "58585"
},
{
"name": "Pascal",
"bytes": "69941"
},
{
"name": "Perl",
"bytes": "240063"
},
{
"name": "PowerShell",
"bytes": "10664"
},
{
"name": "Python",
"bytes": "9094160"
},
{
"name": "QMake",
"bytes": "3055"
},
{
"name": "R",
"bytes": "21094"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Shell",
"bytes": "1460572"
},
{
"name": "SourcePawn",
"bytes": "4139"
},
{
"name": "TypeScript",
"bytes": "142342"
},
{
"name": "Visual Basic",
"bytes": "7161"
},
{
"name": "XSLT",
"bytes": "1034475"
},
{
"name": "Yacc",
"bytes": "22312"
}
],
"symlink_target": ""
} |
import unittest
import logging
import emission.analysis.modelling.trip_model.model_storage as eamums
import emission.analysis.modelling.trip_model.model_type as eamumt
import emission.analysis.modelling.trip_model.run_model as eamur
import emission.storage.timeseries.abstract_timeseries as esta
import emission.tests.modellingTests.modellingTestAssets as etmm
import emission.storage.decorations.analysis_timeseries_queries as esda
import emission.core.get_database as edb
import emission.storage.pipeline_queries as epq
import emission.core.wrapper.pipelinestate as ecwp
class TestRunGreedyModel(unittest.TestCase):
"""these tests were copied forward during a refactor of the tour model
[https://github.com/e-mission/e-mission-server/blob/10772f892385d44e11e51e796b0780d8f6609a2c/emission/analysis/modelling/tour_model_first_only/load_predict.py#L114]
it's uncertain what condition they are in besides having been refactored to
use the more recent tour modeling code.
"""
def setUp(self):
"""
sets up the end-to-end run model test with Confirmedtrip data
"""
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',
level=logging.DEBUG)
# configuration for randomly-generated test data
self.user_id = user_id = 'TestRunGreedyModel-TestData'
self.origin = (-105.1705977, 39.7402654,)
self.destination = (-105.1755606, 39.7673075)
self.min_trips = 14
self.total_trips = 100
self.clustered_trips = 33 # bins must have at least self.min_trips similar trips by default
self.has_label_percent = 0.9 # let's make a few that don't have a label, but invariant
# $clustered_trips * $has_label_percent > self.min_trips
# must be correct or else this test could fail under some random test cases.
# for a negative test, below
self.unused_user_id = 'asdjfkl;asdfjkl;asd08234ur13fi4jhf2103mkl'
# test data can be saved between test invocations, check if data exists before generating
ts = esta.TimeSeries.get_time_series(user_id)
test_data = list(ts.find_entries(["analysis/confirmed_trip"]))
if len(test_data) == 0:
# generate test data for the database
logging.debug(f"inserting mock Confirmedtrips into database")
# generate labels with a known sample weight that we can rely on in the test
label_data = {
"mode_confirm": ['ebike', 'bike'],
"purpose_confirm": ['happy-hour', 'dog-park'],
"replaced_mode": ['walk'],
"mode_weights": [0.9, 0.1],
"purpose_weights": [0.1, 0.9]
}
train = etmm.generate_mock_trips(
user_id=user_id,
trips=self.total_trips,
origin=self.origin,
destination=self.destination,
label_data=label_data,
within_threshold=self.clustered_trips,
threshold=0.004, # ~400m
has_label_p=self.has_label_percent
)
ts.bulk_insert(train)
# confirm data write did not fail
test_data = esda.get_entries(key="analysis/confirmed_trip", user_id=user_id, time_query=None)
if len(test_data) != self.total_trips:
logging.debug(f'test invariant failed after generating test data')
self.fail()
else:
logging.debug(f'found {self.total_trips} trips in database')
def tearDown(self):
"""
clean up database
"""
edb.get_analysis_timeseries_db().delete_many({'user_id': self.user_id})
edb.get_model_db().delete_many({'user_id': self.user_id})
edb.get_pipeline_state_db().delete_many({'user_id': self.user_id})
def testBuildGreedyModelFromConfig(self):
"""
greedy model takes config arguments via the constructor for testing
purposes but will load from a file in /conf/analysis/ which is tested here
"""
eamumt.ModelType.GREEDY_SIMILARITY_BINNING.build()
# success if it didn't throw
def testTrainGreedyModelWithZeroTrips(self):
"""
        training a model for a user who has no trips should neither build a
        model nor advance that user's pipeline state
"""
# pass along debug model configuration
greedy_model_config = {
"metric": "od_similarity",
"similarity_threshold_meters": 500,
"apply_cutoff": False,
"incremental_evaluation": False
}
logging.debug(f'~~~~ do nothing ~~~~')
eamur.update_trip_model(
user_id=self.unused_user_id,
model_type=eamumt.ModelType.GREEDY_SIMILARITY_BINNING,
model_storage=eamums.ModelStorage.DOCUMENT_DATABASE,
min_trips=self.min_trips,
model_config=greedy_model_config
)
# user had no entries so their pipeline state should not have been set
        # if it was set, the time query here would find a non-None curr_run_ts
stage = ecwp.PipelineStages.TRIP_MODEL
pipeline_state = epq.get_current_state(self.unused_user_id, stage)
self.assertIsNone(
pipeline_state['curr_run_ts'],
"pipeline should not have a current timestamp for the test user")
def test1RoundTripGreedySimilarityBinning(self):
"""
train a model, save it, load it, and use it for prediction, using
the high-level training/testing API provided via
run_model.py:update_trip_model() # train
run_model.py:predict_labels_with_n() # test
for clustering, use the default greedy similarity binning model
"""
# pass along debug model configuration
greedy_model_config = {
"metric": "od_similarity",
"similarity_threshold_meters": 500,
"apply_cutoff": False,
"incremental_evaluation": False
}
logging.debug(f'(TRAIN) creating a model based on trips in database')
eamur.update_trip_model(
user_id=self.user_id,
model_type=eamumt.ModelType.GREEDY_SIMILARITY_BINNING,
model_storage=eamums.ModelStorage.DOCUMENT_DATABASE,
min_trips=self.min_trips,
model_config=greedy_model_config
)
logging.debug(f'(TEST) testing prediction of stored model')
test = etmm.build_mock_trip(
user_id=self.user_id,
origin=self.origin,
destination=self.destination
)
prediction, n = eamur.predict_labels_with_n(
trip = test,
model_type=eamumt.ModelType.GREEDY_SIMILARITY_BINNING,
model_storage=eamums.ModelStorage.DOCUMENT_DATABASE,
model_config=greedy_model_config
)
[logging.debug(p) for p in sorted(prediction, key=lambda r: r['p'], reverse=True)]
self.assertNotEqual(len(prediction), 0, "should have a prediction")
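# Note: this module defines no __main__ guard; a minimal way to run it on its
# own (assumed invocation from the repository root) is
#   python -m unittest emission.tests.modellingTests.TestRunGreedyModel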
| {
"content_hash": "b5f695d5950e0c11b04a32d5bdfad3b4",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 168,
"avg_line_length": 41.52601156069364,
"alnum_prop": 0.6227728285077951,
"repo_name": "e-mission/e-mission-server",
"id": "10f221909503534fbafd3e2c868f75449d1302fe",
"size": "7184",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "emission/tests/modellingTests/TestRunGreedyModel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "445"
},
{
"name": "CSS",
"bytes": "97039"
},
{
"name": "Dockerfile",
"bytes": "1326"
},
{
"name": "HTML",
"bytes": "64875"
},
{
"name": "JavaScript",
"bytes": "116761"
},
{
"name": "Jupyter Notebook",
"bytes": "4656584"
},
{
"name": "Python",
"bytes": "2219428"
},
{
"name": "SCSS",
"bytes": "41755"
},
{
"name": "Shell",
"bytes": "11419"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from csv import DictReader
from time import time
#field name constants
TIME = "time"
HUMIDITY = "humidity"
PRESSURE = "pressure"
TEMP_HUMIDITY = "temperature (humidity)"
TEMP_PRESSURE = "temperature (pressure)"
ORIENTATION_RAD_PITCH = "orientation radians pitch"
ORIENTATION_RAD_YAW = "orientation radians yaw"
ORIENTATION_RAD_ROLL = "orientation radians roll"
ORIENTATION_DEG_PITCH = "orientation degrees pitch"
ORIENTATION_DEG_YAW = "orientation degrees yaw"
ORIENTATION_DEG_ROLL = "orientation degrees roll"
COMPASS_RAW_X = "compass raw x"
COMPASS_RAW_Y = "compass raw y"
COMPASS_RAW_Z = "compass raw z"
GYRO_RAW_X = "gyroscope raw x"
GYRO_RAW_Y = "gyroscope raw y"
GYRO_RAW_Z = "gyroscope raw z"
ACCEL_RAW_X = "accelerometer raw x"
ACCEL_RAW_Y = "accelerometer raw y"
ACCEL_RAW_Z = "accelerometer raw z"
"""
print("Reading file into OrderedDict")
starttime = time()
dataDict = OrderedDict()
with open("/home/pi/data/data.2015-05-13-19-33-42.csv", "r") as datafile:
reader = DictReader(datafile)
print(reader[0])
for row in reader:
dataDict[row[TIME]] = row
#for time, data in dataDict.items():
# print("{} : {}".format(time, data))
endtime = time()
print("Completed {}".format(endtime - starttime))
"""
print("Reading file into list")
starttime = time()
datalist = []
with open("/home/pi/data/data.2015-05-13-19-33-42.csv", "r") as datafile:
reader = DictReader(datafile)
for row in reader:
datalist.append(row)
print(len(datalist))
#for row in datalist:
# temp = row[TEMP_HUMIDITY]
endtime = time()
print("Completed {}".format(endtime - starttime))
| {
"content_hash": "4276de5a08b9650119b585fc35e9ba5d",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 73,
"avg_line_length": 25.71875,
"alnum_prop": 0.7053462940461726,
"repo_name": "martinohanlon/SpaceCRAFT",
"id": "08128b5a2f603940f977d02de6fa9cb7377aaa80",
"size": "1693",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "poc/readindata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "133848"
},
{
"name": "Shell",
"bytes": "949"
}
],
"symlink_target": ""
} |
import os
import unittest
import sys
import glob
from IECore import *
from math import pow
class TGAImageWriterTest(unittest.TestCase):
def __verifyImageRGB( self, imgNew, imgOrig, maxError = 0.002 ):
self.assertEqual( type(imgNew), ImagePrimitive )
if "R" in imgOrig :
self.assert_( "R" in imgNew )
if "G" in imgOrig :
self.assert_( "G" in imgNew )
if "B" in imgOrig :
self.assert_( "B" in imgNew )
if "A" in imgOrig :
self.assert_( "A" in imgNew )
if "Y" in imgOrig :
self.assert_( "Y" in imgNew )
op = ImageDiffOp()
res = op(
imageA = imgNew,
imageB = imgOrig,
maxError = maxError,
skipMissingChannels = True
)
self.failIf( res.value )
def __makeFloatImage( self, dataWindow, displayWindow, withAlpha = False, dataType = FloatVectorData ) :
img = ImagePrimitive( dataWindow, displayWindow )
w = dataWindow.max.x - dataWindow.min.x + 1
h = dataWindow.max.y - dataWindow.min.y + 1
area = w * h
R = dataType( area )
G = dataType( area )
B = dataType( area )
if withAlpha:
A = dataType( area )
offset = 0
for y in range( 0, h ) :
for x in range( 0, w ) :
R[offset] = float(x) / (w - 1)
G[offset] = float(y) / (h - 1)
B[offset] = 0.0
if withAlpha:
A[offset] = 0.5
offset = offset + 1
img["R"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, R )
img["G"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, G )
img["B"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, B )
if withAlpha:
img["A"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, A )
return img
def __makeIntImage( self, dataWindow, displayWindow, dataType = UIntVectorData, maxInt = 2**32-1 ) :
img = ImagePrimitive( dataWindow, displayWindow )
w = dataWindow.max.x - dataWindow.min.x + 1
h = dataWindow.max.y - dataWindow.min.y + 1
area = w * h
R = dataType( area )
G = dataType( area )
B = dataType( area )
offset = 0
for y in range( 0, h ) :
for x in range( 0, w ) :
R[offset] = int( round( maxInt * (float(x) / (w - 1)) ) )
G[offset] = int( round( maxInt * (float(y) / (h - 1)) ) )
B[offset] = 0
offset = offset + 1
img["R"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, R )
img["G"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, G )
img["B"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, B )
return img
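# Both helpers above build a horizontal ramp in R and a vertical ramp in G with
# B left at zero; e.g. for the 100x100 windows used below, the float image
# holds R = x / 99 and G = y / 99 at pixel (x, y).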
def testWrite( self ) :
displayWindow = Box2i(
V2i( 0, 0 ),
V2i( 99, 99 )
)
dataWindow = displayWindow
# TGA channels are 8-bit
rawImage = self.__makeIntImage( dataWindow, displayWindow, dataType = UCharVectorData, maxInt = 2**8-1 )
for dataType in [ FloatVectorData, HalfVectorData, DoubleVectorData ] :
self.setUp()
rawMode = ( dataType != FloatVectorData )
imgOrig = self.__makeFloatImage( dataWindow, displayWindow, dataType = dataType )
w = Writer.create( imgOrig, "test/IECore/data/tgaFiles/output.tga" )
self.assertEqual( type(w), TGAImageWriter )
w['rawChannels'] = rawMode
w.write()
self.assert_( os.path.exists( "test/IECore/data/tgaFiles/output.tga" ) )
# Now we've written the image, verify the rgb
r = Reader.create( "test/IECore/data/tgaFiles/output.tga" )
r['rawChannels'] = rawMode
imgNew = r.read()
if rawMode :
self.assertEqual( type(imgNew['R'].data), UCharVectorData )
self.__verifyImageRGB( rawImage, imgNew )
else :
self.assertEqual( type(imgNew['R'].data), FloatVectorData )
self.__verifyImageRGB( imgOrig, imgNew )
self.tearDown()
for dataType in [ ( UIntVectorData, 2**32-1), (UCharVectorData, 2**8-1 ), (UShortVectorData, 2**16-1 ) ] :
self.setUp()
imgOrig = self.__makeIntImage( dataWindow, displayWindow, dataType = dataType[0], maxInt = dataType[1] )
w = Writer.create( imgOrig, "test/IECore/data/tgaFiles/output.tga" )
self.assertEqual( type(w), TGAImageWriter )
w['rawChannels'] = True
w.write()
self.assert_( os.path.exists( "test/IECore/data/tgaFiles/output.tga" ) )
# Now we've written the image, verify the rgb
r = Reader.create( "test/IECore/data/tgaFiles/output.tga" )
r['rawChannels'] = True
imgNew = r.read()
self.__verifyImageRGB( rawImage, imgNew )
self.tearDown()
def testWriteIncomplete( self ) :
displayWindow = Box2i(
V2i( 0, 0 ),
V2i( 99, 99 )
)
dataWindow = displayWindow
imgOrig = self.__makeFloatImage( dataWindow, displayWindow )
# We don't have enough data to fill this dataWindow
imgOrig.dataWindow = Box2i(
V2i( 0, 0 ),
V2i( 199, 199 )
)
self.failIf( imgOrig.arePrimitiveVariablesValid() )
w = Writer.create( imgOrig, "test/IECore/data/tgaFiles/output.tga" )
self.assertEqual( type(w), TGAImageWriter )
self.assertRaises( RuntimeError, w.write )
self.failIf( os.path.exists( "test/IECore/tgaFiles/output.tga" ) )
def testWindowWrite( self ) :
dataWindow = Box2i(
V2i( 0, 0 ),
V2i( 99, 99 )
)
imgOrig = self.__makeFloatImage( dataWindow, dataWindow )
imgOrig.displayWindow = Box2i(
V2i( -20, -20 ),
V2i( 199, 199 )
)
w = Writer.create( imgOrig, "test/IECore/data/tgaFiles/output.tga" )
self.assertEqual( type(w), TGAImageWriter )
w.write()
self.assert_( os.path.exists( "test/IECore/data/tgaFiles/output.tga" ) )
r = Reader.create( "test/IECore/data/tgaFiles/output.tga" )
imgNew = r.read()
r = Reader.create( "test/IECore/data/expectedResults/windowWrite.tga" )
# the test image was originally saved in linear colorspace...
r['colorSpace'] = 'linear'
imgExpected = r.read()
self.__verifyImageRGB( imgNew, imgExpected )
def testOversizeDataWindow( self ) :
r = Reader.create( "test/IECore/data/exrFiles/oversizeDataWindow.exr" )
img = r.read()
w = Writer.create( img, "test/IECore/data/tgaFiles/output.tga" )
self.assertEqual( type(w), TGAImageWriter )
w.write()
r = Reader.create( "test/IECore/data/tgaFiles/output.tga" )
imgNew = r.read()
r = Reader.create( "test/IECore/data/expectedResults/oversizeDataWindow.tga" )
# the test image was originally saved in linear colorspace...
r['colorSpace'] = 'linear'
imgExpected = r.read()
self.__verifyImageRGB( imgNew, imgExpected )
def setUp( self ) :
if os.path.isfile( "test/IECore/data/tgaFiles/output.tga") :
os.remove( "test/IECore/data/tgaFiles/output.tga" )
def tearDown( self ) :
if os.path.isfile( "test/IECore/data/tgaFiles/output.tga") :
os.remove( "test/IECore/data/tgaFiles/output.tga" )
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "15eb8c73bf6d53b00507260bade2f327",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 109,
"avg_line_length": 26.437751004016064,
"alnum_prop": 0.663223454352119,
"repo_name": "danieldresser/cortex",
"id": "13327f134f8bf77dd731f36e1c45de676c3ff161",
"size": "8372",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "test/IECore/TGAImageWriterTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "213747"
},
{
"name": "C++",
"bytes": "9413775"
},
{
"name": "COBOL",
"bytes": "5543194"
},
{
"name": "Objective-C",
"bytes": "449498"
},
{
"name": "Perl",
"bytes": "961"
},
{
"name": "Python",
"bytes": "4495696"
},
{
"name": "Slash",
"bytes": "7896"
},
{
"name": "Tcl",
"bytes": "1796"
}
],
"symlink_target": ""
} |
import sys
import os
import glob
import time
basepath = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(basepath, 'gen-py.twisted'))
sys.path.insert(0, glob.glob(os.path.join(basepath, '../../lib/py/build/lib.*'))[0])
from ThriftTest import ThriftTest
from ThriftTest.ttypes import Xception, Xtruct
from thrift.transport import TTwisted
from thrift.protocol import TBinaryProtocol
from twisted.trial import unittest
from twisted.internet import defer, reactor
from twisted.internet.protocol import ClientCreator
from zope.interface import implementer
@implementer(ThriftTest.Iface)
class TestHandler:
def __init__(self):
self.onewaysQueue = defer.DeferredQueue()
def testVoid(self):
pass
def testString(self, s):
return s
def testByte(self, b):
return b
def testI16(self, i16):
return i16
def testI32(self, i32):
return i32
def testI64(self, i64):
return i64
def testDouble(self, dub):
return dub
def testBinary(self, thing):
return thing
def testStruct(self, thing):
return thing
def testException(self, s):
if s == 'Xception':
x = Xception()
x.errorCode = 1001
x.message = s
raise x
elif s == "throw_undeclared":
raise ValueError("foo")
def testOneway(self, seconds):
def fireOneway(t):
self.onewaysQueue.put((t, time.time(), seconds))
reactor.callLater(seconds, fireOneway, time.time())
def testNest(self, thing):
return thing
def testMap(self, thing):
return thing
def testSet(self, thing):
return thing
def testList(self, thing):
return thing
def testEnum(self, thing):
return thing
def testTypedef(self, thing):
return thing
class ThriftTestCase(unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
self.handler = TestHandler()
self.processor = ThriftTest.Processor(self.handler)
self.pfactory = TBinaryProtocol.TBinaryProtocolFactory()
self.server = reactor.listenTCP(
0, TTwisted.ThriftServerFactory(self.processor, self.pfactory), interface="127.0.0.1")
self.portNo = self.server.getHost().port
self.txclient = yield ClientCreator(reactor,
TTwisted.ThriftClientProtocol,
ThriftTest.Client,
self.pfactory).connectTCP("127.0.0.1", self.portNo)
self.client = self.txclient.client
@defer.inlineCallbacks
def tearDown(self):
yield self.server.stopListening()
self.txclient.transport.loseConnection()
@defer.inlineCallbacks
def testVoid(self):
self.assertEquals((yield self.client.testVoid()), None)
@defer.inlineCallbacks
def testString(self):
self.assertEquals((yield self.client.testString('Python')), 'Python')
@defer.inlineCallbacks
def testByte(self):
self.assertEquals((yield self.client.testByte(63)), 63)
@defer.inlineCallbacks
def testI32(self):
self.assertEquals((yield self.client.testI32(-1)), -1)
self.assertEquals((yield self.client.testI32(0)), 0)
@defer.inlineCallbacks
def testI64(self):
self.assertEquals((yield self.client.testI64(-34359738368)), -34359738368)
@defer.inlineCallbacks
def testDouble(self):
self.assertEquals((yield self.client.testDouble(-5.235098235)), -5.235098235)
# TODO: def testBinary(self) ...
@defer.inlineCallbacks
def testStruct(self):
x = Xtruct()
x.string_thing = "Zero"
x.byte_thing = 1
x.i32_thing = -3
x.i64_thing = -5
y = yield self.client.testStruct(x)
self.assertEquals(y.string_thing, "Zero")
self.assertEquals(y.byte_thing, 1)
self.assertEquals(y.i32_thing, -3)
self.assertEquals(y.i64_thing, -5)
@defer.inlineCallbacks
def testException(self):
yield self.client.testException('Safe')
try:
yield self.client.testException('Xception')
self.fail("should have gotten exception")
except Xception as x:
self.assertEquals(x.errorCode, 1001)
self.assertEquals(x.message, 'Xception')
try:
yield self.client.testException("throw_undeclared")
self.fail("should have thrown exception")
except Exception: # type is undefined
pass
@defer.inlineCallbacks
def testOneway(self):
yield self.client.testOneway(1)
start, end, seconds = yield self.handler.onewaysQueue.get()
self.assertAlmostEquals(seconds, (end - start), places=1)
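# These cases use twisted.trial.unittest, so they are meant to be collected by
# Twisted's trial runner (assumed invocation from this directory):
#   trial test_suite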
| {
"content_hash": "7f341182cd7e141d1899170554be5844",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 98,
"avg_line_length": 28.327485380116958,
"alnum_prop": 0.6275805119735756,
"repo_name": "wfxiang08/thrift",
"id": "886de44d26adae90dea67c643d60bfd8f51e0db3",
"size": "5653",
"binary": false,
"copies": "2",
"ref": "refs/heads/wf/20171009",
"path": "test/py.twisted/test_suite.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "890"
},
{
"name": "ActionScript",
"bytes": "75730"
},
{
"name": "Batchfile",
"bytes": "56120"
},
{
"name": "C",
"bytes": "875206"
},
{
"name": "C#",
"bytes": "767147"
},
{
"name": "C++",
"bytes": "4442262"
},
{
"name": "CMake",
"bytes": "117410"
},
{
"name": "CSS",
"bytes": "1070"
},
{
"name": "D",
"bytes": "645921"
},
{
"name": "Dart",
"bytes": "173228"
},
{
"name": "Emacs Lisp",
"bytes": "5361"
},
{
"name": "Erlang",
"bytes": "318544"
},
{
"name": "Go",
"bytes": "499088"
},
{
"name": "HTML",
"bytes": "23115"
},
{
"name": "Haskell",
"bytes": "122439"
},
{
"name": "Haxe",
"bytes": "310957"
},
{
"name": "Java",
"bytes": "991381"
},
{
"name": "JavaScript",
"bytes": "361646"
},
{
"name": "Lex",
"bytes": "17122"
},
{
"name": "Lua",
"bytes": "81221"
},
{
"name": "M4",
"bytes": "163124"
},
{
"name": "Makefile",
"bytes": "207232"
},
{
"name": "OCaml",
"bytes": "39241"
},
{
"name": "Objective-C",
"bytes": "154877"
},
{
"name": "PHP",
"bytes": "328406"
},
{
"name": "Pascal",
"bytes": "453413"
},
{
"name": "Perl",
"bytes": "128513"
},
{
"name": "Python",
"bytes": "405017"
},
{
"name": "Ruby",
"bytes": "398777"
},
{
"name": "Rust",
"bytes": "323472"
},
{
"name": "Shell",
"bytes": "40206"
},
{
"name": "Smalltalk",
"bytes": "22944"
},
{
"name": "Swift",
"bytes": "28538"
},
{
"name": "Thrift",
"bytes": "359355"
},
{
"name": "Vim script",
"bytes": "2846"
},
{
"name": "Yacc",
"bytes": "27371"
}
],
"symlink_target": ""
} |
from correios_lib.validators import CPF
from voluptuous import Invalid
from unittest import TestCase
class TestCPF(TestCase):
def setUp(self):
self.invalid_cases = ['648.152.363-05111', '000000000000000']
self.valid_cases = ['648.152.363-05', '64815236305']
def test_invalid_cnpjs(self):
for i in self.invalid_cases:
self.assertRaises(Invalid, CPF, i)
def test_valid_cnpjs(self):
for i in self.valid_cases:
self.assertTrue(CPF(i))
| {
"content_hash": "64f127244e57e9741fa0f6b9be933a67",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 69,
"avg_line_length": 28,
"alnum_prop": 0.6567460317460317,
"repo_name": "trocafone/correios-lib",
"id": "22dffdc4ea0b717ec8f3eaa9aa05461aa740a3b3",
"size": "504",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/lib/tests/validators/CPF_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99418"
}
],
"symlink_target": ""
} |
import functools
import logging
import jax
import jax.numpy as jnp
import numpy as np
from distla_core.blas.summa import summa
from distla_core.linalg.eigh import purify
from distla_core.linalg.invsqrt import invsqrt
from distla_core.linalg.polar import polar
from distla_core.utils import config
from distla_core.utils import misc
from distla_core.utils import pops
# # # UTILTIES # # #
def _pad_for_distribution(matrix, global_shape):
"""Pads a matrix so that it fits the distla_core distribution pattern."""
g0, g1 = global_shape
d0, d1 = matrix.shape
largest_dimension = max(pops.GRID)
pad0 = misc.distance_to_next_divisor(g0, largest_dimension)
pad1 = misc.distance_to_next_divisor(g1, largest_dimension)
b0 = (g0 + pad0) // pops.HGRID[0]
b1 = (g1 + pad1) // pops.HGRID[1]
result = np.zeros((b0, b1), dtype=matrix.dtype)
result[:d0, :d1] += matrix
return result
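# Worked example (hypothetical grid): with max(pops.GRID) == 2 and
# pops.HGRID == (2, 2), a global shape of (5, 3) is padded up to (6, 4) and the
# returned per-host block is 3 x 2, with the padded rows/columns left as zeros.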
@functools.partial(pops.pmap, static_broadcasted_argnums=(2, 3, 4))
def similarity_transform(A, V, transpose, p_sz, precision):
"""Similarity transforms A by V.
Args:
A: The matrix to transform.
V: The transformation.
transpose: If `transpose is False`, return V^H @ A V, if `True`, V @ A V^H.
p_sz: Summa panel size.
precision: Matmul precision.
Returns:
The transformed matrix
"""
if transpose:
AV = summa.summa(A, V.conj(), p_sz, False, True, precision=precision)
return summa.summa(V, AV, p_sz, False, False, precision=precision)
else:
AV = summa.summa(A, V, p_sz, False, False, precision=precision)
return summa.summa(V.conj(), AV, p_sz, True, False, precision=precision)
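# Minimal usage sketch (shapes assumed compatible): with a distributed matrix H
# and isometry V, similarity_transform(H, V, False, p_sz, precision) returns
# V^H @ H @ V via two SUMMA multiplications; this is how the ObjectiveFn is
# projected into the truncated overlap basis in purify_density_matrix below.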
# # # COMPUTING THE TRUNCATING ISOMETRY # # #
@functools.partial(
pops.pmap, out_axes=(0, None), static_broadcasted_argnums=(1, 2))
def _condition_projector(
overlap_matrix,
overlap_threshold,
condition_polar_kwargs,
):
"""Computes the projector onto the span of eigenvectors of `overlap_matrix`
for which the eigenvalue is above `overlap_threshold`. Returns the projector
and its rank.
"""
# The use here of an eye that extends into the padded region is intentional:
# We want to project out the padding as well, if possible.
eye = pops.eye(overlap_matrix.shape, overlap_matrix.dtype)
overlap_matrix = overlap_matrix - overlap_threshold * eye
U, _, _, _ = polar.polarU(overlap_matrix, **condition_polar_kwargs)
P = (U + eye) / 2
k = pops.trace(P)
return P, k
@functools.partial(pops.pmap, static_broadcasted_argnums=(1, 2, 3, 4, 5, 6))
def _subspace_iter(P, k, k_loc, n_iter, p_sz, precision, subspace_polar_kwargs):
"""Computes the isometry V such that P = V @ V^H. If P is of size D x D and
rank k, then V will be of size D x k_loc where k_loc = k + k_pad, where k_pad
makes sure that V fits the distla_core distribution pattern. The last k_pad
columns of V will be zero.
"""
V = pops.eye((P.shape[0], k_loc), P.dtype)
V = pops.apply_pad(V, k)
for i in range(n_iter):
PV = summa.summa(P, V, p_sz, False, False, precision=precision)
V, _, _, _ = polar.polarU(PV, **subspace_polar_kwargs)
return V
def _condition_isometry(
overlap_matrix,
overlap_threshold,
p_sz,
precision,
condition_polar_kwargs,
subspace_n_iter,
subspace_polar_kwargs,
):
"""Computes the isometry that projects onto the span of eigenvectors of
`overlap_matrix` for which the eigenvalue is above `overlap_threshold`.
Returns the isometry and the dimension of the space that it projects onto.
"""
P, k = _condition_projector(
overlap_matrix,
overlap_threshold,
condition_polar_kwargs,
)
k = int(np.round(k))
largest_dimension = max(pops.GRID)
k_pad = misc.distance_to_next_divisor(k, largest_dimension)
k_loc = (k + k_pad) // pops.NCOLS
V = _subspace_iter(
P,
k,
k_loc,
subspace_n_iter,
p_sz,
precision,
subspace_polar_kwargs,
)
return V, k
# # # COMPUTING THE TRUNCATED INVERSE SQUARE ROOT # # #
def _set_padded_diagonal(M, k):
"""For a D x D matrix `M`, that is assumed to only be nonzero in `M[:k, :k]`
due to padding, sets the diagonal of `M[k:, k:]` to be ones.
This is needed when inverting a padded matrix, since the padding would create
zero eigenvalues that would make the the inverse blow up.
"""
eye = pops.eye(M.shape, M.dtype)
rows, cols = pops.indices(M.shape)
left_of_k = rows < k
above_k = cols < k
return jnp.where(jnp.logical_or(left_of_k, above_k), x=M, y=eye)
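# Illustrative case (hypothetical sizes): for a 4 x 4 block M with k = 2, only
# M[:2, :2] carries data, so the lower-right 2 x 2 corner is replaced by the
# identity; this keeps invsqrt of the padded matrix finite.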
@functools.partial(pops.pmap, static_broadcasted_argnums=(2, 3, 4, 5))
def _overlap_matrix_invsqrt_part2(
overlap_matrix,
V,
k,
p_sz,
precision,
invsqrt_kwargs,
):
"""_overlap_matrix_invsqrt needs to be pmapped in two parts, this is the
second part.
"""
overlap_matrix = _set_padded_diagonal(overlap_matrix, k)
_, om_invsqrt, _, _ = invsqrt.invsqrt(overlap_matrix, **dict(invsqrt_kwargs))
om_invsqrt = pops.apply_pad(om_invsqrt, k)
if V is not None:
om_invsqrt = summa.summa(V, om_invsqrt, p_sz, False, False, precision)
return om_invsqrt
def overlap_matrix_invsqrt(
overlap_matrix,
unpadded_dim,
overlap_threshold=-1,
p_sz=None,
precision=jax.lax.Precision.HIGHEST,
condition_polar_kwargs={},
subspace_n_iter=2,
subspace_polar_kwargs={},
invsqrt_kwargs={},
):
"""Compute the inverse square root of an overlap matrix.
The inverse is regularised by truncating away small eigenvalues. Hence the
resulting inverse square root matrix may not be square, but of size D x k_loc,
where D is the dimension of the original matrix, and k_loc = k + k_pad, with
k_pad making sure that the matrix conforms to the distla_core distribution
pattern.
Args:
    overlap_matrix: The overlap matrix, as a numpy array.
    unpadded_dim: The dimension of the non-padded part of `overlap_matrix`.
    overlap_threshold: Eigenvalues of the overlap matrix below this number will
      be discarded.
p_sz: Optional; SUMMA panel size. Maximum by default.
precision: Optional; Jax matrix multiplication precision.
`jax.lax.Precision.HIGHEST` by default
condition_polar_kwargs: Optional; A dictionary of keyword arguments to be
passed to `distla_core.linalg.polar.polarU` when computing the projector that
truncates the overlap matrix. `{}` by default.
subspace_n_iter: Optional; Number of subspace iterations when finding the
isometry that truncates the overlap matrix.
subspace_polar_kwargs: Optional; A dictionary of keyword arguments to be
passed to `distla_core.linalg.polar.polarU` when computing the isometry that
truncates the overlap matrix. `{}` by default.
invsqrt_kwargs: Optional; A dictionary of keyword arguments to be
passed to `distla_core.linalg.invsqrt.invsqrt` when computing the inverse
square root of the overlap matrix. `{}` by default.
Returns:
om_invsqrt: Inverse square root of `overlap_matrix`.
k: The unpadded dimension of `om_invsqrt`.
"""
if p_sz is None:
# In practice this is going to get cut down, this choice is essentially
# equivalent to MAXINT.
p_sz = max(overlap_matrix.shape)
if "p_sz" not in condition_polar_kwargs:
condition_polar_kwargs["p_sz"] = p_sz
if "p_sz" not in subspace_polar_kwargs:
subspace_polar_kwargs["p_sz"] = p_sz
if "p_sz" not in invsqrt_kwargs:
invsqrt_kwargs["p_sz"] = p_sz
logging.info("Computing invsqrt(S)")
if overlap_threshold > 0:
V, k = _condition_isometry(
overlap_matrix,
overlap_threshold,
p_sz,
precision,
condition_polar_kwargs,
subspace_n_iter,
subspace_polar_kwargs,
)
overlap_matrix = similarity_transform(
overlap_matrix,
V,
False,
p_sz,
precision,
)
else:
V = None
k = unpadded_dim
om_invsqrt = _overlap_matrix_invsqrt_part2(
overlap_matrix,
V,
k,
p_sz,
precision,
tuple(invsqrt_kwargs.items()),
)
return om_invsqrt, k
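# A minimal call sketch (hypothetical inputs): for a pmapped overlap matrix S
# whose unpadded dimension is dim,
#   om_invsqrt, k = overlap_matrix_invsqrt(S, dim, overlap_threshold=1e-5)
# yields the (possibly rectangular) truncated S^{-1/2} and the retained rank k,
# which are then handed to purify_density_matrix below.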
# # # MISCELLANEOUS # # #
@functools.partial(
pops.pmap,
static_broadcasted_argnums=(1, 3),
out_axes=(None, None),
in_axes=(0, None, None, None))
def error_checks(rho, p_sz, num_occupied, precision):
"""Returns idempotency error (|rho^2 - rho|_F) and trace error
(|Tr(rho) - num_occupied|) for the transformed density matrix, rho.
"""
rho_squared = summa.summa(rho, rho, p_sz, False, False, precision)
rho_norm = pops.frobnorm(rho)
idempotency_error = pops.frobnorm(rho - rho_squared) / rho_norm
rho_trace = pops.trace(rho)
trace_error = jnp.abs(rho_trace - num_occupied) / num_occupied
return idempotency_error, trace_error
def compute_energy_weighted_density_matrix(
objective_fn,
density_matrix,
p_sz=None,
precision=jax.lax.Precision.HIGHEST,
):
"""Given a (converged) ObjectiveFn and density matrix, computes the
energy-weighted density matrix (EDM).
Simply, Q = D @ H @ D, where Q is the EDM, D is the density matrix, and
H is the ObjectiveFn. The EDM is used to calculate Pulay forces.
Args:
objective_fn: The ObjectiveFn, as a numpy array.
density_matrix: The density matrix, as a numpy array.
    p_sz: Optional; SUMMA panel size. Maximum by default.
Returns:
en_weighted_density_matrix: The energy-weighted density matrix,
as a numpy array.
"""
if p_sz is None:
# In practice this is going to get cut down, this choice is essentially
# equivalent to MAXINT.
p_sz = max(objective_fn.shape)
logging.info("Computing EDM")
en_weighted_density_matrix = similarity_transform(
objective_fn,
density_matrix,
False,
p_sz,
precision,
)
del objective_fn
del density_matrix
en_weighted_density_matrix.block_until_ready() # For benchmarking
return en_weighted_density_matrix
# # # PURIFICATION # # #
# REDACTED Should this sum be accumulated in efloat57?
@functools.partial(pops.pmap, out_axes=None)
def compute_ebs(objective_fn, density_matrix):
local_sum = jnp.sum(objective_fn.conj() * density_matrix)
return jax.lax.psum(local_sum, axis_name=pops.AXIS_NAME)
def purify_density_matrix(
objective_fn,
om_invsqrt,
k,
num_occupied,
p_sz=None,
precision=jax.lax.Precision.HIGHEST,
canonically_purify_kwargs={},
):
"""Computes the DFT density matrix.
By the density matrix we mean the projector onto the `num_occupied`
eigenvectors with smallest eigenvalues of the generalised eigenvalue problem
H D = e S D, where H and S are the ObjectiveFn and the overlap matrix,
respectively.
Args:
objective_fn: The ObjectiveFn, as a numpy array.
om_invsqrt: The inverse square root of the overlap_matrix, as a distributed
ShardedDeviceArray.
k: The unpadded dimension of `om_invsqrt`.
num_occupied: Number of occupied modes in the density matrix.
p_sz: Optional; SUMMA panel size. Maximum by default.
precision: Optional; Jax matrix multiplication precision.
`jax.lax.Precision.HIGHEST` by default
canonically_purify_kwargs: Optional; A dictionary of keyword arguments to be
passed to `distla_core.linalg.eigh.purify.canonically_purify`. `{}` by
default.
Returns:
density_matrix: Approximation to the density matrix, as a numpy array.
ebs: Electronic band structure energy (Tr[objective_fn @ density_matrix]).
"""
if p_sz is None:
# In practice this is going to get cut down, this choice is essentially
# equivalent to MAXINT.
p_sz = max(objective_fn.shape)
if "p_sz" not in canonically_purify_kwargs:
canonically_purify_kwargs["p_sz"] = p_sz
logging.info("Type casting invsqrt(S)")
om_invsqrt = pops.pmap(lambda x: x.astype(objective_fn.dtype))(om_invsqrt)
om_invsqrt.block_until_ready()
logging.info("Similarity transforming H")
objective_fn = similarity_transform(
objective_fn,
om_invsqrt,
False,
p_sz,
precision,
)
objective_fn.block_until_ready() # For benchmarking
logging.info("Running canonically purify")
# TODO What to do about the fact that since objective_fn is padded,
# it has fake 0 eigenvalues, which might get excited? Shift the unpadded part
# to make it negative definite?
out = purify.canonically_purify(
objective_fn,
num_occupied,
unpadded_dim=k,
**canonically_purify_kwargs,
)
density_matrix, purify_iters, purify_errs = out
del out
idempotency_error, trace_error = error_checks(
density_matrix,
p_sz,
num_occupied,
precision,
)
density_matrix.block_until_ready() # For benchmarking
logging.info(f'idempotency_error = {idempotency_error}')
logging.info(f'trace_error = {trace_error}')
logging.info(f'purify_iters = {purify_iters}')
logging.info(f'purify_errs = {purify_errs}')
logging.info("Computing EBS")
ebs = compute_ebs(objective_fn, density_matrix)
del objective_fn
ebs.block_until_ready() # For benchmarking
logging.info("Similarity transforming the DM")
density_matrix = similarity_transform(
density_matrix,
om_invsqrt,
False,
p_sz,
precision,
)
density_matrix.block_until_ready() # For benchmarking
return density_matrix, ebs
| {
"content_hash": "2106f10e7d8083cf41ac5b05cdb79680",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 83,
"avg_line_length": 32.12439024390244,
"alnum_prop": 0.6831675651051553,
"repo_name": "google/distla_core",
"id": "bb9c963e6607fc1250bab98452486c3bf632e67e",
"size": "13855",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "distla/struc_pack/multi-host/purify_density_matrix.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1317325"
},
{
"name": "Shell",
"bytes": "5454"
}
],
"symlink_target": ""
} |
"""try to find more bugs in the code using astroid inference capabilities
"""
import collections
import fnmatch
import re
import shlex
import sys
import six
import astroid
import astroid.context
import astroid.arguments
from astroid import exceptions
from astroid import objects
from astroid import bases
from pylint.interfaces import IAstroidChecker, INFERENCE, INFERENCE_FAILURE
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
is_super, check_messages, decorated_with_property,
decorated_with, node_ignores_exception,
is_iterable, is_mapping, supports_membership_test,
is_comprehension, is_inside_abstract_class,
supports_subscript,
safe_infer,
has_known_bases)
from pylint import utils
_ZOPE_DEPRECATED = (
"This option is deprecated. Use generated-members instead."
)
BUILTINS = six.moves.builtins.__name__
STR_FORMAT = "%s.str.format" % BUILTINS
def _unflatten(iterable):
for index, elem in enumerate(iterable):
if (isinstance(elem, collections.Sequence) and
not isinstance(elem, six.string_types)):
for elem in _unflatten(elem):
yield elem
elif elem and not index:
# We're interested only in the first element.
yield elem
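# Illustrative behaviour (hypothetical input): _unflatten(('a', ('b', 'c')))
# yields 'a', then recurses into the nested tuple and yields 'b' (its first
# element); later elements of a nested sequence, such as 'c', are dropped.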
def _is_owner_ignored(owner, name, ignored_classes, ignored_modules):
"""Check if the given owner should be ignored
This will verify if the owner's module is in *ignored_modules*
or the owner's module fully qualified name is in *ignored_modules*
or if the *ignored_modules* contains a pattern which catches
the fully qualified name of the module.
Also, similar checks are done for the owner itself, if its name
matches any name from the *ignored_classes* or if its qualified
name can be found in *ignored_classes*.
"""
ignored_modules = set(ignored_modules)
module_name = owner.root().name
module_qname = owner.root().qname()
if any(module_name in ignored_modules or
module_qname in ignored_modules or
fnmatch.fnmatch(module_qname, ignore) for ignore in ignored_modules):
return True
ignored_classes = set(ignored_classes)
if hasattr(owner, 'qname'):
qname = owner.qname()
else:
qname = ''
return any(name == ignore or qname == ignore for ignore in ignored_classes)
MSGS = {
'E1101': ('%s %r has no %r member',
'no-member',
              'Used when a variable is accessed for a nonexistent member.',
{'old_names': [('E1103', 'maybe-no-member')]}),
'E1102': ('%s is not callable',
'not-callable',
'Used when an object being called has been inferred to a non \
callable object'),
'E1111': ('Assigning to function call which doesn\'t return',
'assignment-from-no-return',
'Used when an assignment is done on a function call but the \
inferred function doesn\'t return anything.'),
'E1120': ('No value for argument %s in %s call',
'no-value-for-parameter',
'Used when a function call passes too few arguments.'),
'E1121': ('Too many positional arguments for %s call',
'too-many-function-args',
'Used when a function call passes too many positional \
arguments.'),
'E1123': ('Unexpected keyword argument %r in %s call',
'unexpected-keyword-arg',
'Used when a function call passes a keyword argument that \
doesn\'t correspond to one of the function\'s parameter names.'),
'E1124': ('Argument %r passed by position and keyword in %s call',
'redundant-keyword-arg',
'Used when a function call would result in assigning multiple \
values to a function parameter, one value from a positional \
argument and one from a keyword argument.'),
'E1125': ('Missing mandatory keyword argument %r in %s call',
'missing-kwoa',
('Used when a function call does not pass a mandatory'
' keyword-only argument.'),
{'minversion': (3, 0)}),
'E1126': ('Sequence index is not an int, slice, or instance with __index__',
'invalid-sequence-index',
'Used when a sequence type is indexed with an invalid type. '
'Valid types are ints, slices, and objects with an __index__ '
'method.'),
'E1127': ('Slice index is not an int, None, or instance with __index__',
'invalid-slice-index',
'Used when a slice index is not an integer, None, or an object \
with an __index__ method.'),
'E1128': ('Assigning to function call which only returns None',
'assignment-from-none',
'Used when an assignment is done on a function call but the '
'inferred function returns nothing but None.',
{'old_names': [('W1111', 'assignment-from-none')]}),
'E1129': ("Context manager '%s' doesn't implement __enter__ and __exit__.",
'not-context-manager',
'Used when an instance in a with statement doesn\'t implement '
'the context manager protocol(__enter__/__exit__).'),
'E1130': ('%s',
'invalid-unary-operand-type',
'Emitted when an unary operand is used on an object which does not '
'support this type of operation'),
'E1131': ('%s',
'unsupported-binary-operation',
'Emitted when a binary arithmetic operation between two '
'operands is not supported.'),
'E1132': ('Got multiple values for keyword argument %r in function call',
'repeated-keyword',
'Emitted when a function call got multiple values for a keyword.'),
'E1135': ("Value '%s' doesn't support membership test",
'unsupported-membership-test',
              'Emitted when an instance in membership test expression doesn\'t '
'implement membership protocol (__contains__/__iter__/__getitem__)'),
'E1136': ("Value '%s' is unsubscriptable",
'unsubscriptable-object',
"Emitted when a subscripted value doesn't support subscription"
"(i.e. doesn't define __getitem__ method)"),
}
# builtin sequence types in Python 2 and 3.
SEQUENCE_TYPES = set(['str', 'unicode', 'list', 'tuple', 'bytearray',
'xrange', 'range', 'bytes', 'memoryview'])
def _emit_no_member(node, owner, owner_name, ignored_mixins):
"""Try to see if no-member should be emitted for the given owner.
The following cases are ignored:
* the owner is a function and it has decorators.
* the owner is an instance and it has __getattr__, __getattribute__ implemented
* the module is explicitly ignored from no-member checks
* the owner is a class and the name can be found in its metaclass.
* The access node is protected by an except handler, which handles
AttributeError, Exception or bare except.
"""
if node_ignores_exception(node, AttributeError):
return False
# skip None anyway
if isinstance(owner, astroid.Const) and owner.value is None:
return False
if is_super(owner) or getattr(owner, 'type', None) == 'metaclass':
return False
if ignored_mixins and owner_name[-5:].lower() == 'mixin':
return False
if isinstance(owner, astroid.FunctionDef) and owner.decorators:
return False
if isinstance(owner, astroid.Instance):
if owner.has_dynamic_getattr() or not has_known_bases(owner):
return False
if isinstance(owner, objects.Super):
# Verify if we are dealing with an invalid Super object.
# If it is invalid, then there's no point in checking that
# it has the required attribute. Also, don't fail if the
# MRO is invalid.
try:
owner.super_mro()
except (exceptions.MroError, exceptions.SuperError):
return False
if not all(map(has_known_bases, owner.type.mro())):
return False
return True
def _determine_callable(callable_obj):
# Ordering is important, since BoundMethod is a subclass of UnboundMethod,
# and Function inherits Lambda.
if isinstance(callable_obj, astroid.BoundMethod):
# Bound methods have an extra implicit 'self' argument.
return callable_obj, 1, callable_obj.type
elif isinstance(callable_obj, astroid.UnboundMethod):
return callable_obj, 0, 'unbound method'
elif isinstance(callable_obj, astroid.FunctionDef):
return callable_obj, 0, callable_obj.type
elif isinstance(callable_obj, astroid.Lambda):
return callable_obj, 0, 'lambda'
elif isinstance(callable_obj, astroid.ClassDef):
# Class instantiation, lookup __new__ instead.
# If we only find object.__new__, we can safely check __init__
# instead. If __new__ belongs to builtins, then we look
# again for __init__ in the locals, since we won't have
# argument information for the builtin __new__ function.
try:
# Use the last definition of __new__.
new = callable_obj.local_attr('__new__')[-1]
except exceptions.NotFoundError:
new = None
from_object = new and new.parent.scope().name == 'object'
from_builtins = new and new.root().name in sys.builtin_module_names
if not new or from_object or from_builtins:
try:
# Use the last definition of __init__.
callable_obj = callable_obj.local_attr('__init__')[-1]
except exceptions.NotFoundError:
# do nothing, covered by no-init.
raise ValueError
else:
callable_obj = new
if not isinstance(callable_obj, astroid.FunctionDef):
raise ValueError
# both have an extra implicit 'cls'/'self' argument.
return callable_obj, 1, 'constructor'
else:
raise ValueError
class TypeChecker(BaseChecker):
"""try to find bugs in the code using type inference
"""
__implements__ = (IAstroidChecker,)
# configuration section name
name = 'typecheck'
# messages
msgs = MSGS
priority = -1
# configuration options
options = (('ignore-mixin-members',
{'default' : True, 'type' : 'yn', 'metavar': '<y_or_n>',
'help' : 'Tells whether missing members accessed in mixin \
class should be ignored. A mixin class is detected if its name ends with \
"mixin" (case insensitive).'}
),
('ignored-modules',
{'default': (),
'type': 'csv',
'metavar': '<module names>',
'help': 'List of module names for which member attributes '
'should not be checked (useful for modules/projects '
'where namespaces are manipulated during runtime and '
'thus existing member attributes cannot be '
'deduced by static analysis. It supports qualified '
'module names, as well as Unix pattern matching.'}
),
('ignored-classes',
{'default' : (),
'type' : 'csv',
'metavar' : '<members names>',
'help' : 'List of classes names for which member attributes '
'should not be checked (useful for classes with '
                          'attributes dynamically set). This can work '
                          'with qualified names.'}
),
('zope', utils.deprecated_option(opt_type='yn',
help_msg=_ZOPE_DEPRECATED)),
('generated-members',
{'default' : (),
'type' : 'string',
'metavar' : '<members names>',
'help' : 'List of members which are set dynamically and \
missed by pylint inference system, and so shouldn\'t trigger E1101 when \
accessed. Python regular expressions are accepted.'}
),
)
def open(self):
# do this in open since config not fully initialized in __init__
# generated_members may contain regular expressions
# (surrounded by quote `"` and followed by a comma `,`)
        # REQUEST,aq_parent,"[a-zA-Z]+_set{1,2}" =>
# ('REQUEST', 'aq_parent', '[a-zA-Z]+_set{1,2}')
if isinstance(self.config.generated_members, str):
gen = shlex.shlex(self.config.generated_members)
gen.whitespace += ','
gen.wordchars += '[]-+'
self.config.generated_members = tuple(tok.strip('"') for tok in gen)
def visit_assignattr(self, node):
if isinstance(node.assign_type(), astroid.AugAssign):
self.visit_attribute(node)
def visit_delattr(self, node):
self.visit_attribute(node)
@check_messages('no-member')
def visit_attribute(self, node):
"""check that the accessed attribute exists
        to avoid too many false positives for now, we'll consider the code as
        correct if any one of the inferred nodes has the accessed attribute.
function/method, super call and metaclasses are ignored
"""
for pattern in self.config.generated_members:
# attribute is marked as generated, stop here
if re.match(pattern, node.attrname):
return
try:
infered = list(node.expr.infer())
except exceptions.InferenceError:
return
# list of (node, nodename) which are missing the attribute
missingattr = set()
inference_failure = False
for owner in infered:
# skip yes object
if owner is astroid.YES:
inference_failure = True
continue
name = getattr(owner, 'name', None)
if _is_owner_ignored(owner, name, self.config.ignored_classes,
self.config.ignored_modules):
continue
try:
if not [n for n in owner.getattr(node.attrname)
if not isinstance(n.statement(), astroid.AugAssign)]:
missingattr.add((owner, name))
continue
except AttributeError:
# XXX method / function
continue
except exceptions.NotFoundError:
# This can't be moved before the actual .getattr call,
# because there can be more values inferred and we are
# stopping after the first one which has the attribute in question.
                # The problem is that if the first one has the attribute,
                # but we continue to the next values which don't have the
                # attribute, then we'll have a false positive.
                # So call this only after the getattr call has been made.
if not _emit_no_member(node, owner, name,
self.config.ignore_mixin_members):
continue
missingattr.add((owner, name))
continue
# stop on the first found
break
else:
# we have not found any node with the attributes, display the
# message for infered nodes
done = set()
for owner, name in missingattr:
if isinstance(owner, astroid.Instance):
actual = owner._proxied
else:
actual = owner
if actual in done:
continue
done.add(actual)
confidence = INFERENCE if not inference_failure else INFERENCE_FAILURE
self.add_message('no-member', node=node,
args=(owner.display_type(), name,
node.attrname),
confidence=confidence)
@check_messages('assignment-from-no-return', 'assignment-from-none')
def visit_assign(self, node):
"""check that if assigning to a function call, the function is
possibly returning something valuable
"""
if not isinstance(node.value, astroid.Call):
return
function_node = safe_infer(node.value.func)
# skip class, generator and incomplete function definition
if not (isinstance(function_node, astroid.FunctionDef) and
function_node.root().fully_defined()):
return
if function_node.is_generator() \
or function_node.is_abstract(pass_is_abstract=False):
return
returns = list(function_node.nodes_of_class(astroid.Return,
skip_klass=astroid.FunctionDef))
if len(returns) == 0:
self.add_message('assignment-from-no-return', node=node)
else:
for rnode in returns:
if not (isinstance(rnode.value, astroid.Const)
and rnode.value.value is None
or rnode.value is None):
break
else:
self.add_message('assignment-from-none', node=node)
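    # Illustrative (hypothetical) triggers for the two messages above:
    # `x = f()` raises assignment-from-no-return when `def f(): pass`
    # contains no return statements at all, and assignment-from-none when
    # every return is bare or explicitly `return None`.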
def _check_uninferable_callfunc(self, node):
"""
Check that the given uninferable CallFunc node does not
call an actual function.
"""
if not isinstance(node.func, astroid.Attribute):
return
# Look for properties. First, obtain
# the lhs of the Getattr node and search the attribute
# there. If that attribute is a property or a subclass of properties,
# then most likely it's not callable.
# TODO: since astroid doesn't understand descriptors very well
# we will not handle them here, right now.
expr = node.func.expr
klass = safe_infer(expr)
if (klass is None or klass is astroid.YES or
not isinstance(klass, astroid.Instance)):
return
try:
attrs = klass._proxied.getattr(node.func.attrname)
except exceptions.NotFoundError:
return
for attr in attrs:
if attr is astroid.YES:
continue
if not isinstance(attr, astroid.FunctionDef):
continue
# Decorated, see if it is decorated with a property.
# Also, check the returns and see if they are callable.
if decorated_with_property(attr):
if all(return_node.callable()
for return_node in attr.infer_call_result(node)):
continue
else:
self.add_message('not-callable', node=node,
args=node.func.as_string())
break
@staticmethod
def _no_context_variadic(node):
"""Verify if the given call node has variadic nodes without context
This is a workaround for handling cases of nested call functions
which don't have the specific call context at hand.
Variadic arguments (variable positional arguments and variable
keyword arguments) are inferred, inherently wrong, by astroid
as a Tuple, respectively a Dict with empty elements.
This can lead pylint to believe that a function call receives
too few arguments.
"""
for arg in node.args:
if not isinstance(arg, astroid.Starred):
continue
inferred = safe_infer(arg.value)
if isinstance(inferred, astroid.Tuple):
length = len(inferred.elts)
elif isinstance(inferred, astroid.Dict):
length = len(inferred.items)
else:
return False
if not length and isinstance(inferred.statement(), astroid.FunctionDef):
return True
return False
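    # Illustrative (hypothetical) case for the workaround above:
    #
    #     def wrapper(*args, **kwargs):
    #         return wrapped(*args, **kwargs)
    #
    # astroid infers `*args` as an empty Tuple and `**kwargs` as an empty
    # Dict here, so without this check `wrapped` could be flagged with
    # no-value-for-parameter even though the real call site supplies them.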
@check_messages(*(list(MSGS.keys())))
def visit_call(self, node):
"""check that called functions/methods are inferred to callable objects,
and that the arguments passed to the function match the parameters in
the inferred function's definition
"""
# Build the set of keyword arguments, checking for duplicate keywords,
# and count the positional arguments.
call_site = astroid.arguments.CallSite.from_call(node)
num_positional_args = len(call_site.positional_arguments)
keyword_args = list(call_site.keyword_arguments.keys())
no_context_variadic = self._no_context_variadic(node)
called = safe_infer(node.func)
# only function, generator and object defining __call__ are allowed
if called is not None and not called.callable():
self.add_message('not-callable', node=node,
args=node.func.as_string())
self._check_uninferable_callfunc(node)
try:
called, implicit_args, callable_name = _determine_callable(called)
except ValueError:
# Any error occurred during determining the function type, most of
# those errors are handled by different warnings.
return
num_positional_args += implicit_args
if called.args.args is None:
# Built-in functions have no argument information.
return
if len(called.argnames()) != len(set(called.argnames())):
# Duplicate parameter name (see duplicate-argument). We can't really
# make sense of the function call in this case, so just return.
return
# Warn about duplicated keyword arguments, such as `f=24, **{'f': 24}`
for keyword in call_site.duplicated_keywords:
self.add_message('repeated-keyword',
node=node, args=(keyword, ))
if call_site.has_invalid_arguments() or call_site.has_invalid_keywords():
# Can't make sense of this.
return
# Analyze the list of formal parameters.
num_mandatory_parameters = len(called.args.args) - len(called.args.defaults)
parameters = []
parameter_name_to_index = {}
for i, arg in enumerate(called.args.args):
if isinstance(arg, astroid.Tuple):
name = None
# Don't store any parameter names within the tuple, since those
# are not assignable from keyword arguments.
else:
assert isinstance(arg, astroid.AssignName)
# This occurs with:
# def f( (a), (b) ): pass
name = arg.name
parameter_name_to_index[name] = i
if i >= num_mandatory_parameters:
defval = called.args.defaults[i - num_mandatory_parameters]
else:
defval = None
parameters.append([(name, defval), False])
kwparams = {}
for i, arg in enumerate(called.args.kwonlyargs):
if isinstance(arg, astroid.Keyword):
name = arg.arg
else:
assert isinstance(arg, astroid.AssignName)
name = arg.name
kwparams[name] = [called.args.kw_defaults[i], False]
# Match the supplied arguments against the function parameters.
# 1. Match the positional arguments.
for i in range(num_positional_args):
if i < len(parameters):
parameters[i][1] = True
elif called.args.vararg is not None:
# The remaining positional arguments get assigned to the *args
# parameter.
break
else:
# Too many positional arguments.
self.add_message('too-many-function-args',
node=node, args=(callable_name,))
break
# 2. Match the keyword arguments.
for keyword in keyword_args:
if keyword in parameter_name_to_index:
i = parameter_name_to_index[keyword]
if parameters[i][1]:
# Duplicate definition of function parameter.
# Might be too hardcoded, but this can actually
# happen when using str.format and `self` is passed
# by keyword argument, as in `.format(self=self)`.
                    # It's perfectly valid to do so, so we're just skipping
# it if that's the case.
if not (keyword == 'self' and called.qname() == STR_FORMAT):
self.add_message('redundant-keyword-arg',
node=node, args=(keyword, callable_name))
else:
parameters[i][1] = True
elif keyword in kwparams:
if kwparams[keyword][1]: # XXX is that even possible?
# Duplicate definition of function parameter.
self.add_message('redundant-keyword-arg', node=node,
args=(keyword, callable_name))
else:
kwparams[keyword][1] = True
elif called.args.kwarg is not None:
# The keyword argument gets assigned to the **kwargs parameter.
pass
else:
# Unexpected keyword argument.
self.add_message('unexpected-keyword-arg', node=node,
args=(keyword, callable_name))
# 3. Match the **kwargs, if any.
if node.kwargs:
for i, [(name, defval), assigned] in enumerate(parameters):
                # Assume that **kwargs provides values for all remaining
# unassigned named parameters.
if name is not None:
parameters[i][1] = True
else:
# **kwargs can't assign to tuples.
pass
# Check that any parameters without a default have been assigned
# values.
for [(name, defval), assigned] in parameters:
if (defval is None) and not assigned:
if name is None:
display_name = '<tuple>'
else:
display_name = repr(name)
# TODO(cpopa): this should be removed after PyCQA/astroid/issues/177
if not no_context_variadic:
self.add_message('no-value-for-parameter', node=node,
args=(display_name, callable_name))
for name in kwparams:
defval, assigned = kwparams[name]
if defval is None and not assigned:
self.add_message('missing-kwoa', node=node,
args=(name, callable_name))
@check_messages('invalid-sequence-index')
def visit_extslice(self, node):
# Check extended slice objects as if they were used as a sequence
# index to check if the object being sliced can support them
return self.visit_index(node)
@check_messages('invalid-sequence-index')
def visit_index(self, node):
if not node.parent or not hasattr(node.parent, "value"):
return
# Look for index operations where the parent is a sequence type.
# If the types can be determined, only allow indices to be int,
# slice or instances with __index__.
parent_type = safe_infer(node.parent.value)
if not isinstance(parent_type, (astroid.ClassDef, astroid.Instance)):
return
# Determine what method on the parent this index will use
# The parent of this node will be a Subscript, and the parent of that
# node determines if the Subscript is a get, set, or delete operation.
operation = node.parent.parent
if isinstance(operation, astroid.Assign):
methodname = '__setitem__'
elif isinstance(operation, astroid.Delete):
methodname = '__delitem__'
else:
methodname = '__getitem__'
# Check if this instance's __getitem__, __setitem__, or __delitem__, as
# appropriate to the statement, is implemented in a builtin sequence
# type. This way we catch subclasses of sequence types but skip classes
# that override __getitem__ and which may allow non-integer indices.
try:
methods = parent_type.getattr(methodname)
if methods is astroid.YES:
return
itemmethod = methods[0]
except (exceptions.NotFoundError, IndexError):
return
if not isinstance(itemmethod, astroid.FunctionDef):
return
if itemmethod.root().name != BUILTINS:
return
if not itemmethod.parent:
return
if itemmethod.parent.name not in SEQUENCE_TYPES:
return
# For ExtSlice objects coming from visit_extslice, no further
# inference is necessary, since if we got this far the ExtSlice
# is an error.
if isinstance(node, astroid.ExtSlice):
index_type = node
else:
index_type = safe_infer(node)
if index_type is None or index_type is astroid.YES:
return
# Constants must be of type int
if isinstance(index_type, astroid.Const):
if isinstance(index_type.value, int):
return
# Instance values must be int, slice, or have an __index__ method
elif isinstance(index_type, astroid.Instance):
if index_type.pytype() in (BUILTINS + '.int', BUILTINS + '.slice'):
return
try:
index_type.getattr('__index__')
return
except exceptions.NotFoundError:
pass
elif isinstance(index_type, astroid.Slice):
# Delegate to visit_slice. A slice can be present
# here after inferring the index node, which could
# be a `slice(...)` call for instance.
return self.visit_slice(index_type)
# Anything else is an error
self.add_message('invalid-sequence-index', node=node)
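    # Illustrative (hypothetical) triggers for invalid-sequence-index:
    #
    #     [1, 2, 3]['1']    # a str constant is not a valid list index
    #     (1, 2)[None]      # None is not an int, a slice or an __index__ type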
@check_messages('invalid-slice-index')
def visit_slice(self, node):
# Check the type of each part of the slice
for index in (node.lower, node.upper, node.step):
if index is None:
continue
index_type = safe_infer(index)
if index_type is None or index_type is astroid.YES:
continue
            # Constants must be of type int or None
if isinstance(index_type, astroid.Const):
if isinstance(index_type.value, (int, type(None))):
continue
# Instance values must be of type int, None or an object
# with __index__
elif isinstance(index_type, astroid.Instance):
if index_type.pytype() in (BUILTINS + '.int',
BUILTINS + '.NoneType'):
continue
try:
index_type.getattr('__index__')
return
except exceptions.NotFoundError:
pass
# Anything else is an error
self.add_message('invalid-slice-index', node=node)
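    # Illustrative (hypothetical) trigger for invalid-slice-index:
    #
    #     [1, 2, 3]['a':2]  # slice bounds must be int, None or have __index__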
@check_messages('not-context-manager')
def visit_with(self, node):
for ctx_mgr, _ in node.items:
context = astroid.context.InferenceContext()
infered = safe_infer(ctx_mgr, context=context)
if infered is None or infered is astroid.YES:
continue
if isinstance(infered, bases.Generator):
# Check if we are dealing with a function decorated
# with contextlib.contextmanager.
if decorated_with(infered.parent, ['contextlib.contextmanager']):
continue
# If the parent of the generator is not the context manager itself,
# that means that it could have been returned from another
# function which was the real context manager.
# The following approach is more of a hack rather than a real
# solution: walk all the inferred statements for the
# given *ctx_mgr* and if you find one function scope
# which is decorated, consider it to be the real
# manager and give up, otherwise emit not-context-manager.
# See the test file for not_context_manager for a couple
# of self explaining tests.
for path in six.moves.filter(None, _unflatten(context.path)):
scope = path.scope()
if not isinstance(scope, astroid.FunctionDef):
continue
if decorated_with(scope, ['contextlib.contextmanager']):
break
else:
self.add_message('not-context-manager',
node=node, args=(infered.name, ))
else:
try:
infered.getattr('__enter__')
infered.getattr('__exit__')
except exceptions.NotFoundError:
if isinstance(infered, astroid.Instance):
# If we do not know the bases of this class,
# just skip it.
if not has_known_bases(infered):
continue
# Just ignore mixin classes.
if self.config.ignore_mixin_members:
if infered.name[-5:].lower() == 'mixin':
continue
self.add_message('not-context-manager',
node=node, args=(infered.name, ))
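    # Illustrative (hypothetical) trigger for not-context-manager: using an
    # object without the context manager protocol in a with-statement, e.g.
    #
    #     class Manager(object):
    #         pass
    #     with Manager():   # no __enter__/__exit__ -> not-context-manager
    #         pass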
    # Disabled until we have a more capable astroid.
@check_messages('invalid-unary-operand-type')
def _visit_unaryop(self, node):
"""Detect TypeErrors for unary operands."""
for error in node.type_errors():
# Let the error customize its output.
self.add_message('invalid-unary-operand-type',
args=str(error), node=node)
@check_messages('unsupported-binary-operation')
def _visit_binop(self, node):
"""Detect TypeErrors for binary arithmetic operands."""
self._check_binop_errors(node)
@check_messages('unsupported-binary-operation')
def _visit_augassign(self, node):
"""Detect TypeErrors for augmented binary arithmetic operands."""
self._check_binop_errors(node)
def _check_binop_errors(self, node):
for error in node.type_errors():
# Let the error customize its output.
self.add_message('unsupported-binary-operation',
args=str(error), node=node)
def _check_membership_test(self, node):
if is_inside_abstract_class(node):
return
if is_comprehension(node):
return
infered = safe_infer(node)
if infered is None or infered is astroid.YES:
return
if not supports_membership_test(infered):
self.add_message('unsupported-membership-test',
args=node.as_string(),
node=node)
@check_messages('unsupported-membership-test')
def visit_compare(self, node):
if len(node.ops) != 1:
return
operator, right = node.ops[0]
if operator in ['in', 'not in']:
self._check_membership_test(right)
@check_messages('unsubscriptable-object')
def visit_subscript(self, node):
if isinstance(node.value, (astroid.ListComp, astroid.DictComp)):
return
if isinstance(node.value, astroid.SetComp):
self.add_message('unsubscriptable-object',
args=node.value.as_string(),
node=node.value)
return
infered = safe_infer(node.value)
if infered is None or infered is astroid.YES:
return
if is_inside_abstract_class(node):
return
if not supports_subscript(infered):
self.add_message('unsubscriptable-object',
args=node.value.as_string(),
node=node.value)
class IterableChecker(BaseChecker):
"""
Checks for non-iterables used in an iterable context.
Contexts include:
- for-statement
- starargs in function call
- `yield from`-statement
- list, dict and set comprehensions
- generator expressions
Also checks for non-mappings in function call kwargs.
"""
__implements__ = (IAstroidChecker,)
name = 'iterable_check'
msgs = {'E1133': ('Non-iterable value %s is used in an iterating context',
'not-an-iterable',
                      'Used when a non-iterable value is used in place where '
                      'an iterable is expected'),
'E1134': ('Non-mapping value %s is used in a mapping context',
'not-a-mapping',
                      'Used when a non-mapping value is used in place where '
                      'a mapping is expected'),
}
def _check_iterable(self, node):
if is_inside_abstract_class(node):
return
if is_comprehension(node):
return
infered = safe_infer(node)
if infered is None or infered is astroid.YES:
return
if not is_iterable(infered):
self.add_message('not-an-iterable',
args=node.as_string(),
node=node)
def _check_mapping(self, node):
if is_inside_abstract_class(node):
return
if isinstance(node, astroid.DictComp):
return
infered = safe_infer(node)
if infered is None or infered is astroid.YES:
return
if not is_mapping(infered):
self.add_message('not-a-mapping',
args=node.as_string(),
node=node)
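    # Illustrative (hypothetical) triggers for the two helpers above:
    #
    #     for x in 42:      # not-an-iterable (an int cannot be iterated)
    #         pass
    #     dict(**[1, 2])    # not-a-mapping (a list is not a mapping)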
@check_messages('not-an-iterable')
def visit_for(self, node):
self._check_iterable(node.iter)
@check_messages('not-an-iterable')
def visit_yieldfrom(self, node):
self._check_iterable(node.value)
@check_messages('not-an-iterable', 'not-a-mapping')
def visit_call(self, node):
for stararg in node.starargs:
self._check_iterable(stararg.value)
for kwarg in node.kwargs:
self._check_mapping(kwarg.value)
@check_messages('not-an-iterable')
def visit_listcomp(self, node):
for gen in node.generators:
self._check_iterable(gen.iter)
@check_messages('not-an-iterable')
def visit_dictcomp(self, node):
for gen in node.generators:
self._check_iterable(gen.iter)
@check_messages('not-an-iterable')
def visit_setcomp(self, node):
for gen in node.generators:
self._check_iterable(gen.iter)
@check_messages('not-an-iterable')
def visit_generatorexp(self, node):
for gen in node.generators:
self._check_iterable(gen.iter)
def register(linter):
"""required method to auto register this checker """
linter.register_checker(TypeChecker(linter))
linter.register_checker(IterableChecker(linter))
| {
"content_hash": "4f1b033a48dcfbb5034f1956801288ad",
"timestamp": "",
"source": "github",
"line_count": 959,
"max_line_length": 87,
"avg_line_length": 41.69447340980188,
"alnum_prop": 0.568363136176066,
"repo_name": "mith1979/ansible_automation",
"id": "8d12039bd9bb400fff39f3895a321111ac46ebf5",
"size": "40799",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "applied_python/applied_python/lib/python2.7/site-packages/pylint/checkers/typecheck.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1005"
},
{
"name": "C",
"bytes": "84868"
},
{
"name": "CSS",
"bytes": "50289"
},
{
"name": "HTML",
"bytes": "70428"
},
{
"name": "JavaScript",
"bytes": "105262"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "19073705"
},
{
"name": "Shell",
"bytes": "3747"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
"""
Tests & Stuff
"""
import unittest
from onepy import OneNote
on = OneNote()
#lists all sections & notebooks open in onenote
class TestOneNote(unittest.TestCase):
def test_instance(self):
for nbk in on.hierarchy:
print (nbk)
if nbk.name == "SoundFocus":
for s in nbk:
print (" " + str(s))
for page in s:
print (" " + str(page.name.encode('ascii', 'ignore')))
#print(on.get_page_content("{37B075B6-358E-04DA-193E-73D0AD300DA3}{1}{B0}"))
self.assertEqual(True, True)
if __name__ == '__main__':
unittest.main() | {
"content_hash": "f9aa2c4382360ef707e03228795d0a6e",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 18.966666666666665,
"alnum_prop": 0.6186291739894552,
"repo_name": "varunsrin/one-py",
"id": "f8a6e6ae06d5552d8ace659f5d65ca5d39bb12f5",
"size": "569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_onepy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26466"
}
],
"symlink_target": ""
} |
from automlk.solutions_pp import *
print([p.ref for p in pp_solutions]) | {
"content_hash": "096641ea55ce52828844e8e945314aac",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 36,
"avg_line_length": 24,
"alnum_prop": 0.75,
"repo_name": "pierre-chaville/automlk",
"id": "e06e344e70542fb6e242873b8dd2454471f38e76",
"size": "72",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/list_pp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "867"
},
{
"name": "CSS",
"bytes": "207943"
},
{
"name": "HTML",
"bytes": "108986"
},
{
"name": "Jupyter Notebook",
"bytes": "25275"
},
{
"name": "Python",
"bytes": "322808"
},
{
"name": "Shell",
"bytes": "337"
}
],
"symlink_target": ""
} |
import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1,java_heap_GB=4)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_poisson_1(self):
csvFilename = 'covtype.data'
csvPathname = 'standard/' + csvFilename
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put', timeoutSecs=20)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
print "WARNING: max_iter set to 8 for benchmark comparisons"
max_iter = 8
y = "54"
kwargs = {
'response': y,
'family': 'poisson',
'n_folds': 0,
'max_iter': max_iter,
'beta_epsilon': 1e-3}
timeoutSecs = 120
# L2
start = time.time()
kwargs.update({'alpha': 0, 'lambda': 0})
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm (L2) end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, "C14", **kwargs)
# Elastic
kwargs.update({'alpha': 0.5, 'lambda': 1e-4})
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm (Elastic) end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, "C14", **kwargs)
# L1
kwargs.update({'alpha': 0.75, 'lambda': 1e-4})
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "glm (L1) end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, "C14", **kwargs)
if __name__ == '__main__':
h2o.unit_main()
| {
"content_hash": "ba92bf291f237e74bb92b2bc5ca2c80b",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 117,
"avg_line_length": 36.733333333333334,
"alnum_prop": 0.5739564428312159,
"repo_name": "111t8e/h2o-2",
"id": "3460acf06e609eb19e84635ea9cc88e10fea04de",
"size": "2204",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "py/testdir_single_jvm/test_GLM2_poisson_1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7065"
},
{
"name": "C",
"bytes": "2461"
},
{
"name": "CSS",
"bytes": "216906"
},
{
"name": "CoffeeScript",
"bytes": "205094"
},
{
"name": "Emacs Lisp",
"bytes": "7446"
},
{
"name": "Groovy",
"bytes": "518"
},
{
"name": "HTML",
"bytes": "177967"
},
{
"name": "Java",
"bytes": "5177683"
},
{
"name": "JavaScript",
"bytes": "92357"
},
{
"name": "Makefile",
"bytes": "50927"
},
{
"name": "PHP",
"bytes": "8490"
},
{
"name": "Perl",
"bytes": "22594"
},
{
"name": "Python",
"bytes": "3244626"
},
{
"name": "R",
"bytes": "1631216"
},
{
"name": "Ruby",
"bytes": "299"
},
{
"name": "Scala",
"bytes": "39365"
},
{
"name": "Shell",
"bytes": "189829"
}
],
"symlink_target": ""
} |
from m5.objects import *
from arm_generic import *
import switcheroo
root = LinuxArmFSSwitcheroo(
machine_type='VExpress_EMM64',
mem_class=DDR3_1600_x64,
cpu_classes=(AtomicSimpleCPU, TimingSimpleCPU, MinorCPU, DerivO3CPU)
).create_root()
# Setup a custom test method that uses the switcheroo tester that
# switches between CPU models.
run_test = switcheroo.run_test
| {
"content_hash": "a322cf6a1871d463d9fbc62219ca8e8f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 72,
"avg_line_length": 29.615384615384617,
"alnum_prop": 0.7558441558441559,
"repo_name": "cancro7/gem5",
"id": "b9f28a4a9b623d12420c82ffba8b70a0417d0586",
"size": "2485",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "tests/configs/realview64-switcheroo-full.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "235643"
},
{
"name": "C",
"bytes": "1163291"
},
{
"name": "C++",
"bytes": "16580148"
},
{
"name": "CMake",
"bytes": "2202"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "HTML",
"bytes": "136695"
},
{
"name": "Java",
"bytes": "3179"
},
{
"name": "M4",
"bytes": "49620"
},
{
"name": "Makefile",
"bytes": "53221"
},
{
"name": "Perl",
"bytes": "33602"
},
{
"name": "Protocol Buffer",
"bytes": "11074"
},
{
"name": "Python",
"bytes": "4587386"
},
{
"name": "Roff",
"bytes": "8783"
},
{
"name": "Shell",
"bytes": "52434"
},
{
"name": "Vim script",
"bytes": "4335"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
} |
import pytest
from thefuck.rules.nixos_cmd_not_found import match, get_new_command
from thefuck.types import Command
@pytest.mark.parametrize('command', [
Command('vim', 'nix-env -iA nixos.vim')])
def test_match(mocker, command):
mocker.patch('thefuck.rules.nixos_cmd_not_found', return_value=None)
assert match(command)
@pytest.mark.parametrize('command', [
Command('vim', ''),
Command('', '')])
def test_not_match(mocker, command):
mocker.patch('thefuck.rules.nixos_cmd_not_found', return_value=None)
assert not match(command)
@pytest.mark.parametrize('command, new_command', [
(Command('vim', 'nix-env -iA nixos.vim'), 'nix-env -iA nixos.vim && vim'),
(Command('pacman', 'nix-env -iA nixos.pacman'), 'nix-env -iA nixos.pacman && pacman')])
def test_get_new_command(mocker, command, new_command):
assert get_new_command(command) == new_command
| {
"content_hash": "76cf71239b01e000c446781c25b4c90e",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 91,
"avg_line_length": 35.68,
"alnum_prop": 0.6894618834080718,
"repo_name": "SimenB/thefuck",
"id": "4370334547fcc9862aa96d04f3dba5d7e898e199",
"size": "892",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/rules/test_nixos_cmd_not_found.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "511988"
},
{
"name": "Shell",
"bytes": "134"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetOrder(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetOrder Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetOrder, self).__init__(temboo_session, '/Library/Amazon/Marketplace/Orders/GetOrder')
def new_input_set(self):
return GetOrderInputSet()
def _make_result_set(self, result, path):
return GetOrderResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetOrderChoreographyExecution(session, exec_id, path)
class GetOrderInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetOrder
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AWSAccessKeyId(self, value):
"""
Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
"""
super(GetOrderInputSet, self)._set_input('AWSAccessKeyId', value)
def set_AWSMarketplaceId(self, value):
"""
Set the value of the AWSMarketplaceId input for this Choreo. ((required, string) The Marketplace ID provided by Amazon Web Services.)
"""
super(GetOrderInputSet, self)._set_input('AWSMarketplaceId', value)
def set_AWSMerchantId(self, value):
"""
Set the value of the AWSMerchantId input for this Choreo. ((required, string) The Merchant ID provided by Amazon Web Services.)
"""
super(GetOrderInputSet, self)._set_input('AWSMerchantId', value)
def set_AWSSecretKeyId(self, value):
"""
Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
"""
super(GetOrderInputSet, self)._set_input('AWSSecretKeyId', value)
def set_AmazonOrderId(self, value):
"""
Set the value of the AmazonOrderId input for this Choreo. ((required, string) One or more AmazonOrderId values separated by commas used to retrieve orders.)
"""
super(GetOrderInputSet, self)._set_input('AmazonOrderId', value)
def set_Endpoint(self, value):
"""
Set the value of the Endpoint input for this Choreo. ((conditional, string) The base URL for the MWS endpoint. Defaults to mws.amazonservices.co.uk.)
"""
super(GetOrderInputSet, self)._set_input('Endpoint', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".)
"""
super(GetOrderInputSet, self)._set_input('ResponseFormat', value)
class GetOrderResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetOrder Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (Stores the response from Amazon.)
"""
return self._output.get('Response', None)
class GetOrderChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetOrderResultSet(response, path)
| {
"content_hash": "f38190ac206c7140235aaa9af73d327c",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 179,
"avg_line_length": 43.75581395348837,
"alnum_prop": 0.6872176454956151,
"repo_name": "jordanemedlock/psychtruths",
"id": "bbeb8499cc8a6e3b57dd37ac61cadb24bead6fec",
"size": "4643",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "temboo/Library/Amazon/Marketplace/Orders/GetOrder.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
} |
import sqlalchemy
USERNAME = 'vagrant'
PASSWORD = 'vagrant'
DATABASE = 'vagrant'
engine = sqlalchemy.create_engine(
'postgres://{username}:{pw}@localhost:5432/{db}'.format(
username=USERNAME,
pw=PASSWORD,
db=DATABASE
), convert_unicode=True
)
def execute_query(query, *args, **kwargs):
return engine.execute(sqlalchemy.sql.expression.text(query), *args, **kwargs)
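# Illustrative usage (hypothetical table and column names): wrapping the query
# in text() lets the driver bind parameters safely instead of formatting them
# into the SQL string by hand, e.g.
#
#     execute_query("SELECT * FROM todos WHERE id = :id", id=1)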
| {
"content_hash": "408dca11bdc04507d184153b2c68aeef",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 23.5,
"alnum_prop": 0.726063829787234,
"repo_name": "gaurav-sanghani/intro-to-web",
"id": "4186687149e0528636c3a2e5fe009f28f0abe432",
"size": "376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "todo-adv/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "54"
},
{
"name": "JavaScript",
"bytes": "5632"
},
{
"name": "Makefile",
"bytes": "188"
},
{
"name": "Python",
"bytes": "20163"
},
{
"name": "Ruby",
"bytes": "591"
},
{
"name": "Shell",
"bytes": "563"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
install_requires = open('requirements.txt').read().splitlines()
__version__ = '0.4.2'
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return ''
setup(
name='python-qbittorrent',
description='Python wrapper for qBittorrent >4.1.x',
version=__version__,
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='torrent, qBittorent, API, wrapper',
author='Vikas Yadav',
author_email='[email protected]',
url="https://github.com/v1k45/python-qbittorrent",
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
'Environment :: Console'
]
)
| {
"content_hash": "ba6900dca84687fb571c65960bc9c3cf",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 74,
"avg_line_length": 30.238095238095237,
"alnum_prop": 0.6291338582677165,
"repo_name": "v1k45/python-qBittorrent",
"id": "556a5fd4317f7d5ce6e5777cd8c8497d60a7224f",
"size": "1270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26688"
}
],
"symlink_target": ""
} |
import tweepy
from zope import interface
from twisted.application import service
from twisted.internet import defer, threads, reactor
from piped import exceptions, log, resource, util
class MyTwitterProvider(object):
# state that we are a resource provider, so that the piped plugin system finds us
interface.classProvides(resource.IResourceProvider)
def __init__(self):
# we store the apis by the account name since we might have
# multiple consumers of the same api.
self._api_by_name = dict()
def configure(self, runtime_environment):
# look up the twitter account configurations:
self.twitter_configs = runtime_environment.get_configuration_value('twitter', dict())
for account_name, account_config in self.twitter_configs.items():
auth = tweepy.BasicAuthHandler(**account_config['auth'])
self._api_by_name[account_name] = tweepy.API(auth)
# tell the resource manager that we can provide the named twitter accounts
runtime_environment.resource_manager.register('twitter.%s' % account_name, provider=self)
def add_consumer(self, resource_dependency):
# since we registered for 'twitter.<account_name>', we can find the account_name requested by splitting:
twitter, account_name = resource_dependency.provider.split('.')
# give the tweepy API instance to the resource:
resource_dependency.on_resource_ready(self._api_by_name[account_name])
| {
"content_hash": "bb5059612cf722a8d8da56197ad6f7de",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 112,
"avg_line_length": 42.857142857142854,
"alnum_prop": 0.7066666666666667,
"repo_name": "alexbrasetvik/Piped",
"id": "60c3b609cab96b418e38852282e438867f8dcbda",
"size": "1500",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "doc/tutorials/twitter/1_basic/twitter_tutorial/provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1144292"
},
{
"name": "Shell",
"bytes": "4509"
}
],
"symlink_target": ""
} |
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Get-ChromeDump',
# list of one or more authors for the module
'Author': ['@xorrior'],
# more verbose multi-line description of the module
'Description': ('This module will decrypt passwords saved in chrome and display them in the console.'),
# True if the module needs to run in the background
'Background' : True,
'SaveOutput' : False,
# File extension to save the file as
'OutputExtension' : None,
# True if the module needs admin rights to run
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
# list of any references/other comments
'Comments': [
'https://github.com/xorrior/RandomPS-Scripts/blob/master/Get-ChromeDump.ps1'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to run the module on.',
'Required' : True,
'Value' : ''
},
'OutFile' : {
'Description' : 'File path to write the results to.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# if you're reading in a large, external script that might be updates,
# use the pattern below
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/collection/Get-ChromeDump.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
scriptEnd = " Get-ChromeDump"
# add any arguments to the end execution of the script
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
scriptEnd += " -" + str(option)
else:
scriptEnd += " -" + str(option) + " " + str(values['Value'])
if obfuscate:
scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
return script
| {
"content_hash": "59899a3837a380941e6bb0baab2d8387",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 127,
"avg_line_length": 36.03703703703704,
"alnum_prop": 0.5426515930113053,
"repo_name": "bneg/Empire",
"id": "dec2c330bc2b162b637fcfc4d70ab701f4eb8f6a",
"size": "3892",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "lib/modules/powershell/collection/ChromeDump.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1966"
},
{
"name": "Java",
"bytes": "496"
},
{
"name": "Objective-C",
"bytes": "2664"
},
{
"name": "PHP",
"bytes": "2198"
},
{
"name": "PowerShell",
"bytes": "17003288"
},
{
"name": "Python",
"bytes": "2787352"
},
{
"name": "Shell",
"bytes": "10123"
}
],
"symlink_target": ""
} |
"""
PostGIS to GDAL conversion constant definitions
"""
# Lookup to convert pixel type values from GDAL to PostGIS
GDAL_TO_POSTGIS = [None, 4, 6, 5, 8, 7, 10, 11, None, None, None, None]
# Lookup to convert pixel type values from PostGIS to GDAL
POSTGIS_TO_GDAL = [1, 1, 1, 3, 1, 3, 2, 5, 4, None, 6, 7, None, None]
# Struct pack structure for raster header, the raster header has the
# following structure:
#
# Endianness, PostGIS raster version, number of bands, scale, origin,
# skew, srid, width, and height.
#
# Scale, origin, and skew have x and y values. PostGIS currently uses
# a fixed endianness (1) and there is only one version (0).
POSTGIS_HEADER_STRUCTURE = 'B H H d d d d d d i H H'
# Lookup values to convert GDAL pixel types to struct characters. This is
# used to pack and unpack the pixel values of PostGIS raster bands.
GDAL_TO_STRUCT = [
None, 'B', 'H', 'h', 'L', 'l', 'f', 'd',
None, None, None, None,
]
# Size of the packed value in bytes for different numerical types.
# This is needed to cut chunks of band data out of PostGIS raster strings
# when decomposing them into GDALRasters.
# See https://docs.python.org/3/library/struct.html#format-characters
STRUCT_SIZE = {
'b': 1, # Signed char
'B': 1, # Unsigned char
'?': 1, # _Bool
'h': 2, # Short
'H': 2, # Unsigned short
'i': 4, # Integer
'I': 4, # Unsigned Integer
'l': 4, # Long
'L': 4, # Unsigned Long
'f': 4, # Float
'd': 8, # Double
}
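# Example (illustrative) of how the lookups above fit together: GDAL band
# type 1 (GDT_Byte) maps to PostGIS pixel type 4 (8BUI) via GDAL_TO_POSTGIS,
# POSTGIS_TO_GDAL[4] maps it back to 1, GDAL_TO_STRUCT[1] packs it with the
# struct character 'B', and STRUCT_SIZE['B'] says the packed value is 1 byte.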
| {
"content_hash": "8f8b2ea75a9a7b784504f6b1e228dc59",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 73,
"avg_line_length": 35.51162790697674,
"alnum_prop": 0.633267845448592,
"repo_name": "yephper/django",
"id": "38ba2b1a84276cbb94c80f28549687780895c73f",
"size": "1527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/gis/db/backends/postgis/const.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
import os
import sys
if sys.version_info[0] == 3:
import configparser
from urllib.request import urlretrieve
else:
import ConfigParser as configparser
from urllib import urlretrieve
config = configparser.SafeConfigParser()
config.read('ci_config/virtualenv_util.cfg')
bootstrap_url = config.get('global', 'bootstrap_url')
destination = os.path.basename(bootstrap_url)
if not os.path.exists(destination):
urlretrieve(bootstrap_url, destination)
# execfile() does not exist on Python 3, so execute the downloaded bootstrap
# script in a version-independent way.
with open(destination) as bootstrap:
    exec(compile(bootstrap.read(), destination, 'exec'))
| {
"content_hash": "680065c1c0fd22c08114e89bea3494fc",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 53,
"avg_line_length": 25.789473684210527,
"alnum_prop": 0.7673469387755102,
"repo_name": "teamfruit/defend_against_fruit",
"id": "65c7c569a2145c9a65bef25dde64e70a124fb57a",
"size": "512",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/daf_fruit/ci_config/ci_bootstrap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "287994"
},
{
"name": "Shell",
"bytes": "515"
}
],
"symlink_target": ""
} |
try:
import json
_parse_json = lambda s: json.loads(s)
except ImportError:
try:
import simplejson
_parse_json = lambda s: simplejson.loads(s)
except ImportError:
# For Google AppEngine
from django.utils import simplejson
_parse_json = lambda s: simplejson.loads(s)
import urllib
class ZoomItService(object):
"""A client for the Zoom.it API.
See http://zoom.it/pages/api for complete documentation of the API.
"""
def __init__(self, endpoint="http://api.zoom.it/v1"):
self.endpoint = endpoint
def get_content_by_id(self, id):
        # Initialize response up front so the finally block cannot raise
        # NameError when urlopen itself fails.
        response = None
        try:
            response = urllib.urlopen('%s/content/%s' % (self.endpoint, id))
            if response.code != 200:
                message = response.read()
                raise ZoomitServiceException(response.code, message)
            return _parse_json(response.read())
        finally:
            if response:
                response.close()
def get_content_by_url(self, url):
        # Same pattern as above: make sure `response` exists before the
        # try/finally so cleanup cannot raise NameError.
        response = None
        try:
            request_url = '%s/content/?%s' % (self.endpoint,
                                              urllib.urlencode({'url': url}))
            response = urllib.urlopen(request_url)
            if response.code >= 400:
                message = response.read()
                raise ZoomitServiceException(response.code, message)
            return _parse_json(response.read())
        finally:
            if response:
                response.close()
class ZoomitServiceException(Exception):
def __init__(self, status_code, message):
Exception.__init__(self, message)
self.status_code = status_code
# ------------------------------------------------------------------------------
import unittest
class ZoomItServiceTest(unittest.TestCase):
def setUp(self):
self.service = ZoomItService()
def test_missing_id(self):
def aux_test_missing_id():
# Try to retrieve content for image with a funny smiley making
# the OMG face as its id. This should obviously fail, as
# zoom.it uses non-smileys as identifiers.
self.service.get_content_by_id(u'8=o')
self.assertRaises(ZoomitServiceException, aux_test_missing_id)
def test_existing_id(self):
        # Retrieve content for an id that is known to exist.
test_id = '8'
content = self.service.get_content_by_id(test_id)
self.assertEquals(content['failed'], False)
self.assertEquals(content['ready'], True)
self.assertEquals(content['id'], test_id)
def test_existing_url(self):
url = 'http://answers.yahoo.com/question/index?qid=20080331170418AAhm4TU'
content = self.service.get_content_by_url(url)
required_keys = [u'id', u'embedHtml', u'url', u'shareUrl', u'dzi',
u'failed', u'ready', u'progress']
for key in required_keys:
self.assertTrue(key in content, "Required key '%s' missing" % key)
def test_invalid_url(self):
url = '?!@#$://i/like/bacon/on/my/plate~'
def aux_test_invalid_url():
content = self.service.get_content_by_url(url)
self.assertRaises(ZoomitServiceException, aux_test_invalid_url)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "5bc3eeb1e36424c9c259758ae90f6244",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 81,
"avg_line_length": 31.933962264150942,
"alnum_prop": 0.5710487444608567,
"repo_name": "openzoom/zoomit-python-sdk",
"id": "693eec5d44142e8d57fb461b50ab54b6f3cfa3de",
"size": "4154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zoomit.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4561"
}
],
"symlink_target": ""
} |
import os
from twilio.rest import Client
# Initialize the client
# To set up environment variables, see http://twil.io/secure
account = os.environ['TWILIO_ACCOUNT_SID']
token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account, token)
response = client.chat \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members("IDENTITY") \
.delete()
print(response)
| {
"content_hash": "b07556392f7d7db8826ff22d66189d79",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 66,
"avg_line_length": 30.5,
"alnum_prop": 0.6598360655737705,
"repo_name": "TwilioDevEd/api-snippets",
"id": "b96fc3b25cc607f374ed32a3905b6fbcb711a938",
"size": "561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ip-messaging/rest/members/remove-member/remove-member.6.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "637161"
},
{
"name": "C++",
"bytes": "24856"
},
{
"name": "Go",
"bytes": "7217"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "912474"
},
{
"name": "JavaScript",
"bytes": "512877"
},
{
"name": "M",
"bytes": "147"
},
{
"name": "Objective-C",
"bytes": "53325"
},
{
"name": "PHP",
"bytes": "517186"
},
{
"name": "Python",
"bytes": "442184"
},
{
"name": "Ruby",
"bytes": "438928"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "Swift",
"bytes": "42345"
},
{
"name": "TypeScript",
"bytes": "16767"
}
],
"symlink_target": ""
} |
"""
oauthlib.oauth2.rfc6749.tokens
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains methods for adding two types of access tokens to requests.
- Bearer http://tools.ietf.org/html/rfc6750
- MAC http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01
"""
from __future__ import absolute_import, unicode_literals
from binascii import b2a_base64
import hashlib
import hmac
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from oauthlib.common import add_params_to_uri, add_params_to_qs, unicode_type
from oauthlib import common
from . import utils
class OAuth2Token(dict):
def __init__(self, params, old_scope=None):
super(OAuth2Token, self).__init__(params)
self._new_scope = None
if 'scope' in params and params['scope']:
self._new_scope = set(utils.scope_to_list(params['scope']))
if old_scope is not None:
self._old_scope = set(utils.scope_to_list(old_scope))
if self._new_scope is None:
# the rfc says that if the scope hasn't changed, it's optional
# in params so set the new scope to the old scope
self._new_scope = self._old_scope
else:
self._old_scope = self._new_scope
@property
def scope_changed(self):
return self._new_scope != self._old_scope
@property
def old_scope(self):
return utils.list_to_scope(self._old_scope)
@property
def old_scopes(self):
return list(self._old_scope)
@property
def scope(self):
return utils.list_to_scope(self._new_scope)
@property
def scopes(self):
return list(self._new_scope)
@property
def missing_scopes(self):
return list(self._old_scope - self._new_scope)
@property
def additional_scopes(self):
return list(self._new_scope - self._old_scope)
def prepare_mac_header(token, uri, key, http_method,
nonce=None,
headers=None,
body=None,
ext='',
hash_algorithm='hmac-sha-1',
issue_time=None,
draft=0):
"""Add an `MAC Access Authentication`_ signature to headers.
Unlike OAuth 1, this HMAC signature does not require inclusion of the
request payload/body, neither does it use a combination of client_secret
and token_secret but rather a mac_key provided together with the access
token.
Currently two algorithms are supported, "hmac-sha-1" and "hmac-sha-256",
`extension algorithms`_ are not supported.
Example MAC Authorization header, linebreaks added for clarity
Authorization: MAC id="h480djs93hd8",
nonce="1336363200:dj83hs9s",
mac="bhCQXTVyfj5cmA9uKkPFx1zeOXM="
.. _`MAC Access Authentication`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01
.. _`extension algorithms`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-7.1
:param uri: Request URI.
:param headers: Request headers as a dictionary.
:param http_method: HTTP Request method.
:param key: MAC given provided by token endpoint.
:param hash_algorithm: HMAC algorithm provided by token endpoint.
:param issue_time: Time when the MAC credentials were issued (datetime).
:param draft: MAC authentication specification version.
:return: headers dictionary with the authorization field added.
"""
http_method = http_method.upper()
host, port = utils.host_from_uri(uri)
if hash_algorithm.lower() == 'hmac-sha-1':
h = hashlib.sha1
elif hash_algorithm.lower() == 'hmac-sha-256':
h = hashlib.sha256
else:
raise ValueError('unknown hash algorithm')
if draft == 0:
nonce = nonce or '{0}:{1}'.format(utils.generate_age(issue_time),
common.generate_nonce())
else:
ts = common.generate_timestamp()
nonce = common.generate_nonce()
sch, net, path, par, query, fra = urlparse(uri)
if query:
request_uri = path + '?' + query
else:
request_uri = path
# Hash the body/payload
if body is not None and draft == 0:
body = body.encode('utf-8')
bodyhash = b2a_base64(h(body).digest())[:-1].decode('utf-8')
else:
bodyhash = ''
# Create the normalized base string
base = []
if draft == 0:
base.append(nonce)
else:
base.append(ts)
base.append(nonce)
base.append(http_method.upper())
base.append(request_uri)
base.append(host)
base.append(port)
if draft == 0:
base.append(bodyhash)
base.append(ext or '')
base_string = '\n'.join(base) + '\n'
# hmac struggles with unicode strings - http://bugs.python.org/issue5285
if isinstance(key, unicode_type):
key = key.encode('utf-8')
sign = hmac.new(key, base_string.encode('utf-8'), h)
sign = b2a_base64(sign.digest())[:-1].decode('utf-8')
header = []
header.append('MAC id="%s"' % token)
if draft != 0:
header.append('ts="%s"' % ts)
header.append('nonce="%s"' % nonce)
if bodyhash:
header.append('bodyhash="%s"' % bodyhash)
if ext:
header.append('ext="%s"' % ext)
header.append('mac="%s"' % sign)
headers = headers or {}
headers['Authorization'] = ', '.join(header)
return headers
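# Illustrative usage (hypothetical token, key and nonce values), mirroring the
# example header shown in the docstring above:
#
#     headers = prepare_mac_header(
#         token='h480djs93hd8', uri='https://example.com/resource/1?b=1&a=2',
#         key='489dks293j39', http_method='GET', nonce='264095:dj83hs9s',
#         hash_algorithm='hmac-sha-1')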
def prepare_bearer_uri(token, uri):
"""Add a `Bearer Token`_ to the request URI.
Not recommended, use only if client can't use authorization header or body.
http://www.example.com/path?access_token=h480djs93hd8
.. _`Bearer Token`: http://tools.ietf.org/html/rfc6750
"""
return add_params_to_uri(uri, [(('access_token', token))])
def prepare_bearer_headers(token, headers=None):
"""Add a `Bearer Token`_ to the request URI.
Recommended method of passing bearer tokens.
Authorization: Bearer h480djs93hd8
.. _`Bearer Token`: http://tools.ietf.org/html/rfc6750
"""
headers = headers or {}
headers['Authorization'] = 'Bearer %s' % token
return headers
def prepare_bearer_body(token, body=''):
"""Add a `Bearer Token`_ to the request body.
access_token=h480djs93hd8
.. _`Bearer Token`: http://tools.ietf.org/html/rfc6750
"""
return add_params_to_qs(body, [(('access_token', token))])
def random_token_generator(request, refresh_token=False):
return common.generate_token()
def signed_token_generator(private_pem, **kwargs):
def signed_token_generator(request):
request.claims = kwargs
return common.generate_signed_token(private_pem, request)
return signed_token_generator
class TokenBase(object):
def __call__(self, request, refresh_token=False):
raise NotImplementedError('Subclasses must implement this method.')
def validate_request(self, request):
raise NotImplementedError('Subclasses must implement this method.')
def estimate_type(self, request):
raise NotImplementedError('Subclasses must implement this method.')
class BearerToken(TokenBase):
__slots__ = (
'request_validator', 'token_generator',
'refresh_token_generator', 'expires_in'
)
def __init__(self, request_validator=None, token_generator=None,
expires_in=None, refresh_token_generator=None):
self.request_validator = request_validator
self.token_generator = token_generator or random_token_generator
self.refresh_token_generator = (
refresh_token_generator or self.token_generator
)
self.expires_in = expires_in or 3600
def create_token(self, request, refresh_token=False, save_token=True):
"""Create a BearerToken, by default without refresh token."""
if callable(self.expires_in):
expires_in = self.expires_in(request)
else:
expires_in = self.expires_in
request.expires_in = expires_in
token = {
'access_token': self.token_generator(request),
'expires_in': expires_in,
'token_type': 'Bearer',
}
if request.scopes is not None:
token['scope'] = ' '.join(request.scopes)
if request.state is not None:
token['state'] = request.state
if refresh_token:
if (request.refresh_token and
not self.request_validator.rotate_refresh_token(request)):
token['refresh_token'] = request.refresh_token
else:
token['refresh_token'] = self.refresh_token_generator(request)
token.update(request.extra_credentials or {})
token = OAuth2Token(token)
if save_token:
self.request_validator.save_bearer_token(token, request)
return token
def validate_request(self, request):
token = None
if 'Authorization' in request.headers:
token = request.headers.get('Authorization')[7:]
else:
token = request.access_token
return self.request_validator.validate_bearer_token(
token, request.scopes, request)
def estimate_type(self, request):
if request.headers.get('Authorization', '').startswith('Bearer'):
return 9
elif request.access_token is not None:
return 5
else:
return 0
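# --- Hedged usage sketch (not part of the original module) -------------------
# Roughly how a BearerToken is driven by an endpoint. `validator` and `req` are
# hypothetical stand-ins for a concrete RequestValidator and an oauthlib Request:
#
#     >>> bearer = BearerToken(request_validator=validator,
#     ...                      expires_in=lambda request: 1800)
#     >>> bearer.create_token(req, refresh_token=True)   # doctest: +SKIP
#     {'access_token': '...', 'expires_in': 1800, 'token_type': 'Bearer',
#      'refresh_token': '...'}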
| {
"content_hash": "0912fe08848d1c475cd2d72662a3d58f",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 102,
"avg_line_length": 31.73913043478261,
"alnum_prop": 0.6184404636459431,
"repo_name": "ownport/jira-reports",
"id": "06ff5585568f4cd612c5eb8adab20bc74721adef",
"size": "9490",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "jirareports/vendor/oauthlib/oauth2/rfc6749/tokens.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5750"
},
{
"name": "HTML",
"bytes": "1407"
},
{
"name": "JavaScript",
"bytes": "3838"
},
{
"name": "Makefile",
"bytes": "1246"
},
{
"name": "Python",
"bytes": "63467"
}
],
"symlink_target": ""
} |
from helpers.match_manipulator import MatchManipulator
from models.match import Match
class MatchSuggestionAccepter(object):
"""
Handle accepting Match suggestions.
"""
@classmethod
    def accept_suggestion(cls, match, suggestion):
        if "youtube_videos" in suggestion.contents:
            match = cls._merge_youtube_videos(match, suggestion.contents["youtube_videos"])
        return MatchManipulator.createOrUpdate(match)
    @classmethod
    def _merge_youtube_videos(cls, match, youtube_videos):
for youtube_video in youtube_videos:
if youtube_video not in match.youtube_videos:
match.youtube_videos.append(youtube_video)
return match
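# --- Hedged usage sketch (not part of the original module) -------------------
# With a hypothetical Match entity and a suggestion whose contents carry new
# YouTube keys, accepting the suggestion merges the videos and persists the match:
#
#     >>> suggestion.contents
#     {"youtube_videos": ["dQw4w9WgXcQ"]}
#     >>> MatchSuggestionAccepter.accept_suggestion(match, suggestion)  # doctest: +SKIP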
| {
"content_hash": "2914a212a46f4b3f7a407af941d1bca3",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 92,
"avg_line_length": 31.08695652173913,
"alnum_prop": 0.6937062937062937,
"repo_name": "tsteward/the-blue-alliance",
"id": "604ae4ac77b741537f9b3c0f985500186642f488",
"size": "715",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "helpers/suggestions/match_suggestion_accepter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "101"
},
{
"name": "CSS",
"bytes": "374878"
},
{
"name": "HTML",
"bytes": "713312"
},
{
"name": "JavaScript",
"bytes": "407439"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Python",
"bytes": "2073744"
},
{
"name": "Ruby",
"bytes": "3494"
},
{
"name": "Shell",
"bytes": "45"
}
],
"symlink_target": ""
} |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_pyfile('config.cfg')
db = SQLAlchemy(app)
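# --- Hedged example (not part of the original module) ------------------------
# app.config.from_pyfile('config.cfg') expects a Python-syntax config file next
# to app.py. A minimal assumed example (keys are standard Flask/Flask-SQLAlchemy
# settings, not taken from the repo):
#
#     SQLALCHEMY_DATABASE_URI = 'sqlite:///visartm.db'
#     SQLALCHEMY_TRACK_MODIFICATIONS = False
#     SECRET_KEY = 'change-me'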
| {
"content_hash": "428e4f7ccfa34d709f1e3cbda7317474",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 39,
"avg_line_length": 24.166666666666668,
"alnum_prop": 0.7586206896551724,
"repo_name": "omtcyf0/VisARTM",
"id": "2f908dc1ec9b3c25c7323ddd1cb2e6f2d563398e",
"size": "145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "142"
},
{
"name": "HTML",
"bytes": "36063"
},
{
"name": "JavaScript",
"bytes": "1501"
},
{
"name": "Python",
"bytes": "34738"
}
],
"symlink_target": ""
} |
import json
import re
import datetime
from itertools import groupby
import logging
from django.db import connection
from django.db.models import Prefetch, Q, Count
from django.shortcuts import get_object_or_404
from django.http import Http404
from rest_framework import viewsets
from rest_framework.pagination import PageNumberPagination
from rest_framework.decorators import api_view, permission_classes, renderer_classes
from rest_framework.permissions import AllowAny
from rest_framework.views import Response
from rest_framework.renderers import JSONRenderer
from dplace_app.renderers import DPLACECSVRenderer
from dplace_app import serializers
from dplace_app import models
from dplace_app.tree import update_newick
log = logging.getLogger('profile')
class VariableViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.VariableSerializer
filter_fields = ('label', 'name', 'index_categories', 'niche_categories', 'source')
queryset = models.Variable.objects\
.prefetch_related('index_categories', 'niche_categories')
# Override retrieve to use the detail serializer, which includes categories
def retrieve(self, request, *args, **kwargs):
self.object = self.get_object()
serializer = serializers.VariableDetailSerializer(self.object)
return Response(serializer.data)
class CategoryViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.CategorySerializer
filter_fields = ('name', 'type', 'index_variables', 'niche_variables')
queryset = models.Category.objects.all()
# Override retrieve to use the detail serializer, which includes variables
def retrieve(self, request, *args, **kwargs):
self.object = self.get_object()
serializer = serializers.CategoryDetailSerializer(self.object)
return Response(serializer.data)
class CodeDescriptionViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.CodeDescriptionSerializer
filter_fields = ('variable',)
queryset = models.CodeDescription.objects.all()
class ValueViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.ValueSerializer
filter_fields = ('variable', 'coded_value', 'code', 'society',)
# Avoid additional database trips by select_related for foreign keys
queryset = models.Value.objects.filter(variable__type='cultural')\
.select_related('variable', 'code', 'source').all()
class SocietyViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.SocietySerializer
queryset = models.Society.objects.all().select_related('source', 'language__family')
lookup_field = 'ext_id'
def detail(self, request, society_id):
# block spider attacks
if len(request.GET) > 0 and request.path.startswith('/society'):
raise Http404
society = get_object_or_404(models.Society, ext_id=society_id)
# gets the society's location for inset map
location = {}
if society.location:
location = {
'lat': society.location['coordinates'][1],
'lng': society.location['coordinates'][0]
}
# gets other societies in database with the same xd_id
xd_id = models.Society.objects.filter(
xd_id=society.xd_id).exclude(ext_id=society_id)
if society.hraf_link and '(' in society.hraf_link:
length = len(society.hraf_link.split('(')) - 1
hraf_link = society.hraf_link.split('(')[length]
else:
hraf_link = ''
environmentals = society.get_environmental_data()
cultural_traits = society.get_cultural_trait_data()
references = society.get_data_references()
language_classification = None
if society.language:
# just glottolog at the moment
language_classification = models.LanguageFamily.objects\
.filter(name=society.language.family.name)
return Response(
{
'society': society,
'hraf_link': hraf_link[0:len(hraf_link) - 1],
'xd_id': xd_id,
'location': location,
'language_classification': language_classification,
'environmentals': dict(environmentals),
'cultural_traits': dict(cultural_traits),
'references': references
},
template_name='society.html'
)
class LargeResultsSetPagination(PageNumberPagination):
page_size = 1000
page_size_query_param = 'page_size'
max_page_size = 1000
class VeryLargeResultsSetPagination(PageNumberPagination):
page_size = 3000
# do not set: page_size_query_param = 'page_size'
# it sets page_size to default 1000 - internal bug??
max_page_size = 3000
class LanguageViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.LanguageSerializerWithSocieties
filter_fields = ('name', 'iso_code', 'societies', 'family',)
queryset = models.Language.objects.all()\
.select_related('family')\
.prefetch_related(Prefetch(
'societies',
queryset=models.Society.objects.exclude(value__isnull=True)
))
# 'Select All Languages' has to return a list with more than 1000 items
pagination_class = VeryLargeResultsSetPagination
class LanguageFamilyViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.LanguageFamilySerializer
filter_fields = ('name',)
queryset = models.LanguageFamily.objects.all()\
.annotate(language_count=Count('language__societies'))\
.order_by('name')
pagination_class = LargeResultsSetPagination
class TreeResultsSetPagination(PageNumberPagination):
"""
Since trees may have *many* languages, which are serialized as well, we
limit the page size to just 1.
"""
page_size = 3
page_size_query_param = 'page_size'
max_page_size = 10
class LanguageTreeViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.LanguageTreeSerializer
filter_fields = ('name',)
queryset = models.LanguageTree.objects.all()
pagination_class = TreeResultsSetPagination
class LanguageTreeLabelsViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.LanguageTreeLabelsSerializer
filter_fields = ('label',)
queryset = models.LanguageTreeLabels.objects.all()
pagination_class = LargeResultsSetPagination
class SourceViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.SourceSerializer
filter_fields = ('author', 'name')
queryset = models.Source.objects.all()
def get_query_from_json(request):
query_string = request.query_params.get('query')
if query_string is None:
raise Http404('missing query parameter')
try:
query_dict = json.loads(query_string)
except ValueError:
raise Http404('malformed query parameter')
if not isinstance(query_dict, dict):
raise Http404('malformed query parameter')
return query_dict
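# --- Hedged example (not part of the original module) ------------------------
# get_query_from_json() expects a JSON-encoded dict in the `query` GET parameter,
# e.g. a request such as  ?query={"source": 1}  (URL-encoded in practice), which
# json.loads() turns into {'source': 1} before it is handed back to the caller.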
def result_set_from_query_dict(query_dict):
from time import time
_s = time()
log.info('enter result_set_from_query_dict')
result_set = serializers.SocietyResultSet()
sql_joins, sql_where = [], []
def id_array(l):
return '(%s)' % ','.join('%s' % int(i) for i in l)
def join_values(varid, criteria):
alias = 'ev%s' % varid
sql_joins.append((
"value",
alias,
"{0}.society_id = s.id AND {0}.variable_id = {1}".format(alias, int(varid))))
for varid, operator, params in criteria:
if operator != 'categorical':
                params = list(map(float, params))  # list() keeps params indexable on Python 3
if operator == 'inrange':
sql_where.append("{0}.coded_value_float >= {1:f} AND {0}.coded_value_float <= {2:f}".format(alias, params[0], params[1]))
elif operator == 'outrange':
sql_where.append("{0}.coded_value_float >= {1:f} OR {0}.coded_value_float <= {2:f}".format(alias, params[1], params[0]))
elif operator == 'gt':
sql_where.append("{0}.coded_value_float >= {1:f}".format(alias, params[0]))
elif operator == 'lt':
sql_where.append("{0}.coded_value_float <= {1:f}".format(alias, params[1]))
elif operator == 'categorical':
sql_where.append("{0}.code_id IN %s".format(alias) % id_array(params))
if 'l' in query_dict:
sql_joins.append(('language', 'l', 'l.id = s.language_id'))
sql_where.append('l.id IN ' + id_array(query_dict['l']))
for lang in models.Language.objects.filter(id__in=query_dict['l']):
result_set.languages.add(lang)
if 'c' in query_dict:
variables = {
v.id: v for v in models.Variable.objects
.filter(id__in=[x[0] for x in query_dict['c']])
.prefetch_related(Prefetch(
'codes',
queryset=models.CodeDescription.objects
.filter(id__in=[x[0] for x in query_dict['c']])))
}
for varid, criteria in groupby(
sorted(query_dict['c'], key=lambda c: c[0]),
key=lambda x: x[0]
):
join_values(varid, criteria)
for variable in models.Variable.objects.filter(id__in=[x[0] for x in query_dict['c']]):
result_set.variable_descriptions.add(serializers.VariableCode(variable.codes, variable))
if 'e' in query_dict:
variables = {
v.id: v for v in models.Variable.objects
.filter(id__in=[x[0] for x in query_dict['e']])
.prefetch_related(Prefetch(
'codes',
queryset=models.CodeDescription.objects
.filter(id__in=[x[0] for x in query_dict['e']])))
}
# There can be multiple filters, so we must aggregate the results.
for varid, criteria in groupby(
sorted(query_dict['e'], key=lambda c: c[0]),
key=lambda x: x[0]
):
join_values(varid, criteria)
for variable in models.Variable.objects.filter(id__in=[x[0] for x in query_dict['e']]):
result_set.environmental_variables.add(serializers.VariableCode(variable.codes, variable))
if 'p' in query_dict:
sql_joins.append(('geographicregion', 'r', 'r.id = s.region_id'))
sql_where.append('r.id IN %s' % id_array(query_dict['p']))
for region in models.GeographicRegion.objects.filter(id__in=query_dict['p']):
result_set.geographic_regions.add(region)
if sql_where:
cursor = connection.cursor()
sql = "select distinct s.id from dplace_app_society as s %s where %s" % (
' '.join('join dplace_app_%s as %s on %s' % t for t in sql_joins),
' AND '.join(sql_where))
cursor.execute(sql)
soc_ids = [r[0] for r in cursor.fetchall()]
else:
soc_ids = []
soc_query = models.Society.objects.filter(id__in=soc_ids)\
.select_related('source', 'language__family', 'region')
if result_set.geographic_regions:
soc_query = soc_query.select_related('region')
if result_set.variable_descriptions:
soc_query = soc_query.prefetch_related(Prefetch(
'value_set',
to_attr='selected_cvalues',
queryset=models.Value.objects
# FIXME: this selects possibly too many values, in case there are multiple
# values for the same variable, not all of them matching the criteria.
.filter(variable_id__in=[v.variable.id for v in result_set.variable_descriptions])
.select_related('code')
.prefetch_related('references')))
if result_set.environmental_variables:
soc_query = soc_query.prefetch_related(Prefetch(
'value_set',
to_attr='selected_evalues',
queryset=models.Value.objects
.filter(variable_id__in=[v.variable.id for v in result_set.environmental_variables])
.prefetch_related('references')))
for i, soc in enumerate(soc_query):
soc_result = serializers.SocietyResult(soc)
if result_set.variable_descriptions:
for cval in soc.selected_cvalues:
soc_result.variable_coded_values.add(cval)
result_set.sources = result_set.sources.union(
r.source_id for r in cval.references.all())
if result_set.environmental_variables:
for eval in soc.selected_evalues:
soc_result.environmental_values.add(eval)
result_set.sources = result_set.sources.union(
r.source_id for r in eval.references.all())
result_set.societies.add(soc_result)
result_set.sources = models.Source.objects.filter(id__in=result_set.sources).all()
log.info('mid 1: %s' % (time() - _s,))
# Filter the results to those that matched all criteria
#result_set.finalize(criteria)
log.info('mid 2: %s' % (time() - _s,))
return result_set
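# --- Hedged illustration (not part of the original module) -------------------
# For a hypothetical query_dict like {'c': [[12, 'categorical', [3, 4]]], 'p': [7]},
# the joins/filters assembled above produce SQL along these lines (aliases follow
# the 'ev<variable_id>' convention used in join_values; the real statement is one line):
#
#     select distinct s.id from dplace_app_society as s
#       join dplace_app_value as ev12
#         on ev12.society_id = s.id AND ev12.variable_id = 12
#       join dplace_app_geographicregion as r on r.id = s.region_id
#     where ev12.code_id IN (3,4) AND r.id IN (7)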
@api_view(['GET'])
@permission_classes((AllowAny,))
def trees_from_societies(request):
language_trees, labels, soc_ids = [], [], []
for k, v in request.query_params.lists():
soc_ids = v
labels = models.LanguageTreeLabels.objects.filter(societies__id__in=soc_ids).all()
for t in models.LanguageTree.objects\
.filter(taxa__societies__id__in=soc_ids)\
.prefetch_related(
'taxa__languagetreelabelssequence_set__labels',
'taxa__languagetreelabelssequence_set__society',
)\
.distinct():
if update_newick(t, labels):
language_trees.append(t)
return Response(serializers.LanguageTreeSerializer(language_trees, many=True).data)
@api_view(['GET'])
@permission_classes((AllowAny,))
def find_societies(request):
"""
View to find the societies that match an input request. Currently expects
{ language_filters: [{language_ids: [1,2,3]}], variable_codes: [4,5,6...],
environmental_filters: [{id: 1, operator: 'gt', params: [0.0]},
{id:3, operator 'inrange', params: [10.0,20.0] }] }
Returns serialized collection of SocietyResult objects
"""
from time import time
from django.db import connection
s = time()
log.info('%s find_societies 1: %s queries' % (time() - s, len(connection.queries)))
query = {}
if 'name' in request.query_params:
result_set = serializers.SocietyResultSet()
q = request.query_params['name']
if q:
soc = models.Society.objects.filter(
Q(name__icontains=q) | Q(alternate_names__unaccent__icontains=q))
for s in soc:
if s.value_set.count():
result_set.societies.add(serializers.SocietyResult(s))
return Response(serializers.SocietyResultSetSerializer(result_set).data)
for k, v in request.query_params.lists():
#if str(k) == 'c':
# query[k] = v
#else:
query[k] = [json.loads(vv) for vv in v]
result_set = result_set_from_query_dict(query)
log.info('%s find_societies 2: %s queries' % (time() - s, len(connection.queries)))
d = serializers.SocietyResultSetSerializer(result_set).data
log.info('%s find_societies 3: %s queries' % (time() - s, len(connection.queries)))
for i, q in enumerate(
sorted(connection.queries, key=lambda q: q['time'], reverse=True)):
if 10 < i < 20: # pragma: no cover
log.info('%s for %s' % (q['time'], q['sql'][:500]))
return Response(d)
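# --- Hedged example (not part of the original module) ------------------------
# find_societies() reads each query parameter as a list of JSON-encoded values,
# so a request could look like this before URL-encoding (the path is illustrative):
#
#     /find_societies?c=[12,"categorical",[3,4]]&p=7
#
# which the loop above turns into query = {'c': [[12, 'categorical', [3, 4]]],
# 'p': [7]} for result_set_from_query_dict().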
@api_view(['GET'])
@permission_classes((AllowAny,))
def get_categories(request):
"""
Filters categories for sources, as some categories are empty for some sources
"""
query_dict = get_query_from_json(request)
categories = models.Category.objects.filter(type='cultural')
source_categories = []
if 'source' in query_dict:
source = models.Source.objects.filter(id=query_dict['source'])
variables = models.Variable.objects.filter(source=source)
for c in categories:
if variables.filter(index_categories=c.id):
source_categories.append(c)
return Response(
serializers.CategorySerializer(source_categories, many=True).data)
return Response(serializers.CategorySerializer(categories, many=True).data)
@api_view(['GET'])
@permission_classes((AllowAny,))
def get_dataset_sources(request):
return Response(
serializers.SourceSerializer(
models.Source.objects.filter(societies__isnull=False).distinct(),
many=True).data)
class GeographicRegionViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.GeographicRegionSerializer
model = models.GeographicRegion
filter_fields = ('region_nam', 'continent')
queryset = models.GeographicRegion.objects.all()
@api_view(['GET'])
@permission_classes((AllowAny,))
@renderer_classes((JSONRenderer,))
def get_min_and_max(request):
res = {}
varid = get_query_from_json(request).get('id')
if varid:
values = [
v.coded_value_float for v in models.Value.objects.filter(variable__id=varid)
if v.coded_value_float is not None]
vmin = min(values) if values else 0.0
vmax = max(values) if values else 0.0
res = {'min': format(vmin, '.4f'), 'max': format(vmax, '.4f')}
return Response(res)
@api_view(['GET'])
@permission_classes((AllowAny,))
@renderer_classes((DPLACECSVRenderer,))
def csv_download(request):
query_dict = get_query_from_json(request)
result_set = result_set_from_query_dict(query_dict)
response = Response(serializers.SocietyResultSetSerializer(result_set).data)
filename = "dplace-societies-%s.csv" % datetime.datetime.now().strftime("%Y-%m-%d")
response['Content-Disposition'] = 'attachment; filename="%s"' % filename
return response
| {
"content_hash": "b17b110a671bbcb3ca555ffcdeb389af",
"timestamp": "",
"source": "github",
"line_count": 460,
"max_line_length": 137,
"avg_line_length": 39.21304347826087,
"alnum_prop": 0.6389289278190486,
"repo_name": "shh-dlce/dplace",
"id": "3df2b7174a3d2e0c0f44c84e7f8ede7876dbf8e9",
"size": "18038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dplace_app/api_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10863"
},
{
"name": "HTML",
"bytes": "147145"
},
{
"name": "JavaScript",
"bytes": "179528"
},
{
"name": "Makefile",
"bytes": "204"
},
{
"name": "Python",
"bytes": "112603"
},
{
"name": "Shell",
"bytes": "587"
},
{
"name": "TeX",
"bytes": "239"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
README = readme_file.read()
with open('requirements.txt') as requirements_file:
    REQUIREMENTS = requirements_file.read()
setup(
name='autolearn',
version='0.1.0',
description="Automatic Machine Learning",
long_description=README,
author="Austin McConnell",
author_email='[email protected]',
url='https://github.com/austinmcconnell/autolearn',
packages=find_packages(),
entry_points={
'console_scripts': [
'autolearn=autolearn.cli:main'
]
},
install_requires=REQUIREMENTS,
license="MIT license",
zip_safe=False,
keywords='autolearn',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests')
| {
"content_hash": "9a491d18cb4e5276c9e730d6172165b9",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 55,
"avg_line_length": 30.216216216216218,
"alnum_prop": 0.6198568872987478,
"repo_name": "austinmcconnell/autolearn",
"id": "a045ff7dbbf7bdca03f69577ce6e24cec36a9261",
"size": "1118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2615156"
},
{
"name": "Python",
"bytes": "13772"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from rest_framework.response import Response
from sentry import filters
from sentry.api.bases.project import ProjectEndpoint
class ProjectFiltersEndpoint(ProjectEndpoint):
def get(self, request, project):
"""
List a project's filters
Retrieve a list of filters for a given project.
{method} {path}
"""
results = []
for f_cls in filters.all():
filter = f_cls(project)
results.append(
{
'id': filter.id,
# 'active' will be either a boolean or list for the legacy browser filters
# all other filters will be boolean
'active': filter.is_enabled(),
'description': filter.description,
'name': filter.name,
}
)
results.sort(key=lambda x: x['name'])
return Response(results)
| {
"content_hash": "82bd7ba89c802b0a60741eb0bf12d488",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 94,
"avg_line_length": 29.727272727272727,
"alnum_prop": 0.5443425076452599,
"repo_name": "jean/sentry",
"id": "bde5de51cbdaa0a966ba0d84a5ca6dc23efb1f35",
"size": "981",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/project_filters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "296112"
},
{
"name": "HTML",
"bytes": "314273"
},
{
"name": "JavaScript",
"bytes": "1293918"
},
{
"name": "Lua",
"bytes": "57158"
},
{
"name": "Makefile",
"bytes": "6632"
},
{
"name": "Python",
"bytes": "24515298"
},
{
"name": "Ruby",
"bytes": "4410"
},
{
"name": "Shell",
"bytes": "2942"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
import json
import re
from rbtools.api.errors import APIError
from rbtools.commands import (Command,
CommandError,
CommandExit,
Option,
ParseError)
class APIGet(Command):
name = 'api-get'
author = 'The Review Board Project'
description = 'Retrieve raw API resource payloads.'
args = '<path> [--<query-arg>=<value> ...]'
option_list = [
Option('--pretty',
action='store_true',
dest='pretty_print',
config_key='API_GET_PRETTY_PRINT',
default=False,
help='Pretty prints the resulting API payload.'),
Command.server_options,
]
def _dumps(self, payload):
if self.options.pretty_print:
return json.dumps(payload, sort_keys=True, indent=4)
else:
return json.dumps(payload)
def main(self, path, *args):
query_args = {}
query_arg_re = re.compile('^--(?P<name>.*)=(?P<value>.*)$')
for arg in args:
m = query_arg_re.match(arg)
if m:
query_args[m.group('name')] = m.group('value')
else:
raise ParseError('Unexpected query argument %s' % arg)
if self.options.server:
server_url = self.options.server
else:
repository_info, tool = self.initialize_scm_tool()
server_url = self.get_server_url(repository_info, tool)
api_client, api_root = self.get_api(server_url)
try:
if path.startswith('http://') or path.startswith('https://'):
resource = api_client.get_url(path, **query_args)
else:
resource = api_client.get_path(path, **query_args)
except APIError as e:
if e.rsp:
print(self._dumps(e.rsp))
raise CommandExit(1)
else:
raise CommandError('Could not retrieve the requested '
'resource: %s' % e)
print(self._dumps(resource.rsp))
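# --- Hedged usage note (not part of the original module) ---------------------
# Following the `args` spec above, extra --<query-arg>=<value> options become
# query-string arguments on the API request, e.g. (endpoint path illustrative):
#
#     rbt api-get --pretty /review-requests/ --counts-only=1
#
# which maps to GET <server>/api/review-requests/?counts-only=1 with the JSON
# payload pretty-printed.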
| {
"content_hash": "8c58951f42dedd23b4b5ea76a0cc0cca",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 73,
"avg_line_length": 32.4264705882353,
"alnum_prop": 0.5165532879818594,
"repo_name": "davidt/rbtools",
"id": "7b8c51d5877c6ad112155f4652c0ad9ad108cb6f",
"size": "2205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rbtools/commands/api_get.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9410"
},
{
"name": "HTML",
"bytes": "1000"
},
{
"name": "Python",
"bytes": "746695"
},
{
"name": "Shell",
"bytes": "39731"
}
],
"symlink_target": ""
} |
"""
timings.py
Created by William Katz on 2008-05-04.
Copyright (c) 2008 Publishare LLC. Distributed under MIT License.
"""
__author__ = "William T. Katz"
# Global that stores timing runs, all keyed to incoming url path.
# Note that since this is a global, you'll only get stats from the
# currently visited server and it could be reset. The timing
# utility is not meant to be comprehensive but only a hack that
# doesn't interfere with memcached stats.
TIMINGS = {}
import time
import urlparse
import os
from handlers import restful
from utils import authorized
import view
def start_run():
url = os.environ['PATH_INFO']
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
global TIMINGS
if not path in TIMINGS:
TIMINGS[path] = {
"runs": 0,
"duration": 0.0,
"min_time": None,
"max_time": None,
"mutex_lock": False
}
timing = TIMINGS[path]
if not timing["mutex_lock"]:
timing["mutex_lock"] = True
timing["start_time"] = time.time()
return path
return None
def stop_run(path):
global TIMINGS
if path and path in TIMINGS:
timing = TIMINGS[path]
elapsed_time = time.time() - timing["start_time"]
timing["duration"] += elapsed_time
timing["runs"] += 1
if (not timing["min_time"]) or timing["min_time"] > elapsed_time:
timing["min_time"] = elapsed_time
if (not timing["max_time"]) or timing["max_time"] < elapsed_time:
timing["max_time"] = elapsed_time
timing["mutex_lock"] = False
class TimingHandler(restful.Controller):
@authorized.role("admin")
def get(self):
global TIMINGS
stats = []
total_time = 0.0
avg_speed = 0.0
total_calls = 0
total_full_renders = 0
for key in TIMINGS:
full_renders = 0
if key in view.NUM_FULL_RENDERS:
full_renders = view.NUM_FULL_RENDERS[key]
total_full_renders += full_renders
url_timing = TIMINGS[key]
if url_timing["runs"] > 0:
url_stats = url_timing.copy()
url_stats.update({'url': key,
'avg_speed': url_timing["duration"] /
url_timing["runs"],
'full_renders': full_renders})
stats.append(url_stats)
total_time += url_timing["duration"]
total_calls += url_timing["runs"]
if total_calls > 0:
avg_speed = total_time / total_calls
view.ViewPage(cache_time=0).render(self, {"stats": stats,
"avg_speed": avg_speed,
"total_time": total_time,
"total_calls": total_calls,
"total_full_renders":
total_full_renders})
@authorized.role("admin")
def delete(self):
global TIMINGS
TIMINGS = {} | {
"content_hash": "3a3367958ee14f1df086e633854faca6",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 77,
"avg_line_length": 33.552083333333336,
"alnum_prop": 0.5184725240608506,
"repo_name": "arcticio/ice-bloc-hdr",
"id": "ff1de4ddeddec6316b4672efac83639655b6795e",
"size": "4345",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "handlers/gablog/timings.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "64712"
},
{
"name": "HTML",
"bytes": "106443"
},
{
"name": "JavaScript",
"bytes": "2060684"
},
{
"name": "Python",
"bytes": "802278"
},
{
"name": "Shell",
"bytes": "29529"
}
],
"symlink_target": ""
} |
from parameterized import parameterized
from tests.test_utils.system_tests_class import SystemTest
class TestExampleDagsSystem(SystemTest):
@parameterized.expand([
"example_bash_operator",
"example_branch_operator"
])
def test_dag_example(self, dag_id):
self.run_dag(dag_id=dag_id)
| {
"content_hash": "d840e9f705205f4180e839e1f7d9e9ad",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 58,
"avg_line_length": 26.75,
"alnum_prop": 0.7071651090342679,
"repo_name": "mtagle/airflow",
"id": "cbf549d8edf21129a85952f566c8d1833597a6c3",
"size": "1108",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_example_dags_system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "148492"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10006634"
},
{
"name": "Shell",
"bytes": "217011"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
'''The following code is proof of concept for multi/single plots; from 0.4.4b1.
See BETA for class abstractions to Distribplot and PanelPlot.
'''
import math
import itertools as it
import matplotlib as mpl
import matplotlib.pyplot as plt
# PLOT ------------------------------------------------------------------------
# colorblind from seaborn; grayscale is web-safe
LAMANA_PALETTES = dict(
#bold=['#FC0D00','#FC7700','#018C99','#00C318','#6A07A9','#009797','#CF0069'],
bold=['#EB0C00','#FC7700','#018C99','#00C318','#6A07A9','#009797','#CF0069'],
colorblind=['#0072B2', '#009E73', '#D55E00', '#CC79A7', '#F0E442', '#56B4E9'],
grayscale=['#FFFFFF', '#999999', '#666666', '#333333', '#000000'],
HAPSu=['#E7940E', '#F5A9A9', '#FCEB00', '#0B4EA5'],
)
def _cycle_depth(iterable, n=None):
'''Return a cycler that iterates n items into an iterable.'''
if n is None:
n = len(iterable)
return it.cycle(it.islice(iterable, n))
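# Hedged illustration (not in the original source): _cycle_depth trims the
# iterable before cycling, e.g. _cycle_depth('ABCDE', n=2) yields
# 'A', 'B', 'A', 'B', ... so layer colors repeat per material count.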
def _distribplot(
LMs, x=None, y=None, normalized=True, halfplot=None, extrema=True,
legend_on=True, colorblind=False, grayscale=False, annotate=False, ax=None,
linestyles=None, linecolors=None, markerstyles=None, layercolors=None,
plot_kw=None, patch_kw=None, annotate_kw=None, legend_kw=None,
sublabel_kw=None, **kwargs
):
'''Return an axes plot of stress distributions.
Characteristics
===============
- multiplot: plot multiple geometries
- halfplot: plots only compressive or tensile side
- annotate: write layer type names
Parameters
==========
LMs : list
List of LaminateModels.
x, y : str
DataFrame column names. Users can pass in other columns names.
normalized : bool
If true, plots y = k_; else plots y = d_ unless specified otherwise.
halfplot : str
Trim the DataFrame to read either |'tensile'|'compressive'|None|.
extrema : bool
Plot minima and maxima only; equivalent to p=2. Default: True.
legend_on : bool
Turn on/off plot. Default: True.
colorblind : bool
Set line and marker colors as colorblind-safe. Default: False.
grayscale : bool
Set everything to grayscale. Overrides colorblind.
annotate : bool
Annotate names of layer types.
ax : matplotlib axes
An axes containing the plots.
These keywords control general plotting aesthetics.
{lines, marker, layer}_styles/colors : dict
Processes cycled iterables for matplotlib keywords.
- linestyles: ["-","--","-.",":"]
- linecolors: LAMANA_PALETTES['bold']
- markerstyles: mpl.lines.Line2D.filled_markers
- layercolors: LAMANA_PALETTES['HAPSu']
{plot, patch, annotate, legend, sublabel}_kw : dict
Default keywords are initialized to set up the distribution plots.
- plot: |linewidth=1.8|markersize=8|alpha=1.0|clip_on=False|
- patch: |linewidth=1.0|alpha=0.15|
- annotate: write layer types |fontsize=20|alpha=.7|ha='left'|va='center'|
- legend: |loc=1|fontsize='large'|
- sublabel: default is lower case alphabet
|x=0.12|y=0.94|s=''|fontsize=20|weight='bold'|ha='center'|va='center'|
Or users can override kwargs normal mpl style.
'''
# -------------------------------------------------------------------------
'''Make cyclers colorblind and grayscale friendly'''
if ax is None:
ax = plt.gca()
# Default axis labels and DataFrame columns for normalized plots
if x is None:
# 'stress_f (MPa/N)' is in Wilson_LT; so the following triggers handling
##x = 'stress_f (MPa/N)'
x = 'stress'
if normalized:
y = 'k'
elif not normalized and y is None:
y = 'd(m)'
'''Will have trouble standardizing the name of the stress column.'''
'''Need to de-hard-code x label since changes with model'''
'''Try looking for stress columns, and select last one, else look for strain.'''
# see loop on handling stress column
# Plot Defaults -----------------------------------------------------------
# Set defaults for plotting keywords with dicts
# If no kwd found, make an empty dict; only update keys not passed in
plot_kw = {} if plot_kw is None else plot_kw
plot_dft = dict(linewidth=1.8, markersize=8, alpha=1.0, clip_on=False,)
plot_kw.update({k: v for k, v in plot_dft.items() if k not in plot_kw})
#print('plot_kw (pre-loop): ', plot_kw)
patch_kw = {} if patch_kw is None else patch_kw
patch_dft = dict(linewidth=1.0, alpha=0.15,)
patch_kw.update({k: v for k, v in patch_dft.items() if k not in patch_kw})
#print('patch_kw: ', patch_kw)
annotate_kw = {} if annotate_kw is None else annotate_kw
annotate_dft = dict(fontsize=20, alpha=.7, ha='left', va='center',)
annotate_kw.update({k: v for k, v in annotate_dft.items() if k not in annotate_kw})
#print('annotate_kw: ', annotate_kw)
legend_kw = {} if legend_kw is None else legend_kw
legend_dft = dict(loc=1, fontsize='large',)
legend_kw.update({k: v for k, v in legend_dft.items()
if k not in legend_kw and legend_on})
#print('legend_kw: ', legend_kw)
sublabel_kw = {} if sublabel_kw is None else sublabel_kw
sublabel_dft = dict(
x=0.12, y=0.94, s='', fontsize=20, weight='bold', ha='center',
va='center', transform=ax.transAxes
)
sublabel_kw.update({k: v for k, v in sublabel_dft.items()
if k not in sublabel_kw})
#print('sublabel_kw: ', sublabel_kw)
# Style Cyclers -----------------------------------------------------------
# Set defaults for the line/marker styles, colors and layer patch colors
if linestyles is None:
linestyles = it.cycle(["-", "--", "-.", ":"])
if linecolors is None:
linecolors = LAMANA_PALETTES['bold']
if markerstyles is None:
markerstyles = [mrk for mrk in mpl.lines.Line2D.filled_markers
if mrk not in ('None', None)]
if layercolors is None:
layercolors = LAMANA_PALETTES['HAPSu']
##layercolors = ['#E7940E', '#F5A9A9', '#FCEB00', '#0B4EA5']
if colorblind:
linecolors = LAMANA_PALETTES['colorblind']
'''Add special color blind to layers'''
if grayscale:
linecolors = ['#000000']
layercolors = reversed(LAMANA_PALETTES['grayscale'][:-1]) # exclude black
patch_kw.update(dict(alpha=0.5))
if colorblind:
print('Grayscale has overriden the colorblind option.')
marker_cycle = it.cycle(markerstyles)
##marker_cycle = it.cycle(reversed(markerstyles))
line_cycle = it.cycle(linestyles)
color_cycle = it.cycle(linecolors)
# Plotting ----------------------------------------------------------------
minX, maxX = (0, 0)
for i, LM in enumerate(LMs):
if extrema:
df = LM.extrema # plots p=2
else:
df = LM.LMFrame
nplies = LM.nplies
materials = LM.materials
lbl = LM.Geometry.string
stack_order = LM.stack_order
# Handle arbitrary name of x column by
# selecting last 'stress' column; assumes 'stress_f (MPa)' for Wilson_LT
# if none found, exception is raised. user should input x value
        try:
            df[x]
        except KeyError:
            # Fall back to the last column whose name starts with 'stress';
            # raise a clearer error if no such column exists.
            stress_names = df.columns.str.startswith('stress')
            stress_cols = df.loc[:, stress_names]
            ##stress_cols = df.loc[stress_names]
            try:
                x_series = stress_cols.iloc[:, -1]
                x = x_series.name
            except IndexError:
                raise Exception("Stress column '{}' not found. "
                                "Specify x column in plot() method.".format(x))
            #print ('stress_cols ', stress_cols)
            #print(x)
x_series, y_series = df[x], df[y]
xs, ys = x_series.tolist(), y_series.tolist()
# Update plot boundaries
if min(xs) < minX: minX = float(min(xs))
if max(xs) > maxX: maxX = float(max(xs))
#print(minX, maxX)
# Keyword Updates;
# Use the cycler if plot_kw is empty, otherwise let the user manually change plot_kw
plot_kw.update({
'label': lbl,
#'marker': 'o',
#'color': 'b',
'marker': next(marker_cycle),
'color': next(color_cycle),
'linestyle': next(line_cycle)
})
'''Put following into info.'''
#print(LM.Geometry, LM.Geometry.string, LM.name, LM.nplies, LM.p)
# Label caselets with sublabels, e.g. a,b,c, i,ii,iii...
ax.tick_params(axis='x', pad=10)
ax.tick_params(axis='y', pad=10)
ax.plot(xs, ys, **plot_kw)
width = maxX - minX # sets rectangle width
minY = y_series.min()
maxY = y_series.max()
# Smart-cycle layer colors list; slice iterable the length of materials
# Draw layers only for # y = {k_ and d_(if nplies=1)}
layer_cycle = _cycle_depth(layercolors, n=len(materials)) # assumes all Cases materials equiv.
# -------------------------------------------------------------------------
# Annotations anchored to layers instead of plot; iterates layers
incrementer = 0
for layer_, (type_, t_, matl_) in stack_order.items():
if normalized:
ypos, thick = layer_, 1 # thick is a unit thick (k-k_1)
elif (not normalized and len(LMs) == 1):
thick = t_ / 1e6
ypos = incrementer
else:
'''Add this to warning.'''
            print('CAUTION: Unnormalized plots (y=d(m)) are cumbersome for '
                  'geometries>1. Consider normalized=True for multi-geometry '
                  'plots.')
return None
patch_kw.update({'facecolor': next(layer_cycle)}) # adv. cyclers
rect = mpl.patches.Rectangle((minX, ypos), width, thick, **patch_kw)
ax.add_artist(rect)
'''add these to a kw dict somehow.. preferably to annotate_kw'''
xpad = 0.02
ypad_layer = 0.15
ypad_plot = 0.03
if normalized:
ypad = (rect.get_height() * ypad_layer) # relative to layers
elif not normalized:
#print(ax.get_ylim()[1])
ypad = ax.get_ylim()[1] * ypad_plot # relative to plot
#print(ypad)
rx, ry = rect.get_xy()
cx = rx + (rect.get_width() * xpad)
cy = ry + ypad
if annotate:
ax.annotate(type_, (cx, cy), **annotate_kw)
incrementer += thick
# -------------------------------------------------------------------------
# Set plot limits
#ax.axis([minX, maxX, minY, maxY])
if halfplot is None:
ax.axis([minX, maxX, minY, maxY])
elif halfplot is not None:
if halfplot.lower().startswith('comp'):
ax.set_xlim([minX, 0.0])
ax.set_ylim([minY, maxY])
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
else: # default tensile
ax.set_xlim([0.0, maxX])
ax.set_ylim([minY, maxY])
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
# '''Fix overlapping; no way to do automatically'''
# major_ticks = np.arange(0.0, maxX, 0.1)
# ax.set_xticks(major_ticks)
# Set legend parameters and axes labels
if legend_kw is not None and legend_on:
ax.legend(**legend_kw)
ax.text(**sublabel_kw) # figure sublabel
#TODO: Refactor for less limited parameter-setting of axes labels.
axtitle = kwargs.get('label', '')
xlabel = kwargs.get('xlabel', x)
ylabel = kwargs.get('ylabel', y)
ax.set_title(axtitle)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
##ax.xaxis.labelpad = 20
##ax.yaxis.labelpad = 20
return ax
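# --- Hedged usage sketch (not part of the original module) -------------------
# `case` below is a hypothetical lamana distributions.Case already applied to
# some geometries; its LaminateModels feed the plot directly:
#
#     fig, ax = plt.subplots()
#     _distribplot(case.LMs, normalized=True, annotate=True, ax=ax)
#     plt.show()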
def _multiplot(
caselets, x=None, y=None, title=None, normalized=True, halfplot='tensile',
colorblind=False, grayscale=False, annotate=False, labels_off=False,
suptitle_kw=None, subplots_kw=None, patch_kw=None, plot_kw=None,
legend_kw=None, labels_kw=None, **kwargs
):
'''Return figure of axes containing several plots.
Characteristics
===============
- multiple plots
- kwarg/arg passing
- global labels and titles
- delete remaining subplots if less than remaining axes.
labels_kw : dict
One stop for custom labels and annotated text passed in from user.
axestitle, sublabels, legendtitles are lists of labels for each caselet.
'''
# DEFAULTS ----------------------------------------------------------------
title = '' if title is None else title
if labels_off:
kwargs['xlabel'], kwargs['ylabel'] = ('', '') # turn off axes labels
subplots_kw = {} if subplots_kw is None else subplots_kw
subplots_dft = dict(ncols=4)
subplots_kw.update({k: v for k, v in subplots_dft.items() if k not in subplots_kw})
#print('subplots_kw: ', subplots_kw)
patch_kw = {} if patch_kw is None else patch_kw
#print('patch_kw: ', patch_kw)
plot_kw = {} if plot_kw is None else plot_kw
plot_dft = dict(clip_on=True) # needed in halfplots; else BUG
plot_kw.update({k: v for k, v in plot_dft.items() if k not in plot_kw})
#print('plot_kw: ', plot_kw)
legend_kw = {} if legend_kw is None else legend_kw
legend_dft = dict(loc=1, fontsize='small')
legend_kw.update({k: v for k, v in legend_dft.items() if k not in legend_kw})
#print('legend_kw: ', legend_kw)
suptitle_kw = {} if suptitle_kw is None else suptitle_kw
suptitle_dft = dict(t='', fontsize=22, fontweight='bold')
if title: suptitle_dft.update(dict(t=title))
suptitle_kw.update({k: v for k, v in suptitle_dft.items() if k not in suptitle_kw})
#print('suptitle_kw: ', suptitle_kw)
# Main dict to handle all text
# sublabels defaults to no labels after letter 'z'.
# Will auto label subplots from a to z. Afterwhich, the user must supply labels.
labels_kw = {} if labels_kw is None else labels_kw
alphabet = map(chr, range(97, 123)) # to label subplots; REF 037
labels_dft = dict(suptitle=None, sublabels=list(alphabet),
axes_titles=None, legend_titles=None,)
if title: labels_dft.update(suptitle=title) # compliment convenience kw arg
labels_kw.update({k: v for k, v in labels_dft.items() if k not in labels_kw})
if labels_kw['suptitle']: suptitle_kw.update(t=labels_kw['suptitle'])
# if labels_kw['subtitle']: subtitle=labels_kw['subtitle']
# if labels_kw['xlabel']: kwargs['xlabel'] = '' # remove axlabels; use text()
# if labels_kw['ylabel']: kwargs['ylabel'] = '' # remove axlabels; use text()
#print('labels_kw: ', labels_kw)
'''Consider cycling linecolors for each single geo, multiplot.'''
# FIGURE ------------------------------------------------------------------
# Reset figure dimensions
ncaselets = len(caselets)
ncols_dft = subplots_kw['ncols']
    nrows = int(math.ceil(ncaselets / float(ncols_dft)))  # integer row count, safe under either division behavior
subplots_kw['figsize'] = (24, 8 * nrows)
if ncaselets < ncols_dft:
ncols_dft = ncaselets
subplots_kw['ncols'] = ncaselets
# Set defaults for lists of titles/labels
for key in ['axes_titles', 'legend_titles', 'sublabels']:
if labels_kw[key] is None:
labels_kw[key] = [''] * ncaselets
if ncaselets > len(labels_kw['sublabels']):
labels_kw['sublabels'] = [' '] * ncaselets
        print('There are more caselets than sublabels. Bypassing default... '
              "Consider adding custom labels to 'labels_kw'.")
fig, axes = plt.subplots(nrows=nrows, **subplots_kw)
#print('args: {}'.format(args))
#print('kwargs:{} '.format(kwargs))
#print('nrows: {}, ncols: {}'.format(nrows, ncols_dft))
def plot_caselets(i, ax):
'''Iterate axes of the subplots; apply a small plot ("caselet").
Caselets could contain cases (iterable) or LaminateModels (not iterable).
'''
try:
caselet, axtitle, ltitle, sublabel = (
caselets[i],
labels_kw['axes_titles'][i],
labels_kw['legend_titles'][i],
labels_kw['sublabels'][i]
)
# Plot LMs on each axes per case (and legend notes if there)
#print(ltitle, axsub)
kwargs.update(label=axtitle)
legend_kw.update(title=ltitle)
sublabel_kw = dict(s=sublabel)
# Caselet could be a case or LM, but distribplot needs LMs
try:
LMs = caselet.LMs
except (AttributeError):
# Case is actually a LaminateModel; see distributions.Case.plot().
LMs = [caselet]
#print('Exception was caught; not a case')
_distribplot(
LMs, x=x, y=y, halfplot=halfplot, annotate=annotate,
normalized=normalized, ax=ax, colorblind=colorblind,
grayscale=grayscale, plot_kw=plot_kw, patch_kw=patch_kw,
legend_kw=legend_kw, sublabel_kw=sublabel_kw, **kwargs
)
        except (IndexError, KeyError):
# Cleanup; remove the remaining plots
fig.delaxes(ax)
def iter_vector():
'''Return axes for nrow=1; uses single loop.'''
for i, ax in enumerate(axes):
plot_caselets(i, ax)
def iter_matrix():
'''Return axes for nrow>1; uses nested loop.'''
i = 0
for ax_row in axes:
for ax in ax_row:
plot_caselets(i, ax)
i += 1
if nrows == 1:
iter_vector()
else:
iter_matrix()
# Common Figure Labels
fig.suptitle(**suptitle_kw)
plt.rcParams.update({'font.size': 18})
plt.show()
| {
"content_hash": "cafd33d37b6277ca9f93f84910ca1a87",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 103,
"avg_line_length": 39.816192560175054,
"alnum_prop": 0.5643548032534623,
"repo_name": "par2/lamana-test",
"id": "0ed1e1b891a868b3697249fb368cc8c4f5462821",
"size": "18534",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "lamana/output_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "340051"
}
],
"symlink_target": ""
} |
import json
import logging
from django.conf import settings
from minio import Minio
from minio.error import MinioException, S3Error
from urllib3.exceptions import RequestError
logger = logging.getLogger(__name__)
bucket_list = ["files", "cache"]
if settings.ENABLE_PGP:
bucket_list.append("pgp-keys")
def get_read_only_policy(bucket_name: str) -> dict:
"""
Minio policy: files are publicly readable, cache and pgp keys are private
"""
return {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {"AWS": ["*"]},
"Action": ["s3:GetBucketLocation", "s3:ListBucket"],
"Resource": ["arn:aws:s3:::" + bucket_name],
},
{
"Effect": "Allow",
"Principal": {"AWS": ["*"]},
"Action": ["s3:GetObject"],
"Resource": ["arn:aws:s3:::" + bucket_name + "/*"],
},
],
}
minio_file_bucket = settings.MINIO_PREFIX + "files"
minio_cache_bucket = settings.MINIO_PREFIX + "cache"
minio_pgp_keys_bucket = settings.MINIO_PREFIX + "pgp-keys"
def setup_minio():
minio = minio_client()
for bucket in bucket_list:
# Checking beforehand is not race-safe
try:
minio.make_bucket(settings.MINIO_PREFIX + bucket)
logger.info(f"Creating minio bucket {settings.MINIO_PREFIX + bucket}")
except MinioException:
logger.info(f"minio bucket {settings.MINIO_PREFIX + bucket} already exists")
files_bucket = settings.MINIO_PREFIX + "files"
try:
minio.set_bucket_policy(
files_bucket, json.dumps(get_read_only_policy(files_bucket))
)
except S3Error as e:
# Ignore missing backblaze API, we have set that value already
if e.message != "Backblaze B2 does not support this API call.":
raise
_minio_singleton = None
_minio_public_singleton = None
def minio_client(public: bool = False) -> Minio:
"""
    Create the minio connection lazily. If it were created eagerly, this module could
    only be imported inside functions, because Django's autoloading would otherwise
    load minio before the tests get a chance to mock it.
"""
global _minio_singleton
global _minio_public_singleton
if not _minio_singleton:
_minio_singleton = Minio(
settings.MINIO_HOST,
access_key=settings.MINIO_ACCESS_KEY,
secret_key=settings.MINIO_SECRET_KEY,
secure=settings.MINIO_SECURE,
region=settings.MINIO_REGION,
)
if settings.MINIO_PUBLIC_HOST:
_minio_public_singleton = Minio(
settings.MINIO_PUBLIC_HOST,
access_key=settings.MINIO_ACCESS_KEY,
secret_key=settings.MINIO_SECRET_KEY,
secure=settings.MINIO_PUBLIC_SECURE,
region=settings.MINIO_REGION,
)
else:
_minio_public_singleton = None
# Give a helpful error message
try:
_minio_singleton.bucket_exists(minio_file_bucket)
except RequestError as e:
raise RuntimeError(
f"Could not reach minio at {settings.MINIO_HOST}. Please make sure that minio is working."
) from e
return _minio_singleton if not public else _minio_public_singleton
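# --- Hedged usage sketch (not part of the original module) -------------------
# Typical call sites fetch the singleton and talk to one of the bucket names
# defined above; the object name and payload here are illustrative only:
#
#     import io
#     data = b"hello"
#     minio_client().put_object(
#         minio_file_bucket, "example/hello.txt", io.BytesIO(data), len(data)
#     )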
| {
"content_hash": "3af7025b6923e3c04d6abe4cb1ad8a9e",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 106,
"avg_line_length": 32.075471698113205,
"alnum_prop": 0.5947058823529412,
"repo_name": "meine-stadt-transparent/meine-stadt-transparent",
"id": "79ed402129ab3130e7a71a7f4f471f6f190a4fa3",
"size": "3400",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mainapp/functions/minio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2397"
},
{
"name": "HTML",
"bytes": "158632"
},
{
"name": "JavaScript",
"bytes": "62206"
},
{
"name": "Python",
"bytes": "601144"
},
{
"name": "SCSS",
"bytes": "40214"
},
{
"name": "Shell",
"bytes": "1363"
}
],
"symlink_target": ""
} |
from ezdxf.math import ConstructionLine, Vec2
class TestConstructionLine:
def test_is_vertical(self):
assert ConstructionLine((0, 0), (10, 0)).is_vertical is False
assert ConstructionLine((5, -5), (5, 5)).is_vertical is True
def test_left_of_line(self):
line = ConstructionLine((0, 0), (0.1, 1))
assert line.is_point_left_of_line(Vec2(-1, 0)) is True
assert line.is_point_left_of_line(Vec2(1, 0)) is False
assert line.is_point_left_of_line(Vec2(-1, -1)) is True
line = ConstructionLine((0, 0), (0, -1))
assert line.is_point_left_of_line(Vec2(1, 0)) is True
line = ConstructionLine((0, 0), (-1, 0.1))
assert line.is_point_left_of_line(Vec2(-1, 0)) is True
line = ConstructionLine((0, 0), (10, 0))
assert line.is_point_left_of_line(Vec2(0, 0)) is False
assert line.is_point_left_of_line(Vec2(10, 0)) is False
assert line.is_point_left_of_line(Vec2(10, 1)) is True
assert line.is_point_left_of_line(Vec2(10, -1)) is False
line = ConstructionLine((10, 0), (0, 0))
assert line.is_point_left_of_line(Vec2(0, 0)) is False
assert line.is_point_left_of_line(Vec2(10, 0)) is False
assert line.is_point_left_of_line(Vec2(10, 1)) is False
assert line.is_point_left_of_line(Vec2(10, -1)) is True
line = ConstructionLine((0, 0), (0, 10))
assert line.is_point_left_of_line(Vec2(0, 0)) is False
assert line.is_point_left_of_line(Vec2(0, 10)) is False
assert line.is_point_left_of_line(Vec2(1, 10)) is False
assert line.is_point_left_of_line(Vec2(-1, 10)) is True
line = ConstructionLine((0, 10), (0, 0))
assert line.is_point_left_of_line(Vec2(0, 0)) is False
assert line.is_point_left_of_line(Vec2(0, 10)) is False
assert line.is_point_left_of_line(Vec2(1, 10)) is True
assert line.is_point_left_of_line(Vec2(-1, 10)) is False
def test_intersect_horizontal_line(self):
line = ConstructionLine((0, 0), (10, 0))
assert line.intersect(ConstructionLine((0, 0), (10, 0))) is None
assert line.intersect(ConstructionLine((0, 1), (10, 1))) is None
assert line.intersect(ConstructionLine((0, -1), (10, 1))) == (5, 0)
assert line.intersect(ConstructionLine((5, 5), (5, -5))) == (5, 0)
assert line.intersect(ConstructionLine((5, 5), (5, 1))) is None
assert line.intersect(ConstructionLine((0, 0), (5, 5))) == (0, 0)
def test_intersect_vertical_line(self):
line = ConstructionLine((0, 0), (0, 10))
assert line.intersect(ConstructionLine((0, 0), (0, 10))) is None
assert line.intersect(ConstructionLine((1, 0), (1, 10))) is None
assert line.intersect(ConstructionLine((-1, 0), (1, 10))) == (0, 5)
assert line.intersect(ConstructionLine((-1, 0), (1, 0))) == (0, 0)
assert line.intersect(ConstructionLine((-1, 10), (1, 10))) == (0, 10)
assert line.intersect(ConstructionLine((-1, 11), (1, 11))) is None
def test_bounding_box(self):
line = ConstructionLine((0, 0), (7, 10))
bbox = line.bounding_box
assert bbox.extmin == (0, 0)
assert bbox.extmax == (7, 10)
def test_translate(self):
line = ConstructionLine((0, 0), (0, 10))
line.translate(3, 7)
assert line.start == (3, 7)
assert line.end == (3, 17)
bbox = line.bounding_box
assert bbox.extmin == (3, 7)
assert bbox.extmax == (3, 17)
| {
"content_hash": "5818e791556e4e2cb748d25fe39d30bd",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 77,
"avg_line_length": 46.44736842105263,
"alnum_prop": 0.5963172804532578,
"repo_name": "mozman/ezdxf",
"id": "9f801e9b8a2d8c918dc7e3675ed7509c3b8744c0",
"size": "3595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_06_math/test_642_construction_line.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5745"
},
{
"name": "CSS",
"bytes": "3565"
},
{
"name": "Common Lisp",
"bytes": "727"
},
{
"name": "Cython",
"bytes": "111923"
},
{
"name": "HTML",
"bytes": "1417"
},
{
"name": "JavaScript",
"bytes": "11132"
},
{
"name": "Python",
"bytes": "6336553"
}
],
"symlink_target": ""
} |
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1RouteStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'ingress': 'list[V1RouteIngress]'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'ingress': 'ingress'
}
def __init__(self, ingress=None):
"""
V1RouteStatus - a model defined in Swagger
"""
self._ingress = ingress
@property
def ingress(self):
"""
Gets the ingress of this V1RouteStatus.
Ingress describes the places where the route may be exposed. The list of ingress points may contain duplicate Host or RouterName values. Routes are considered live once they are `Ready`
:return: The ingress of this V1RouteStatus.
:rtype: list[V1RouteIngress]
"""
return self._ingress
@ingress.setter
def ingress(self, ingress):
"""
Sets the ingress of this V1RouteStatus.
Ingress describes the places where the route may be exposed. The list of ingress points may contain duplicate Host or RouterName values. Routes are considered live once they are `Ready`
:param ingress: The ingress of this V1RouteStatus.
:type: list[V1RouteIngress]
"""
self._ingress = ingress
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1RouteStatus.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| {
"content_hash": "67dc14147f4fc7d7d4bcb86c853677cd",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 193,
"avg_line_length": 28.265625,
"alnum_prop": 0.5809839690436706,
"repo_name": "detiber/lib_openshift",
"id": "2b259b6ac4eae089406dcebe20f8e16c8f943fc0",
"size": "3635",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib_openshift/models/v1_route_status.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "61305"
},
{
"name": "Python",
"bytes": "6202851"
},
{
"name": "Shell",
"bytes": "2825"
}
],
"symlink_target": ""
} |
# coding: utf-8
from elasticsearch import Elasticsearch, helpers
import psycopg2, pprint, sys, time, os
CHUNK_SIZE = 10000
def getEnvOrExit(var):
environment = ''
try:
environment = os.environ[var]
except:
print('[Error]: Environment variable ' + var + ' not defined.')
sys.exit(1)
return environment
dbparams = getEnvOrExit('PANTSU_DBPARAMS')
pantsu_index = getEnvOrExit('PANTSU_ELASTICSEARCH_INDEX')
torrent_tablename = getEnvOrExit('PANTSU_TORRENT_TABLENAME')
es = Elasticsearch()
pgconn = psycopg2.connect(dbparams)
cur = pgconn.cursor()
# We MUST use NO QUERY CACHE because the values are insert on triggers and
# not through pgppool.
cur.execute('/*NO QUERY CACHE*/ SELECT reindex_torrents_id, torrent_id, action FROM reindex_{torrent_tablename}'.format(torrent_tablename=torrent_tablename))
fetches = cur.fetchmany(CHUNK_SIZE)
while fetches:
actions = list()
delete_cur = pgconn.cursor()
for reindex_id, torrent_id, action in fetches:
new_action = {
'_op_type': action,
'_index': pantsu_index,
'_type': 'torrents',
'_id': torrent_id
}
if action == 'index':
select_cur = pgconn.cursor()
select_cur.execute("""SELECT torrent_id, torrent_name, description, hidden, category, sub_category, status,
torrent_hash, date, uploader, downloads, filesize, seeders, leechers, completed, language
FROM {torrent_tablename}
WHERE torrent_id = {torrent_id}""".format(torrent_id=torrent_id, torrent_tablename=torrent_tablename))
torrent_id, torrent_name, description, hidden, category, sub_category, status, torrent_hash, date, uploader, downloads, filesize, seeders, leechers, completed, language = select_cur.fetchone()
doc = {
'id': torrent_id,
'name': torrent_name.decode('utf-8'),
'category': str(category),
'sub_category': str(sub_category),
'status': status,
'hidden': hidden,
'description': description,
'hash': torrent_hash,
'date': date,
'uploader_id': uploader,
'downloads': downloads,
'filesize': filesize,
'seeders': seeders,
'leechers': leechers,
'completed': completed,
'language': language
}
new_action['_source'] = doc
select_cur.close()
delete_cur.execute('DELETE FROM reindex_{torrent_tablename} WHERE reindex_torrents_id = {reindex_id}'.format(reindex_id=reindex_id,torrent_tablename=torrent_tablename))
actions.append(new_action)
pgconn.commit() # Commit the deletes transaction
delete_cur.close()
helpers.bulk(es, actions, chunk_size=CHUNK_SIZE, request_timeout=120)
del(fetches)
fetches = cur.fetchmany(CHUNK_SIZE)
cur.close()
pgconn.close()
| {
"content_hash": "0fa489b98a99c58fb20f4c7cdb3b13d2",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 204,
"avg_line_length": 40.66216216216216,
"alnum_prop": 0.6134928547690263,
"repo_name": "Nutjob/nyaa",
"id": "0b8874060834ce3f506c402a9623a9425385e33c",
"size": "3009",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "deploy/ansible/roles/elasticsearch/files/reindex_nyaapantsu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "30872"
},
{
"name": "Go",
"bytes": "371174"
},
{
"name": "HTML",
"bytes": "123082"
},
{
"name": "JavaScript",
"bytes": "30265"
},
{
"name": "Python",
"bytes": "5107"
},
{
"name": "Shell",
"bytes": "2834"
}
],
"symlink_target": ""
} |
"""Utilities for all our deps-management stuff."""
import os
import shutil
import sys
import subprocess
import tarfile
import time
import zipfile
def RunSubprocessWithRetry(cmd):
"""Invokes the subprocess and backs off exponentially on fail."""
for i in range(5):
try:
subprocess.check_call(cmd)
return
except subprocess.CalledProcessError as exception:
backoff = pow(2, i)
print 'Got %s, retrying in %d seconds...' % (exception, backoff)
time.sleep(backoff)
print 'Giving up.'
raise exception
def DownloadFilesFromGoogleStorage(path, auto_platform=True):
print 'Downloading files in %s...' % path
extension = 'bat' if 'win32' in sys.platform else 'py'
cmd = ['download_from_google_storage.%s' % extension,
'--bucket=chromium-webrtc-resources',
'--directory', path]
if auto_platform:
cmd += ['--auto_platform', '--recursive']
subprocess.check_call(cmd)
# Code partially copied from
# https://cs.chromium.org#chromium/build/scripts/common/chromium_utils.py
def RemoveDirectory(*path):
"""Recursively removes a directory, even if it's marked read-only.
Remove the directory located at *path, if it exists.
shutil.rmtree() doesn't work on Windows if any of the files or directories
are read-only, which svn repositories and some .svn files are. We need to
be able to force the files to be writable (i.e., deletable) as we traverse
the tree.
Even with all this, Windows still sometimes fails to delete a file, citing
a permission error (maybe something to do with antivirus scans or disk
indexing). The best suggestion any of the user forums had was to wait a
bit and try again, so we do that too. It's hand-waving, but sometimes it
works. :/
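  Illustrative example (path components are placeholders):
    RemoveDirectory('out', 'Release')  # removes os.path.join('out', 'Release')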
"""
file_path = os.path.join(*path)
print 'Deleting `{}`.'.format(file_path)
if not os.path.exists(file_path):
print '`{}` does not exist.'.format(file_path)
return
if sys.platform == 'win32':
# Give up and use cmd.exe's rd command.
file_path = os.path.normcase(file_path)
for _ in xrange(3):
print 'RemoveDirectory running %s' % (' '.join(
['cmd.exe', '/c', 'rd', '/q', '/s', file_path]))
if not subprocess.call(['cmd.exe', '/c', 'rd', '/q', '/s', file_path]):
break
print ' Failed'
time.sleep(3)
return
else:
shutil.rmtree(file_path, ignore_errors=True)
def UnpackArchiveTo(archive_path, output_dir):
extension = os.path.splitext(archive_path)[1]
if extension == '.zip':
_UnzipArchiveTo(archive_path, output_dir)
else:
_UntarArchiveTo(archive_path, output_dir)
def _UnzipArchiveTo(archive_path, output_dir):
print 'Unzipping {} in {}.'.format(archive_path, output_dir)
zip_file = zipfile.ZipFile(archive_path)
try:
zip_file.extractall(output_dir)
finally:
zip_file.close()
def _UntarArchiveTo(archive_path, output_dir):
print 'Untarring {} in {}.'.format(archive_path, output_dir)
tar_file = tarfile.open(archive_path, 'r:gz')
try:
tar_file.extractall(output_dir)
finally:
tar_file.close()
def GetPlatform():
if sys.platform.startswith('win'):
return 'win'
if sys.platform.startswith('linux'):
return 'linux'
if sys.platform.startswith('darwin'):
return 'mac'
raise Exception("Can't run on platform %s." % sys.platform)
def GetExecutableExtension():
return '.exe' if GetPlatform() == 'win' else ''
| {
"content_hash": "e2d1cdb1270da4f60f40b22b5d9ac481",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 77,
"avg_line_length": 29.704347826086956,
"alnum_prop": 0.6759367681498829,
"repo_name": "koobonil/Boss2D",
"id": "e0f679d6b51ed53b14fbf57ec60a16aaa5168dd2",
"size": "3832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Boss2D/addon/webrtc-jumpingyang001_for_boss/rtc_tools/testing/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "4820445"
},
{
"name": "Awk",
"bytes": "4272"
},
{
"name": "Batchfile",
"bytes": "89930"
},
{
"name": "C",
"bytes": "119747922"
},
{
"name": "C#",
"bytes": "87505"
},
{
"name": "C++",
"bytes": "272329620"
},
{
"name": "CMake",
"bytes": "1199656"
},
{
"name": "CSS",
"bytes": "42679"
},
{
"name": "Clojure",
"bytes": "1487"
},
{
"name": "Cuda",
"bytes": "1651996"
},
{
"name": "DIGITAL Command Language",
"bytes": "239527"
},
{
"name": "Dockerfile",
"bytes": "9638"
},
{
"name": "Emacs Lisp",
"bytes": "15570"
},
{
"name": "Go",
"bytes": "858185"
},
{
"name": "HLSL",
"bytes": "3314"
},
{
"name": "HTML",
"bytes": "2958385"
},
{
"name": "Java",
"bytes": "2921052"
},
{
"name": "JavaScript",
"bytes": "178190"
},
{
"name": "Jupyter Notebook",
"bytes": "1833654"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "M4",
"bytes": "775724"
},
{
"name": "MATLAB",
"bytes": "74606"
},
{
"name": "Makefile",
"bytes": "3941551"
},
{
"name": "Meson",
"bytes": "2847"
},
{
"name": "Module Management System",
"bytes": "2626"
},
{
"name": "NSIS",
"bytes": "4505"
},
{
"name": "Objective-C",
"bytes": "4090702"
},
{
"name": "Objective-C++",
"bytes": "1702390"
},
{
"name": "PHP",
"bytes": "3530"
},
{
"name": "Perl",
"bytes": "11096338"
},
{
"name": "Perl 6",
"bytes": "11802"
},
{
"name": "PowerShell",
"bytes": "38571"
},
{
"name": "Python",
"bytes": "24123805"
},
{
"name": "QMake",
"bytes": "18188"
},
{
"name": "Roff",
"bytes": "1261269"
},
{
"name": "Ruby",
"bytes": "5890"
},
{
"name": "Scala",
"bytes": "5683"
},
{
"name": "Shell",
"bytes": "2879948"
},
{
"name": "TeX",
"bytes": "243507"
},
{
"name": "TypeScript",
"bytes": "1593696"
},
{
"name": "Verilog",
"bytes": "1215"
},
{
"name": "Vim Script",
"bytes": "3759"
},
{
"name": "Visual Basic",
"bytes": "16186"
},
{
"name": "eC",
"bytes": "9705"
}
],
"symlink_target": ""
} |
import base64
import boto.sts
import configparser
import logging
import os
import requests
import xml.etree.ElementTree as ET
import sys
import traceback
from bs4 import BeautifulSoup
from kerb_sts.awsrole import AWSRole
class DefaultRoleCredsGenerationError(Exception):
pass
class KerberosHandler:
"""
The KerberosHandler sends a request to an IdP endpoint. The handler can either use Kerberos auth
or can also be configured with a username and password and use NTLM auth. This handler takes
the SAML response from the IdP and parses out the available AWS IAM roles that the
user can assume. The handler then reaches out to AWS and generates temporary tokens for each of
the roles that the user can assume.
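    Illustrative usage sketch (the IdP URL, output paths and the authenticator
    object are placeholders, not values defined by this module):
        handler = KerberosHandler()
        handler.handle_sts_by_kerberos(
            'us-east-1', 'https://idp.example.com/adfs/ls/IdpInitiatedSignOn.aspx',
            '/home/user/.aws/credentials', '/home/user/.aws/config',
            only_role=None, default_role=None, list_only=True,
            authenticator=authenticator)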
"""
def __init__(self):
"""
Creates a new KerberosHandler object
"""
self.output_format = 'json'
self.ssl_verification = True
def handle_sts_by_kerberos(self, region, url, credentials_filename, config_filename,
only_role, default_role, list_only, authenticator):
"""
Entry point for generating a set of temporary tokens from AWS.
:param region: The AWS region tokens are being requested for
:param url: The URL of the IdP endpoint to auth against
:param credentials_filename: Where should the tokens be written to
:param config_filename: Where should the region/format be written to
        :param only_role: If set, only this IAM role will have credentials generated
        :param default_role: Which IAM role should be set as the default in the config file
:param list_only: If set, the IAM roles available will just be printed instead of assumed
:param authenticator: the Authenticator
"""
session = requests.Session()
headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko'}
response = session.get(
url,
verify=self.ssl_verification,
headers=headers,
auth=authenticator.get_auth_handler(session)
)
logging.debug("received {} IdP response".format(response.status_code))
if response.status_code != requests.codes.ok:
raise Exception(
"did not get a valid reply. response was: {} {}".format(response.status_code, response.text)
)
# We got a successful response from the IdP. Parse the assertion and pass it to AWS
self._handle_sts_from_response(response, region, credentials_filename, config_filename, only_role, default_role, list_only)
def _handle_sts_from_response(self, response, region, credentials_filename, config_filename, only_role, default_role, list_only):
"""
Takes a successful SAML response, parses it for valid AWS IAM roles, and then reaches out to
AWS and requests temporary tokens for each of the IAM roles.
:param response: The SAML response from a previous request to IdP endpoint
:param region: The AWS region tokens are being requested for
        :param credentials_filename: Where should the tokens be written to
        :param config_filename: Where should the region/format be written to
        :param only_role: If set, only this IAM role will have credentials generated
        :param default_role: Which IAM role should be set as the default in the config file
:param list_only: If set, the IAM roles available will just be printed instead of assumed
"""
soup = BeautifulSoup(response.text, 'html.parser')
# Look for the SAMLResponse attribute of the input tag (determined by
# analyzing the debug print lines above)
assertion = None
for inputtag in soup.find_all('input'):
if inputtag.get('name') == 'SAMLResponse':
assertion = inputtag.get('value')
if not assertion:
raise Exception("did not get a valid SAML response. response was:\n%s" % response.text)
# Parse the returned assertion and extract the authorized roles
aws_roles = []
root = ET.fromstring(base64.b64decode(assertion))
for saml2attribute in root.iter('{urn:oasis:names:tc:SAML:2.0:assertion}Attribute'):
if saml2attribute.get('Name') == 'https://aws.amazon.com/SAML/Attributes/Role':
for saml2attributevalue in saml2attribute.iter('{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue'):
aws_roles.append(saml2attributevalue.text)
if not aws_roles:
raise Exception("user does not have any valid aws roles.")
# Note the format of the attribute value should be role_arn,principal_arn
# but lots of blogs list it as principal_arn,role_arn so let's reverse
# them if needed
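        # For illustration only (placeholder ARNs): a value such as
        # 'arn:aws:iam::123456789012:saml-provider/ADFS,arn:aws:iam::123456789012:role/Dev'
        # is rewritten below as
        # 'arn:aws:iam::123456789012:role/Dev,arn:aws:iam::123456789012:saml-provider/ADFS'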
for aws_role in aws_roles:
chunks = aws_role.split(',')
if 'saml-provider' in chunks[0]:
new_aws_role = chunks[1] + ',' + chunks[0]
index = aws_roles.index(aws_role)
aws_roles.insert(index, new_aws_role)
aws_roles.remove(aws_role)
        # If the user supplied a default role, make sure
# that role is available to them.
if default_role is not None:
found_default_role = False
for aws_role in aws_roles:
name = AWSRole(aws_role).name
if name == default_role:
found_default_role = True
break
if not found_default_role:
raise Exception("provided default role not found in list of available roles")
# Go through each of the available roles and
# attempt to get temporary tokens for each
try:
for aws_role in aws_roles:
profile = AWSRole(aws_role).name
if list_only:
logging.info("role: {}".format(profile))
elif only_role is not None:
if only_role == profile:
self._generate_credentials_for_role(region, credentials_filename, config_filename, default_role, assertion, aws_role, profile)
else:
logging.debug("skipping role {}; --only_role specified".format(profile))
else:
self._generate_credentials_for_role(region, credentials_filename, config_filename, default_role, assertion, aws_role, profile)
except DefaultRoleCredsGenerationError:
sys.exit(1)
def _generate_credentials_for_role(self, region, credentials_filename, config_filename, default_role, assertion, aws_role, profile):
try:
token = self._bind_assertion_to_role(assertion, aws_role, profile,
region, credentials_filename, config_filename, default_role)
if not token:
raise Exception('did not receive a valid token from aws')
expires_utc = token.credentials.expiration
if default_role == profile:
logging.info("default role: {} until {}".format(profile, expires_utc))
else:
logging.info("role: {} until {}".format(profile, expires_utc))
except Exception as ex:
if default_role == profile:
logging.error(
'failed to save temporary credentials for default role {}: {}'.format(profile, ex)
)
raise DefaultRoleCredsGenerationError('failed to save temporary credentials for default role {}: {}'.format(profile, ex))
else:
logging.warning('failed to save temporary credentials for role {}'.format(profile))
logging.debug(traceback.format_exc())
logging.debug(sys.exc_info()[0])
def _bind_assertion_to_role(self, assertion, role, profile, region,
credentials_filename, config_filename, default_role):
"""
Attempts to assume an IAM role using a given SAML assertion.
:param assertion: A SAML assertion authenticating the user
:param role: The IAM role being assumed
:param profile: The name of the role
:param region: The region the role is being assumed in
        :param credentials_filename: Output file for the generated tokens
        :param config_filename: Output file for the region/format for each profile
:param default_role: Which role should be set as default in the config
:return token: A valid token with temporary IAM credentials
"""
# Attempt to assume the IAM role
conn = boto.sts.connect_to_region(region, aws_secret_access_key='', aws_access_key_id='')
role_arn = role.split(',')[0]
principal_arn = role.split(',')[1]
token = conn.assume_role_with_saml(role_arn, principal_arn, assertion)
if not token:
raise Exception("failed to receive a valid token when assuming a role.")
# Write the AWS STS token into the AWS credential file
# Read in the existing config file
default_section = 'default'
credentials_config = configparser.RawConfigParser(default_section=default_section)
credentials_config.read(credentials_filename)
config = configparser.RawConfigParser(default_section=default_section)
config.read(config_filename)
# If the default_role was passed in on the command line we will overwrite
# the [default] section of the credentials file
sections = []
if default_role == profile:
sections.append(default_section)
# Make sure the section exists
if not credentials_config.has_section(profile):
credentials_config.add_section(profile)
if not config.has_section(profile):
config.add_section(profile)
sections.append(profile)
for section in sections:
self._set_config_section(credentials_config,
section,
output=self.output_format,
region=region,
aws_role_arn=role_arn,
aws_access_key_id=token.credentials.access_key,
aws_secret_access_key=token.credentials.secret_key,
aws_session_token=token.credentials.session_token,
aws_security_token=token.credentials.session_token,
aws_session_expires_utc=token.credentials.expiration)
self._set_config_section(config, section, output=self.output_format, region=region)
# Write the updated config file
if not os.path.exists(os.path.dirname(credentials_filename)):
try:
os.makedirs(os.path.dirname(credentials_filename))
except OSError as ex:
raise Exception("could not create credential file directory")
if not os.path.exists(os.path.dirname(config_filename)):
try:
os.makedirs(os.path.dirname(config_filename))
except OSError as ex:
raise Exception("could not create config file directory")
with open(credentials_filename, 'w+') as fp:
credentials_config.write(fp)
with open(config_filename, 'w+') as fp:
config.write(fp)
return token
@staticmethod
def _set_config_section(config, section, **kwargs):
"""
Set the configuration section in the file with the properties given. The section
must exist before calling this method.
:param config: the configuration object
:param section: the name of the section
:param kwargs: the key value pairs to put into the section
:return: Nothing
"""
for name, value in kwargs.items():
config.set(section, name, value)
| {
"content_hash": "1cc60a94eb46fefe83437c6a0b75e1a8",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 150,
"avg_line_length": 46.275193798449614,
"alnum_prop": 0.6180584638579446,
"repo_name": "commercehub-oss/kerb-sts",
"id": "4562f09d4a97ecf9f4ffd59d1ba47f84b25efd0a",
"size": "12527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kerb_sts/handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "38485"
}
],
"symlink_target": ""
} |
import math
# Runs the Chebyshev algorithm up to 30 times, increasing degree n until the guess is sufficiently close.
# Outputs the calculated Chebyshev value, the degree of the polynomial where the best guess was calculated,
# and the actual value from the function.
# Example usage: chebyshev(-1, 1, 0.5, math.sin)
# Example output:
# Best Chebyshev value: 0.4796783062232612
# Degree of accurate guess: 4
# Actual function value: 0.479425538604203
def chebyshev(a, b, x, func):
actual_value = func(x)
    for n in range(1, 31):
        # Map the Chebyshev nodes onto [a, b] using 0.5 * (b - a) and 0.5 * (b + a)
bma = 0.5 * (b - a)
bpa = 0.5 * (b + a)
try:
f = [func(math.cos(math.pi * (k + 0.5) / n) * bma + bpa) for k in range(n)]
except ValueError:
print('Invalid interval. Make sure the function can support negative values.')
return
fac = 2.0 / n
c = [fac * sum([f[k] * math.cos(math.pi * j * (k + 0.5) / n)
for k in range(n)]) for j in range(n)]
if not a <= x <= b:
print('Invalid input. a <= x <= b')
return
y = (2.0 * x - a - b) * (1.0 / (b - a))
y2 = 2.0 * y
(d, dd) = (c[-1], 0) # Special case first step for efficiency
for cj in c[-2:0:-1]: # Clenshaw's recurrence
(d, dd) = (y2 * d - dd + cj, d)
# Calculate the guess
guess = y * d - dd + 0.5 * c[0]
# Check if it's close
if math.isclose(actual_value, guess, rel_tol=0.001):
print('Best Chebyshev value: ' + str(guess))
print('Degree of accurate guess: ' + str(n))
print('Actual function value: ' + str(func(x)))
return
elif n == 30:
print('Error: Could not solve within 30 degrees.')
| {
"content_hash": "31e4d74cc8a39587131d8acbd1356160",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 107,
"avg_line_length": 36.66,
"alnum_prop": 0.5384615384615384,
"repo_name": "protocol114/numerical-2.2",
"id": "9a24b0a70cb9e443bbbd4c5a640d662f0a6fbc50",
"size": "1833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithms/chebyshev.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43414"
}
],
"symlink_target": ""
} |
from dspl2.rdfutil import (LoadGraph, FrameGraph, SelectFromGraph)
from io import StringIO
import json
import rdflib
import rdflib.compare
import unittest
_SampleJson = '''{
"@context": "http://schema.org",
"@type": "StatisticalDataset",
"@id": "",
"url": "https://data.europa.eu/euodp/en/data/dataset/bAzn6fiusnRFOBwUeIo78w",
"identifier": "met_d3dens",
"name": "Eurostat Population Density",
"description": "Population density by metropolitan regions",
"dateCreated": "2015-10-16",
"dateModified": "2019-06-18",
"temporalCoverage": "1990-01-01/2016-01-01",
"distribution": {
"@type": "DataDownload",
"contentUrl": "http://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing?file=data/met_d3dens.tsv.gz&unzip=true",
"encodingFormat": "text/tab-separated-values"
},
"spatialCoverage":{
"@type":"Place",
"geo":{
"@type":"GeoShape",
"name": "European Union",
"box":"34.633285 -10.468556 70.096054 34.597916"
}
},
"license": "https://ec.europa.eu/eurostat/about/policies/copyright",
"creator":{
"@type":"Organization",
"url": "https://ec.europa.eu/eurostat",
"name":"Eurostat"
},
"publisher": {
"@type": "Organization",
"name": "Eurostat",
"url": "https://ec.europa.eu/eurostat",
"contactPoint": {
"@type": "ContactPoint",
"contactType": "User Support",
"url": "https://ec.europa.eu/eurostat/help/support"
}
}
}'''
class RdfUtilTests(unittest.TestCase):
def test_LoadGraph(self):
graph1 = LoadGraph(_SampleJson, '')
graph2 = LoadGraph(json.loads(_SampleJson), '')
graph3 = LoadGraph(StringIO(_SampleJson), '')
self.assertTrue(rdflib.compare.isomorphic(graph1, graph2))
self.assertTrue(rdflib.compare.isomorphic(graph1, graph3))
def test_FrameGraph(self):
json_val = FrameGraph(LoadGraph(_SampleJson, ''))
self.assertEqual(json_val['@context'], 'http://schema.org')
self.assertEqual(json_val['@type'], 'StatisticalDataset')
self.assertEqual(json_val['url'], 'https://data.europa.eu/euodp/en/data/dataset/bAzn6fiusnRFOBwUeIo78w')
self.assertEqual(json_val['identifier'], 'met_d3dens')
self.assertEqual(json_val['name'], 'Eurostat Population Density')
self.assertEqual(json_val['description'], 'Population density by metropolitan regions')
self.assertEqual(json_val['dateCreated'], '2015-10-16')
self.assertEqual(json_val['dateModified'], '2019-06-18')
self.assertEqual(json_val['temporalCoverage'], '1990-01-01/2016-01-01')
self.assertEqual(json_val['distribution']['@type'], 'DataDownload')
self.assertEqual(json_val['distribution']['contentUrl'], 'http://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing?file=data/met_d3dens.tsv.gz&unzip=true')
self.assertEqual(json_val['distribution']['encodingFormat'], 'text/tab-separated-values')
self.assertEqual(json_val['spatialCoverage']['@type'], "Place")
self.assertEqual(json_val['spatialCoverage']['geo']['@type'], "GeoShape")
self.assertEqual(json_val['spatialCoverage']['geo']['name'], 'European Union')
self.assertEqual(json_val['spatialCoverage']['geo']['box'], '34.633285 -10.468556 70.096054 34.597916')
self.assertEqual(json_val['license'], 'https://ec.europa.eu/eurostat/about/policies/copyright')
self.assertEqual(json_val['creator']['@type'], "Organization")
self.assertEqual(json_val['creator']['url'], 'https://ec.europa.eu/eurostat')
self.assertEqual(json_val['creator']['name'], 'Eurostat')
self.assertEqual(json_val['publisher']['@type'], 'Organization')
self.assertEqual(json_val['publisher']['name'], 'Eurostat')
self.assertEqual(json_val['publisher']['url'], 'https://ec.europa.eu/eurostat')
self.assertEqual(json_val['publisher']['contactPoint']['@type'], 'ContactPoint')
self.assertEqual(json_val['publisher']['contactPoint']['contactType'], 'User Support')
self.assertEqual(json_val['publisher']['contactPoint']['url'], 'https://ec.europa.eu/eurostat/help/support')
def test_SelectFromGraph(self):
graph = LoadGraph(_SampleJson, '')
results = list(SelectFromGraph(
graph,
('?ds', 'rdf:type', 'schema:StatisticalDataset'),
('?ds', 'schema:name', '?name')))
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['name'], 'Eurostat Population Density')
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "1621c35c489e6bec8705b1dbfb1f1131",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 178,
"avg_line_length": 44.21,
"alnum_prop": 0.6760913820402624,
"repo_name": "google/dspl",
"id": "4ed458a8a8c035fc078eca93277e56baa4eafa6e",
"size": "4421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/dspl2/dspl2/tests/test_rdfutil.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1192"
},
{
"name": "HTML",
"bytes": "3718"
},
{
"name": "JavaScript",
"bytes": "6612"
},
{
"name": "Python",
"bytes": "269698"
}
],
"symlink_target": ""
} |
"""Example of using two different training methods at once in multi-agent.
Here we create a number of CartPole agents, some of which are trained with
DQN, and some of which are trained with PPO. We periodically sync weights
between the two trainers (note that no such syncing is needed when using just
a single training method).
For a simpler example, see also: multiagent_cartpole.py
"""
import argparse
import gym
import os
import ray
from ray.rllib.agents.dqn import DQNTrainer, DQNTFPolicy, DQNTorchPolicy
from ray.rllib.agents.ppo import PPOTrainer, PPOTFPolicy, PPOTorchPolicy
from ray.rllib.examples.env.multi_agent import MultiAgentCartPole
from ray.tune.logger import pretty_print
from ray.tune.registry import register_env
parser = argparse.ArgumentParser()
# Use torch for both policies.
parser.add_argument("--torch", action="store_true")
# Mix PPO=tf and DQN=torch if set.
parser.add_argument("--mixed-torch-tf", action="store_true")
parser.add_argument("--as-test", action="store_true")
parser.add_argument("--stop-iters", type=int, default=20)
parser.add_argument("--stop-reward", type=float, default=50)
parser.add_argument("--stop-timesteps", type=int, default=100000)
if __name__ == "__main__":
args = parser.parse_args()
assert not (args.torch and args.mixed_torch_tf),\
"Use either --torch or --mixed-torch-tf, not both!"
ray.init()
# Simple environment with 4 independent cartpole entities
register_env("multi_agent_cartpole",
lambda _: MultiAgentCartPole({"num_agents": 4}))
single_dummy_env = gym.make("CartPole-v0")
obs_space = single_dummy_env.observation_space
act_space = single_dummy_env.action_space
# You can also have multiple policies per trainer, but here we just
# show one each for PPO and DQN.
policies = {
"ppo_policy": (PPOTorchPolicy if args.torch else PPOTFPolicy,
obs_space, act_space, {}),
"dqn_policy": (DQNTorchPolicy if args.torch or args.mixed_torch_tf else
DQNTFPolicy, obs_space, act_space, {}),
}
def policy_mapping_fn(agent_id):
if agent_id % 2 == 0:
return "ppo_policy"
else:
return "dqn_policy"
ppo_trainer = PPOTrainer(
env="multi_agent_cartpole",
config={
"multiagent": {
"policies": policies,
"policy_mapping_fn": policy_mapping_fn,
"policies_to_train": ["ppo_policy"],
},
"explore": False,
# disable filters, otherwise we would need to synchronize those
# as well to the DQN agent
"observation_filter": "NoFilter",
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
"num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
"framework": "torch" if args.torch else "tf",
})
dqn_trainer = DQNTrainer(
env="multi_agent_cartpole",
config={
"multiagent": {
"policies": policies,
"policy_mapping_fn": policy_mapping_fn,
"policies_to_train": ["dqn_policy"],
},
"gamma": 0.95,
"n_step": 3,
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
"num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
"framework": "torch" if args.torch or args.mixed_torch_tf else "tf"
})
# You should see both the printed X and Y approach 200 as this trains:
# info:
# policy_reward_mean:
# dqn_policy: X
# ppo_policy: Y
for i in range(args.stop_iters):
print("== Iteration", i, "==")
# improve the DQN policy
print("-- DQN --")
result_dqn = dqn_trainer.train()
print(pretty_print(result_dqn))
# improve the PPO policy
print("-- PPO --")
result_ppo = ppo_trainer.train()
print(pretty_print(result_ppo))
# Test passed gracefully.
if args.as_test and \
result_dqn["episode_reward_mean"] > args.stop_reward and \
result_ppo["episode_reward_mean"] > args.stop_reward:
print("test passed (both agents above requested reward)")
quit(0)
# swap weights to synchronize
dqn_trainer.set_weights(ppo_trainer.get_weights(["ppo_policy"]))
ppo_trainer.set_weights(dqn_trainer.get_weights(["dqn_policy"]))
# Desired reward not reached.
if args.as_test:
raise ValueError("Desired reward ({}) not reached!".format(
args.stop_reward))
| {
"content_hash": "8a3eae90a99fbd8ff7e473008e03a29c",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 79,
"avg_line_length": 36.92,
"alnum_prop": 0.609967497291441,
"repo_name": "richardliaw/ray",
"id": "08d354d882d4a1bccaee840d332936d94440f4d1",
"size": "4615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/examples/multi_agent_two_trainers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "62178"
},
{
"name": "C++",
"bytes": "4258483"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "6292"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1263157"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "7515224"
},
{
"name": "Shell",
"bytes": "117425"
},
{
"name": "Starlark",
"bytes": "200955"
},
{
"name": "TypeScript",
"bytes": "149068"
}
],
"symlink_target": ""
} |
"""Helpers for components that manage entities."""
from __future__ import annotations
import asyncio
from collections.abc import Iterable
from datetime import timedelta
from itertools import chain
import logging
from types import ModuleType
from typing import Any, Callable
import voluptuous as vol
from homeassistant import config as conf_util
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_ENTITY_NAMESPACE,
CONF_SCAN_INTERVAL,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import Event, HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import (
config_per_platform,
config_validation as cv,
discovery,
entity,
service,
)
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.loader import async_get_integration, bind_hass
from homeassistant.setup import async_prepare_setup_platform
from .entity_platform import EntityPlatform
DEFAULT_SCAN_INTERVAL = timedelta(seconds=15)
DATA_INSTANCES = "entity_components"
@bind_hass
async def async_update_entity(hass: HomeAssistant, entity_id: str) -> None:
"""Trigger an update for an entity."""
domain = entity_id.split(".", 1)[0]
entity_comp = hass.data.get(DATA_INSTANCES, {}).get(domain)
if entity_comp is None:
logging.getLogger(__name__).warning(
"Forced update failed. Component for %s not loaded.", entity_id
)
return
entity_obj = entity_comp.get_entity(entity_id)
if entity_obj is None:
logging.getLogger(__name__).warning(
"Forced update failed. Entity %s not found.", entity_id
)
return
await entity_obj.async_update_ha_state(True)
class EntityComponent:
"""The EntityComponent manages platforms that manages entities.
This class has the following responsibilities:
- Process the configuration and set up a platform based component.
- Manage the platforms and their entities.
- Help extract the entities from a service call.
- Listen for discovery events for platforms related to the domain.
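    Illustrative sketch (the domain, schema and method names are placeholders,
    not part of this helper):
        component = EntityComponent(logging.getLogger(__name__), "demo", hass)
        await component.async_setup(config)
        component.async_register_entity_service(
            "set_speed", {"speed": cv.positive_int}, "async_set_speed")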
"""
def __init__(
self,
logger: logging.Logger,
domain: str,
hass: HomeAssistant,
scan_interval: timedelta = DEFAULT_SCAN_INTERVAL,
) -> None:
"""Initialize an entity component."""
self.logger = logger
self.hass = hass
self.domain = domain
self.scan_interval = scan_interval
self.config: ConfigType | None = None
self._platforms: dict[
str | tuple[str, timedelta | None, str | None], EntityPlatform
] = {domain: self._async_init_entity_platform(domain, None)}
self.async_add_entities = self._platforms[domain].async_add_entities
self.add_entities = self._platforms[domain].add_entities
hass.data.setdefault(DATA_INSTANCES, {})[domain] = self
@property
def entities(self) -> Iterable[entity.Entity]:
"""Return an iterable that returns all entities."""
return chain.from_iterable(
platform.entities.values() for platform in self._platforms.values()
)
def get_entity(self, entity_id: str) -> entity.Entity | None:
"""Get an entity."""
for platform in self._platforms.values():
entity_obj = platform.entities.get(entity_id)
if entity_obj is not None:
return entity_obj
return None
def setup(self, config: ConfigType) -> None:
"""Set up a full entity component.
This doesn't block the executor to protect from deadlocks.
"""
self.hass.add_job(self.async_setup(config)) # type: ignore
async def async_setup(self, config: ConfigType) -> None:
"""Set up a full entity component.
Loads the platforms from the config and will listen for supported
discovered platforms.
This method must be run in the event loop.
"""
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self._async_shutdown)
self.config = config
# Look in config for Domain, Domain 2, Domain 3 etc and load them
for p_type, p_config in config_per_platform(config, self.domain):
self.hass.async_create_task(self.async_setup_platform(p_type, p_config))
# Generic discovery listener for loading platform dynamically
# Refer to: homeassistant.helpers.discovery.async_load_platform()
async def component_platform_discovered(
platform: str, info: dict[str, Any] | None
) -> None:
"""Handle the loading of a platform."""
await self.async_setup_platform(platform, {}, info)
discovery.async_listen_platform(
self.hass, self.domain, component_platform_discovered
)
async def async_setup_entry(self, config_entry: ConfigEntry) -> bool:
"""Set up a config entry."""
platform_type = config_entry.domain
platform = await async_prepare_setup_platform(
self.hass,
# In future PR we should make hass_config part of the constructor
# params.
self.config or {},
self.domain,
platform_type,
)
if platform is None:
return False
key = config_entry.entry_id
if key in self._platforms:
raise ValueError("Config entry has already been setup!")
self._platforms[key] = self._async_init_entity_platform(
platform_type,
platform,
scan_interval=getattr(platform, "SCAN_INTERVAL", None),
)
return await self._platforms[key].async_setup_entry(config_entry)
async def async_unload_entry(self, config_entry: ConfigEntry) -> bool:
"""Unload a config entry."""
key = config_entry.entry_id
platform = self._platforms.pop(key, None)
if platform is None:
raise ValueError("Config entry was never loaded!")
await platform.async_reset()
return True
async def async_extract_from_service(
self, service_call: ServiceCall, expand_group: bool = True
) -> list[entity.Entity]:
"""Extract all known and available entities from a service call.
Will return an empty list if entities specified but unknown.
This method must be run in the event loop.
"""
return await service.async_extract_entities(
self.hass, self.entities, service_call, expand_group
)
@callback
def async_register_entity_service(
self,
name: str,
schema: dict[str, Any] | vol.Schema,
func: str | Callable[..., Any],
required_features: list[int] | None = None,
) -> None:
"""Register an entity service."""
if isinstance(schema, dict):
schema = cv.make_entity_service_schema(schema)
async def handle_service(call: Callable) -> None:
"""Handle the service."""
await self.hass.helpers.service.entity_service_call(
self._platforms.values(), func, call, required_features
)
self.hass.services.async_register(self.domain, name, handle_service, schema)
async def async_setup_platform(
self,
platform_type: str,
platform_config: ConfigType,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up a platform for this component."""
if self.config is None:
raise RuntimeError("async_setup needs to be called first")
platform = await async_prepare_setup_platform(
self.hass, self.config, self.domain, platform_type
)
if platform is None:
return
# Use config scan interval, fallback to platform if none set
scan_interval = platform_config.get(
CONF_SCAN_INTERVAL, getattr(platform, "SCAN_INTERVAL", None)
)
entity_namespace = platform_config.get(CONF_ENTITY_NAMESPACE)
key = (platform_type, scan_interval, entity_namespace)
if key not in self._platforms:
self._platforms[key] = self._async_init_entity_platform(
platform_type, platform, scan_interval, entity_namespace
)
await self._platforms[key].async_setup(platform_config, discovery_info)
async def _async_reset(self) -> None:
"""Remove entities and reset the entity component to initial values.
This method must be run in the event loop.
"""
tasks = []
for key, platform in self._platforms.items():
if key == self.domain:
tasks.append(platform.async_reset())
else:
tasks.append(platform.async_destroy())
if tasks:
await asyncio.gather(*tasks)
self._platforms = {self.domain: self._platforms[self.domain]}
self.config = None
async def async_remove_entity(self, entity_id: str) -> None:
"""Remove an entity managed by one of the platforms."""
found = None
for platform in self._platforms.values():
if entity_id in platform.entities:
found = platform
break
if found:
await found.async_remove_entity(entity_id)
async def async_prepare_reload(
self, *, skip_reset: bool = False
) -> ConfigType | None:
"""Prepare reloading this entity component.
This method must be run in the event loop.
"""
try:
conf = await conf_util.async_hass_config_yaml(self.hass)
except HomeAssistantError as err:
self.logger.error(err)
return None
integration = await async_get_integration(self.hass, self.domain)
processed_conf = await conf_util.async_process_component_config(
self.hass, conf, integration
)
if processed_conf is None:
return None
if not skip_reset:
await self._async_reset()
return processed_conf
@callback
def _async_init_entity_platform(
self,
platform_type: str,
platform: ModuleType | None,
scan_interval: timedelta | None = None,
entity_namespace: str | None = None,
) -> EntityPlatform:
"""Initialize an entity platform."""
if scan_interval is None:
scan_interval = self.scan_interval
return EntityPlatform(
hass=self.hass,
logger=self.logger,
domain=self.domain,
platform_name=platform_type,
platform=platform,
scan_interval=scan_interval,
entity_namespace=entity_namespace,
)
async def _async_shutdown(self, event: Event) -> None:
"""Call when Home Assistant is stopping."""
await asyncio.gather(
*[platform.async_shutdown() for platform in chain(self._platforms.values())]
)
| {
"content_hash": "495fca77d6a0e82a68fc1fe0d5292235",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 88,
"avg_line_length": 33.07761194029851,
"alnum_prop": 0.6239509069578558,
"repo_name": "kennedyshead/home-assistant",
"id": "37c0a7620abc300c7d07650745a1f07c6728c3bd",
"size": "11081",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/helpers/entity_component.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from django.views.generic import TemplateView
from . import views
app_name = "tweet_monitor"
urlpatterns = [
# React view
url(r'^process_add_handle/$', views.add_handle, name='add_handle'),
# url(r'^$', TemplateView.as_view(template_name='tweet_monitor/react_index.html'), name='index'),
url(r'^$', views.index, name='index'),
# Pure Django views
# url(r'^/$', views.index, name='index2'),
# url(r'^list/$', views.TweetsView.as_view(), name='tweets_list'),
# url(r'^add_handle/$', views.add_handle, name='add_handle'),
# url(r'^filters/$', views.filters, name='filters'),
# url(r'^process_filter/$', views.process_filter, name='process_filter'),
# DRF views
url(r'^filters/user/(?P<username>\S+)/$', views.UserTweetsView.as_view(), name='tweets_by_username'),
url(r'^filters/date/(?P<date>.+)/$', views.DateRangeTweetsView.as_view(), name='tweets_by_date'),
url(r'^filters/text/(?P<text>\S+)/$', views.TextTweetsView.as_view(), name='tweets_by_text'),
url(r'^filters/hashtag/(?P<hashtag>\S+)/$', views.HashtagTweetsView.as_view(), name='tweets_by_hashtag'),
url(r'^list_hashtags/$', views.HashtagsView.as_view(), name='hashtags_list'),
url(r'^fetch/$', views.FetchTweetsView.as_view(), name='fetch_tweets'),
url(r'^.*/', TemplateView.as_view(template_name="tweet_monitor/react_index.html"), name='react_base'),
]
| {
"content_hash": "f663b5d6a80f68bc645f8eac8c01920d",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 109,
"avg_line_length": 45.96774193548387,
"alnum_prop": 0.6498245614035087,
"repo_name": "lucabezerra/VinTwitta",
"id": "fef109b81568ce9b6286418aadbf312d89653ef0",
"size": "1425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tweet_monitor/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1115"
},
{
"name": "HTML",
"bytes": "5555"
},
{
"name": "JavaScript",
"bytes": "18996"
},
{
"name": "Makefile",
"bytes": "253"
},
{
"name": "Python",
"bytes": "39904"
},
{
"name": "Shell",
"bytes": "1197"
}
],
"symlink_target": ""
} |
__author__ = 'huqinghua'
# coding=gbk
import string, os, commands, time
class CommonUtils():
"""·ÖÏíÓÃsession"""
exeRoot = ''
@classmethod
def SaveExePath(cls):
cls.exeRoot = os.path.abspath(os.curdir)
@classmethod
def ReverseToExePath(cls):
os.chdir(cls.exeRoot) | {
"content_hash": "391e9ba0a3cf9ce64d26085f16449c30",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 48,
"avg_line_length": 20.125,
"alnum_prop": 0.6024844720496895,
"repo_name": "the7day/py-ui4win",
"id": "1ff377effcea4a38131e0125678a60157ab672d8",
"size": "322",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/CommonUtil.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "316"
},
{
"name": "C",
"bytes": "657767"
},
{
"name": "C++",
"bytes": "1637556"
},
{
"name": "CMake",
"bytes": "2876"
},
{
"name": "Python",
"bytes": "56423"
}
],
"symlink_target": ""
} |
import pytest
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_STACKDRIVER
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_STACKDRIVER)
class GCPTextToSpeechExampleDagSystemTest(GoogleSystemTest):
@provide_gcp_context(GCP_STACKDRIVER)
def test_run_example_dag(self):
self.run_dag("example_stackdriver", CLOUD_DAG_FOLDER)
| {
"content_hash": "6035b54197d9d82bb5455f6aa9148414",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 103,
"avg_line_length": 40.916666666666664,
"alnum_prop": 0.7942973523421588,
"repo_name": "sekikn/incubator-airflow",
"id": "1d7660342445f1fd59d9091a3f4efbeb433ec1a1",
"size": "1279",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/providers/google/cloud/operators/test_stackdriver_system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
"""
Flacon
-------------
Flask application manager
"""
from setuptools import setup
setup(
name='Flacon',
version='0.0.1',
url='',
license='BSD',
author='Mehdi Bayazee, Mostafa Rokooie',
author_email='[email protected], [email protected]',
description='Flask based web framework',
long_description=__doc__,
packages=['flacon', 'flacon.commands'],
include_package_data=True,
package_data={'flacon': ['flacon/actions/project_template/*']},
namespace_packages=['flacon'],
zip_safe=False,
platforms='any',
install_requires=[
'flask>=0.9'
],
# scripts=['flacon/actions/flacon.py'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| {
"content_hash": "78ce6f480cc3444d3e5e0c5f9a8fe346",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 70,
"avg_line_length": 28.526315789473685,
"alnum_prop": 0.6088560885608856,
"repo_name": "bayazee/flacon",
"id": "5ff46d417b2da2aef5ed4b767b27b3fe8c20f4dd",
"size": "1084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "41462"
}
],
"symlink_target": ""
} |
import os
import sys
from .lang_aux import Const, ReloadAsConstModule
def env_bool(x):
return Const(os.environ.get(x, '0') == '1')
def env_int(x, val):
return Const(int(os.environ.get(x, str(val))))
VM_MEMORY_SIZE_IN_MB = env_int('TILCK_VM_MEM', 128)
GEN_TEST_DATA = env_bool('GEN_TEST_DATA')
IN_TRAVIS = env_bool('TRAVIS')
IN_CIRCLECI = env_bool('CIRCLECI')
IN_AZURE = env_bool('AZURE_HTTP_USER_AGENT')
CI = env_bool('CI')
DUMP_COV = env_bool('DUMP_COV')
REPORT_COV = env_bool('REPORT_COV')
VERBOSE = env_bool('VERBOSE')
IN_ANY_CI = Const(IN_TRAVIS.val or IN_CIRCLECI.val or IN_AZURE.val or CI.val)
ReloadAsConstModule(__name__)
| {
"content_hash": "1ada03503dfe31ffca84507a96977388",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 77,
"avg_line_length": 26.75,
"alnum_prop": 0.6869158878504673,
"repo_name": "vvaltchev/experimentOs",
"id": "3db62ca594a470366b81fcae9762bd06b120655e",
"size": "683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/runners/lib/env.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "42065"
},
{
"name": "C",
"bytes": "589604"
},
{
"name": "C++",
"bytes": "94512"
},
{
"name": "CMake",
"bytes": "27647"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Objective-C",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "10657"
},
{
"name": "Shell",
"bytes": "30848"
}
],
"symlink_target": ""
} |
''' deactivate.py '''
from heron.common.src.python.utils.log import Log
import heron.tools.cli.src.python.cli_helper as cli_helper
def create_parser(subparsers):
'''
:param subparsers:
:return:
'''
return cli_helper.create_parser(subparsers, 'deactivate', 'Deactivate a topology')
# pylint: disable=unused-argument
def run(command, parser, cl_args, unknown_args):
'''
:param command:
:param parser:
:param cl_args:
:param unknown_args:
:return:
'''
Log.debug("Deactivate Args: %s", cl_args)
return cli_helper.run(command, cl_args, "deactivate topology")
| {
"content_hash": "4bd17b9526a497f28c4090ccc3f17f9e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 84,
"avg_line_length": 25.434782608695652,
"alnum_prop": 0.7008547008547008,
"repo_name": "tomncooper/heron",
"id": "9534de8164eabe4c3bf33e2315691335a39f9e53",
"size": "1434",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "heron/tools/cli/src/python/deactivate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "14063"
},
{
"name": "C++",
"bytes": "1722746"
},
{
"name": "CSS",
"bytes": "77709"
},
{
"name": "HCL",
"bytes": "5314"
},
{
"name": "HTML",
"bytes": "39228"
},
{
"name": "Java",
"bytes": "4744099"
},
{
"name": "JavaScript",
"bytes": "1107129"
},
{
"name": "M4",
"bytes": "18741"
},
{
"name": "Makefile",
"bytes": "1046"
},
{
"name": "Objective-C",
"bytes": "2143"
},
{
"name": "Perl",
"bytes": "9298"
},
{
"name": "Python",
"bytes": "1692443"
},
{
"name": "Ruby",
"bytes": "1930"
},
{
"name": "Scala",
"bytes": "95609"
},
{
"name": "Shell",
"bytes": "195923"
},
{
"name": "Smarty",
"bytes": "528"
}
],
"symlink_target": ""
} |
"""This component provides HA switch support for Ring Door Bell/Chimes."""
from datetime import timedelta
import logging
from homeassistant.components.switch import SwitchDevice
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.dt as dt_util
from . import DATA_RING_STICKUP_CAMS, SIGNAL_UPDATE_RING
_LOGGER = logging.getLogger(__name__)
SIREN_ICON = "mdi:alarm-bell"
# It takes a few seconds for the API to correctly return an update indicating
# that the changes have been made. Once we request a change (i.e. a light
# being turned on) we simply wait for this time delta before we allow
# updates to take place.
SKIP_UPDATES_DELAY = timedelta(seconds=5)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Create the switches for the Ring devices."""
cameras = hass.data[DATA_RING_STICKUP_CAMS]
switches = []
for device in cameras:
if device.has_capability("siren"):
switches.append(SirenSwitch(device))
add_entities(switches, True)
class BaseRingSwitch(SwitchDevice):
"""Represents a switch for controlling an aspect of a ring device."""
def __init__(self, device, device_type):
"""Initialize the switch."""
self._device = device
self._device_type = device_type
self._unique_id = f"{self._device.id}-{self._device_type}"
async def async_added_to_hass(self):
"""Register callbacks."""
async_dispatcher_connect(self.hass, SIGNAL_UPDATE_RING, self._update_callback)
@callback
def _update_callback(self):
"""Call update method."""
_LOGGER.debug("Updating Ring sensor %s (callback)", self.name)
self.async_schedule_update_ha_state(True)
@property
def name(self):
"""Name of the device."""
return f"{self._device.name} {self._device_type}"
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def should_poll(self):
"""Update controlled via the hub."""
return False
class SirenSwitch(BaseRingSwitch):
"""Creates a switch to turn the ring cameras siren on and off."""
def __init__(self, device):
"""Initialize the switch for a device with a siren."""
super().__init__(device, "siren")
self._no_updates_until = dt_util.utcnow()
self._siren_on = False
def _set_switch(self, new_state):
"""Update switch state, and causes HASS to correctly update."""
self._device.siren = new_state
self._siren_on = new_state > 0
self._no_updates_until = dt_util.utcnow() + SKIP_UPDATES_DELAY
self.schedule_update_ha_state()
@property
def is_on(self):
"""If the switch is currently on or off."""
return self._siren_on
def turn_on(self, **kwargs):
"""Turn the siren on for 30 seconds."""
self._set_switch(1)
def turn_off(self, **kwargs):
"""Turn the siren off."""
self._set_switch(0)
@property
def icon(self):
"""Return the icon."""
return SIREN_ICON
def update(self):
"""Update state of the siren."""
if self._no_updates_until > dt_util.utcnow():
_LOGGER.debug("Skipping update...")
return
self._siren_on = self._device.siren > 0
| {
"content_hash": "246992a791589dc936a464d08119fce6",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 86,
"avg_line_length": 30.945454545454545,
"alnum_prop": 0.6389541715628673,
"repo_name": "leppa/home-assistant",
"id": "16fc4a6717fe154a5d8f93fccefcb5d938032570",
"size": "3404",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ring/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18957740"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
"""Diagnose cluster command."""
from apitools.base.py import encoding
from googlecloudsdk.api_lib.dataproc import exceptions
from googlecloudsdk.api_lib.dataproc import storage_helpers
from googlecloudsdk.api_lib.dataproc import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core.util import retry
class Diagnose(base.Command):
"""Run a detailed diagnostic on a cluster."""
@staticmethod
def Args(parser):
parser.add_argument(
'name',
help='The name of the cluster to diagnose.')
def Run(self, args):
client = self.context['dataproc_client']
messages = self.context['dataproc_messages']
cluster_ref = util.ParseCluster(args.name, self.context)
request = messages.DataprocProjectsRegionsClustersDiagnoseRequest(
clusterName=cluster_ref.clusterName,
region=cluster_ref.region,
projectId=cluster_ref.projectId)
operation = client.projects_regions_clusters.Diagnose(request)
# TODO(user): Stream output during polling.
operation = util.WaitForOperation(
operation, self.context,
message='Waiting for cluster diagnose operation')
if not operation.response:
raise exceptions.OperationError('Operation is missing response')
properties = encoding.MessageToDict(operation.response)
output_uri = properties['outputUri']
if not output_uri:
raise exceptions.OperationError('Response is missing outputUri')
log.err.Print('Output from diagnostic:')
log.err.Print('-----------------------------------------------')
driver_log_stream = storage_helpers.StorageObjectSeriesStream(
output_uri)
# A single read might not read whole stream. Try a few times.
read_retrier = retry.Retryer(max_retrials=4, jitter_ms=None)
try:
read_retrier.RetryOnResult(
lambda: driver_log_stream.ReadIntoWritable(log.err),
sleep_ms=100,
should_retry_if=lambda *_: driver_log_stream.open)
except retry.MaxRetrialsException:
log.warn(
          'Diagnostic finished successfully, '
'but output did not finish streaming.')
log.err.Print('-----------------------------------------------')
return output_uri
| {
"content_hash": "5cb055b5f0aa385001e72b1ab489f77a",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 70,
"avg_line_length": 35.1875,
"alnum_prop": 0.6838365896980462,
"repo_name": "KaranToor/MA450",
"id": "867cc8197347329149fe758368be91afa27e7372",
"size": "2848",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/surface/dataproc/clusters/diagnose.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import os
from cryptography import exceptions, utils
from cryptography.hazmat.backends.openssl import aead
from cryptography.hazmat.backends.openssl.backend import backend
class ChaCha20Poly1305(object):
def __init__(self, key):
if not backend.aead_cipher_supported(self):
raise exceptions.UnsupportedAlgorithm(
"ChaCha20Poly1305 is not supported by this version of OpenSSL",
exceptions._Reasons.UNSUPPORTED_CIPHER
)
utils._check_bytes("key", key)
if len(key) != 32:
raise ValueError("ChaCha20Poly1305 key must be 32 bytes.")
self._key = key
@classmethod
def generate_key(cls):
return os.urandom(32)
def encrypt(self, nonce, data, associated_data):
if associated_data is None:
associated_data = b""
self._check_params(nonce, data, associated_data)
return aead._encrypt(
backend, self, nonce, data, associated_data, 16
)
def decrypt(self, nonce, data, associated_data):
if associated_data is None:
associated_data = b""
self._check_params(nonce, data, associated_data)
return aead._decrypt(
backend, self, nonce, data, associated_data, 16
)
def _check_params(self, nonce, data, associated_data):
utils._check_bytes("nonce", nonce)
utils._check_bytes("data", data)
utils._check_bytes("associated_data", associated_data)
if len(nonce) != 12:
raise ValueError("Nonce must be 12 bytes")
class AESCCM(object):
def __init__(self, key, tag_length=16):
utils._check_bytes("key", key)
if len(key) not in (16, 24, 32):
raise ValueError("AESCCM key must be 128, 192, or 256 bits.")
self._key = key
if not isinstance(tag_length, int):
raise TypeError("tag_length must be an integer")
if tag_length not in (4, 6, 8, 12, 14, 16):
raise ValueError("Invalid tag_length")
self._tag_length = tag_length
if not backend.aead_cipher_supported(self):
raise exceptions.UnsupportedAlgorithm(
"AESCCM is not supported by this version of OpenSSL",
exceptions._Reasons.UNSUPPORTED_CIPHER
)
@classmethod
def generate_key(cls, bit_length):
if not isinstance(bit_length, int):
raise TypeError("bit_length must be an integer")
if bit_length not in (128, 192, 256):
raise ValueError("bit_length must be 128, 192, or 256")
return os.urandom(bit_length // 8)
def encrypt(self, nonce, data, associated_data):
if associated_data is None:
associated_data = b""
self._check_params(nonce, data, associated_data)
self._validate_lengths(nonce, len(data))
return aead._encrypt(
backend, self, nonce, data, associated_data, self._tag_length
)
def decrypt(self, nonce, data, associated_data):
if associated_data is None:
associated_data = b""
self._check_params(nonce, data, associated_data)
return aead._decrypt(
backend, self, nonce, data, associated_data, self._tag_length
)
def _validate_lengths(self, nonce, data_len):
# For information about computing this, see
# https://tools.ietf.org/html/rfc3610#section-2.1
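        # For example, a 13-byte nonce leaves l = 2, so the payload may be at
        # most 2 ** 16 = 65536 bytes; a 7-byte nonce allows up to 2 ** 64 bytes.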
l = 15 - len(nonce)
if 2 ** (8 * l) < data_len:
raise ValueError("Nonce too long for data")
def _check_params(self, nonce, data, associated_data):
utils._check_bytes("nonce", nonce)
utils._check_bytes("data", data)
utils._check_bytes("associated_data", associated_data)
if not 7 <= len(nonce) <= 13:
raise ValueError("Nonce must be between 7 and 13 bytes")
class AESGCM(object):
def __init__(self, key):
utils._check_bytes("key", key)
if len(key) not in (16, 24, 32):
raise ValueError("AESGCM key must be 128, 192, or 256 bits.")
self._key = key
@classmethod
def generate_key(cls, bit_length):
if not isinstance(bit_length, int):
raise TypeError("bit_length must be an integer")
if bit_length not in (128, 192, 256):
raise ValueError("bit_length must be 128, 192, or 256")
return os.urandom(bit_length // 8)
def encrypt(self, nonce, data, associated_data):
if associated_data is None:
associated_data = b""
self._check_params(nonce, data, associated_data)
return aead._encrypt(
backend, self, nonce, data, associated_data, 16
)
def decrypt(self, nonce, data, associated_data):
if associated_data is None:
associated_data = b""
self._check_params(nonce, data, associated_data)
return aead._decrypt(
backend, self, nonce, data, associated_data, 16
)
def _check_params(self, nonce, data, associated_data):
utils._check_bytes("nonce", nonce)
utils._check_bytes("data", data)
utils._check_bytes("associated_data", associated_data)
| {
"content_hash": "994f48665a7ff6d3329f21ebf808b784",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 79,
"avg_line_length": 33.32911392405063,
"alnum_prop": 0.6029244208127611,
"repo_name": "jamesylgan/szechuantech",
"id": "07b6bce616728d493dcf30bd66c87f636ff2eedb",
"size": "5447",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python-scripts/cryptography/hazmat/primitives/ciphers/aead.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5058"
},
{
"name": "C",
"bytes": "242058"
},
{
"name": "CSS",
"bytes": "62239"
},
{
"name": "HTML",
"bytes": "3147"
},
{
"name": "JavaScript",
"bytes": "13258"
},
{
"name": "Python",
"bytes": "4249231"
}
],
"symlink_target": ""
} |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SECRET_KEY = 'ds98fdf9S(DF*(S)f8)(SUDFUF)sdui-0dfisdfk'
@staticmethod
def init_app(app):
pass
class DevConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + \
os.path.join(basedir, 'data-dev.sqlite')
config = {
'development': DevConfig,
'default': DevConfig
} | {
"content_hash": "f54ad8550b946e556581e10d2b660bdb",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 70,
"avg_line_length": 19.681818181818183,
"alnum_prop": 0.6004618937644342,
"repo_name": "cwimbrow/veganeyes-api",
"id": "c287e0c7f14ae33f20aa8181b13ae70d0603bfcb",
"size": "433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "38"
},
{
"name": "Python",
"bytes": "13584"
}
],
"symlink_target": ""
} |
import logging
from owslib import crs
from pycsw.core import util
LOGGER = logging.getLogger(__name__)
TYPES = ['gml:Point', 'gml:LineString', 'gml:Polygon', 'gml:Envelope']
DEFAULT_SRS = crs.Crs('urn:x-ogc:def:crs:EPSG:6.11:4326')
def _poslist2wkt(poslist, axisorder):
"""Repurpose gml:posList into WKT aware list"""
tmp = poslist.split()
poslist2 = []
xlist = tmp[1::2]
ylist = tmp[::2]
if axisorder == 'yx':
for i, j in zip(ylist, xlist):
poslist2.append('%s %s' % (i, j))
else:
for i, j in zip(xlist, ylist):
poslist2.append('%s %s' % (i, j))
return ', '.join(poslist2)
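# Hedged illustration (not part of the original module) of _poslist2wkt above,
# computed mechanically from the code as written:
#   _poslist2wkt('10 20 30 40', 'yx') -> '10 20, 30 40'
#   _poslist2wkt('10 20 30 40', 'xy') -> '20 10, 40 30'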
class Geometry(object):
"""base geometry class"""
def __init__(self, element, nsmap):
"""initialize geometry parser"""
self.nsmap = nsmap
self.type = None
self.wkt = None
self.crs = None
self._exml = element
# return OGC WKT for GML geometry
operand = element.xpath(
'|'.join(TYPES),
namespaces={'gml': 'http://www.opengis.net/gml'})[0]
if 'srsName' in operand.attrib:
LOGGER.debug('geometry srsName detected')
self.crs = crs.Crs(operand.attrib['srsName'])
else:
LOGGER.debug('setting default geometry srsName %s' % DEFAULT_SRS)
self.crs = DEFAULT_SRS
self.type = util.xmltag_split(operand.tag)
if self.type == 'Point':
self._get_point()
elif self.type == 'LineString':
self._get_linestring()
elif self.type == 'Polygon':
self._get_polygon()
elif self.type == 'Envelope':
self._get_envelope()
else:
raise RuntimeError('Unsupported geometry type (Must be one of %s)'
% ','.join(TYPES))
# reproject data if needed
if self.crs is not None and self.crs.code not in [4326, 'CRS84']:
LOGGER.debug('transforming geometry to 4326')
try:
self.wkt = self.transform(self.crs.code, DEFAULT_SRS.code)
except Exception as err:
raise RuntimeError('Reprojection error: Invalid srsName '
'"%s": %s' % (self.crs.id, str(err)))
def _get_point(self):
"""Parse gml:Point"""
tmp = self._exml.find(util.nspath_eval('gml:Point/gml:pos',
self.nsmap))
if tmp is None:
raise RuntimeError('Invalid gml:Point geometry. Missing gml:pos')
else:
xypoint = tmp.text.split()
if self.crs.axisorder == 'yx':
self.wkt = 'POINT(%s %s)' % (xypoint[1], xypoint[0])
else:
self.wkt = 'POINT(%s %s)' % (xypoint[0], xypoint[1])
def _get_linestring(self):
"""Parse gml:LineString"""
tmp = self._exml.find(util.nspath_eval('gml:LineString/gml:posList',
self.nsmap))
if tmp is None:
raise RuntimeError('Invalid gml:LineString geometry.\
Missing gml:posList')
else:
self.wkt = 'LINESTRING(%s)' % _poslist2wkt(tmp.text,
self.crs.axisorder)
def _get_polygon(self):
"""Parse gml:Polygon"""
tmp = self._exml.find('.//%s' % util.nspath_eval('gml:posList',
self.nsmap))
if tmp is None:
            raise RuntimeError('Invalid gml:Polygon geometry.\
 Missing gml:posList')
else:
self.wkt = 'POLYGON((%s))' % _poslist2wkt(tmp.text,
self.crs.axisorder)
def _get_envelope(self):
"""Parse gml:Envelope"""
tmp = self._exml.find(util.nspath_eval('gml:Envelope/gml:lowerCorner',
self.nsmap))
if tmp is None:
raise RuntimeError('Invalid gml:Envelope geometry.\
Missing gml:lowerCorner')
else:
lower_left = tmp.text
tmp = self._exml.find(util.nspath_eval('gml:Envelope/gml:upperCorner',
self.nsmap))
if tmp is None:
raise RuntimeError('Invalid gml:Envelope geometry.\
Missing gml:upperCorner')
else:
upper_right = tmp.text
llmin = lower_left.split()
urmax = upper_right.split()
if len(llmin) < 2 or len(urmax) < 2:
raise RuntimeError('Invalid gml:Envelope geometry. \
gml:lowerCorner and gml:upperCorner must hold at least x and y')
if self.crs.axisorder == 'yx':
self.wkt = util.bbox2wktpolygon('%s,%s,%s,%s' % (llmin[1],
llmin[0], urmax[1], urmax[0]))
else:
self.wkt = util.bbox2wktpolygon('%s,%s,%s,%s' % (llmin[0],
llmin[1], urmax[0], urmax[1]))
def transform(self, src, dest):
"""transform coordinates from one CRS to another"""
import pyproj
from shapely.geometry import Point, LineString, Polygon
from shapely.wkt import loads
LOGGER.debug('Transforming geometry from %s to %s' % (src, dest))
vertices = []
try:
proj_src = pyproj.Proj(init='epsg:%s' % src)
except:
raise RuntimeError('Invalid source projection')
try:
proj_dst = pyproj.Proj(init='epsg:%s' % dest)
except:
raise RuntimeError('Invalid destination projection')
geom = loads(self.wkt)
if geom.type == 'Point':
newgeom = Point(pyproj.transform(proj_src, proj_dst,
geom.x, geom.y))
wkt2 = newgeom.wkt
elif geom.type == 'LineString':
for vertice in list(geom.coords):
newgeom = pyproj.transform(proj_src, proj_dst,
vertice[0], vertice[1])
vertices.append(newgeom)
linestring = LineString(vertices)
wkt2 = linestring.wkt
elif geom.type == 'Polygon':
for vertice in list(geom.exterior.coords):
newgeom = pyproj.transform(proj_src, proj_dst,
vertice[0], vertice[1])
vertices.append(newgeom)
polygon = Polygon(vertices)
wkt2 = polygon.wkt
return wkt2
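# Hedged usage sketch (not part of the original module): parsing a GML operand
# from an lxml element; the element and nsmap shown are assumptions.
#   geom = Geometry(record_xml_element, {'gml': 'http://www.opengis.net/gml'})
#   geom.wkt   # OGC WKT, reprojected to EPSG:4326 when srsName differs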
| {
"content_hash": "4765f7e2749da0135600869a08e7b384",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 78,
"avg_line_length": 33.32338308457712,
"alnum_prop": 0.5034338608539862,
"repo_name": "mwengren/pycsw",
"id": "09b23ccb4a9614b66e9b7b5fbcf2e526e2117c1b",
"size": "8016",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pycsw/ogc/gml/gml3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "65"
},
{
"name": "Python",
"bytes": "624222"
},
{
"name": "Shell",
"bytes": "2584"
}
],
"symlink_target": ""
} |
"""All the test files for API renderers plugins are imported here."""
# These need to register plugins so, pylint: disable=unused-import
from grr.gui.api_plugins import aff4_test
from grr.gui.api_plugins import hunt_test
| {
"content_hash": "91e924a05207f7bfb6a1cf7723e98ff8",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 69,
"avg_line_length": 32,
"alnum_prop": 0.7723214285714286,
"repo_name": "pchaigno/grreat",
"id": "e5ebb0a641b8858dde170d5624b875727546775e",
"size": "246",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gui/api_plugins/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "36573"
},
{
"name": "JavaScript",
"bytes": "831111"
},
{
"name": "Makefile",
"bytes": "5482"
},
{
"name": "Perl",
"bytes": "483"
},
{
"name": "Python",
"bytes": "4517593"
},
{
"name": "Shell",
"bytes": "31210"
}
],
"symlink_target": ""
} |
"""Services for the Plex integration."""
import json
import logging
from plexapi.exceptions import NotFound
import voluptuous as vol
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
DOMAIN,
PLEX_UPDATE_PLATFORMS_SIGNAL,
SERVERS,
SERVICE_REFRESH_LIBRARY,
SERVICE_SCAN_CLIENTS,
)
REFRESH_LIBRARY_SCHEMA = vol.Schema(
{vol.Optional("server_name"): str, vol.Required("library_name"): str}
)
_LOGGER = logging.getLogger(__package__)
async def async_setup_services(hass):
"""Set up services for the Plex component."""
async def async_refresh_library_service(service_call: ServiceCall) -> None:
await hass.async_add_executor_job(refresh_library, hass, service_call)
async def async_scan_clients_service(_: ServiceCall) -> None:
_LOGGER.warning(
"This service is deprecated in favor of the scan_clients button entity. "
"Service calls will still work for now but the service will be removed in a future release"
)
for server_id in hass.data[DOMAIN][SERVERS]:
async_dispatcher_send(hass, PLEX_UPDATE_PLATFORMS_SIGNAL.format(server_id))
hass.services.async_register(
DOMAIN,
SERVICE_REFRESH_LIBRARY,
async_refresh_library_service,
schema=REFRESH_LIBRARY_SCHEMA,
)
hass.services.async_register(
DOMAIN, SERVICE_SCAN_CLIENTS, async_scan_clients_service
)
return True
def refresh_library(hass: HomeAssistant, service_call: ServiceCall) -> None:
"""Scan a Plex library for new and updated media."""
plex_server_name = service_call.data.get("server_name")
library_name = service_call.data["library_name"]
plex_server = get_plex_server(hass, plex_server_name)
try:
library = plex_server.library.section(title=library_name)
except NotFound:
_LOGGER.error(
"Library with name '%s' not found in %s",
library_name,
[x.title for x in plex_server.library.sections()],
)
return
_LOGGER.debug("Scanning %s for new and updated media", library_name)
library.update()
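# Hedged example (not part of the original module) of a service payload matching
# REFRESH_LIBRARY_SCHEMA above; the server and library names are illustrative:
#   {"server_name": "Downstairs Plex", "library_name": "Movies"}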
def get_plex_server(hass, plex_server_name=None):
"""Retrieve a configured Plex server by name."""
if DOMAIN not in hass.data:
raise HomeAssistantError("Plex integration not configured")
plex_servers = hass.data[DOMAIN][SERVERS].values()
if not plex_servers:
raise HomeAssistantError("No Plex servers available")
if plex_server_name:
plex_server = next(
(x for x in plex_servers if x.friendly_name == plex_server_name), None
)
if plex_server is not None:
return plex_server
friendly_names = [x.friendly_name for x in plex_servers]
raise HomeAssistantError(
f"Requested Plex server '{plex_server_name}' not found in {friendly_names}"
)
if len(plex_servers) == 1:
return next(iter(plex_servers))
friendly_names = [x.friendly_name for x in plex_servers]
raise HomeAssistantError(
f"Multiple Plex servers configured, choose with 'plex_server' key: {friendly_names}"
)
def lookup_plex_media(hass, content_type, content_id):
"""Look up Plex media for other integrations using media_player.play_media service payloads."""
content = json.loads(content_id)
if isinstance(content, int):
content = {"plex_key": content}
content_type = DOMAIN
plex_server_name = content.pop("plex_server", None)
plex_server = get_plex_server(hass, plex_server_name)
if playqueue_id := content.pop("playqueue_id", None):
try:
playqueue = plex_server.get_playqueue(playqueue_id)
except NotFound as err:
raise HomeAssistantError(
f"PlayQueue '{playqueue_id}' could not be found"
) from err
return playqueue
shuffle = content.pop("shuffle", 0)
media = plex_server.lookup_media(content_type, **content)
if media is None:
raise HomeAssistantError(f"Plex media not found using payload: '{content_id}'")
if shuffle:
return plex_server.create_playqueue(media, shuffle=shuffle)
return media
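# Hedged example (not part of the original module) of a content_id payload for
# lookup_plex_media above. "plex_server", "playqueue_id" and "shuffle" are the
# keys popped here; any remaining keys are passed straight through to
# plex_server.lookup_media() and are purely illustrative:
#   '{"plex_server": "Downstairs Plex", "library_name": "Movies", "title": "Blade"}'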
| {
"content_hash": "9ed69df1e9197972a0cb396da8e3fab0",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 103,
"avg_line_length": 33.152671755725194,
"alnum_prop": 0.6663596592217361,
"repo_name": "GenericStudent/home-assistant",
"id": "0433ba836cd8a87376c3680554b11f1bc22810a9",
"size": "4343",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/plex/services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from ui.voyager import get_open_library_item_title
class OpenLibraryTitleTest(TestCase):
def test_correct_title(self):
"this is from issue 487 where open libray link points to correct title"
title = "Life on the Mississippi"
open_library_link = "http://openlibrary.org/books/OL6710196M/Life_on_the_Mississippi"
open_library_title = get_open_library_item_title(open_library_link)
self.assertEqual(title[0:10], open_library_title[0:10])
def test_incorrect_title(self):
"from issue 420"
title = "Frank Lloyd Wright's Hanna House : the clients' report Paul R. and Jean S. Hanna"
open_library_link = "http://openlibrary.org/books/OL24933180M/The_Baptist_position_as_to_the_Bible"
open_library_title = get_open_library_item_title(open_library_link)
self.assertNotEqual(title[0:10], open_library_title[0:10])
| {
"content_hash": "b36cca373646f87b734b11eab8d85d17",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 107,
"avg_line_length": 48.78947368421053,
"alnum_prop": 0.7044228694714132,
"repo_name": "gwu-libraries/launchpad",
"id": "cae9d8be88022a2be6ff0dde1439a7e7712a4708",
"size": "927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lp/ui/tests/open_library_title_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17663"
},
{
"name": "HTML",
"bytes": "80921"
},
{
"name": "JavaScript",
"bytes": "5844"
},
{
"name": "Python",
"bytes": "227887"
}
],
"symlink_target": ""
} |
"""passlib.handlers.fshp
"""
#=============================================================================
# imports
#=============================================================================
# core
from base64 import b64encode, b64decode
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
# pkg
from passlib.utils import to_unicode
import passlib.utils.handlers as uh
from passlib.utils.compat import b, bytes, bascii_to_str, iteritems, u,\
unicode
from passlib.utils.pbkdf2 import pbkdf1
# local
__all__ = [
'fshp',
]
#=============================================================================
# sha1-crypt
#=============================================================================
class fshp(uh.HasRounds, uh.HasRawSalt, uh.HasRawChecksum, uh.GenericHandler):
"""This class implements the FSHP password hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~passlib.ifc.PasswordHash.encrypt` and :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept the following optional keywords:
:param salt:
Optional raw salt string.
If not specified, one will be autogenerated (this is recommended).
:param salt_size:
Optional number of bytes to use when autogenerating new salts.
Defaults to 16 bytes, but can be any non-negative value.
:param rounds:
Optional number of rounds to use.
Defaults to 50000, must be between 1 and 4294967295, inclusive.
:param variant:
Optionally specifies variant of FSHP to use.
* ``0`` - uses SHA-1 digest (deprecated).
* ``1`` - uses SHA-2/256 digest (default).
* ``2`` - uses SHA-2/384 digest.
* ``3`` - uses SHA-2/512 digest.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~passlib.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
"""
#===================================================================
# class attrs
#===================================================================
#--GenericHandler--
name = "fshp"
setting_kwds = ("salt", "salt_size", "rounds", "variant")
checksum_chars = uh.PADDED_BASE64_CHARS
ident = u("{FSHP")
# checksum_size is property() that depends on variant
#--HasRawSalt--
default_salt_size = 16 # current passlib default, FSHP uses 8
min_salt_size = 0
max_salt_size = None
#--HasRounds--
# FIXME: should probably use different default rounds
# based on the variant. setting for default variant (sha256) for now.
default_rounds = 50000 # current passlib default, FSHP uses 4096
min_rounds = 1 # set by FSHP
max_rounds = 4294967295 # 32-bit integer limit - not set by FSHP
rounds_cost = "linear"
#--variants--
default_variant = 1
_variant_info = {
# variant: (hash name, digest size)
0: ("sha1", 20),
1: ("sha256", 32),
2: ("sha384", 48),
3: ("sha512", 64),
}
_variant_aliases = dict(
[(unicode(k),k) for k in _variant_info] +
[(v[0],k) for k,v in iteritems(_variant_info)]
)
#===================================================================
# instance attrs
#===================================================================
variant = None
#===================================================================
# init
#===================================================================
def __init__(self, variant=None, **kwds):
# NOTE: variant must be set first, since it controls checksum size, etc.
self.use_defaults = kwds.get("use_defaults") # load this early
self.variant = self._norm_variant(variant)
super(fshp, self).__init__(**kwds)
def _norm_variant(self, variant):
if variant is None:
if not self.use_defaults:
raise TypeError("no variant specified")
variant = self.default_variant
if isinstance(variant, bytes):
variant = variant.decode("ascii")
if isinstance(variant, unicode):
try:
variant = self._variant_aliases[variant]
except KeyError:
raise ValueError("invalid fshp variant")
if not isinstance(variant, int):
raise TypeError("fshp variant must be int or known alias")
if variant not in self._variant_info:
raise ValueError("invalid fshp variant")
return variant
@property
def checksum_alg(self):
return self._variant_info[self.variant][0]
@property
def checksum_size(self):
return self._variant_info[self.variant][1]
#===================================================================
# formatting
#===================================================================
_hash_regex = re.compile(u(r"""
^
\{FSHP
(\d+)\| # variant
(\d+)\| # salt size
(\d+)\} # rounds
([a-zA-Z0-9+/]+={0,3}) # digest
$"""), re.X)
@classmethod
def from_string(cls, hash):
hash = to_unicode(hash, "ascii", "hash")
m = cls._hash_regex.match(hash)
if not m:
raise uh.exc.InvalidHashError(cls)
variant, salt_size, rounds, data = m.group(1,2,3,4)
variant = int(variant)
salt_size = int(salt_size)
rounds = int(rounds)
try:
data = b64decode(data.encode("ascii"))
except TypeError:
raise uh.exc.MalformedHashError(cls)
salt = data[:salt_size]
chk = data[salt_size:]
return cls(salt=salt, checksum=chk, rounds=rounds, variant=variant)
@property
def _stub_checksum(self):
return b('\x00') * self.checksum_size
def to_string(self):
chk = self.checksum or self._stub_checksum
salt = self.salt
data = bascii_to_str(b64encode(salt+chk))
return "{FSHP%d|%d|%d}%s" % (self.variant, len(salt), self.rounds, data)
#===================================================================
# backend
#===================================================================
def _calc_checksum(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
# NOTE: for some reason, FSHP uses pbkdf1 with password & salt reversed.
# this has only a minimal impact on security,
# but it is worth noting this deviation.
return pbkdf1(
secret=self.salt,
salt=secret,
rounds=self.rounds,
keylen=self.checksum_size,
hash=self.checksum_alg,
)
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#=============================================================================
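# Hedged usage sketch (not part of the original module), mirroring to_string()
# and from_string() above for the default sha256 variant:
#   salt, chk = b'\x01' * 16, b'\x00' * 32          # 32-byte checksum for variant 1
#   h = '{FSHP1|16|50000}' + b64encode(salt + chk)  # e.g. '{FSHP1|16|50000}AQEB...'
#   key = fshp.from_string(h)
#   key.rounds, key.variant                         # -> (50000, 1)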
| {
"content_hash": "e829940d7f2996c25c9e8e288c94db30",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 145,
"avg_line_length": 37.262135922330096,
"alnum_prop": 0.47498697238144866,
"repo_name": "Glottotopia/aagd",
"id": "1eca6197302499042c505ebeab5599660be51652",
"size": "7676",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moin/local/moin/build/lib.linux-x86_64-2.6/MoinMoin/support/passlib/handlers/fshp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "152885"
},
{
"name": "CSS",
"bytes": "454208"
},
{
"name": "ColdFusion",
"bytes": "438820"
},
{
"name": "HTML",
"bytes": "1998354"
},
{
"name": "Java",
"bytes": "510468"
},
{
"name": "JavaScript",
"bytes": "6505329"
},
{
"name": "Lasso",
"bytes": "72399"
},
{
"name": "Makefile",
"bytes": "10216"
},
{
"name": "PHP",
"bytes": "259528"
},
{
"name": "Perl",
"bytes": "137186"
},
{
"name": "Python",
"bytes": "13713475"
},
{
"name": "Shell",
"bytes": "346"
},
{
"name": "XSLT",
"bytes": "15970"
}
],
"symlink_target": ""
} |
import datetime
import mock
from testtools.matchers import HasLength
from ironic.common import exception
from ironic import objects
from ironic.tests.unit.db import base
from ironic.tests.unit.db import utils
class TestPortObject(base.DbTestCase):
def setUp(self):
super(TestPortObject, self).setUp()
self.fake_port = utils.get_test_port()
def test_get_by_id(self):
port_id = self.fake_port['id']
with mock.patch.object(self.dbapi, 'get_port_by_id',
autospec=True) as mock_get_port:
mock_get_port.return_value = self.fake_port
port = objects.Port.get(self.context, port_id)
mock_get_port.assert_called_once_with(port_id)
self.assertEqual(self.context, port._context)
def test_get_by_uuid(self):
uuid = self.fake_port['uuid']
with mock.patch.object(self.dbapi, 'get_port_by_uuid',
autospec=True) as mock_get_port:
mock_get_port.return_value = self.fake_port
port = objects.Port.get(self.context, uuid)
mock_get_port.assert_called_once_with(uuid)
self.assertEqual(self.context, port._context)
def test_get_by_address(self):
address = self.fake_port['address']
with mock.patch.object(self.dbapi, 'get_port_by_address',
autospec=True) as mock_get_port:
mock_get_port.return_value = self.fake_port
port = objects.Port.get(self.context, address)
mock_get_port.assert_called_once_with(address)
self.assertEqual(self.context, port._context)
def test_get_bad_id_and_uuid_and_address(self):
self.assertRaises(exception.InvalidIdentity,
objects.Port.get, self.context, 'not-a-uuid')
def test_save(self):
uuid = self.fake_port['uuid']
address = "b2:54:00:cf:2d:40"
test_time = datetime.datetime(2000, 1, 1, 0, 0)
with mock.patch.object(self.dbapi, 'get_port_by_uuid',
autospec=True) as mock_get_port:
mock_get_port.return_value = self.fake_port
with mock.patch.object(self.dbapi, 'update_port',
autospec=True) as mock_update_port:
mock_update_port.return_value = (
utils.get_test_port(address=address, updated_at=test_time))
p = objects.Port.get_by_uuid(self.context, uuid)
p.address = address
p.save()
mock_get_port.assert_called_once_with(uuid)
mock_update_port.assert_called_once_with(
uuid, {'address': "b2:54:00:cf:2d:40"})
self.assertEqual(self.context, p._context)
res_updated_at = (p.updated_at).replace(tzinfo=None)
self.assertEqual(test_time, res_updated_at)
def test_refresh(self):
uuid = self.fake_port['uuid']
returns = [self.fake_port,
utils.get_test_port(address="c3:54:00:cf:2d:40")]
expected = [mock.call(uuid), mock.call(uuid)]
with mock.patch.object(self.dbapi, 'get_port_by_uuid',
side_effect=returns,
autospec=True) as mock_get_port:
p = objects.Port.get_by_uuid(self.context, uuid)
self.assertEqual("52:54:00:cf:2d:31", p.address)
p.refresh()
self.assertEqual("c3:54:00:cf:2d:40", p.address)
self.assertEqual(expected, mock_get_port.call_args_list)
self.assertEqual(self.context, p._context)
def test_list(self):
with mock.patch.object(self.dbapi, 'get_port_list',
autospec=True) as mock_get_list:
mock_get_list.return_value = [self.fake_port]
ports = objects.Port.list(self.context)
self.assertThat(ports, HasLength(1))
self.assertIsInstance(ports[0], objects.Port)
self.assertEqual(self.context, ports[0]._context)
| {
"content_hash": "8fa93f168488365aaf58d89a01d8677f",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 79,
"avg_line_length": 41.44444444444444,
"alnum_prop": 0.5776261272239824,
"repo_name": "bacaldwell/ironic",
"id": "445ca84b7c95708f709179579272fe2658525027",
"size": "4695",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "ironic/tests/unit/objects/test_port.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "4207766"
},
{
"name": "Shell",
"bytes": "69242"
}
],
"symlink_target": ""
} |
"""
Script to run PCA on each subject's BOLD data: drop the non-standardized initial TRs, fit PCA to the voxel covariance matrix, and plot the explained variance ratio of the first 20 components for each subject.
"""
import numpy as np
import nibabel as nib
import os
import sys
import pandas as pd
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
# Relative paths to project and data.
project_path = "../../../"
path_to_data = project_path+"data/ds009/"
location_of_images = project_path+"images/"
location_of_functions = project_path+"code/utils/functions/"
behav_suffix = "/behav/task001_run001/behavdata.txt"
sys.path.append(location_of_functions)
from event_related_fMRI_functions import hrf_single, convolution_specialized
from noise_correction import mean_underlying_noise, fourier_predict_underlying_noise
sub_list = os.listdir(path_to_data)[0:2]
# saving to compare number of cuts in the beginning
num_cut=np.zeros(len(sub_list))
i=0
# Loop through all the subjects.
for name in sub_list:
# amount of beginning TRs not standardized at 6
behav=pd.read_table(path_to_data+name+behav_suffix,sep=" ")
num_TR = float(behav["NumTRs"])
# Load image data.
img = nib.load(path_to_data+ name+ "/BOLD/task001_run001/bold.nii.gz")
data = img.get_data()
# Drop the appropriate number of volumes from the beginning.
first_n_vols=data.shape[-1]
num_TR_cut=int(first_n_vols-num_TR)
num_cut[i]=num_TR_cut
i+=1
data = data[...,num_TR_cut:]
data_2d = data.reshape((-1,data.shape[-1]))
# Run PCA on the covariance matrix and plot explained variance.
pca = PCA(n_components=20)
pca.fit(data_2d.T.dot(data_2d))
exp_var = pca.explained_variance_ratio_
plt.plot(range(1,21), exp_var)
plt.savefig(location_of_images+'pca'+name+'.png')
plt.close()
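# Hedged extension (not part of the original script): the cumulative explained
# variance is often easier to read off than the per-component ratios, e.g.
#   np.cumsum(exp_var)   # fraction of variance captured by the first k components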
| {
"content_hash": "d018adb3dc457e24818c5c3e3df573cb",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 181,
"avg_line_length": 33.160714285714285,
"alnum_prop": 0.6984383414108778,
"repo_name": "reychil/project-alpha-1",
"id": "ccc1b1ed353bc3feaafa417606085c7bb8d35223",
"size": "1857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/utils/scripts/pca_script.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3020"
},
{
"name": "Python",
"bytes": "166513"
},
{
"name": "TeX",
"bytes": "58819"
}
],
"symlink_target": ""
} |
"""
WSGI config for app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "2fec3ccb3f5306c93b0db65a3f0aa5e9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.214285714285715,
"alnum_prop": 0.7690288713910761,
"repo_name": "dugancathal/polyspec",
"id": "8446814ae73cbed8b0e8c12fa44661766353179c",
"size": "381",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "spec/fixtures/django-app/app/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "311"
},
{
"name": "Python",
"bytes": "2932"
},
{
"name": "Ruby",
"bytes": "8330"
}
],
"symlink_target": ""
} |
from django.db import models
class Tag(models.Model):
name=models.CharField(max_length=128,unique=True)
def __unicode__(self):
return self.name
class Link(models.Model):
title=models.CharField(max_length=128,unique=True)
url=models.URLField()
tags=models.ManyToManyField(Tag)
def __unicode__(self):
return self.title
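# Hedged usage sketch (not part of the original module; assumes migrations applied):
#   tag = Tag.objects.create(name='django')
#   link = Link.objects.create(title='Django docs', url='https://docs.djangoproject.com/')
#   link.tags.add(tag)            # M2M relation declared on Link above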
| {
"content_hash": "7bb6c12f89be57bbe6140cecb86f5dd8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 51,
"avg_line_length": 25.46153846153846,
"alnum_prop": 0.7492447129909365,
"repo_name": "zturchan/CMPUT410-Lab6",
"id": "4f0433d5f739f53968de97f11ba851bdc9dd26ef",
"size": "331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bookmarks/main/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44059"
},
{
"name": "HTML",
"bytes": "83005"
},
{
"name": "JavaScript",
"bytes": "102019"
},
{
"name": "Python",
"bytes": "5862074"
},
{
"name": "Shell",
"bytes": "3947"
}
],
"symlink_target": ""
} |
"""Filename globbing utility."""
import os
import fnmatch
import re
__all__ = ["glob"]
def glob(pathname):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la fnmatch.
"""
if not has_magic(pathname):
if os.path.exists(pathname):
return [pathname]
else:
return []
dirname, basename = os.path.split(pathname)
if has_magic(dirname):
list = glob(dirname)
else:
list = [dirname]
if not has_magic(basename):
result = []
for dirname in list:
if basename or os.path.isdir(dirname):
name = os.path.join(dirname, basename)
if os.path.exists(name):
result.append(name)
else:
result = []
for dirname in list:
sublist = glob1(dirname, basename)
for name in sublist:
result.append(os.path.join(dirname, name))
return result
def glob1(dirname, pattern):
if not dirname: dirname = os.curdir
try:
names = os.listdir(dirname)
except os.error:
return []
result = []
for name in names:
if name[0] != '.' or pattern[0] == '.':
if fnmatch.fnmatch(name, pattern):
result.append(name)
return result
magic_check = re.compile('[*?[]')
def has_magic(s):
return magic_check.search(s) is not None
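# Hedged usage sketch (not part of the original module); paths shown are illustrative:
#   glob('*.py')          -> existing entries in the current directory matching *.py
#   glob('src/*/README')  -> one level of directory wildcarding via glob1() above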
| {
"content_hash": "e3aa188c0cf28130dea37f6f7a4bb4b0",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 70,
"avg_line_length": 25.43859649122807,
"alnum_prop": 0.5627586206896552,
"repo_name": "Integral-Technology-Solutions/ConfigNOW",
"id": "eeb6bdd6468aeaaa0a20fdaa6850eb2a9099b5c6",
"size": "1450",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Lib/glob.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1499"
},
{
"name": "HTML",
"bytes": "2243"
},
{
"name": "Java",
"bytes": "594"
},
{
"name": "Python",
"bytes": "2973691"
},
{
"name": "Shell",
"bytes": "5797"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "robotgear.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| {
"content_hash": "34bdffb97d38b018d9530715956f9961",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.38095238095238,
"alnum_prop": 0.621656050955414,
"repo_name": "robotgear/robotgear",
"id": "c97d81739a2bc4d8fcba9f7412d3d354306c7d36",
"size": "807",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3150090"
},
{
"name": "Dockerfile",
"bytes": "156"
},
{
"name": "HTML",
"bytes": "47366"
},
{
"name": "JavaScript",
"bytes": "2745371"
},
{
"name": "Python",
"bytes": "49333"
}
],
"symlink_target": ""
} |
"""
PyPI XML-RPC Client helper
To use the :data:`pyshop.helpers.pypi.proxy`, the method
:method:`pyshop.helpers.pypi.set_proxy`must be called to
install the XML RPC proxy.
Also it is possible to install an HTTP Proxy in case the pyshop host does not
have a direct access to internet.
This is actually a configuration key ``pyshop.pypi.transport_proxy`` from the
paste ini file.
.. warning::
This code use an unsecured connection (http)
and should be modified since PyPI is available in https.
.. :data:`proxy`:: The XML RPC Proxy
"""
import logging
try:
import xmlrpc.client as xmlrpc
except ImportError:
import xmlrpclib as xmlrpc
import requests
log = logging.getLogger(__name__)
proxy = None
PYPI_URL = None
class RequestsTransport(xmlrpc.Transport):
"""
Drop in Transport for xmlrpclib that uses Requests instead of httplib
# https://gist.github.com/chrisguitarguy/2354951
"""
# change our user agent to reflect Requests
user_agent = "PyShop"
def __init__(self, use_https):
xmlrpc.Transport.__init__(self) # Transport does not inherit object
self.scheme = 'https' if use_https else 'http'
def request(self, host, handler, request_body, verbose):
"""
Make an xmlrpc request.
"""
headers = {'User-Agent': self.user_agent,
#Proxy-Connection': 'Keep-Alive',
#'Content-Range': 'bytes oxy1.0/-1',
'Accept': 'text/xml',
'Content-Type': 'text/xml' }
url = self._build_url(host, handler)
try:
resp = requests.post(url, data=request_body, headers=headers)
except ValueError:
raise
except Exception:
raise # something went wrong
else:
try:
resp.raise_for_status()
except requests.RequestException as e:
raise xmlrpc.ProtocolError(url, resp.status_code,
str(e), resp.headers)
else:
return self.parse_response(resp)
def parse_response(self, resp):
"""
Parse the xmlrpc response.
"""
p, u = self.getparser()
p.feed(resp.content)
p.close()
return u.close()
def _build_url(self, host, handler):
"""
Build a url for our request based on the host, handler and use_http
property
"""
return '%s://%s%s' % (self.scheme, host, handler)
def get_json_package_info(name):
url = '/'.join([PYPI_URL, name, 'json'])
resp = requests.get(url)
resp.raise_for_status()
return resp.json()
def resolve_name(package_name):
""" Resolve the real name of the upstream package """
log.info('Resolving hyphenation of %s', package_name)
package = get_json_package_info(package_name)
return package['info']['name']
def set_proxy(proxy_url, transport_proxy=None):
"""Create the proxy to PyPI XML-RPC Server"""
global proxy, PYPI_URL
PYPI_URL = proxy_url
proxy = xmlrpc.ServerProxy(
proxy_url,
transport=RequestsTransport(proxy_url.startswith('https://')),
allow_none=True)
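# Hedged usage sketch (not part of the original module): install the proxy once
# at startup, then issue XML-RPC calls through it. package_releases is a standard
# PyPI XML-RPC method, assumed here for illustration.
#   set_proxy('https://pypi.python.org/pypi')
#   proxy.package_releases('requests')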
| {
"content_hash": "c7dea605398fc88315ba9cf146b1ad64",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 77,
"avg_line_length": 29.01801801801802,
"alnum_prop": 0.6047811238745732,
"repo_name": "qpython-android/QPYPI",
"id": "724c1c0769bb798630943b31bc223d99c375d830",
"size": "3221",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyshop/helpers/pypi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10039"
},
{
"name": "HTML",
"bytes": "30875"
},
{
"name": "Python",
"bytes": "139081"
},
{
"name": "Shell",
"bytes": "145"
}
],
"symlink_target": ""
} |
from parse_prca import parse | {
"content_hash": "c287eb56ca21533b0822bf88e04309ae",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 28,
"avg_line_length": 28,
"alnum_prop": 0.8571428571428571,
"repo_name": "spudmind/undertheinfluence",
"id": "57b4eaae75d50f0e3e239d2dfb8925cb3cc95f7c",
"size": "28",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "parsers/prca/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2366948"
},
{
"name": "HTML",
"bytes": "153611"
},
{
"name": "JavaScript",
"bytes": "16718"
},
{
"name": "Python",
"bytes": "449237"
}
],
"symlink_target": ""
} |
"""
Handling of RSA and DSA keys.
Maintainer: U{Paul Swartz}
"""
# base library imports
import base64
import warnings
import itertools
# external library imports
from Crypto.Cipher import DES3
from Crypto.PublicKey import RSA, DSA
from Crypto import Util
from pyasn1.type import univ
from pyasn1.codec.ber import decoder as berDecoder
from pyasn1.codec.ber import encoder as berEncoder
# twisted
from twisted.python import randbytes
from twisted.python.hashlib import md5, sha1
# sibling imports
from twisted.conch.ssh import common, sexpy
class BadKeyError(Exception):
"""
Raised when a key isn't what we expected from it.
XXX: we really need to check for bad keys
"""
class EncryptedKeyError(Exception):
"""
Raised when an encrypted key is presented to fromString/fromFile without
a password.
"""
class Key(object):
"""
An object representing a key. A key can be either a public or
private key. A public key can verify a signature; a private key can
create or verify a signature. To generate a string that can be stored
on disk, use the toString method. If you have a private key, but want
the string representation of the public key, use Key.public().toString().
@ivar keyObject: The C{Crypto.PublicKey.pubkey.pubkey} object that
operations are performed with.
"""
def fromFile(Class, filename, type=None, passphrase=None):
"""
Return a Key object corresponding to the data in filename. type
and passphrase function as they do in fromString.
"""
return Class.fromString(file(filename, 'rb').read(), type, passphrase)
fromFile = classmethod(fromFile)
def fromString(Class, data, type=None, passphrase=None):
"""
Return a Key object corresponding to the string data.
type is optionally the type of string, matching a _fromString_*
method. Otherwise, the _guessStringType() classmethod will be used
to guess a type. If the key is encrypted, passphrase is used as
the decryption key.
@type data: C{str}
@type type: C{None}/C{str}
@type passphrase: C{None}/C{str}
@rtype: C{Key}
"""
if type is None:
type = Class._guessStringType(data)
if type is None:
raise BadKeyError('cannot guess the type of %r' % data)
method = getattr(Class, '_fromString_%s' % type.upper(), None)
if method is None:
raise BadKeyError('no _fromString method for %s' % type)
if method.func_code.co_argcount == 2: # no passphrase
if passphrase:
raise BadKeyError('key not encrypted')
return method(data)
else:
return method(data, passphrase)
fromString = classmethod(fromString)
def _fromString_BLOB(Class, blob):
"""
Return a public key object corresponding to this public key blob.
The format of a RSA public key blob is::
string 'ssh-rsa'
integer e
integer n
The format of a DSA public key blob is::
string 'ssh-dss'
integer p
integer q
integer g
integer y
@type blob: C{str}
@return: a C{Crypto.PublicKey.pubkey.pubkey} object
@raises BadKeyError: if the key type (the first string) is unknown.
"""
keyType, rest = common.getNS(blob)
if keyType == 'ssh-rsa':
e, n, rest = common.getMP(rest, 2)
return Class(RSA.construct((n, e)))
elif keyType == 'ssh-dss':
p, q, g, y, rest = common.getMP(rest, 4)
return Class(DSA.construct((y, g, p, q)))
else:
raise BadKeyError('unknown blob type: %s' % keyType)
_fromString_BLOB = classmethod(_fromString_BLOB)
def _fromString_PRIVATE_BLOB(Class, blob):
"""
Return a private key object corresponding to this private key blob.
The blob formats are as follows:
RSA keys::
string 'ssh-rsa'
integer n
integer e
integer d
integer u
integer p
integer q
DSA keys::
string 'ssh-dss'
integer p
integer q
integer g
integer y
integer x
@type blob: C{str}
@return: a C{Crypto.PublicKey.pubkey.pubkey} object
@raises BadKeyError: if the key type (the first string) is unknown.
"""
keyType, rest = common.getNS(blob)
if keyType == 'ssh-rsa':
n, e, d, u, p, q, rest = common.getMP(rest, 6)
rsakey = Class(RSA.construct((n, e, d, p, q, u)))
return rsakey
elif keyType == 'ssh-dss':
p, q, g, y, x, rest = common.getMP(rest, 5)
dsakey = Class(DSA.construct((y, g, p, q, x)))
return dsakey
else:
raise BadKeyError('unknown blob type: %s' % keyType)
_fromString_PRIVATE_BLOB = classmethod(_fromString_PRIVATE_BLOB)
def _fromString_PUBLIC_OPENSSH(Class, data):
"""
Return a public key object corresponding to this OpenSSH public key
string. The format of an OpenSSH public key string is::
<key type> <base64-encoded public key blob>
@type data: C{str}
@return: A {Crypto.PublicKey.pubkey.pubkey} object
@raises BadKeyError: if the blob type is unknown.
"""
blob = base64.decodestring(data.split()[1])
return Class._fromString_BLOB(blob)
_fromString_PUBLIC_OPENSSH = classmethod(_fromString_PUBLIC_OPENSSH)
def _fromString_PRIVATE_OPENSSH(Class, data, passphrase):
"""
Return a private key object corresponding to this OpenSSH private key
string. If the key is encrypted, passphrase MUST be provided.
Providing a passphrase for an unencrypted key is an error.
The format of an OpenSSH private key string is::
-----BEGIN <key type> PRIVATE KEY-----
[Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,<initialization value>]
<base64-encoded ASN.1 structure>
------END <key type> PRIVATE KEY------
The ASN.1 structure of a RSA key is::
(0, n, e, d, p, q)
The ASN.1 structure of a DSA key is::
(0, p, q, g, y, x)
@type data: C{str}
@type passphrase: C{str}
@return: a C{Crypto.PublicKey.pubkey.pubkey} object
@raises BadKeyError: if
* a passphrase is provided for an unencrypted key
* a passphrase is not provided for an encrypted key
* the ASN.1 encoding is incorrect
"""
lines = [x + '\n' for x in data.split('\n')]
kind = lines[0][11:14]
if lines[1].startswith('Proc-Type: 4,ENCRYPTED'): # encrypted key
ivdata = lines[2].split(',')[1][:-1]
iv = ''.join([chr(int(ivdata[i:i + 2], 16)) for i in range(0,
len(ivdata), 2)])
if not passphrase:
raise EncryptedKeyError('encrypted key with no passphrase')
ba = md5(passphrase + iv).digest()
bb = md5(ba + passphrase + iv).digest()
decKey = (ba + bb)[:24]
b64Data = base64.decodestring(''.join(lines[3:-1]))
keyData = DES3.new(decKey, DES3.MODE_CBC, iv).decrypt(b64Data)
removeLen = ord(keyData[-1])
keyData = keyData[:-removeLen]
else:
b64Data = ''.join(lines[1:-1])
keyData = base64.decodestring(b64Data)
try:
decodedKey = berDecoder.decode(keyData)[0]
except Exception, e:
raise BadKeyError, 'something wrong with decode'
if kind == 'RSA':
if len(decodedKey) == 2: # alternate RSA key
decodedKey = decodedKey[0]
if len(decodedKey) < 6:
raise BadKeyError('RSA key failed to decode properly')
n, e, d, p, q = [long(value) for value in decodedKey[1:6]]
if p > q: # make p smaller than q
p, q = q, p
return Class(RSA.construct((n, e, d, p, q)))
elif kind == 'DSA':
p, q, g, y, x = [long(value) for value in decodedKey[1: 6]]
if len(decodedKey) < 6:
raise BadKeyError('DSA key failed to decode properly')
return Class(DSA.construct((y, g, p, q, x)))
_fromString_PRIVATE_OPENSSH = classmethod(_fromString_PRIVATE_OPENSSH)
def _fromString_PUBLIC_LSH(Class, data):
"""
Return a public key corresponding to this LSH public key string.
The LSH public key string format is::
<s-expression: ('public-key', (<key type>, (<name, <value>)+))>
The names for a RSA (key type 'rsa-pkcs1-sha1') key are: n, e.
The names for a DSA (key type 'dsa') key are: y, g, p, q.
@type data: C{str}
@return: a C{Crypto.PublicKey.pubkey.pubkey} object
@raises BadKeyError: if the key type is unknown
"""
sexp = sexpy.parse(base64.decodestring(data[1:-1]))
assert sexp[0] == 'public-key'
kd = {}
for name, data in sexp[1][1:]:
kd[name] = common.getMP(common.NS(data))[0]
if sexp[1][0] == 'dsa':
return Class(DSA.construct((kd['y'], kd['g'], kd['p'], kd['q'])))
elif sexp[1][0] == 'rsa-pkcs1-sha1':
return Class(RSA.construct((kd['n'], kd['e'])))
else:
raise BadKeyError('unknown lsh key type %s' % sexp[1][0])
_fromString_PUBLIC_LSH = classmethod(_fromString_PUBLIC_LSH)
def _fromString_PRIVATE_LSH(Class, data):
"""
Return a private key corresponding to this LSH private key string.
The LSH private key string format is::
<s-expression: ('private-key', (<key type>, (<name>, <value>)+))>
The names for a RSA (key type 'rsa-pkcs1-sha1') key are: n, e, d, p, q.
The names for a DSA (key type 'dsa') key are: y, g, p, q, x.
@type data: C{str}
@return: a {Crypto.PublicKey.pubkey.pubkey} object
@raises BadKeyError: if the key type is unknown
"""
sexp = sexpy.parse(data)
assert sexp[0] == 'private-key'
kd = {}
for name, data in sexp[1][1:]:
kd[name] = common.getMP(common.NS(data))[0]
if sexp[1][0] == 'dsa':
assert len(kd) == 5, len(kd)
return Class(DSA.construct((kd['y'], kd['g'], kd['p'],
kd['q'], kd['x'])))
elif sexp[1][0] == 'rsa-pkcs1':
assert len(kd) == 8, len(kd)
if kd['p'] > kd['q']: # make p smaller than q
kd['p'], kd['q'] = kd['q'], kd['p']
return Class(RSA.construct((kd['n'], kd['e'], kd['d'],
kd['p'], kd['q'])))
else:
raise BadKeyError('unknown lsh key type %s' % sexp[1][0])
_fromString_PRIVATE_LSH = classmethod(_fromString_PRIVATE_LSH)
def _fromString_AGENTV3(Class, data):
"""
        Return a private key object corresponding to the Secure Shell Key
Agent v3 format.
The SSH Key Agent v3 format for a RSA key is::
string 'ssh-rsa'
integer e
integer d
integer n
integer u
integer p
integer q
The SSH Key Agent v3 format for a DSA key is::
string 'ssh-dss'
integer p
integer q
integer g
integer y
integer x
@type data: C{str}
@return: a C{Crypto.PublicKey.pubkey.pubkey} object
@raises BadKeyError: if the key type (the first string) is unknown
"""
keyType, data = common.getNS(data)
if keyType == 'ssh-dss':
p, data = common.getMP(data)
q, data = common.getMP(data)
g, data = common.getMP(data)
y, data = common.getMP(data)
x, data = common.getMP(data)
return Class(DSA.construct((y,g,p,q,x)))
elif keyType == 'ssh-rsa':
e, data = common.getMP(data)
d, data = common.getMP(data)
n, data = common.getMP(data)
u, data = common.getMP(data)
p, data = common.getMP(data)
q, data = common.getMP(data)
return Class(RSA.construct((n,e,d,p,q,u)))
else:
raise BadKeyError("unknown key type %s" % keyType)
_fromString_AGENTV3 = classmethod(_fromString_AGENTV3)
def _guessStringType(Class, data):
"""
Guess the type of key in data. The types map to _fromString_*
methods.
"""
if data.startswith('ssh-'):
return 'public_openssh'
elif data.startswith('-----BEGIN'):
return 'private_openssh'
elif data.startswith('{'):
return 'public_lsh'
elif data.startswith('('):
return 'private_lsh'
elif data.startswith('\x00\x00\x00\x07ssh-'):
ignored, rest = common.getNS(data)
count = 0
while rest:
count += 1
ignored, rest = common.getMP(rest)
if count > 4:
return 'agentv3'
else:
return 'blob'
_guessStringType = classmethod(_guessStringType)
def __init__(self, keyObject):
"""
Initialize a PublicKey with a C{Crypto.PublicKey.pubkey.pubkey}
object.
@type keyObject: C{Crypto.PublicKey.pubkey.pubkey}
"""
self.keyObject = keyObject
def __eq__(self, other):
"""
Return True if other represents an object with the same key.
"""
if type(self) == type(other):
return self.type() == other.type() and self.data() == other.data()
else:
return NotImplemented
def __ne__(self, other):
"""
Return True if other represents anything other than this key.
"""
result = self.__eq__(other)
if result == NotImplemented:
return result
return not result
def __repr__(self):
"""
Return a pretty representation of this object.
"""
lines = ['<%s %s (%s bits)' % (self.type(),
self.isPublic() and 'Public Key' or 'Private Key',
self.keyObject.size())]
for k, v in self.data().items():
lines.append('attr %s:' % k)
by = common.MP(v)[4:]
while by:
m = by[:15]
by = by[15:]
o = ''
for c in m:
o = o + '%02x:' % ord(c)
if len(m) < 15:
o = o[:-1]
lines.append('\t' + o)
lines[-1] = lines[-1] + '>'
return '\n'.join(lines)
def isPublic(self):
"""
Returns True if this Key is a public key.
"""
return not self.keyObject.has_private()
def public(self):
"""
Returns a version of this key containing only the public key data.
If this is a public key, this may or may not be the same object
as self.
"""
return Key(self.keyObject.publickey())
def fingerprint(self):
"""
Get the user presentation of the fingerprint of this L{Key}. As
described by U{RFC 4716 section
4<http://tools.ietf.org/html/rfc4716#section-4>}::
The fingerprint of a public key consists of the output of the MD5
message-digest algorithm [RFC1321]. The input to the algorithm is
the public key data as specified by [RFC4253]. (...) The output
of the (MD5) algorithm is presented to the user as a sequence of 16
octets printed as hexadecimal with lowercase letters and separated
by colons.
@since: 8.2
@return: the user presentation of this L{Key}'s fingerprint, as a
string.
@rtype: L{str}
"""
return ':'.join([x.encode('hex') for x in md5(self.blob()).digest()])
def type(self):
"""
Return the type of the object we wrap. Currently this can only be
'RSA' or 'DSA'.
"""
# the class is Crypto.PublicKey.<type>.<stuff we don't care about>
klass = str(self.keyObject.__class__)
if klass.startswith('Crypto.PublicKey'):
type = klass.split('.')[2]
else:
raise RuntimeError('unknown type of object: %r' % self.keyObject)
if type in ('RSA', 'DSA'):
return type
else:
raise RuntimeError('unknown type of key: %s' % type)
def sshType(self):
"""
Return the type of the object we wrap as defined in the ssh protocol.
Currently this can only be 'ssh-rsa' or 'ssh-dss'.
"""
return {'RSA':'ssh-rsa', 'DSA':'ssh-dss'}[self.type()]
def data(self):
"""
Return the values of the public key as a dictionary.
@rtype: C{dict}
"""
keyData = {}
for name in self.keyObject.keydata:
value = getattr(self.keyObject, name, None)
if value is not None:
keyData[name] = value
return keyData
def blob(self):
"""
Return the public key blob for this key. The blob is the
over-the-wire format for public keys:
RSA keys::
string 'ssh-rsa'
integer e
integer n
DSA keys::
string 'ssh-dss'
integer p
integer q
integer g
integer y
@rtype: C{str}
"""
type = self.type()
data = self.data()
if type == 'RSA':
return (common.NS('ssh-rsa') + common.MP(data['e']) +
common.MP(data['n']))
elif type == 'DSA':
return (common.NS('ssh-dss') + common.MP(data['p']) +
common.MP(data['q']) + common.MP(data['g']) +
common.MP(data['y']))
def privateBlob(self):
"""
Return the private key blob for this key. The blob is the
over-the-wire format for private keys:
RSA keys::
string 'ssh-rsa'
integer n
integer e
integer d
integer u
integer p
integer q
DSA keys::
string 'ssh-dss'
integer p
integer q
integer g
integer y
integer x
"""
type = self.type()
data = self.data()
if type == 'RSA':
return (common.NS('ssh-rsa') + common.MP(data['n']) +
common.MP(data['e']) + common.MP(data['d']) +
common.MP(data['u']) + common.MP(data['p']) +
common.MP(data['q']))
elif type == 'DSA':
return (common.NS('ssh-dss') + common.MP(data['p']) +
common.MP(data['q']) + common.MP(data['g']) +
common.MP(data['y']) + common.MP(data['x']))
def toString(self, type, extra=None):
"""
Create a string representation of this key. If the key is a private
key and you want the represenation of its public key, use
C{key.public().toString()}. type maps to a _toString_* method.
@param type: The type of string to emit. Currently supported values
are C{'OPENSSH'}, C{'LSH'}, and C{'AGENTV3'}.
@type type: L{str}
@param extra: Any extra data supported by the selected format which
is not part of the key itself. For public OpenSSH keys, this is
a comment. For private OpenSSH keys, this is a passphrase to
encrypt with.
@type extra: L{str} or L{NoneType}
@rtype: L{str}
"""
method = getattr(self, '_toString_%s' % type.upper(), None)
if method is None:
raise BadKeyError('unknown type: %s' % type)
if method.func_code.co_argcount == 2:
return method(extra)
else:
return method()
def _toString_OPENSSH(self, extra):
"""
Return a public or private OpenSSH string. See
_fromString_PUBLIC_OPENSSH and _fromString_PRIVATE_OPENSSH for the
string formats. If extra is present, it represents a comment for a
public key, or a passphrase for a private key.
@type extra: C{str}
@rtype: C{str}
"""
data = self.data()
if self.isPublic():
b64Data = base64.encodestring(self.blob()).replace('\n', '')
if not extra:
extra = ''
return ('%s %s %s' % (self.sshType(), b64Data, extra)).strip()
else:
lines = ['-----BEGIN %s PRIVATE KEY-----' % self.type()]
if self.type() == 'RSA':
p, q = data['p'], data['q']
objData = (0, data['n'], data['e'], data['d'], q, p,
data['d'] % (q - 1), data['d'] % (p - 1),
data['u'])
else:
objData = (0, data['p'], data['q'], data['g'], data['y'],
data['x'])
asn1Sequence = univ.Sequence()
for index, value in itertools.izip(itertools.count(), objData):
asn1Sequence.setComponentByPosition(index, univ.Integer(value))
asn1Data = berEncoder.encode(asn1Sequence)
if extra:
iv = randbytes.secureRandom(8)
hexiv = ''.join(['%02X' % ord(x) for x in iv])
lines.append('Proc-Type: 4,ENCRYPTED')
lines.append('DEK-Info: DES-EDE3-CBC,%s\n' % hexiv)
ba = md5(extra + iv).digest()
bb = md5(ba + extra + iv).digest()
encKey = (ba + bb)[:24]
padLen = 8 - (len(asn1Data) % 8)
asn1Data += (chr(padLen) * padLen)
asn1Data = DES3.new(encKey, DES3.MODE_CBC,
iv).encrypt(asn1Data)
b64Data = base64.encodestring(asn1Data).replace('\n', '')
lines += [b64Data[i:i + 64] for i in range(0, len(b64Data), 64)]
lines.append('-----END %s PRIVATE KEY-----' % self.type())
return '\n'.join(lines)
def _toString_LSH(self):
"""
Return a public or private LSH key. See _fromString_PUBLIC_LSH and
_fromString_PRIVATE_LSH for the key formats.
@rtype: C{str}
"""
data = self.data()
if self.isPublic():
if self.type() == 'RSA':
keyData = sexpy.pack([['public-key', ['rsa-pkcs1-sha1',
['n', common.MP(data['n'])[4:]],
['e', common.MP(data['e'])[4:]]]]])
elif self.type() == 'DSA':
keyData = sexpy.pack([['public-key', ['dsa',
['p', common.MP(data['p'])[4:]],
['q', common.MP(data['q'])[4:]],
['g', common.MP(data['g'])[4:]],
['y', common.MP(data['y'])[4:]]]]])
return '{' + base64.encodestring(keyData).replace('\n', '') + '}'
else:
if self.type() == 'RSA':
p, q = data['p'], data['q']
return sexpy.pack([['private-key', ['rsa-pkcs1',
['n', common.MP(data['n'])[4:]],
['e', common.MP(data['e'])[4:]],
['d', common.MP(data['d'])[4:]],
['p', common.MP(q)[4:]],
['q', common.MP(p)[4:]],
['a', common.MP(data['d'] % (q - 1))[4:]],
['b', common.MP(data['d'] % (p - 1))[4:]],
['c', common.MP(data['u'])[4:]]]]])
elif self.type() == 'DSA':
return sexpy.pack([['private-key', ['dsa',
['p', common.MP(data['p'])[4:]],
['q', common.MP(data['q'])[4:]],
['g', common.MP(data['g'])[4:]],
['y', common.MP(data['y'])[4:]],
['x', common.MP(data['x'])[4:]]]]])
def _toString_AGENTV3(self):
"""
Return a private Secure Shell Agent v3 key. See
_fromString_AGENTV3 for the key format.
@rtype: C{str}
"""
data = self.data()
if not self.isPublic():
if self.type() == 'RSA':
values = (data['e'], data['d'], data['n'], data['u'],
data['p'], data['q'])
elif self.type() == 'DSA':
values = (data['p'], data['q'], data['g'], data['y'],
data['x'])
return common.NS(self.sshType()) + ''.join(map(common.MP, values))
def sign(self, data):
"""
Returns a signature with this Key.
@type data: C{str}
@rtype: C{str}
"""
if self.type() == 'RSA':
digest = pkcs1Digest(data, self.keyObject.size()/8)
signature = self.keyObject.sign(digest, '')[0]
ret = common.NS(Util.number.long_to_bytes(signature))
elif self.type() == 'DSA':
digest = sha1(data).digest()
randomBytes = randbytes.secureRandom(19)
sig = self.keyObject.sign(digest, randomBytes)
# SSH insists that the DSS signature blob be two 160-bit integers
# concatenated together. The sig[0], [1] numbers from obj.sign
# are just numbers, and could be any length from 0 to 160 bits.
# Make sure they are padded out to 160 bits (20 bytes each)
ret = common.NS(Util.number.long_to_bytes(sig[0], 20) +
Util.number.long_to_bytes(sig[1], 20))
return common.NS(self.sshType()) + ret
def verify(self, signature, data):
"""
Returns true if the signature for data is valid for this Key.
@type signature: C{str}
@type data: C{str}
@rtype: C{bool}
"""
signatureType, signature = common.getNS(signature)
if signatureType != self.sshType():
return False
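        # An RSA signature carries a single multi-precision integer; a DSA signature
        # is a netstring holding r and s packed as two 20-byte values.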
if self.type() == 'RSA':
numbers = common.getMP(signature)
digest = pkcs1Digest(data, self.keyObject.size() / 8)
elif self.type() == 'DSA':
signature = common.getNS(signature)[0]
            numbers = [Util.number.bytes_to_long(n)
                       for n in (signature[:20], signature[20:])]
digest = sha1(data).digest()
return self.keyObject.verify(digest, numbers)
def getPublicKeyString(filename=None, line=0, data=''):
"""
Return a public key string suitable for being sent over the wire.
Takes a filename or data of a public key. Currently handles OpenSSH
and LSH keys.
This function has been deprecated since Twisted Conch 0.9. Use
Key.fromString() instead.
@type filename: C{str}
@type line: C{int}
@type data: C{str}
@rtype: C{str}
"""
warnings.warn("getPublicKeyString is deprecated since Twisted Conch 0.9."
" Use Key.fromString().blob().",
DeprecationWarning, stacklevel=2)
if filename and data:
raise BadKeyError("either filename or data, not both")
if filename:
lines = open(filename).readlines()
data = lines[line]
return Key.fromString(data).blob()
def makePublicKeyString(obj, comment='', kind='openssh'):
"""
    Return a public key given a C{Crypto.PublicKey.pubkey.pubkey}
object.
kind is one of ('openssh', 'lsh')
This function is deprecated since Twisted Conch 0.9. Instead use
Key(obj).toString().
@type obj: C{Crypto.PublicKey.pubkey.pubkey}
@type comment: C{str}
@type kind: C{str}
@rtype: C{str}
"""
warnings.warn("makePublicKeyString is deprecated since Twisted Conch 0.9."
" Use Key(obj).public().toString().",
DeprecationWarning, stacklevel=2)
return Key(obj).public().toString(kind, comment)
def getPublicKeyObject(data):
"""
Return a C{Crypto.PublicKey.pubkey.pubkey} corresponding to the SSHv2
public key data. data is in the over-the-wire public key format.
This function is deprecated since Twisted Conch 0.9. Instead, use
Key.fromString().
@type data: C{str}
@rtype: C{Crypto.PublicKey.pubkey.pubkey}
"""
warnings.warn("getPublicKeyObject is deprecated since Twisted Conch 0.9."
" Use Key.fromString().",
DeprecationWarning, stacklevel=2)
return Key.fromString(data).keyObject
def getPrivateKeyObject(filename=None, data='', passphrase=''):
"""
Return a C{Crypto.PublicKey.pubkey.pubkey} object corresponding to the
private key file/data. If the private key is encrypted, passphrase B{must}
    be specified, otherwise a L{BadKeyError} will be raised.
This method is deprecated since Twisted Conch 0.9. Instead, use
the fromString or fromFile classmethods of Key.
@type filename: C{str}
@type data: C{str}
@type passphrase: C{str}
@rtype: C{Crypto.PublicKey.pubkey.pubkey}
@raises BadKeyError: if the key is invalid or a passphrase is not specified
"""
warnings.warn("getPrivateKeyObject is deprecated since Twisted Conch 0.9."
" Use Key.fromString().",
DeprecationWarning, stacklevel=2)
if filename and data:
raise BadKeyError("either filename or data, not both")
if filename:
return Key.fromFile(filename, passphrase=passphrase).keyObject
else:
return Key.fromString(data, passphrase=passphrase).keyObject
def makePrivateKeyString(obj, passphrase=None, kind='openssh'):
"""
    Return a string representation of a private key for a
C{Crypto.PublicKey.pubkey.pubkey} object. If passphrase is given, encrypt
the private key with it.
kind is one of ('openssh', 'lsh', 'agentv3')
This function is deprecated since Twisted Conch 0.9. Instead use
Key(obj).toString().
@type obj: C{Crypto.PublicKey.pubkey.pubkey}
@type passphrase: C{str}/C{None}
@type kind: C{str}
@rtype: C{str}
"""
warnings.warn("makePrivateKeyString is deprecated since Twisted Conch 0.9."
" Use Key(obj).toString().",
DeprecationWarning, stacklevel=2)
return Key(obj).toString(kind, passphrase)
def makePublicKeyBlob(obj):
"""
Make a public key blob from a C{Crypto.PublicKey.pubkey.pubkey}.
This function is deprecated since Twisted Conch 0.9. Use
Key().blob() instead.
"""
warnings.warn("makePublicKeyBlob is deprecated since Twisted Conch 0.9."
" Use Key(obj).blob().",
DeprecationWarning, stacklevel=2)
return Key(obj).blob()
def objectType(obj):
"""
Return the SSH key type corresponding to a C{Crypto.PublicKey.pubkey.pubkey}
object.
@type obj: C{Crypto.PublicKey.pubkey.pubkey}
@rtype: C{str}
"""
keyDataMapping = {
('n', 'e', 'd', 'p', 'q'): 'ssh-rsa',
('n', 'e', 'd', 'p', 'q', 'u'): 'ssh-rsa',
('y', 'g', 'p', 'q', 'x'): 'ssh-dss'
}
try:
return keyDataMapping[tuple(obj.keydata)]
except (KeyError, AttributeError):
raise BadKeyError("invalid key object", obj)
def pkcs1Pad(data, messageLength):
"""
Pad out data to messageLength according to the PKCS#1 standard.
@type data: C{str}
@type messageLength: C{int}
"""
lenPad = messageLength - 2 - len(data)
return '\x01' + ('\xff' * lenPad) + '\x00' + data
def pkcs1Digest(data, messageLength):
"""
Create a message digest using the SHA1 hash algorithm according to the
PKCS#1 standard.
@type data: C{str}
    @type messageLength: C{int}
"""
digest = sha1(data).digest()
return pkcs1Pad(ID_SHA1+digest, messageLength)
def lenSig(obj):
"""
Return the length of the signature in bytes for a key object.
@type obj: C{Crypto.PublicKey.pubkey.pubkey}
@rtype: C{long}
"""
return obj.size()/8
def signData(obj, data):
"""
Sign the data with the given C{Crypto.PublicKey.pubkey.pubkey} object.
This method is deprecated since Twisted Conch 0.9. Instead use
Key().sign().
@type obj: C{Crypto.PublicKey.pubkey.pubkey}
@type data: C{str}
@rtype: C{str}
"""
warnings.warn("signData is deprecated since Twisted Conch 0.9."
" Use Key(obj).sign(data).",
DeprecationWarning, stacklevel=2)
return Key(obj).sign(data)
def verifySignature(obj, sig, data):
"""
Verify that the signature for the data is valid.
This method is deprecated since Twisted Conch 0.9. Use
Key().verify().
@type obj: C{Crypto.PublicKey.pubkey.pubkey}
@type sig: C{str}
@type data: C{str}
@rtype: C{bool}
"""
warnings.warn("verifySignature is deprecated since Twisted Conch 0.9."
" Use Key(obj).verify(signature, data).",
DeprecationWarning, stacklevel=2)
return Key(obj).verify(sig, data)
def printKey(obj):
"""
Pretty print a C{Crypto.PublicKey.pubkey.pubkey} object.
This function is deprecated since Twisted Conch 0.9. Use
repr(Key()).
@type obj: C{Crypto.PublicKey.pubkey.pubkey}
"""
warnings.warn("printKey is deprecated since Twisted Conch 0.9."
" Use repr(Key(obj)).",
DeprecationWarning, stacklevel=2)
return repr(Key(obj))[1:-1]
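# DER-encoded DigestInfo prefix identifying SHA-1; pkcs1Digest prepends it to the raw
# digest as required for PKCS#1 v1.5 signatures.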
ID_SHA1 = '\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'
| {
"content_hash": "dea1fe2de04b4b2908fc4c910ff16f4c",
"timestamp": "",
"source": "github",
"line_count": 937,
"max_line_length": 80,
"avg_line_length": 36.33617929562433,
"alnum_prop": 0.5419567069051605,
"repo_name": "sorenh/cc",
"id": "5dc47188d5ddedb9fe9e937abcd4c4fa89f880c7",
"size": "34185",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vendor/Twisted-10.0.0/twisted/conch/ssh/keys.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "707"
},
{
"name": "Python",
"bytes": "398663"
},
{
"name": "Shell",
"bytes": "12374"
}
],
"symlink_target": ""
} |
import copy
import numpy as np
from ..io.pick import pick_channels_cov
from ..forward import apply_forward
from ..utils import check_random_state, verbose, _time_mask
@verbose
def generate_evoked(fwd, stc, evoked, cov, snr=3, tmin=None, tmax=None,
iir_filter=None, random_state=None, verbose=None):
"""Generate noisy evoked data
Parameters
----------
fwd : dict
a forward solution.
stc : SourceEstimate object
The source time courses.
evoked : Evoked object
An instance of evoked used as template.
cov : Covariance object
The noise covariance
snr : float
signal to noise ratio in dB. It corresponds to
10 * log10( var(signal) / var(noise) ).
tmin : float | None
start of time interval to estimate SNR. If None first time point
is used.
    tmax : float | None
        end of time interval to estimate SNR. If None last time point
        is used.
iir_filter : None | array
IIR filter coefficients (denominator) e.g. [1, -1, 0.2].
random_state : None | int | np.random.RandomState
To specify the random generator state.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
evoked : Evoked object
The simulated evoked data
"""
evoked = apply_forward(fwd, stc, evoked) # verbose
noise = generate_noise_evoked(evoked, cov, iir_filter, random_state)
evoked_noise = add_noise_evoked(evoked, noise, snr, tmin=tmin, tmax=tmax)
return evoked_noise
def generate_noise_evoked(evoked, cov, iir_filter=None, random_state=None):
"""Creates noise as a multivariate Gaussian
The spatial covariance of the noise is given from the cov matrix.
Parameters
----------
evoked : evoked object
an instance of evoked used as template
cov : Covariance object
The noise covariance
iir_filter : None | array
IIR filter coefficients (denominator)
random_state : None | int | np.random.RandomState
To specify the random generator state.
Returns
-------
noise : evoked object
an instance of evoked
"""
from scipy.signal import lfilter
noise = copy.deepcopy(evoked)
noise_cov = pick_channels_cov(cov, include=noise.info['ch_names'])
rng = check_random_state(random_state)
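    # `n_channels` is the zero mean vector for the multivariate normal draw; samples
    # come back as (n_samples, n_channels) and are transposed to (n_channels, n_samples).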
n_channels = np.zeros(noise.info['nchan'])
n_samples = evoked.data.shape[1]
c = np.diag(noise_cov.data) if noise_cov['diag'] else noise_cov.data
noise.data = rng.multivariate_normal(n_channels, c, n_samples).T
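    # Optionally color the noise in time with an all-pole (AR) filter; `iir_filter`
    # supplies the denominator coefficients and the numerator is simply [1].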
if iir_filter is not None:
noise.data = lfilter([1], iir_filter, noise.data, axis=-1)
return noise
def add_noise_evoked(evoked, noise, snr, tmin=None, tmax=None):
"""Adds noise to evoked object with specified SNR.
SNR is computed in the interval from tmin to tmax.
Parameters
----------
evoked : Evoked object
An instance of evoked with signal
noise : Evoked object
An instance of evoked with noise
snr : float
signal to noise ratio in dB. It corresponds to
10 * log10( var(signal) / var(noise) )
tmin : float
start time before event
tmax : float
end time after event
Returns
-------
evoked_noise : Evoked object
An instance of evoked corrupted by noise
"""
evoked = copy.deepcopy(evoked)
tmask = _time_mask(evoked.times, tmin, tmax)
tmp = 10 * np.log10(np.mean((evoked.data[:, tmask] ** 2).ravel()) /
np.mean((noise.data ** 2).ravel()))
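    # `tmp` is the current SNR in dB (a power ratio), so the dB difference is divided
    # by 20 to obtain an amplitude scale factor for the noise.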
noise.data = 10 ** ((tmp - float(snr)) / 20) * noise.data
evoked.data += noise.data
return evoked
| {
"content_hash": "0af2b2d9e7088cc9d84485dfe3ad2ee3",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 77,
"avg_line_length": 32.293103448275865,
"alnum_prop": 0.6350774159103043,
"repo_name": "Odingod/mne-python",
"id": "5ba6f8676a8a8753337d6bcc72a9b3315fc5ee4b",
"size": "3964",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "mne/simulation/evoked.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3403"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "3741370"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
"""Latex filters.
Module of useful filters for processing Latex within Jinja latex templates.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import re
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
LATEX_RE_SUBS = (
(re.compile(r'\.\.\.+'), r'\\ldots'),
)
# Latex substitutions for escaping latex.
# see: http://stackoverflow.com/questions/16259923/how-can-i-escape-latex-special-characters-inside-django-templates
LATEX_SUBS = {
'&': r'\&',
'%': r'\%',
'$': r'\$',
'#': r'\#',
'_': r'\_',
'{': r'\{',
'}': r'\}',
'~': r'\textasciitilde{}',
'^': r'\^{}',
'\\': r'\textbackslash{}',
}
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
__all__ = ['escape_latex',
'strip_math_space']
def escape_latex(text):
"""
Escape characters that may conflict with latex.
Parameters
----------
text : str
Text containing characters that may conflict with Latex
"""
text = ''.join(LATEX_SUBS.get(c, c) for c in text)
for pattern, replacement in LATEX_RE_SUBS:
text = pattern.sub(replacement, text)
return text
def strip_math_space(text):
"""
Remove the space between latex math commands and enclosing $ symbols.
This filter is important because latex isn't as flexible as the notebook
    front end when it comes to flagging math using dollar sign symbols.
Parameters
----------
text : str
Text to filter.
"""
# First, scan through the markdown looking for $. If
# a $ symbol is found, without a preceding \, assume
    # it is the start of a math block, UNLESS no closing $
    # is found within the next two lines.
math_regions = []
math_lines = 0
within_math = False
math_start_index = 0
ptext = ''
last_character = ""
skip = False
for index, char in enumerate(text):
        # Make sure the character isn't preceded by a backslash
if (char == "$" and last_character != "\\"):
# Close the math region if this is an ending $
if within_math:
within_math = False
skip = True
ptext = ptext+'$'+text[math_start_index+1:index].strip()+'$'
math_regions.append([math_start_index, index+1])
else:
# Start a new math region
within_math = True
math_start_index = index
math_lines = 0
# If we are in a math region, count the number of lines parsed.
# Cancel the math region if we find two line breaks!
elif char == "\n":
if within_math:
math_lines += 1
if math_lines > 1:
within_math = False
ptext = ptext+text[math_start_index:index]
# Remember the last character so we can easily watch
# for backslashes
last_character = char
if not within_math and not skip:
ptext = ptext+char
if skip:
skip = False
return ptext
| {
"content_hash": "94da1557a70c29795b986455bb805246",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 116,
"avg_line_length": 30.91869918699187,
"alnum_prop": 0.47225874309755456,
"repo_name": "noslenfa/tdjangorest",
"id": "a67b7d68e5f906993c562887745bcefec291e3ca",
"size": "3803",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "uw/lib/python2.7/site-packages/IPython/nbconvert/filters/latex.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189930"
},
{
"name": "Groff",
"bytes": "7138"
},
{
"name": "HTML",
"bytes": "279754"
},
{
"name": "JavaScript",
"bytes": "1017625"
},
{
"name": "Makefile",
"bytes": "7062"
},
{
"name": "Python",
"bytes": "11886731"
},
{
"name": "Shell",
"bytes": "3741"
},
{
"name": "Smarty",
"bytes": "20972"
}
],
"symlink_target": ""
} |
"""Functional tests using WebTest."""
import httplib as http
import logging
import unittest
import markupsafe
import mock
from nose.tools import * # flake8: noqa (PEP8 asserts)
import re
from framework.mongo.utils import to_mongo_key
from framework.auth import cas
from framework.auth import exceptions as auth_exc
from framework.auth.core import Auth
from framework.auth.core import generate_verification_key
from tests.base import OsfTestCase
from tests.base import fake
from tests.factories import (UserFactory, AuthUserFactory, ProjectFactory, WatchConfigFactory, NodeFactory,
NodeWikiFactory, RegistrationFactory, UnregUserFactory, UnconfirmedUserFactory,
PrivateLinkFactory)
from website import settings, language
from website.util import web_url_for, api_url_for
logging.getLogger('website.project.model').setLevel(logging.ERROR)
def assert_in_html(member, container, **kwargs):
"""Looks for the specified member in markupsafe-escaped HTML output"""
member = markupsafe.escape(member)
return assert_in(member, container, **kwargs)
def assert_not_in_html(member, container, **kwargs):
"""Looks for the specified member in markupsafe-escaped HTML output"""
member = markupsafe.escape(member)
return assert_not_in(member, container, **kwargs)
class TestDisabledUser(OsfTestCase):
def setUp(self):
super(TestDisabledUser, self).setUp()
self.user = UserFactory()
self.user.set_password('Korben Dallas')
self.user.is_disabled = True
self.user.save()
def test_profile_disabled_returns_401(self):
res = self.app.get(self.user.url, expect_errors=True)
assert_equal(res.status_code, 410)
class TestAnUnregisteredUser(OsfTestCase):
def test_cant_see_profile_if_not_logged_in(self):
url = web_url_for('profile_view')
res = self.app.get(url)
res = res.follow()
assert_equal(res.status_code, 301)
assert_in('/login/', res.headers['Location'])
class TestAUser(OsfTestCase):
def setUp(self):
super(TestAUser, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
def test_can_see_profile_url(self):
res = self.app.get(self.user.url).maybe_follow()
assert_in(self.user.url, res)
def test_can_see_homepage(self):
# Goes to homepage
res = self.app.get('/').maybe_follow() # Redirects
assert_equal(res.status_code, 200)
def test_is_redirected_to_cas_if_not_logged_in_at_login_page(self):
res = self.app.get('/login/')
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_in('login?service=', location)
def test_is_redirected_to_dashboard_if_already_logged_in_at_login_page(self):
res = self.app.get('/login/', auth=self.user.auth)
assert_equal(res.status_code, 302)
res = res.follow(auth=self.user.auth)
assert_equal(res.request.path, '/dashboard/')
def test_register_page(self):
res = self.app.get('/register/')
assert_equal(res.status_code, 200)
def test_is_redirected_to_dashboard_if_already_logged_in_at_register_page(self):
res = self.app.get('/register/', auth=self.user.auth)
assert_equal(res.status_code, 302)
res = res.follow(auth=self.user.auth)
assert_equal(res.request.path, '/dashboard/')
def test_sees_projects_in_her_dashboard(self):
# the user already has a project
project = ProjectFactory(creator=self.user)
project.add_contributor(self.user)
project.save()
res = self.app.get('/myprojects/', auth=self.user.auth)
assert_in('Projects', res) # Projects heading
def test_logged_in_index_route_renders_home_template(self):
res = self.app.get('/', auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_in('My Projects', res) # Will change once home page populated
def test_logged_out_index_route_renders_landing_page(self):
res = self.app.get('/')
assert_in('Simplified Scholarly Collaboration', res)
def test_does_not_see_osffiles_in_user_addon_settings(self):
res = self.app.get('/settings/addons/', auth=self.auth, auto_follow=True)
assert_not_in('OSF Storage', res)
def test_sees_osffiles_in_project_addon_settings(self):
project = ProjectFactory(creator=self.user)
project.add_contributor(
self.user,
permissions=['read', 'write', 'admin'],
save=True)
res = self.app.get('/{0}/settings/'.format(project._primary_key), auth=self.auth, auto_follow=True)
assert_in('OSF Storage', res)
@unittest.skip("Can't test this, since logs are dynamically loaded")
def test_sees_log_events_on_watched_projects(self):
# Another user has a public project
u2 = UserFactory(username='[email protected]', fullname='Bono')
project = ProjectFactory(creator=u2, is_public=True)
project.add_contributor(u2)
auth = Auth(user=u2)
project.save()
# User watches the project
watch_config = WatchConfigFactory(node=project)
self.user.watch(watch_config)
self.user.save()
# Goes to her dashboard, already logged in
res = self.app.get('/dashboard/', auth=self.auth, auto_follow=True)
# Sees logs for the watched project
assert_in('Watched Projects', res) # Watched Projects header
# The log action is in the feed
assert_in(project.title, res)
def test_sees_correct_title_home_page(self):
# User goes to homepage
res = self.app.get('/', auto_follow=True)
title = res.html.title.string
# page title is correct
assert_equal('OSF | Home', title)
def test_sees_correct_title_on_dashboard(self):
# User goes to dashboard
res = self.app.get('/myprojects/', auth=self.auth, auto_follow=True)
title = res.html.title.string
assert_equal('OSF | My Projects', title)
def test_can_see_make_public_button_if_admin(self):
# User is a contributor on a project
project = ProjectFactory()
project.add_contributor(
self.user,
permissions=['read', 'write', 'admin'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_in('Make Public', res)
def test_cant_see_make_public_button_if_not_admin(self):
# User is a contributor on a project
project = ProjectFactory()
project.add_contributor(
self.user,
permissions=['read', 'write'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_not_in('Make Public', res)
def test_can_see_make_private_button_if_admin(self):
# User is a contributor on a project
project = ProjectFactory(is_public=True)
project.add_contributor(
self.user,
permissions=['read', 'write', 'admin'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_in('Make Private', res)
def test_cant_see_make_private_button_if_not_admin(self):
# User is a contributor on a project
project = ProjectFactory(is_public=True)
project.add_contributor(
self.user,
permissions=['read', 'write'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_not_in('Make Private', res)
def test_sees_logs_on_a_project(self):
project = ProjectFactory(is_public=True)
# User goes to the project's page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
# Can see log event
assert_in('created', res)
def test_no_wiki_content_message(self):
project = ProjectFactory(creator=self.user)
# Goes to project's wiki, where there is no content
res = self.app.get('/{0}/wiki/home/'.format(project._primary_key), auth=self.auth)
# Sees a message indicating no content
assert_in('No wiki content', res)
# Sees that edit panel is open by default when home wiki has no content
assert_in('panelsUsed: ["view", "menu", "edit"]', res)
def test_wiki_content(self):
project = ProjectFactory(creator=self.user)
wiki_page = 'home'
wiki_content = 'Kittens'
NodeWikiFactory(user=self.user, node=project, content=wiki_content, page_name=wiki_page)
res = self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
wiki_page,
), auth=self.auth)
assert_not_in('No wiki content', res)
assert_in(wiki_content, res)
assert_in('panelsUsed: ["view", "menu"]', res)
def test_wiki_page_name_non_ascii(self):
project = ProjectFactory(creator=self.user)
non_ascii = to_mongo_key('WöRlÐé')
self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
non_ascii
), auth=self.auth, expect_errors=True)
project.update_node_wiki(non_ascii, 'new content', Auth(self.user))
assert_in(non_ascii, project.wiki_pages_current)
def test_noncontributor_cannot_see_wiki_if_no_content(self):
user2 = UserFactory()
# user2 creates a public project and adds no wiki content
project = ProjectFactory(creator=user2, is_public=True)
# self navigates to project
res = self.app.get(project.url).maybe_follow()
# Should not see wiki widget (since non-contributor and no content)
assert_not_in('No wiki content', res)
def test_wiki_does_not_exist(self):
project = ProjectFactory(creator=self.user)
res = self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
'not a real page yet',
), auth=self.auth, expect_errors=True)
assert_in('No wiki content', res)
def test_sees_own_profile(self):
res = self.app.get('/profile/', auth=self.auth)
td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
td2 = td1.find_next_sibling('td')
assert_equal(td2.text, self.user.display_absolute_url)
def test_sees_another_profile(self):
user2 = UserFactory()
res = self.app.get(user2.url, auth=self.auth)
td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
td2 = td1.find_next_sibling('td')
assert_equal(td2.text, user2.display_absolute_url)
class TestComponents(OsfTestCase):
def setUp(self):
super(TestComponents, self).setUp()
self.user = AuthUserFactory()
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
self.project.add_contributor(contributor=self.user, auth=self.consolidate_auth)
        # A non-project component
self.component = NodeFactory(
category='hypothesis',
creator=self.user,
parent=self.project,
)
self.component.save()
self.component.set_privacy('public', self.consolidate_auth)
self.component.set_privacy('private', self.consolidate_auth)
self.project.save()
self.project_url = self.project.web_url_for('view_project')
def test_sees_parent(self):
res = self.app.get(self.component.url, auth=self.user.auth).maybe_follow()
parent_title = res.html.find_all('h2', class_='node-parent-title')
assert_equal(len(parent_title), 1)
assert_in(self.project.title, parent_title[0].text) # Bs4 will handle unescaping HTML here
def test_delete_project(self):
res = self.app.get(
self.component.url + 'settings/',
auth=self.user.auth
).maybe_follow()
assert_in(
'Delete {0}'.format(self.component.project_or_component),
res
)
def test_cant_delete_project_if_not_admin(self):
non_admin = AuthUserFactory()
self.component.add_contributor(
non_admin,
permissions=['read', 'write'],
auth=self.consolidate_auth,
save=True,
)
res = self.app.get(
self.component.url + 'settings/',
auth=non_admin.auth
).maybe_follow()
assert_not_in(
'Delete {0}'.format(self.component.project_or_component),
res
)
def test_can_configure_comments_if_admin(self):
res = self.app.get(
self.component.url + 'settings/',
auth=self.user.auth,
).maybe_follow()
assert_in('Commenting', res)
def test_cant_configure_comments_if_not_admin(self):
non_admin = AuthUserFactory()
self.component.add_contributor(
non_admin,
permissions=['read', 'write'],
auth=self.consolidate_auth,
save=True,
)
res = self.app.get(
self.component.url + 'settings/',
auth=non_admin.auth
).maybe_follow()
assert_not_in('Commenting', res)
def test_components_should_have_component_list(self):
res = self.app.get(self.component.url, auth=self.user.auth)
assert_in('Components', res)
class TestPrivateLinkView(OsfTestCase):
def setUp(self):
super(TestPrivateLinkView, self).setUp()
self.user = AuthUserFactory() # Is NOT a contributor
self.project = ProjectFactory(is_public=False)
self.link = PrivateLinkFactory(anonymous=True)
self.link.nodes.append(self.project)
self.link.save()
self.project_url = self.project.web_url_for('view_project')
def test_anonymous_link_hide_contributor(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_in("Anonymous Contributors", res.body)
assert_not_in(self.user.fullname, res)
def test_anonymous_link_hides_citations(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_not_in('Citation:', res)
def test_no_warning_for_read_only_user_with_valid_link(self):
link2 = PrivateLinkFactory(anonymous=False)
link2.nodes.append(self.project)
link2.save()
self.project.add_contributor(
self.user,
permissions=['read'],
save=True,
)
res = self.app.get(self.project_url, {'view_only': link2.key},
auth=self.user.auth)
assert_not_in(
"is being viewed through a private, view-only link. "
"Anyone with the link can view this project. Keep "
"the link safe.",
res.body
)
def test_no_warning_for_read_only_user_with_invalid_link(self):
self.project.add_contributor(
self.user,
permissions=['read'],
save=True,
)
res = self.app.get(self.project_url, {'view_only': "not_valid"},
auth=self.user.auth)
assert_not_in(
"is being viewed through a private, view-only link. "
"Anyone with the link can view this project. Keep "
"the link safe.",
res.body
)
class TestMergingAccounts(OsfTestCase):
def setUp(self):
super(TestMergingAccounts, self).setUp()
self.user = UserFactory.build()
self.user.fullname = "tess' test string"
self.user.set_password('science')
self.user.save()
self.dupe = UserFactory.build()
self.dupe.set_password('example')
self.dupe.save()
def test_merged_user_is_not_shown_as_a_contributor(self):
project = ProjectFactory(is_public=True)
# Both the master and dupe are contributors
project.add_contributor(self.dupe, log=False)
project.add_contributor(self.user, log=False)
project.save()
# At the project page, both are listed as contributors
res = self.app.get(project.url).maybe_follow()
assert_in_html(self.user.fullname, res)
assert_in_html(self.dupe.fullname, res)
# The accounts are merged
self.user.merge_user(self.dupe)
self.user.save()
# Now only the master user is shown at the project page
res = self.app.get(project.url).maybe_follow()
assert_in_html(self.user.fullname, res)
assert_true(self.dupe.is_merged)
assert_not_in(self.dupe.fullname, res)
def test_merged_user_has_alert_message_on_profile(self):
# Master merges dupe
self.user.merge_user(self.dupe)
self.user.save()
# At the dupe user's profile there is an alert message at the top
# indicating that the user is merged
res = self.app.get('/profile/{0}/'.format(self.dupe._primary_key)).maybe_follow()
assert_in('This account has been merged', res)
# FIXME: These affect search in the development environment, so Solr needs to be
# migrated after running. Remove this side effect.
@unittest.skipIf(not settings.SEARCH_ENGINE, 'Skipping because search is disabled')
class TestSearching(OsfTestCase):
'''Test searching using the search bar. NOTE: These may affect the
Solr database. May need to migrate after running these.
'''
def setUp(self):
super(TestSearching, self).setUp()
import website.search.search as search
search.delete_all()
self.user = AuthUserFactory()
self.auth = self.user.auth
@unittest.skip(reason='¯\_(ツ)_/¯ knockout.')
def test_a_user_from_home_page(self):
user = UserFactory()
# Goes to home page
res = self.app.get('/').maybe_follow()
# Fills search form
form = res.forms['searchBar']
form['q'] = user.fullname
res = form.submit().maybe_follow()
# The username shows as a search result
assert_in(user.fullname, res)
@unittest.skip(reason='¯\_(ツ)_/¯ knockout.')
def test_a_public_project_from_home_page(self):
project = ProjectFactory(title='Foobar Project', is_public=True)
# Searches a part of the name
res = self.app.get('/').maybe_follow()
project.reload()
form = res.forms['searchBar']
form['q'] = 'Foobar'
res = form.submit().maybe_follow()
# A link to the project is shown as a result
assert_in('Foobar Project', res)
@unittest.skip(reason='¯\_(ツ)_/¯ knockout.')
def test_a_public_component_from_home_page(self):
component = NodeFactory(title='Foobar Component', is_public=True)
# Searches a part of the name
res = self.app.get('/').maybe_follow()
component.reload()
form = res.forms['searchBar']
form['q'] = 'Foobar'
res = form.submit().maybe_follow()
# A link to the component is shown as a result
assert_in('Foobar Component', res)
class TestShortUrls(OsfTestCase):
def setUp(self):
super(TestShortUrls, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
        # A non-project component
self.component = NodeFactory(category='hypothesis', creator=self.user)
self.project.nodes.append(self.component)
self.component.save()
# Hack: Add some logs to component; should be unnecessary pending
# improvements to factories from @rliebz
self.component.set_privacy('public', auth=self.consolidate_auth)
self.component.set_privacy('private', auth=self.consolidate_auth)
self.wiki = NodeWikiFactory(user=self.user, node=self.component)
def _url_to_body(self, url):
return self.app.get(
url,
auth=self.auth
).maybe_follow(
auth=self.auth,
).normal_body
def test_project_url(self):
assert_equal(
self._url_to_body(self.project.deep_url),
self._url_to_body(self.project.url),
)
def test_component_url(self):
assert_equal(
self._url_to_body(self.component.deep_url),
self._url_to_body(self.component.url),
)
def test_wiki_url(self):
assert_equal(
self._url_to_body(self.wiki.deep_url),
self._url_to_body(self.wiki.url),
)
class TestClaiming(OsfTestCase):
def setUp(self):
super(TestClaiming, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
def test_correct_name_shows_in_contributor_list(self):
name1, email = fake.name(), fake.email()
UnregUserFactory(fullname=name1, email=email)
name2, email = fake.name(), fake.email()
# Added with different name
self.project.add_unregistered_contributor(fullname=name2,
email=email, auth=Auth(self.referrer))
self.project.save()
res = self.app.get(self.project.url, auth=self.referrer.auth)
# Correct name is shown
assert_in_html(name2, res)
assert_not_in(name1, res)
def test_user_can_set_password_on_claim_page(self):
name, email = fake.name(), fake.email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
self.project.reload()
assert_in('Set Password', res)
form = res.forms['setPasswordForm']
#form['username'] = new_user.username #Removed as long as E-mail can't be updated.
form['password'] = 'killerqueen'
form['password2'] = 'killerqueen'
res = form.submit().follow()
new_user.reload()
assert_true(new_user.check_password('killerqueen'))
def test_sees_is_redirected_if_user_already_logged_in(self):
name, email = fake.name(), fake.email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
existing = AuthUserFactory()
claim_url = new_user.get_claim_url(self.project._primary_key)
# a user is already logged in
res = self.app.get(claim_url, auth=existing.auth, expect_errors=True)
assert_equal(res.status_code, 302)
def test_unregistered_users_names_are_project_specific(self):
name1, name2, email = fake.name(), fake.name(), fake.email()
project2 = ProjectFactory(creator=self.referrer)
# different projects use different names for the same unreg contributor
self.project.add_unregistered_contributor(
email=email,
fullname=name1,
auth=Auth(self.referrer)
)
self.project.save()
project2.add_unregistered_contributor(
email=email,
fullname=name2,
auth=Auth(self.referrer)
)
project2.save()
self.app.authenticate(*self.referrer.auth)
# Each project displays a different name in the contributor list
res = self.app.get(self.project.url)
assert_in_html(name1, res)
res2 = self.app.get(project2.url)
assert_in_html(name2, res2)
@unittest.skip("as long as E-mails cannot be changed")
def test_cannot_set_email_to_a_user_that_already_exists(self):
reg_user = UserFactory()
name, email = fake.name(), fake.email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
# Goes to claim url and successfully claims account
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
self.project.reload()
assert_in('Set Password', res)
form = res.forms['setPasswordForm']
# Fills out an email that is the username of another user
form['username'] = reg_user.username
form['password'] = 'killerqueen'
form['password2'] = 'killerqueen'
res = form.submit().maybe_follow(expect_errors=True)
assert_in(
language.ALREADY_REGISTERED.format(email=reg_user.username),
res
)
def test_correct_display_name_is_shown_at_claim_page(self):
original_name = fake.name()
unreg = UnregUserFactory(fullname=original_name)
different_name = fake.name()
new_user = self.project.add_unregistered_contributor(
email=unreg.username,
fullname=different_name,
auth=Auth(self.referrer),
)
self.project.save()
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
# Correct name (different_name) should be on page
assert_in_html(different_name, res)
class TestConfirmingEmail(OsfTestCase):
def setUp(self):
super(TestConfirmingEmail, self).setUp()
self.user = UnconfirmedUserFactory()
self.confirmation_url = self.user.get_confirmation_url(
self.user.username,
external=False,
)
self.confirmation_token = self.user.get_confirmation_token(
self.user.username
)
def test_cannot_remove_another_user_email(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
url = api_url_for('update_user')
header = {'id': user1.username, 'emails': [{'address': user1.username}]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
    def test_cannot_make_primary_email_for_another_user(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
email = '[email protected]'
user1.emails.append(email)
user1.save()
url = api_url_for('update_user')
header = {'id': user1.username,
'emails': [{'address': user1.username, 'primary': False, 'confirmed': True},
{'address': email, 'primary': True, 'confirmed': True}
]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
    def test_cannot_add_email_for_another_user(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
email = '[email protected]'
url = api_url_for('update_user')
header = {'id': user1.username,
'emails': [{'address': user1.username, 'primary': True, 'confirmed': True},
{'address': email, 'primary': False, 'confirmed': False}
]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_error_page_if_confirm_link_is_used(self):
self.user.confirm_email(self.confirmation_token)
self.user.save()
res = self.app.get(self.confirmation_url, expect_errors=True)
assert_in(auth_exc.InvalidTokenError.message_short, res)
assert_equal(res.status_code, http.BAD_REQUEST)
class TestClaimingAsARegisteredUser(OsfTestCase):
def setUp(self):
super(TestClaimingAsARegisteredUser, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
name, email = fake.name(), fake.email()
self.user = self.project.add_unregistered_contributor(
fullname=name,
email=email,
auth=Auth(user=self.referrer)
)
self.project.save()
def test_claim_user_registered_with_correct_password(self):
reg_user = AuthUserFactory() # NOTE: AuthUserFactory sets password as 'password'
url = self.user.get_claim_url(self.project._primary_key)
# Follow to password re-enter page
res = self.app.get(url, auth=reg_user.auth).follow(auth=reg_user.auth)
# verify that the "Claim Account" form is returned
assert_in('Claim Contributor', res.body)
form = res.forms['claimContributorForm']
form['password'] = 'password'
res = form.submit(auth=reg_user.auth).follow(auth=reg_user.auth)
self.project.reload()
self.user.reload()
# user is now a contributor to the project
assert_in(reg_user._primary_key, self.project.contributors)
        # the unregistered user (self.user) is removed as a contributor
        assert_not_in(self.user._primary_key, self.project.contributors)
        # and their unclaimed record for the project has been deleted
assert_not_in(self.project._primary_key, self.user.unclaimed_records)
class TestExplorePublicActivity(OsfTestCase):
def setUp(self):
super(TestExplorePublicActivity, self).setUp()
self.project = ProjectFactory(is_public=True)
self.registration = RegistrationFactory(project=self.project)
self.private_project = ProjectFactory(title="Test private project")
@unittest.skip("Can't test this, since hiding newest project page https://github.com/CenterForOpenScience/osf.io/commit/c50d436cbb6bd9fbe2f0cbbc3724c05ed1ccb94e")
@mock.patch('website.discovery.views.KeenClient')
def test_newest_public_project_and_registrations_show_in_explore_activity(self, mock_client):
mock_client.count.return_value = {
'result': [
{
'result': 5,
'node.id': self.project._id
},
{
'result': 5,
'node.id': self.registration._id
}
]
}
mock_client.count_unique.return_value = {
'result': [
{
'result': 2,
'node.id': self.project._id
},
{
'result': 2,
'node.id': self.registration._id
}
]
}
url = self.project.web_url_for('activity')
res = self.app.get(url)
assert_in(str(self.project.title), res)
assert_in(str(self.project.date_created.date()), res)
assert_in(str(self.registration.title), res)
assert_in(str(self.registration.registered_date.date()), res)
assert_not_in(str(self.private_project.title), res)
class TestResendConfirmation(OsfTestCase):
def setUp(self):
super(TestResendConfirmation, self).setUp()
self.unconfirmed_user = UnconfirmedUserFactory()
self.confirmed_user = UserFactory()
self.get_url = web_url_for('resend_confirmation_get')
self.post_url = web_url_for('resend_confirmation_post')
    # test that the resend confirmation page loads correctly
def test_resend_confirmation_get(self):
res = self.app.get(self.get_url)
assert_equal(res.status_code, 200)
assert_in('Resend Confirmation', res.body)
assert_in('resendForm', res.forms)
# test that unconfirmed user can receive resend confirmation email
@mock.patch('framework.auth.views.mails.send_mail')
def test_can_receive_resend_confirmation_email(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = self.unconfirmed_user.unconfirmed_emails[0]
res = form.submit()
# check email, request and response
assert_true(mock_send_mail.called)
assert_equal(res.status_code, 200)
assert_equal(res.request.path, self.post_url)
assert_in_html('If there is an OSF account', res)
# test that confirmed user cannot receive resend confirmation email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_resend_confirmation_email_1(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = self.confirmed_user.emails[0]
res = form.submit()
# check email, request and response
assert_false(mock_send_mail.called)
assert_equal(res.status_code, 200)
assert_equal(res.request.path, self.post_url)
assert_in_html('has already been confirmed', res)
# test that non-existing user cannot receive resend confirmation email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_resend_confirmation_email_2(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = '[email protected]'
res = form.submit()
# check email, request and response
assert_false(mock_send_mail.called)
assert_equal(res.status_code, 200)
assert_equal(res.request.path, self.post_url)
assert_in_html('If there is an OSF account', res)
# test that user cannot submit resend confirmation request too quickly
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_resend_confirmation_twice_quickly(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = self.unconfirmed_user.email
res = form.submit()
res = form.submit()
# check request and response
assert_equal(res.status_code, 200)
assert_in_html('Please wait', res)
class TestForgotPassword(OsfTestCase):
def setUp(self):
super(TestForgotPassword, self).setUp()
self.user = UserFactory()
self.get_url = web_url_for('forgot_password_get')
self.post_url = web_url_for('forgot_password_post')
self.user.verification_key = None
# test that forgot password page is loaded correctly
def test_get_forgot_password(self):
res = self.app.get(self.get_url)
assert_equal(res.status_code, 200)
assert_in('Forgot Password', res.body)
assert_in('forgotPasswordForm', res.forms)
# Regression test for https://github.com/CenterForOpenScience/osf.io/issues/1320
# test that existing user can receive reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_can_receive_reset_password_email(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = self.user.username
res = form.submit()
# check mail was sent
assert_true(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key(OSF) is set
self.user.reload()
assert_not_equal(self.user.verification_key, None)
# test that non-existing user cannot receive reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_reset_password_email(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = 'fake' + self.user.username
res = form.submit()
# check mail was not sent
assert_false(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key(OSF) is not set
self.user.reload()
assert_equal(self.user.verification_key, None)
# test that user cannot submit forgot password request too quickly
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_reset_password_twice_quickly(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = self.user.username
res = form.submit()
res = form.submit()
# check http 200 response
assert_equal(res.status_code, 200)
# check push notification
assert_in_html('Please wait', res)
assert_not_in_html('If there is an OSF account', res)
class TestResetPassword(OsfTestCase):
def setUp(self):
super(TestResetPassword, self).setUp()
self.user = AuthUserFactory()
self.another_user = AuthUserFactory()
self.osf_key = generate_verification_key()
self.user.verification_key = self.osf_key
self.user.save()
self.cas_key = None
self.get_url = web_url_for('reset_password_get', verification_key=self.osf_key)
self.get_url_invalid_key = web_url_for('reset_password_get', verification_key=generate_verification_key())
# load reset password page if verification_key is valid
def test_reset_password_view_returns_200(self):
res = self.app.get(self.get_url)
assert_equal(res.status_code, 200)
# raise http 400 error if verification_key(OSF) is invalid
def test_reset_password_view_raises_400(self):
res = self.app.get(self.get_url_invalid_key, expect_errors=True)
assert_equal(res.status_code, 400)
# successfully reset password if osf verification_key(OSF) is valid and form is valid
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_can_reset_password_if_form_success(self, mock_service_validate):
# load reset password page and submit email
res = self.app.get(self.get_url)
form = res.forms['resetPasswordForm']
form['password'] = 'newpassword'
form['password2'] = 'newpassword'
res = form.submit()
# check request URL is /resetpassword with verification_key(OSF)
request_url_path = res.request.path
assert_in('resetpassword', request_url_path)
assert_in(self.user.verification_key, request_url_path)
# check verification_key(OSF) is destroyed and a new verification_key(CAS) is in place
self.user.reload()
self.cas_key = self.user.verification_key
assert_not_equal(self.cas_key, self.osf_key)
# check redirection to CAS login with username and the new verification_key(CAS)
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_true('login?service=' in location)
assert_true('username={}'.format(self.user.username) in location)
assert_true('verification_key={}'.format(self.user.verification_key) in location)
# check if password was updated
self.user.reload()
assert_true(self.user.check_password('newpassword'))
# check if verification_key(CAS) is destroyed
mock_service_validate.return_value = cas.CasResponse(
authenticated=True,
user=self.user._primary_key,
attributes={'accessToken': fake.md5()}
)
ticket = fake.md5()
service_url = 'http://accounts.osf.io/?ticket=' + ticket
resp = cas.make_response_from_ticket(ticket, service_url)
assert_not_equal(self.user.verification_key, self.cas_key)
    # a logged-in user should be automatically logged out before resetting the password
def test_reset_password_logs_out_user(self):
# visit reset password link while another user is logged in
res = self.app.get(self.get_url, auth=self.another_user.auth)
# check redirection to CAS logout
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_not_in('reauth', location)
assert_in('logout?service=', location)
assert_in('resetpassword', location)
class TestAUserProfile(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
self.me = AuthUserFactory()
self.project = ProjectFactory(creator=self.me, is_public=True, title=fake.bs())
self.component = NodeFactory(creator=self.me, project=self.project, is_public=True, title=fake.bs())
# regression test for https://github.com/CenterForOpenScience/osf.io/issues/2623
def test_has_public_projects_and_components(self):
# I go to my own profile
url = web_url_for('profile_view_id', uid=self.me._primary_key)
# I see the title of both my project and component
res = self.app.get(url, auth=self.me.auth)
assert_in_html(self.component.title, res)
assert_in_html(self.project.title, res)
# Another user can also see my public project and component
url = web_url_for('profile_view_id', uid=self.me._primary_key)
# I see the title of both my project and component
res = self.app.get(url, auth=self.user.auth)
assert_in_html(self.component.title, res)
assert_in_html(self.project.title, res)
def test_user_no_public_projects_or_components(self):
# I go to other user's profile
url = web_url_for('profile_view_id', uid=self.user._primary_key)
# User has no public components/projects
res = self.app.get(url, auth=self.me.auth)
assert_in('This user has no public projects', res)
assert_in('This user has no public components', res)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "496e7450cadd4d1c2e3aab1541c472b4",
"timestamp": "",
"source": "github",
"line_count": 1097,
"max_line_length": 166,
"avg_line_length": 39.249772105742935,
"alnum_prop": 0.6264951111317556,
"repo_name": "emetsger/osf.io",
"id": "6e4b96363d04b0d6d6b163e6c9382ad7f3cf0c4c",
"size": "43118",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/webtest_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "160226"
},
{
"name": "HTML",
"bytes": "121662"
},
{
"name": "JavaScript",
"bytes": "1672685"
},
{
"name": "Mako",
"bytes": "660837"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "6189751"
}
],
"symlink_target": ""
} |
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class GetQuestionnaireExecutionsResponse:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'result': 'GetQuestionnaireExecutionsResult',
'status': 'str',
'error_message': 'str',
'composedOn': 'int'
}
self.result = None # GetQuestionnaireExecutionsResult
self.status = None # str
self.error_message = None # str
self.composedOn = None # int
| {
"content_hash": "3de334cef94f906a0326b9dfdba9c35b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 31.675675675675677,
"alnum_prop": 0.6578498293515358,
"repo_name": "liosha2007/temporary-groupdocs-python3-sdk",
"id": "a7086a543ef57c4b42d1f50d19881c45f16026c2",
"size": "1194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "groupdocs/models/GetQuestionnaireExecutionsResponse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "992590"
}
],
"symlink_target": ""
} |
import sys
import random
if sys.version >= '3':
basestring = unicode = str
long = int
from functools import reduce
from html import escape as html_escape
else:
from itertools import imap as map
from cgi import escape as html_escape
import warnings
from pyspark import copy_func, since, _NoValue
from pyspark.rdd import RDD, _load_from_socket, _local_iterator_from_socket, \
ignore_unicode_prefix
from pyspark.serializers import BatchedSerializer, PickleSerializer, \
UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import *
from pyspark.sql.pandas.conversion import PandasConversionMixin
from pyspark.sql.pandas.map_ops import PandasMapOpsMixin
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SparkSession`::
people = spark.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the :class:`DataFrame`, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SparkSession
people = spark.read.parquet("...")
department = spark.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
        # Check whether _repr_html_ is supported or not; we use it to avoid calling _jdf twice
        # by __repr__ and _repr_html_ while eager evaluation is enabled.
self._support_repr_html = False
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
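            # javaToPython() yields an RDD of pickled rows; wrap it with the matching
            # batched pickle deserializer so the Python side sees Row objects.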
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
@ignore_unicode_prefix
@since(1.3)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
>>> df.toJSON().first()
u'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
@since(1.3)
def registerTempTable(self, name):
"""Registers this DataFrame as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
.. note:: Deprecated in 2.0, use createOrReplaceTempView instead.
"""
warnings.warn(
"Deprecated in 2.0, use createOrReplaceTempView instead.", DeprecationWarning)
self._jdf.createOrReplaceTempView(name)
@since(2.0)
def createTempView(self, name):
"""Creates a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
@since(2.0)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
@since(2.1)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this :class:`DataFrame`.
The lifetime of this temporary view is tied to this Spark application.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
@since(2.2)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
@since(1.4)
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
:return: :class:`DataFrameWriter`
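        A small usage sketch; ``path`` below is a hypothetical output directory, so the
        write itself is skipped:
        >>> path = "/tmp/df_out"  # hypothetical output directory
        >>> df.write.mode("overwrite").parquet(path)  # doctest: +SKIP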
"""
return DataFrameWriter(self)
@property
@since(2.0)
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. note:: Evolving.
:return: :class:`DataStreamWriter`
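        A small usage sketch with the streaming ``sdf`` fixture; the query name is
        illustrative and the query is not actually started here:
        >>> sdf.writeStream.format("memory").queryName("sdf_query").start()  # doctest: +SKIP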
"""
return DataStreamWriter(self)
@property
@since(1.3)
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse datatype from schema. %s" % e)
return self._schema
@since(1.3)
def printSchema(self):
"""Prints out the schema in the tree format.
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
@since(1.3)
def explain(self, extended=None, mode=None):
"""Prints the (logical and physical) plans to the console for debugging purpose.
:param extended: boolean, default ``False``. If ``False``, prints only the physical plan.
:param mode: specifies the expected output format of plans.
* ``simple``: Print only a physical plan.
* ``extended``: Print both logical and physical plans.
* ``codegen``: Print a physical plan and generated codes if they are available.
* ``cost``: Print a logical plan and statistics if they are available.
* ``formatted``: Split explain output into two sections: a physical plan outline \
and node details.
>>> df.explain()
== Physical Plan ==
*(1) Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> df.explain(mode="formatted")
== Physical Plan ==
* Scan ExistingRDD (1)
(1) Scan ExistingRDD [codegen id : 1]
Output [2]: [age#0, name#1]
...
.. versionchanged:: 3.0.0
Added optional argument `mode` to specify the expected output format of plans.
"""
if extended is not None and mode is not None:
raise Exception("extended and mode can not be specified simultaneously")
# For the no argument case: df.explain()
is_no_argument = extended is None and mode is None
# For the cases below:
# explain(True)
# explain(extended=False)
is_extended_case = extended is not None and isinstance(extended, bool)
# For the mode specified: df.explain(mode="formatted")
is_mode_case = mode is not None and isinstance(mode, basestring)
if not is_no_argument and not (is_extended_case or is_mode_case):
if extended is not None:
err_msg = "extended (optional) should be provided as bool" \
", got {0}".format(type(extended))
else: # For mode case
err_msg = "mode (optional) should be provided as str, got {0}".format(type(mode))
raise TypeError(err_msg)
# Sets an explain mode depending on a given argument
if is_no_argument:
explain_mode = "simple"
elif is_extended_case:
explain_mode = "extended" if extended else "simple"
elif is_mode_case:
explain_mode = mode
print(self._sc._jvm.PythonSQLUtils.explainString(self._jdf.queryExecution(), explain_mode))
@since(2.4)
def exceptAll(self, other):
"""Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but
not in another :class:`DataFrame` while preserving duplicates.
This is equivalent to `EXCEPT ALL` in SQL.
>>> df1 = spark.createDataFrame(
... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.exceptAll(df2).show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| a| 2|
| c| 4|
+---+---+
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx)
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
@since(2.0)
def isStreaming(self):
"""Returns ``True`` if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. note:: Evolving
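        For example, a batch :class:`DataFrame` such as ``df`` reports ``False``:
        >>> df.isStreaming  # doctest: +SKIP
        False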
"""
return self._jdf.isStreaming()
@since(1.3)
def show(self, n=20, truncate=True, vertical=False):
"""Prints the first ``n`` rows to the console.
:param n: Number of rows to show.
:param truncate: If set to ``True``, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length ``truncate``
            and aligns cells right.
:param vertical: If set to ``True``, print output rows vertically (one line
per column value).
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
>>> df.show(vertical=True)
-RECORD 0-----
age | 2
name | Alice
-RECORD 1-----
age | 5
name | Bob
"""
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20, vertical))
else:
print(self._jdf.showString(n, int(truncate), vertical))
def __repr__(self):
if not self._support_repr_html and self.sql_ctx._conf.isReplEagerEvalEnabled():
vertical = False
return self._jdf.showString(
self.sql_ctx._conf.replEagerEvalMaxNumRows(),
self.sql_ctx._conf.replEagerEvalTruncate(), vertical)
else:
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
def _repr_html_(self):
"""Returns a :class:`DataFrame` with html code when you enabled eager evaluation
by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are
using support eager evaluation with HTML.
"""
if not self._support_repr_html:
self._support_repr_html = True
if self.sql_ctx._conf.isReplEagerEvalEnabled():
max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)
sock_info = self._jdf.getRowsToPython(
max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate())
rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
head = rows[0]
row_data = rows[1:]
has_more_data = len(row_data) > max_num_rows
row_data = row_data[:max_num_rows]
html = "<table border='1'>\n"
# generate table head
html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: html_escape(x), head))
# generate table rows
for row in row_data:
html += "<tr><td>%s</td></tr>\n" % "</td><td>".join(
map(lambda x: html_escape(x), row))
html += "</table>\n"
if has_more_data:
html += "only showing top %d %s\n" % (
max_num_rows, "row" if max_num_rows == 1 else "rows")
return html
else:
return None
@since(2.1)
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this :class:`DataFrame`, which is especially useful in iterative algorithms
where the plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with :meth:`SparkContext.setCheckpointDir`.
:param eager: Whether to checkpoint this :class:`DataFrame` immediately
.. note:: Experimental
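        A small usage sketch; a checkpoint directory must be set first, and the path
        used below is only a placeholder:
        >>> # the checkpoint directory below is only a placeholder
        >>> sc.setCheckpointDir("/tmp/checkpoints")  # doctest: +SKIP
        >>> df.checkpoint()  # doctest: +SKIP
        DataFrame[age: int, name: string]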
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
@since(2.3)
def localCheckpoint(self, eager=True):
"""Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this :class:`DataFrame`, which is especially useful in
iterative algorithms where the plan may grow exponentially. Local checkpoints are
stored in the executors using the caching subsystem and therefore they are not reliable.
:param eager: Whether to checkpoint this :class:`DataFrame` immediately
.. note:: Experimental
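        A small usage sketch; unlike :meth:`checkpoint`, no checkpoint directory is required:
        >>> df.localCheckpoint()  # doctest: +SKIP
        DataFrame[age: int, name: string]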
"""
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sql_ctx)
@since(2.1)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
:param eventTime: the name of the column that contains the event time of the row.
        :param delayThreshold: the minimum delay to wait for data to arrive late, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
.. note:: Evolving
>>> sdf.select('name', sdf.time.cast('timestamp')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
@since(2.2)
def hint(self, name, *parameters):
"""Specifies some hint on the current :class:`DataFrame`.
:param name: A name of the hint.
:param parameters: Optional parameters.
:return: :class:`DataFrame`
>>> df.join(df2.hint("broadcast"), "name").show()
+----+---+------+
|name|age|height|
+----+---+------+
| Bob| 5| 85|
+----+---+------+
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(name)))
allowed_types = (basestring, list, float, int)
for p in parameters:
if not isinstance(p, allowed_types):
raise TypeError(
"all parameters should be in {0}, got {1} of type {2}".format(
allowed_types, p, type(p)))
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
>>> df.count()
2
"""
return int(self._jdf.count())
@ignore_unicode_prefix
@since(1.3)
def collect(self):
"""Returns all the records as a list of :class:`Row`.
>>> df.collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectToPython()
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
@ignore_unicode_prefix
@since(2.0)
def toLocalIterator(self, prefetchPartitions=False):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this
:class:`DataFrame`. With prefetch it may consume up to the memory of the 2 largest
partitions.
:param prefetchPartitions: If Spark should pre-fetch the next partition
before it is needed.
>>> list(df.toLocalIterator())
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.toPythonIterator(prefetchPartitions)
return _local_iterator_from_socket(sock_info, BatchedSerializer(PickleSerializer()))
@ignore_unicode_prefix
@since(1.3)
def limit(self, num):
"""Limits the result count to the number specified.
>>> df.limit(1).collect()
[Row(age=2, name=u'Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
>>> df.take(2)
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
return self.limit(num).collect()
@ignore_unicode_prefix
@since(3.0)
def tail(self, num):
"""
Returns the last ``num`` rows as a :class:`list` of :class:`Row`.
Running tail requires moving data into the application's driver process, and doing so with
a very large ``num`` can crash the driver process with OutOfMemoryError.
>>> df.tail(1)
[Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc):
sock_info = self._jdf.tailToPython(num)
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
@since(1.3)
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
@since(1.3)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
        This is a shorthand for ``df.rdd.foreachPartition()``.
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
@since(1.3)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (`MEMORY_AND_DISK`).
.. note:: The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0.
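        A small usage sketch; :func:`cache` returns the :class:`DataFrame` itself, so calls chain:
        >>> df.cache()  # doctest: +SKIP
        DataFrame[age: int, name: string]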
"""
self.is_cached = True
self._jdf.cache()
return self
@since(1.3)
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
        If no storage level is specified, it defaults to (`MEMORY_AND_DISK`).
.. note:: The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0.
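        A small usage sketch with an explicit storage level:
        >>> df.persist(StorageLevel.DISK_ONLY)  # doctest: +SKIP
        DataFrame[age: int, name: string]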
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
@property
@since(2.1)
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
@since(1.3)
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. note:: `blocking` default has changed to ``False`` to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
@since(1.4)
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
:param numPartitions: int, to specify the target number of partitions
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions. If a larger number of partitions is requested,
it will stay at the current number of partitions.
However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
this may result in your computation taking place on fewer nodes than
you like (e.g. one node in the case of numPartitions = 1). To avoid this,
you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever
the current partitioning is).
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
@since(1.3)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is hash partitioned.
:param numPartitions:
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
@since("2.4.0")
def repartitionByRange(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is range partitioned.
:param numPartitions:
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
At least one partition-by expression must be specified.
When no explicit sort order is specified, "ascending nulls first" is assumed.
Note that due to performance reasons this method uses sampling to estimate the ranges.
Hence, the output may not be consistent, since sampling can return different values.
The sample size can be controlled by the config
`spark.sql.execution.rangeExchange.sampleSizePerPartition`.
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions()
2
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.repartitionByRange(1, "age").rdd.getNumPartitions()
1
>>> data = df.repartitionByRange("age")
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
                raise ValueError("At least one partition-by expression must be specified.")
else:
return DataFrame(
self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions,) + cols
return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int, string or Column")
@since(1.3)
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
@since(1.3)
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
:param withReplacement: Sample with replacement or not (default ``False``).
:param fraction: Fraction of rows to generate, range [0.0, 1.0].
:param seed: Seed for sampling (default a random seed).
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
.. note:: `fraction` is required and, `withReplacement` and `seed` are optional.
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
        # sample(fraction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = long(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx)
@since(1.5)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
:param col: column that defines strata
:param fractions:
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
:param seed: random seed
:return: a new :class:`DataFrame` that represents the stratified sample
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 3|
| 1| 6|
+---+-----+
>>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count()
33
.. versionchanged:: 3.0
Added sampling by a column of :class:`Column`
"""
if isinstance(col, basestring):
col = Column(col)
elif not isinstance(col, Column):
raise ValueError("col must be a string or a column, but got %r" % type(col))
if not isinstance(fractions, dict):
raise ValueError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, long, basestring)):
raise ValueError("key must be float, int, long, or string, but got %r" % type(k))
fractions[k] = float(v)
col = col._jc
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
@since(1.4)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
:param weights: list of doubles as weights with which to split the :class:`DataFrame`.
Weights will be normalized if they don't sum up to 1.0.
:param seed: The seed for sampling.
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
2
>>> splits[1].count()
2
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), long(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
@since(1.3)
def dtypes(self):
"""Returns all column names and their data types as a list.
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
@since(1.3)
def columns(self):
"""Returns all column names as a list.
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
@since(2.3)
def colRegex(self, colName):
"""
        Selects a column based on the column name specified as a regex and returns it
as :class:`Column`.
:param colName: string, column name specified as a regex.
>>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
>>> df.select(df.colRegex("`(Col1)?+.+`")).show()
+----+
|Col2|
+----+
| 1|
| 2|
| 3|
+----+
"""
if not isinstance(colName, basestring):
raise ValueError("colName should be provided as string")
jc = self._jdf.colRegex(colName)
return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
:param alias: string, an alias name to be set for the :class:`DataFrame`.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age") \
.sort(desc("df_as1.name")).collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
"""
assert isinstance(alias, basestring), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
@ignore_unicode_prefix
@since(2.1)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
:param other: Right side of the cartesian product.
>>> df.select("age", "name").collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df2.select("name", "height").collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),
Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
:param other: Right side of the join
:param on: a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
:param how: str, default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``fullouter``, ``full_outer``, ``left``, ``leftouter``, ``left_outer``,
``right``, ``rightouter``, ``right_outer``, ``semi``, ``leftsemi``, ``left_semi``,
``anti``, ``leftanti`` and ``left_anti``.
The following performs a full outer join between ``df1`` and ``df2``.
>>> from pyspark.sql.functions import desc
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height) \
.sort(desc("name")).collect()
[Row(name=u'Bob', height=85), Row(name=u'Alice', height=None), Row(name=None, height=80)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').sort(desc("name")).collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name=u'Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name=u'Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], basestring):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
if on is None:
on = self._jseq([])
assert isinstance(how, basestring), "how should be basestring"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
@since(1.6)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
@since("1.3.1")
def describe(self, *cols):
"""Computes basic statistics for numeric and string columns.
        This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
Use summary for expanded statistics and control over which statistics to compute.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@since("2.3.0")
def summary(self, *statistics):
"""Computes specified statistics for numeric and string columns. Available statistics are:
- count
- mean
- stddev
- min
- max
        - arbitrary approximate percentiles specified as a percentage (e.g., 75%)
If no statistics are given, this function computes count, mean, stddev, min,
approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
>>> df.summary().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| 25%| 2| null|
| 50%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+------------------+-----+
>>> df.summary("count", "min", "25%", "75%", "max").show()
+-------+---+-----+
|summary|age| name|
+-------+---+-----+
| count| 2| 2|
| min| 2|Alice|
| 25%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+---+-----+
To do a summary for specific columns first select them:
>>> df.select("age", "name").summary("count").show()
+-------+---+----+
|summary|age|name|
+-------+---+----+
| count| 2| 2|
+-------+---+----+
See also describe for basic statistics.
"""
if len(statistics) == 1 and isinstance(statistics[0], list):
statistics = statistics[0]
jdf = self._jdf.summary(self._jseq(statistics))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
:param n: int, default 1. Number of rows to return.
:return: If n is greater than 1, return a list of :class:`Row`.
If n is 1, return a single Row.
>>> df.head()
Row(age=2, name=u'Alice')
>>> df.head(1)
[Row(age=2, name=u'Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
@ignore_unicode_prefix
@since(1.3)
def first(self):
"""Returns the first row as a :class:`Row`.
>>> df.first()
Row(age=2, name=u'Alice')
"""
return self.head()
@ignore_unicode_prefix
@since(1.3)
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name=u'Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name=u'Bob')]
"""
if isinstance(item, basestring):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
@since(1.3)
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
:param cols: list of column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current :class:`DataFrame`.
>>> df.select('*').collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.select('name', 'age').collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
:param condition: a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
>>> df.filter(df.age > 3).collect()
[Row(age=5, name=u'Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name=u'Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name=u'Alice')]
"""
if isinstance(condition, basestring):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
:param cols: list of columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
@since(1.4)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
@since(1.4)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregations on them.
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
@since(1.3)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy.agg()``).
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
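        For example, with the two-row ``df`` fixture, duplicates are kept unless
        :func:`distinct` is applied afterwards:
        >>> df.union(df).count()  # doctest: +SKIP
        4
        >>> df.union(df).distinct().count()  # doctest: +SKIP
        2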
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return self.union(other)
@since(2.3)
def unionByName(self, other):
""" Returns a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
union (that does deduplication of elements), use this function followed by :func:`distinct`.
The difference between this function and :func:`union` is that this function
resolves columns by name (not by position):
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
>>> df1.unionByName(df2).show()
+----+----+----+
|col0|col1|col2|
+----+----+----+
| 1| 2| 3|
| 6| 4| 5|
+----+----+----+
"""
return DataFrame(self._jdf.unionByName(other._jdf), self.sql_ctx)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this :class:`DataFrame` and another :class:`DataFrame`.
This is equivalent to `INTERSECT` in SQL.
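        For example, intersecting the two-row ``df`` fixture with itself returns its
        distinct rows:
        >>> df.intersect(df).count()  # doctest: +SKIP
        2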
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
@since(2.4)
def intersectAll(self, other):
""" Return a new :class:`DataFrame` containing rows in both this :class:`DataFrame`
and another :class:`DataFrame` while preserving duplicates.
This is equivalent to `INTERSECT ALL` in SQL.
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersectAll(df2).sort("C1", "C2").show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| b| 3|
+---+---+
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this :class:`DataFrame`
but not in another :class:`DataFrame`.
This is equivalent to `EXCEPT DISTINCT` in SQL.
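        For example, subtracting the ``df`` fixture from itself leaves no rows:
        >>> df.subtract(df).count()  # doctest: +SKIP
        0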
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
@since(1.4)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
        duplicate rows. You can use :func:`withWatermark` to limit how late the duplicate data can
        be, and the system will accordingly limit the state. In addition, data older than the
        watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
| 10| 80|Alice|
+---+------+-----+
>>> df.dropDuplicates(['name', 'height']).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
+---+------+-----+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
@since("1.3.1")
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
:param how: 'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
:param thresh: int, default None
            If specified, drop rows that have fewer than `thresh` non-null values.
This overwrites the `how` parameter.
:param subset: optional list of column names to consider.
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
@since("1.3.1")
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
:param value: int, long, float, string, bool or dict.
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, long, float, boolean, or string.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df5.na.fill(False).show()
+----+-------+-----+
| age| name| spy|
+----+-------+-----+
| 10| Alice|false|
| 5| Bob|false|
|null|Mallory| true|
+----+-------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, long, basestring, bool, dict)):
raise ValueError("value should be a float, int, long, string, bool or dict")
# Note that bool validates isinstance(int), but we don't want to
# convert bools to floats
if not isinstance(value, bool) and isinstance(value, (int, long)):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
@since(1.4)
def replace(self, to_replace, value=_NoValue, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values to_replace and value must have the same type and can only be numerics, booleans,
        or strings. Value can also be None. When replacing, the new value will be cast
to the type of the existing column.
For numeric replacements all values to be replaced should have unique
floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`)
        an arbitrary replacement will be used.
:param to_replace: bool, int, long, float, string, list or dict.
Value to be replaced.
If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
must be a mapping between a value and a replacement.
:param value: bool, int, long, float, string, list or None.
The replacement value must be a bool, int, long, float, string or None. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace('Alice', None).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace({'Alice': None}).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
if value is _NoValue:
if isinstance(to_replace, dict):
value = None
else:
raise TypeError("value argument is required when to_replace is not a dictionary.")
# Helper functions
def all_of(types):
"""Given a type or tuple of types and a sequence of xs
check if each x is instance of type(s)
>>> all_of(bool)([True, False])
True
>>> all_of(basestring)(["a", 1])
False
"""
def all_of_(xs):
return all(isinstance(x, types) for x in xs)
return all_of_
all_of_bool = all_of(bool)
all_of_str = all_of(basestring)
all_of_numeric = all_of((float, int, long))
# Validate input types
valid_types = (bool, float, int, long, basestring, list, tuple)
if not isinstance(to_replace, valid_types + (dict, )):
raise ValueError(
"to_replace should be a bool, float, int, long, string, list, tuple, or dict. "
"Got {0}".format(type(to_replace)))
if not isinstance(value, valid_types) and value is not None \
and not isinstance(to_replace, dict):
raise ValueError("If to_replace is not a dict, value should be "
"a bool, float, int, long, string, list, tuple or None. "
"Got {0}".format(type(value)))
if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length. "
"Got {0} and {1}".format(len(to_replace), len(value)))
if not (subset is None or isinstance(subset, (list, tuple, basestring))):
raise ValueError("subset should be a list or tuple of column names, "
"column name or None. Got {0}".format(type(subset)))
# Reshape input arguments if necessary
if isinstance(to_replace, (float, int, long, basestring)):
to_replace = [to_replace]
if isinstance(to_replace, dict):
rep_dict = to_replace
if value is not None:
warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
else:
if isinstance(value, (float, int, long, basestring)) or value is None:
value = [value for _ in range(len(to_replace))]
rep_dict = dict(zip(to_replace, value))
if isinstance(subset, basestring):
subset = [subset]
# Verify we were not passed in mixed type generics.
if not any(all_of_type(rep_dict.keys())
and all_of_type(x for x in rep_dict.values() if x is not None)
for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
raise ValueError("Mixed type replacements are not supported")
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
else:
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
@since(2.0)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
:class:`DataFrame`.
The result of this algorithm has the following deterministic bound:
If the :class:`DataFrame` has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the :class:`DataFrame` so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
present in [[https://doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
:param col: str, list.
Can be a single column name, or a list of names for multiple columns.
:param probabilities: a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
:param relativeError: The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
:return: the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
.. versionchanged:: 2.2
Added support for multiple columns.
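        A small usage sketch with the ``df`` fixture (ages 2 and 5); with ``relativeError=0``
        the exact minimum and maximum are returned:
        >>> df.approxQuantile("age", [0.0, 1.0], 0.0)  # doctest: +SKIP
        [2.0, 5.0]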
"""
if not isinstance(col, (basestring, list, tuple)):
raise ValueError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, basestring)
if isinstance(col, tuple):
col = list(col)
elif isStr:
col = [col]
for c in col:
if not isinstance(c, basestring):
raise ValueError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise ValueError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int, long)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int, long) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int, long)) or relativeError < 0:
raise ValueError("relativeError should be numerical (float, int, long) >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list
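    # A minimal usage sketch (hypothetical column names, not part of the API):
    #   df.approxQuantile("age", [0.25, 0.5, 0.75], 0.05)     # list of 3 floats
    #   df.approxQuantile(["age", "height"], [0.5], 0.05)     # list of two 1-element lists
    # With relativeError = err and N rows, a value returned for probability p is guaranteed
    # to have exact rank between floor((p - err) * N) and ceil((p + err) * N).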
@since(1.4)
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a :class:`DataFrame` as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
:param col1: The name of the first column
:param col2: The name of the second column
:param method: The correlation method. Currently only supports "pearson"
"""
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
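    # A minimal usage sketch (hypothetical column names): both forms delegate to the same JVM call.
    #   df.corr('age', 'height')                    # Pearson correlation as a float
    #   df.stat.corr('age', 'height', 'pearson')    # equivalent via DataFrameStatFunctions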
@since(1.4)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
:param col1: The name of the first column
:param col2: The name of the second column
"""
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
@since(1.4)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
:param col1: The name of the first column. Distinct items will make the first item of
each row.
:param col2: The name of the second column. Distinct items will make the column names
of the :class:`DataFrame`.
"""
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
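    # A minimal usage sketch (hypothetical column names): for columns 'name' and 'item',
    # the first column of the result is literally named 'name_item' and holds the distinct
    # values of 'name'; one extra column per distinct value of 'item' holds the pair counts
    # (zero for pairs that never occur).
    #   df.crosstab('name', 'item').show()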
@since(1.4)
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
:param cols: Names of the columns to calculate frequent items for as a list or tuple of
strings.
:param support: The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
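    # A minimal usage sketch (hypothetical column names): items occurring in roughly 25% of rows.
    #   df.freqItems(['name', 'age'], support=0.25).collect()
    # The result is a single-row DataFrame with one array column per input column (named along
    # the lines of 'name_freqItems'), and it may contain false positives.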
@ignore_unicode_prefix
@since(1.3)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
The column expression must be an expression over this :class:`DataFrame`; attempting to add
a column from some other :class:`DataFrame` will raise an error.
:param colName: string, name of the new column.
:param col: a :class:`Column` expression for the new column.
.. note:: This method introduces a projection internally. Therefore, calling it multiple
times, for instance, via loops in order to add multiple columns can generate big
plans which can cause performance issues and even `StackOverflowException`.
To avoid this, use :func:`select` with the multiple columns at once.
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)]
"""
assert isinstance(col, Column), "col should be Column"
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
:param existing: string, name of the existing column to rename.
:param new: string, new name of the column.
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
@since(1.4)
@ignore_unicode_prefix
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
:param cols: a string name of the column to drop, or a
:class:`Column` to drop, or a list of string name of the columns to drop.
>>> df.drop('age').collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.drop(df.age).collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name=u'Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name=u'Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, basestring):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, basestring):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
def toDF(self, *cols):
"""Returns a new class:`DataFrame` that with new specified column names
:param cols: list of new column names (string)
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@since(3.0)
def transform(self, func):
"""Returns a new class:`DataFrame`. Concise syntax for chaining custom transformations.
:param func: a function that takes and returns a class:`DataFrame`.
>>> from pyspark.sql.functions import col
>>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", "float"])
>>> def cast_all_to_int(input_df):
... return input_df.select([col(col_name).cast("int") for col_name in input_df.columns])
>>> def sort_columns_asc(input_df):
... return input_df.select(*sorted(input_df.columns))
>>> df.transform(cast_all_to_int).transform(sort_columns_asc).show()
+-----+---+
|float|int|
+-----+---+
| 1| 1|
| 2| 2|
+-----+---+
"""
result = func(self)
assert isinstance(result, DataFrame), "Func returned an instance of type [%s], " \
"should have been DataFrame." % type(result)
return result
@since(3.1)
def sameSemantics(self, other):
"""
        Returns `True` when the logical query plans inside both :class:`DataFrame`\\s are equal and
        therefore return the same results.
        .. note:: The equality comparison here is simplified by tolerating cosmetic differences
            such as attribute names.
        .. note:: This API can compare both :class:`DataFrame`\\s very fast but can still return
            `False` on :class:`DataFrame`\\s that return the same results, for instance, from
            different plans. Such false negatives can be useful, for example, when caching.
.. note:: DeveloperApi
>>> df1 = spark.range(10)
>>> df2 = spark.range(10)
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id * 2))
True
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id + 2))
False
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col0", df2.id * 2))
True
"""
if not isinstance(other, DataFrame):
raise ValueError("other parameter should be of DataFrame; however, got %s"
% type(other))
return self._jdf.sameSemantics(other._jdf)
@since(3.1)
def semanticHash(self):
"""
Returns a hash code of the logical query plan against this :class:`DataFrame`.
.. note:: Unlike the standard hash code, the hash is calculated against the query plan
simplified by tolerating the cosmetic differences such as attribute names.
.. note:: DeveloperApi
>>> spark.range(10).selectExpr("id as col0").semanticHash() # doctest: +SKIP
1855039936
>>> spark.range(10).selectExpr("id as col1").semanticHash() # doctest: +SKIP
1855039936
"""
return self._jdf.semanticHash()
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
# Two aliases below were added for pandas compatibility many years ago.
# There are too many differences compared to pandas and we cannot just
# make it "compatible" by adding aliases. Therefore, we stop adding such
# aliases as of Spark 3.0. Two methods below remain just
# for legacy users currently.
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value=_NoValue, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
from pyspark.sql.functions import from_unixtime
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF()
globs['df3'] = sc.parallelize([Row(name='Alice', age=2),
Row(name='Bob', age=5)]).toDF()
globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),
Row(name='Bob', age=5, height=None),
Row(name='Tom', age=None, height=None),
Row(name=None, age=None, height=None)]).toDF()
globs['df5'] = sc.parallelize([Row(name='Alice', spy=False, age=10),
Row(name='Bob', spy=None, age=5),
Row(name='Mallory', spy=True, age=None)]).toDF()
globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
Row(name='Bob', time=1479442946)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| {
"content_hash": "e6e0ddc1420baa9f98682a4aa0ab3e3b",
"timestamp": "",
"source": "github",
"line_count": 2334,
"max_line_length": 100,
"avg_line_length": 38.97814910025707,
"alnum_prop": 0.5681450948062654,
"repo_name": "kevinyu98/spark",
"id": "78b574685327c8ce2f58be9f62007c423321a159",
"size": "91760",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/pyspark/sql/dataframe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "50108"
},
{
"name": "Batchfile",
"bytes": "25676"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "21814"
},
{
"name": "Dockerfile",
"bytes": "8811"
},
{
"name": "HTML",
"bytes": "40448"
},
{
"name": "HiveQL",
"bytes": "1890736"
},
{
"name": "Java",
"bytes": "4162292"
},
{
"name": "JavaScript",
"bytes": "211001"
},
{
"name": "Makefile",
"bytes": "1587"
},
{
"name": "PLSQL",
"bytes": "6849"
},
{
"name": "PLpgSQL",
"bytes": "380488"
},
{
"name": "PowerShell",
"bytes": "3865"
},
{
"name": "Python",
"bytes": "3222680"
},
{
"name": "R",
"bytes": "1203999"
},
{
"name": "Roff",
"bytes": "36516"
},
{
"name": "SQLPL",
"bytes": "9325"
},
{
"name": "Scala",
"bytes": "32702539"
},
{
"name": "Shell",
"bytes": "208775"
},
{
"name": "TSQL",
"bytes": "473509"
},
{
"name": "Thrift",
"bytes": "67584"
},
{
"name": "q",
"bytes": "79845"
}
],
"symlink_target": ""
} |
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout,Div,Submit,HTML,Button,Row, Field
from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions
from apps.blog.models import *
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
class LoginForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput())
def clean_username(self):
username = self.cleaned_data['username']
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise forms.ValidationError(
mark_safe(
                    ('User not found. Register <a href="{0}">Here</a>').format(reverse('signUp'))
)
)
return username
def clean_password(self):
username = self.cleaned_data.get('username',None)
password = self.cleaned_data['password']
try:
user = User.objects.get(username=username)
except:
user = None
if user is not None and not user.check_password(password):
raise forms.ValidationError("Invalid Password")
elif user is None:
pass
else:
return password
helper = FormHelper()
helper.form_method = 'POST'
helper.form_class = 'form-horizontal'
helper.layout = Layout(
Field('username', css_class='input-lg-4'),
Field('password', css_class='input-lg-4'),
FormActions(Submit('login', 'Login', css_class='btn btn-primary')),
)
class SignUpForm(forms.Form):
username = forms.CharField(max_length=45)
password = forms.CharField(widget=forms.PasswordInput())
firstname = forms.CharField(max_length=45)
lastname = forms.CharField(max_length=45)
email = forms.CharField(widget=forms.EmailInput())
def __init__(self, *args, **kwargs):
self.instance = kwargs.pop('instance', None)
super(SignUpForm, self).__init__(*args, **kwargs)
helper = FormHelper()
helper.form_method = 'POST'
    helper.form_class = 'form_horizontal'
helper.layout = Layout(
Field('username'),
Field('password'),
Field('firstname'),
Field('lastname'),
Field('email'),
FormActions(Submit('signup', 'Sign Up', css_class='btn btn-primary'))
)
def save(self):
author = Author()
author.username = self.cleaned_data['username']
author.password = self.cleaned_data['password']
author.firstname = self.cleaned_data['firstname']
author.lastname = self.cleaned_data['lastname']
author.email = self.cleaned_data['email']
return author
class TagForm(forms.Form):
name = forms.CharField(max_length=200)
helper = FormHelper()
helper.form_method = 'POST'
helper.form_class = 'form_horizontal'
def __init__(self, *args, **kwargs):
self.instance = kwargs.pop('instance', None)
super(TagForm, self).__init__(*args, **kwargs)
self.fields['name'].label = "Tag Name"
if self.instance:
self.fields['name'].initial = self.instance.name
self.helper.layout = Layout(
Field('name'),
FormActions(Submit('add', 'Update', css_class='btn btn-primary'))
)
else :
self.helper.layout = Layout(
Field('name'),
FormActions(Submit('add', 'Add', css_class='btn btn-primary'))
)
def save(self):
tag = self.instance if self.instance else Tag()
if self.instance :
tag.name = self.cleaned_data['name']
return tag
else :
try:
cek = Tag.objects.get(name=self.cleaned_data['name'])
except:
tag.name = self.cleaned_data['name']
return tag
return None
class CategoryForm(forms.Form):
name = forms.CharField(max_length=200)
helper = FormHelper()
helper.form_method = 'POST'
helper.form_class = 'form_horizontal'
def __init__(self, *args, **kwargs):
self.instance = kwargs.pop('instance', None)
super(CategoryForm, self).__init__(*args, **kwargs)
self.fields['name'].label = "Category Name"
if self.instance:
self.fields['name'].initial = self.instance.name
self.helper.layout = Layout(
Field('name'),
FormActions(Submit('add', 'Update', css_class='btn btn-primary'))
)
else :
self.helper.layout = Layout(
Field('name'),
FormActions(Submit('add', 'Add', css_class='btn btn-primary'))
)
def save(self):
cats = self.instance if self.instance else Category()
if self.instance :
cats.name = self.cleaned_data['name']
return cats
else :
try:
cek = Category.objects.get(name=self.cleaned_data['name'])
except:
cats.name = self.cleaned_data['name']
return cats
return None
class PostForm(forms.Form):
title = forms.CharField(max_length=200)
content = forms.CharField(widget=forms.widgets.Textarea())
categories = forms.ChoiceField(widget=forms.widgets.Select(), required=False)
tags = forms.MultipleChoiceField(widget=forms.widgets.CheckboxSelectMultiple(), required=False)
is_published = forms.ChoiceField(widget=forms.widgets.Select(), required=False)
helper = FormHelper()
helper.form_method = 'POST'
helper.form_class = 'form_horizontal'
def __init__(self, *args, **kwargs):
self.instance = kwargs.pop('instance', None)
super(PostForm, self).__init__(*args, **kwargs)
self.fields['tags'].choices = [(tag.id, tag.name) for tag in Tag.objects]
self.fields['categories'].choices = [(cats.id, cats.name) for cats in Category.objects]
self.fields['is_published'].choices = [('0', 'Draft'), ('1', 'Publish')]
self.fields['is_published'].label = "Publish ?"
if self.instance:
self.fields['title'].initial = self.instance.title
self.fields['content'].initial = self.instance.content
self.fields['tags'].initial = [tag.id for tag in self.instance.tags]
self.fields['categories'].initial = self.instance.categories.id
self.fields['is_published'].initial = 1 if self.instance.is_published else 0
self.helper.layout = Layout(
Field('title'),
Field('content'),
Field('categories'),
Field('tags', style="padding-left: 30px;"),
Field('is_published'),
FormActions(Submit('update', 'Update', css_class='btn btn-primary'))
)
else :
self.helper.layout = Layout(
Field('title'),
Field('content'),
Field('categories'),
Field('tags', style="padding-left: 30px;"),
Field('is_published'),
FormActions(Submit('add', 'Add', css_class='btn btn-primary'))
)
def save(self):
post = self.instance if self.instance else Post()
post.title = self.cleaned_data['title']
post.content = self.cleaned_data['content']
post.categories = Category.objects(id=self.cleaned_data['categories'])[0]
post.tags = Tag.objects(id__in=self.cleaned_data['tags'])
post.is_published = True if self.cleaned_data['is_published'] == '1' else False
return post | {
"content_hash": "dd9bfd8f83ebca5cb7fdade2b9e52f40",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 96,
"avg_line_length": 31.796116504854368,
"alnum_prop": 0.6894656488549619,
"repo_name": "gusaul/gigsblog",
"id": "5b15f5d336372df1ed23f9edf73a95a5921c5460",
"size": "6550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/blog/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "24427"
}
],
"symlink_target": ""
} |
__author__ = 'nicolasfraison'
import os
workers = os.environ['WORKER_PROCESSES']
timeout = os.environ['TIMEOUT']
preload_app = True | {
"content_hash": "c3e05615bf5e0196a8c232a033056abf",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 40,
"avg_line_length": 19,
"alnum_prop": 0.7218045112781954,
"repo_name": "ashangit/docker-graphite-api",
"id": "684773def69527fb119e72c03827f6d00b65ff63",
"size": "133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conf/gunicorn-conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "133"
}
],
"symlink_target": ""
} |
"""
URLConf for Django-Forum.
django-forum assumes that the forum application is living under
/forum/.
Usage in your base urls.py:
(r'^forum/', include('forum.urls')),
"""
from django.conf.urls.defaults import *
from forum.models import Forum
from forum.feeds import RssForumFeed, AtomForumFeed
from forum.sitemap import ForumSitemap, ThreadSitemap, PostSitemap
feed_dict = {
'rss' : RssForumFeed,
'atom': AtomForumFeed
}
sitemap_dict = {
'forums': ForumSitemap,
'threads': ThreadSitemap,
'posts': PostSitemap,
}
urlpatterns = patterns('',
url(r'^$', 'forum.views.forums_list', name='forum_index'),
url(r'^(?P<url>(rss|atom).*)/$', 'django.contrib.syndication.views.feed', {'feed_dict': feed_dict}),
url(r'^thread/(?P<thread>[0-9]+)/$', 'forum.views.thread', name='forum_view_thread'),
url(r'^thread/(?P<thread>[0-9]+)/reply/$', 'forum.views.reply', name='forum_reply_thread'),
url(r'^subscriptions/$', 'forum.views.updatesubs', name='forum_subscriptions'),
url(r'^(?P<slug>[-\w]+)/$', 'forum.views.forum', name='forum_thread_list'),
url(r'^(?P<forum>[-\w]+)/new/$', 'forum.views.newthread', name='forum_new_thread'),
url(r'^([-\w/]+/)(?P<forum>[-\w]+)/new/$', 'forum.views.newthread'),
url(r'^([-\w/]+/)(?P<slug>[-\w]+)/$', 'forum.views.forum', name='forum_subforum_thread_list'),
(r'^sitemap.xml$', 'django.contrib.sitemaps.views.index', {'sitemaps': sitemap_dict}),
(r'^sitemap-(?P<section>.+)\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemap_dict}),
)
| {
"content_hash": "f35fe41e269a8e9c3791a02ddfcec2f6",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 109,
"avg_line_length": 33.95652173913044,
"alnum_prop": 0.6376440460947503,
"repo_name": "Pmisiurak/django-forum",
"id": "ba5e3303e0b4a263d0e595e883200eb5fc7ba893",
"size": "1562",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "forum/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "11909"
},
{
"name": "Python",
"bytes": "27870"
}
],
"symlink_target": ""
} |
import json
from cStringIO import StringIO
try:
import unittest2 as unittest
except ImportError:
import unittest
from csvkit.exceptions import NonUniqueKeyColumnException
from csvkit.utilities.csvjson import CSVJSON
class TestCSVJSON(unittest.TestCase):
def test_simple(self):
args = ['examples/dummy.csv']
output_file = StringIO()
utility = CSVJSON(args, output_file)
utility.main()
js = json.loads(output_file.getvalue())
self.assertDictEqual(js[0], {"a": "1", "c": "3", "b": "2"})
def test_indentation(self):
args = ['-i', '4', 'examples/dummy.csv']
output_file = StringIO()
utility = CSVJSON(args, output_file)
utility.main()
js = json.loads(output_file.getvalue())
self.assertDictEqual(js[0], {"a": "1", "c": "3", "b": "2"})
def test_keying(self):
args = ['-k', 'a', 'examples/dummy.csv']
output_file = StringIO()
utility = CSVJSON(args, output_file)
utility.main()
js = json.loads(output_file.getvalue())
self.assertDictEqual(js, { "1": {"a": "1", "c": "3", "b": "2"} })
def test_duplicate_keys(self):
args = ['-k', 'a', 'examples/dummy3.csv']
output_file = StringIO()
utility = CSVJSON(args, output_file)
self.assertRaises(NonUniqueKeyColumnException, utility.main)
def test_geojson(self):
args = ['--lat', 'latitude', '--lon', 'longitude', 'examples/test_geo.csv']
output_file = StringIO()
utility = CSVJSON(args, output_file)
utility.main()
geojson = json.loads(output_file.getvalue())
self.assertEqual(geojson['type'], 'FeatureCollection')
self.assertFalse('crs' in geojson)
self.assertEqual(geojson['bbox'], [-95.334619, 32.299076986939205, -95.250699, 32.351434])
self.assertEqual(len(geojson['features']), 17)
for feature in geojson['features']:
self.assertEqual(feature['type'], 'Feature')
self.assertFalse('id' in feature)
self.assertEqual(len(feature['properties']), 10)
geometry = feature['geometry']
self.assertEqual(len(geometry['coordinates']), 2)
self.assertTrue(isinstance(geometry['coordinates'][0], float))
self.assertTrue(isinstance(geometry['coordinates'][1], float))
def test_geojson_with_id(self):
args = ['--lat', 'latitude', '--lon', 'longitude', '-k', 'slug', 'examples/test_geo.csv']
output_file = StringIO()
utility = CSVJSON(args, output_file)
utility.main()
geojson = json.loads(output_file.getvalue())
self.assertEqual(geojson['type'], 'FeatureCollection')
self.assertFalse('crs' in geojson)
self.assertEqual(geojson['bbox'], [-95.334619, 32.299076986939205, -95.250699, 32.351434])
self.assertEqual(len(geojson['features']), 17)
for feature in geojson['features']:
self.assertEqual(feature['type'], 'Feature')
self.assertTrue('id' in feature)
self.assertEqual(len(feature['properties']), 9)
geometry = feature['geometry']
self.assertEqual(len(geometry['coordinates']), 2)
self.assertTrue(isinstance(geometry['coordinates'][0], float))
self.assertTrue(isinstance(geometry['coordinates'][1], float))
def test_geojson_with_crs(self):
args = ['--lat', 'latitude', '--lon', 'longitude', '--crs', 'EPSG:4269', 'examples/test_geo.csv']
output_file = StringIO()
utility = CSVJSON(args, output_file)
utility.main()
geojson = json.loads(output_file.getvalue())
self.assertEqual(geojson['type'], 'FeatureCollection')
self.assertTrue('crs' in geojson)
self.assertEqual(geojson['bbox'], [-95.334619, 32.299076986939205, -95.250699, 32.351434])
self.assertEqual(len(geojson['features']), 17)
crs = geojson['crs']
self.assertEqual(crs['type'], 'name')
self.assertEqual(crs['properties']['name'], 'EPSG:4269')
| {
"content_hash": "37542ebe559daceb6463bc6e6f9ddbcb",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 105,
"avg_line_length": 34.17213114754098,
"alnum_prop": 0.5939074118493644,
"repo_name": "moradology/csvkit",
"id": "a1dac3e7d797244623ebd0f59a162c7a7d131288",
"size": "4192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utilities/test_csvjson.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Prolog",
"bytes": "501000"
},
{
"name": "Python",
"bytes": "226716"
}
],
"symlink_target": ""
} |
import os
# from pilot.util.container import execute
from pilot.common.errorcodes import ErrorCodes
import logging
logger = logging.getLogger(__name__)
errors = ErrorCodes()
def verify_setup_command(cmd):
"""
Verify the setup command.
:param cmd: command string to be verified (string).
:return: pilot error code (int), diagnostics (string).
"""
ec = 0
diagnostics = ""
return ec, diagnostics
def get_setup_command(job, prepareasetup):
"""
Return the path to asetup command, the asetup command itself and add the options (if desired).
If prepareasetup is False, the function will only return the path to the asetup script. It is then assumed
to be part of the job parameters.
Handle the case where environmental variables are set -
HARVESTER_CONTAINER_RELEASE_SETUP_FILE, HARVESTER_LD_LIBRARY_PATH, HARVESTER_PYTHONPATH
This will create the string need for the pilot to execute to setup the environment.
:param job: job object.
:param prepareasetup: not used.
:return: setup command (string).
"""
cmd = ""
# return immediately if there is no release or if user containers are used
if job.swrelease == 'NULL' or '--containerImage' in job.jobparams:
logger.debug('get_setup_command return value: {0}'.format(str(cmd)))
return cmd
# test if environmental variable HARVESTER_CONTAINER_RELEASE_SETUP_FILE is defined
setupfile = os.environ.get('HARVESTER_CONTAINER_RELEASE_SETUP_FILE', '')
if setupfile != "":
cmd = "source {};".format(setupfile)
# test if HARVESTER_LD_LIBRARY_PATH is defined
if os.environ.get('HARVESTER_LD_LIBRARY_PATH', '') != "":
cmd += "export LD_LIBRARY_PATH=$HARVESTER_LD_LIBRARY_PATH:$LD_LIBRARY_PATH;"
# test if HARVESTER_PYTHONPATH is defined
if os.environ.get('HARVESTER_PYTHONPATH', '') != "":
cmd += "export PYTHONPATH=$HARVESTER_PYTHONPATH:$PYTHONPATH;"
    # unset FRONTIER_SERVER variable
cmd += "unset FRONTIER_SERVER"
logger.debug('get_setup_command return value: {0}'.format(str(cmd)))
return cmd
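# A minimal sketch of the resulting command (hypothetical paths, not taken from a real job):
# with HARVESTER_CONTAINER_RELEASE_SETUP_FILE=/cvmfs/setup.sh and HARVESTER_PYTHONPATH=/opt/lib
# set, and neither a 'NULL' release nor --containerImage in the job parameters,
# get_setup_command() returns roughly
#   "source /cvmfs/setup.sh;export PYTHONPATH=$HARVESTER_PYTHONPATH:$PYTHONPATH;unset FRONTIER_SERVER"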
| {
"content_hash": "957c58c741fffb772f1ee982da833fa9",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 110,
"avg_line_length": 34.095238095238095,
"alnum_prop": 0.6773743016759777,
"repo_name": "PalNilsson/pilot2",
"id": "597a8ed6ab45b8456d114d261ae0b741f71e242e",
"size": "2449",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pilot/user/atlas/resource/cori.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1098187"
},
{
"name": "Shell",
"bytes": "624"
}
],
"symlink_target": ""
} |
"""
Foundations of Python Network Programming
------------------------------------------
Chapter 02 - Super naive Client / Server with basic backoff support (for UDP client) to avoid congestion.
The server (when used with UDP) randomly drops packets to simulate a packet loss.
"""
import socket
import argparse
import time
import random
from collections import deque
def parse_arguments():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='type')
server_parser = subparsers.add_parser('server', help='Run as a server')
client_parser = subparsers.add_parser('client', help='Run as a UDP (only) client') # TCP is boring
server_protocol = server_parser.add_mutually_exclusive_group()
#server_protocol.add_argument('--tcp', help='Run a as TCP server', action='store_const', const=socket.SOCK_STREAM)
server_protocol.add_argument('--udp', help='Run as a UDP server', action='store_const', const=socket.SOCK_DGRAM)
server_parser.add_argument('--port', help='The port on which we listen', default=51150)
client_parser.add_argument('--server', help='The server IP')
client_parser.add_argument('--port', help='The server port', default=51150)
return parser.parse_args()
def server(port, protocol=socket.SOCK_STREAM, address='0.0.0.0'):
s = socket.socket(socket.AF_INET, protocol)
if protocol == socket.SOCK_STREAM:
pass
# s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# s.bind((address, port))
# s.listen(1)
# print("[TCP]: Server is listening on {0}:{1}".format(*s.getsockname()))
# while True:
# (conn, addr) = s.accept()
# print("Connected Established from %s" % str(addr))
# while True:
# data = conn.recv(1024)
# print(data.rstrip())
# if not data: break
elif protocol == socket.SOCK_DGRAM:
s.bind((address, port))
print("[UDP]: Server is listening on {0}:{1}".format(*s.getsockname()))
while True:
(data, addr) = s.recvfrom(1024)
print(data.rstrip())
if random.randint(0, 1):
s.sendto("[Server]: Replying back", addr)
def client(server_address, port, timeout=1):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((server_address, int(port)))
message = "[Client][%d] Congestion timeout: %.4f" % (time.time(), timeout)
successful_requests_times = deque(maxlen=10)
while True:
time.sleep(timeout)
s.settimeout(timeout)
try:
s.send(message)
start = time.time()
s.recv(1024)
successful_requests_times.append(time.time() - start)
timeout = sum(successful_requests_times) / len(successful_requests_times)
message = "[Client][%d] Congestion timeout: %.4f" % (time.time(), timeout)
except socket.timeout:
if timeout < 5.0:
timeout += 0.2
except socket.error:
if timeout < 5.0:
timeout += 0.5
if __name__ == "__main__":
arguments = parse_arguments()
if arguments.type == 'server':
        # note: the '--tcp' option is commented out above, so guard the attribute lookup
        proto = socket.SOCK_STREAM if getattr(arguments, 'tcp', None) else socket.SOCK_DGRAM
server(port=arguments.port, protocol=proto)
elif arguments.type == 'client':
client(arguments.server, arguments.port)
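# A minimal invocation sketch (file name taken from this module's path; values are examples):
#   python udp_client_server.py server --udp --port 51150
#   python udp_client_server.py client --server 127.0.0.1 --port 51150
# The client starts with a 1 second timeout, backs off by 0.2 s on timeouts and 0.5 s on other
# socket errors (capped at 5 s), and otherwise tracks the average round-trip time of the last
# 10 successful requests.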
| {
"content_hash": "c081592a777f5c8180d259ad9758b7ca",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 118,
"avg_line_length": 41.21686746987952,
"alnum_prop": 0.6088862905583163,
"repo_name": "nathanIL/books",
"id": "632e34833ca4aff1ca3623b4148fb0241bd88e2a",
"size": "3421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Foundations_of_Python_Network_Programming/Chapter02/udp_client_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7028"
}
],
"symlink_target": ""
} |
from neutron.api import extensions
from neutron.api.v2 import attributes
SERVICE_ROUTER = 'service_router'
EXTENDED_ATTRIBUTES_2_0 = {
'routers': {
SERVICE_ROUTER: {'allow_post': True, 'allow_put': False,
'convert_to': attributes.convert_to_boolean,
'default': False, 'is_visible': True},
}
}
class Servicerouter(extensions.ExtensionDescriptor):
"""Extension class supporting advanced service router."""
@classmethod
def get_name(cls):
return "Service Router"
@classmethod
def get_alias(cls):
return "service-router"
@classmethod
def get_description(cls):
return "Provides service router."
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/service-router/api/v1.0"
@classmethod
def get_updated(cls):
return "2013-08-08T00:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| {
"content_hash": "e9c9a9b8c491147649a3ca43fadb8bf5",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 70,
"avg_line_length": 25.595238095238095,
"alnum_prop": 0.6148837209302326,
"repo_name": "jumpstarter-io/neutron",
"id": "9033039b0d3548a7efda5dde325dadc229e9955e",
"size": "1704",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "neutron/plugins/vmware/extensions/servicerouter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""
Covariance estimators using shrinkage.
Shrinkage corresponds to regularising `cov` using a convex combination:
shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate.
"""
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
# avoid division truncation
import warnings
import numpy as np
from . import empirical_covariance, EmpiricalCovariance
from ..utils import check_array
from ..utils.validation import _deprecate_positional_args
# ShrunkCovariance estimator
def shrunk_covariance(emp_cov, shrinkage=0.1):
"""Calculates a covariance matrix shrunk on the diagonal
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
emp_cov : array-like of shape (n_features, n_features)
Covariance matrix to be shrunk
shrinkage : float, default=0.1
Coefficient in the convex combination used for the computation
of the shrunk estimate. Range is [0, 1].
Returns
-------
shrunk_cov : ndarray of shape (n_features, n_features)
Shrunk covariance.
Notes
-----
The regularized (shrunk) covariance is given by:
(1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
emp_cov = check_array(emp_cov)
n_features = emp_cov.shape[0]
mu = np.trace(emp_cov) / n_features
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov
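# A minimal worked example of the function above (illustrative values, not library documentation):
#   >>> emp_cov = np.array([[1.0, 0.5], [0.5, 2.0]])
#   >>> shrunk_covariance(emp_cov, shrinkage=0.2)
# mu = trace(emp_cov) / 2 = 1.5, so the result is 0.8 * emp_cov + 0.2 * 1.5 * I,
# i.e. [[1.1, 0.4], [0.4, 1.9]].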
class ShrunkCovariance(EmpiricalCovariance):
"""Covariance estimator with shrinkage
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
store_precision : bool, default=True
Specify if the estimated precision is stored
assume_centered : bool, default=False
If True, data will not be centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data will be centered before computation.
shrinkage : float, default=0.1
Coefficient in the convex combination used for the computation
of the shrunk estimate. Range is [0, 1].
Attributes
----------
covariance_ : ndarray of shape (n_features, n_features)
Estimated covariance matrix
location_ : ndarray of shape (n_features,)
Estimated location, i.e. the estimated mean.
precision_ : ndarray of shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import ShrunkCovariance
>>> from sklearn.datasets import make_gaussian_quantiles
>>> real_cov = np.array([[.8, .3],
... [.3, .4]])
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=500)
>>> cov = ShrunkCovariance().fit(X)
>>> cov.covariance_
array([[0.7387..., 0.2536...],
[0.2536..., 0.4110...]])
>>> cov.location_
array([0.0622..., 0.0193...])
Notes
-----
The regularized covariance is given by:
(1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
@_deprecate_positional_args
def __init__(self, *, store_precision=True, assume_centered=False,
shrinkage=0.1):
super().__init__(store_precision=store_precision,
assume_centered=assume_centered)
self.shrinkage = shrinkage
def fit(self, X, y=None):
"""Fit the shrunk covariance model according to the given training data
and parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y : Ignored
not used, present for API consistence purpose.
Returns
-------
self : object
"""
X = self._validate_data(X)
# Not calling the parent object to fit, to avoid a potential
# matrix inversion when setting the precision
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
covariance = shrunk_covariance(covariance, self.shrinkage)
self._set_covariance(covariance)
return self
# Ledoit-Wolf estimator
def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
"""Estimates the shrunk Ledoit-Wolf covariance matrix.
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage.
assume_centered : bool, default=False
If True, data will not be centered before computation.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, data will be centered before computation.
block_size : int, default=1000
Size of blocks into which the covariance matrix will be split.
Returns
-------
shrinkage : float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized (shrunk) covariance is:
(1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
return 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# optionally center data
if not assume_centered:
X = X - X.mean(0)
# A non-blocked version of the computation is present in the tests
# in tests/test_covariance.py
# number of blocks to split the covariance matrix into
n_splits = int(n_features / block_size)
X2 = X ** 2
emp_cov_trace = np.sum(X2, axis=0) / n_samples
mu = np.sum(emp_cov_trace) / n_features
beta_ = 0. # sum of the coefficients of <X2.T, X2>
delta_ = 0. # sum of the *squared* coefficients of <X.T, X>
# starting block computation
for i in range(n_splits):
for j in range(n_splits):
rows = slice(block_size * i, block_size * (i + 1))
cols = slice(block_size * j, block_size * (j + 1))
beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols]))
delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2)
rows = slice(block_size * i, block_size * (i + 1))
beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits:]))
delta_ += np.sum(
np.dot(X.T[rows], X[:, block_size * n_splits:]) ** 2)
for j in range(n_splits):
cols = slice(block_size * j, block_size * (j + 1))
beta_ += np.sum(np.dot(X2.T[block_size * n_splits:], X2[:, cols]))
delta_ += np.sum(
np.dot(X.T[block_size * n_splits:], X[:, cols]) ** 2)
delta_ += np.sum(np.dot(X.T[block_size * n_splits:],
X[:, block_size * n_splits:]) ** 2)
delta_ /= n_samples ** 2
beta_ += np.sum(np.dot(X2.T[block_size * n_splits:],
X2[:, block_size * n_splits:]))
# use delta_ to compute beta
beta = 1. / (n_features * n_samples) * (beta_ / n_samples - delta_)
# delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p
delta = delta_ - 2. * mu * emp_cov_trace.sum() + n_features * mu ** 2
delta /= n_features
# get final beta as the min between beta and delta
    # We do this to prevent shrinking more than "1", which would invert
# the value of covariances
beta = min(beta, delta)
# finally get shrinkage
shrinkage = 0 if beta == 0 else beta / delta
return shrinkage
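# A minimal sketch of what the blocked loops above compute: when n_features <= block_size
# (n_splits == 0) they reduce to the non-blocked version used in the test suite, roughly
#   beta_  = np.sum(np.dot(X2.T, X2))
#   delta_ = np.sum(np.dot(X.T, X) ** 2)
# followed by the same normalisation by n_samples and the min(beta, delta) clipping.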
@_deprecate_positional_args
def ledoit_wolf(X, *, assume_centered=False, block_size=1000):
"""Estimates the shrunk Ledoit-Wolf covariance matrix.
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : bool, default=False
If True, data will not be centered before computation.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, data will be centered before computation.
block_size : int, default=1000
Size of blocks into which the covariance matrix will be split.
This is purely a memory optimization and does not affect results.
Returns
-------
shrunk_cov : ndarray of shape (n_features, n_features)
Shrunk covariance.
shrinkage : float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized (shrunk) covariance is:
(1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
if not assume_centered:
X = X - X.mean()
return np.atleast_2d((X ** 2).mean()), 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_features = X.size
else:
_, n_features = X.shape
# get Ledoit-Wolf shrinkage
shrinkage = ledoit_wolf_shrinkage(
X, assume_centered=assume_centered, block_size=block_size)
emp_cov = empirical_covariance(X, assume_centered=assume_centered)
mu = np.sum(np.trace(emp_cov)) / n_features
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov, shrinkage
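# A minimal usage sketch (shapes only, illustrative data):
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.randn(100, 5)
#   >>> shrunk_cov, shrinkage = ledoit_wolf(X)
#   >>> shrunk_cov.shape
#   (5, 5)
# shrinkage is the coefficient plugged into
# (1 - shrinkage) * emp_cov + shrinkage * mu * np.identity(n_features).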
class LedoitWolf(EmpiricalCovariance):
"""LedoitWolf Estimator
Ledoit-Wolf is a particular form of shrinkage, where the shrinkage
coefficient is computed using O. Ledoit and M. Wolf's formula as
described in "A Well-Conditioned Estimator for Large-Dimensional
Covariance Matrices", Ledoit and Wolf, Journal of Multivariate
Analysis, Volume 88, Issue 2, February 2004, pages 365-411.
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
store_precision : bool, default=True
Specify if the estimated precision is stored.
assume_centered : bool, default=False
If True, data will not be centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data will be centered before computation.
block_size : int, default=1000
Size of blocks into which the covariance matrix will be split
during its Ledoit-Wolf estimation. This is purely a memory
optimization and does not affect results.
Attributes
----------
covariance_ : ndarray of shape (n_features, n_features)
Estimated covariance matrix.
location_ : ndarray of shape (n_features,)
Estimated location, i.e. the estimated mean.
precision_ : ndarray of shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
shrinkage_ : float
Coefficient in the convex combination used for the computation
of the shrunk estimate. Range is [0, 1].
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import LedoitWolf
>>> real_cov = np.array([[.4, .2],
... [.2, .8]])
>>> np.random.seed(0)
>>> X = np.random.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=50)
>>> cov = LedoitWolf().fit(X)
>>> cov.covariance_
array([[0.4406..., 0.1616...],
[0.1616..., 0.8022...]])
>>> cov.location_
array([ 0.0595... , -0.0075...])
Notes
-----
The regularised covariance is:
(1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
and shrinkage is given by the Ledoit and Wolf formula (see References)
References
----------
"A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices",
Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2,
February 2004, pages 365-411.
"""
@_deprecate_positional_args
def __init__(self, *, store_precision=True, assume_centered=False,
block_size=1000):
super().__init__(store_precision=store_precision,
assume_centered=assume_centered)
self.block_size = block_size
def fit(self, X, y=None):
"""Fit the Ledoit-Wolf shrunk covariance model according to the given
training data and parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
not used, present for API consistence purpose.
Returns
-------
self : object
"""
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
X = self._validate_data(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = ledoit_wolf(X - self.location_,
assume_centered=True,
block_size=self.block_size)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
# OAS estimator
@_deprecate_positional_args
def oas(X, *, assume_centered=False):
"""Estimate covariance with the Oracle Approximating Shrinkage algorithm.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data from which to compute the covariance estimate.
assume_centered : bool, default=False
If True, data will not be centered before computation.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, data will be centered before computation.
Returns
-------
shrunk_cov : array-like of shape (n_features, n_features)
Shrunk covariance.
shrinkage : float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised (shrunk) covariance is:
(1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
The formula we used to implement the OAS is slightly modified compared
to the one given in the article. See :class:`OAS` for more details.
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
if not assume_centered:
X = X - X.mean()
return np.atleast_2d((X ** 2).mean()), 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
emp_cov = empirical_covariance(X, assume_centered=assume_centered)
mu = np.trace(emp_cov) / n_features
# formula from Chen et al.'s **implementation**
alpha = np.mean(emp_cov ** 2)
num = alpha + mu ** 2
den = (n_samples + 1.) * (alpha - (mu ** 2) / n_features)
shrinkage = 1. if den == 0 else min(num / den, 1.)
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov, shrinkage
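# A minimal usage sketch (mirrors the ledoit_wolf example above; illustrative data):
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.randn(100, 5)
#   >>> shrunk_cov, shrinkage = oas(X)
# The only difference from ledoit_wolf() is how the shrinkage coefficient is chosen
# (the Chen et al. oracle approximation rather than the Ledoit-Wolf formula).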
class OAS(EmpiricalCovariance):
"""Oracle Approximating Shrinkage Estimator
Read more in the :ref:`User Guide <shrunk_covariance>`.
OAS is a particular form of shrinkage described in
"Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
The formula used here does not correspond to the one given in the
article. In the original article, formula (23) states that 2/p is
multiplied by Trace(cov*cov) in both the numerator and denominator, but
this operation is omitted because for a large p, the value of 2/p is
so small that it doesn't affect the value of the estimator.
Parameters
----------
store_precision : bool, default=True
Specify if the estimated precision is stored.
assume_centered : bool, default=False
If True, data will not be centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data will be centered before computation.
Attributes
----------
covariance_ : ndarray of shape (n_features, n_features)
Estimated covariance matrix.
location_ : ndarray of shape (n_features,)
Estimated location, i.e. the estimated mean.
precision_ : ndarray of shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
shrinkage_ : float
coefficient in the convex combination used for the computation
of the shrunk estimate. Range is [0, 1].
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import OAS
>>> from sklearn.datasets import make_gaussian_quantiles
>>> real_cov = np.array([[.8, .3],
... [.3, .4]])
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=500)
>>> oas = OAS().fit(X)
>>> oas.covariance_
array([[0.7533..., 0.2763...],
[0.2763..., 0.3964...]])
>>> oas.precision_
array([[ 1.7833..., -1.2431... ],
[-1.2431..., 3.3889...]])
>>> oas.shrinkage_
0.0195...
Notes
-----
The regularised covariance is:
(1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
and shrinkage is given by the OAS formula (see References)
References
----------
"Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
def fit(self, X, y=None):
"""Fit the Oracle Approximating Shrinkage covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
not used, present for API consistence purpose.
Returns
-------
self : object
"""
X = self._validate_data(X)
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = oas(X - self.location_, assume_centered=True)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
| {
"content_hash": "3d589e3013184a801b3e4138bfec700e",
"timestamp": "",
"source": "github",
"line_count": 605,
"max_line_length": 79,
"avg_line_length": 33.839669421487606,
"alnum_prop": 0.605871147364822,
"repo_name": "ndingwall/scikit-learn",
"id": "1949d67de11abac66c3d72b3e6e2d893dec7ad45",
"size": "20473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/covariance/_shrunk_covariance.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "416843"
},
{
"name": "C++",
"bytes": "140261"
},
{
"name": "Makefile",
"bytes": "1630"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6794973"
},
{
"name": "Shell",
"bytes": "13442"
}
],
"symlink_target": ""
} |
from .direction import (RightDirection, LeftDirection,
DownDirection, UpDirection)
from .selector import SelectorFactory
class ObjectFactory(object):
def __new__(klass, direction_expr, expression, wnck_wrapper):
count = 1
if 'count' in expression:
count = int(''.join(expression['count']))
logical = {
'r': RightDirection,
'l': LeftDirection,
'u': UpDirection,
'd': DownDirection,
'e': RightDirection,
'w': LeftDirection,
'n': UpDirection,
's': DownDirection,
}
selector_expr = None if not direction_expr else direction_expr[0]
if 'logical' in expression:
return logical[expression['logical']](wnck_wrapper, count)
else:
return SelectorFactory(selector_expr, expression, wnck_wrapper)
| {
"content_hash": "461ef34acda2e64b97447b04aabc1596",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 32.357142857142854,
"alnum_prop": 0.5717439293598234,
"repo_name": "bitptr/wim",
"id": "907d5e442e7aaa76615abdddd7cb16bca5c58e04",
"size": "906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wim/object_factory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "39269"
},
{
"name": "Shell",
"bytes": "798"
}
],
"symlink_target": ""
} |
import tensorflow as tf
from convolutional import Convolutional
from tensorflow.examples.tutorials.mnist import input_data
'''
Main file for running of single digit recognition models
'''
def get_batch(dataset, inputs_placeholder, labels_placeholder):
inputs, labels = dataset.next_batch(50)
return {inputs_placeholder: inputs, labels_placeholder: labels}
def evaluate(dataset, session, operation, inputs_placeholder, labels_placeholder, name, summary_writer, learning_step):
steps_per_epoch = dataset.num_examples // 50
number_of_examples = steps_per_epoch * 50
correct_num = 0
for step in range(steps_per_epoch):
batch = get_batch(dataset, inputs_placeholder, labels_placeholder)
correct_num += session.run(operation, feed_dict=batch)
precision = correct_num / number_of_examples
summary = tf.Summary()
summary.value.add(tag='Accuracy_' + name, simple_value=precision)
summary_writer.add_summary(summary, learning_step)
print("Accuracy %.3f" % precision)
if __name__ == '__main__':
# Download mnist
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
with tf.Graph().as_default():
# Wiring and different models
#model = Feed_forward()
#model = Feed_forward_two_layers()
model = Convolutional()
inputs_placeholder, labels_placeholder = model.input_placeholders()
logits = model.inference(inputs_placeholder)
loss = model.loss(logits, labels_placeholder)
training = model.training(loss, 0.0001)
evaluation = model.evaluation(logits, labels_placeholder)
# Initialization
session = tf.InteractiveSession()
init = tf.global_variables_initializer()
session.run(init)
# visualize graph
writer = tf.summary.FileWriter("visualizations/" + model.get_name())
writer.add_graph(session.graph)
# Summaries
merged_summary = tf.summary.merge_all()
# Training
for step in range(10000 + 1):
batch = get_batch(mnist.train, inputs_placeholder, labels_placeholder)
loss_value, summary, _ = session.run([loss, merged_summary, training], feed_dict=batch)
writer.add_summary(summary, step)
if step % 100 == 0:
print("Step %d, loss %.3f" % (step, loss_value))
print("Train accuracy")
evaluate(mnist.train, session, evaluation, inputs_placeholder, labels_placeholder, "train", writer,
step)
print("Validation accuracy")
evaluate(mnist.validation, session, evaluation, inputs_placeholder, labels_placeholder, "validation",
writer, step)
print("Test accuracy")
evaluate(mnist.test, session, evaluation, inputs_placeholder, labels_placeholder, "test", writer, step)
print()
| {
"content_hash": "0e9a61e9354a2b416072d6dbde8783f3",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 119,
"avg_line_length": 40.12328767123287,
"alnum_prop": 0.6432229429839535,
"repo_name": "thePetrMarek/SequenceOfDigitsRecognition",
"id": "71c3e42ad6620f25d9bf32ea269e15e45c52d856",
"size": "2929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "single_digit/main_single_digit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "150203"
}
],
"symlink_target": ""
} |
from google.cloud import tasks_v2beta3
def sample_create_queue():
# Create a client
client = tasks_v2beta3.CloudTasksClient()
# Initialize request argument(s)
request = tasks_v2beta3.CreateQueueRequest(
parent="parent_value",
)
# Make the request
response = client.create_queue(request=request)
# Handle the response
print(response)
# [END cloudtasks_v2beta3_generated_CloudTasks_CreateQueue_sync]
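# Variant sketch (not part of the generated sample): the request can also carry
# an explicit Queue resource. The project, location and queue names below are
# hypothetical.
#
# queue = tasks_v2beta3.Queue(
#     name="projects/my-project/locations/us-central1/queues/my-queue",
# )
# request = tasks_v2beta3.CreateQueueRequest(parent="parent_value", queue=queue)
# response = tasks_v2beta3.CloudTasksClient().create_queue(request=request)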
| {
"content_hash": "801db09b2ebaa2f802e1e7da2d69d32d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 64,
"avg_line_length": 23.57894736842105,
"alnum_prop": 0.7075892857142857,
"repo_name": "googleapis/python-tasks",
"id": "4a9e92b789a2255f35b0e7570ee6483eb53aeee7",
"size": "1832",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/cloudtasks_v2beta3_generated_cloud_tasks_create_queue_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1867840"
},
{
"name": "Shell",
"bytes": "30657"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crowdsourcing', '0154_stripecustomer_available_balance'),
('crowdsourcing', '0152_taskworker_started_at'),
]
operations = [
]
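# Note: this is a merge migration -- it carries no schema operations and only
# reconciles the two divergent migration branches listed in `dependencies`.
# Django generates such files with:
#
#   python manage.py makemigrations --merge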
| {
"content_hash": "9f6a0ce6f15957c3ac9cf462b4427a98",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 67,
"avg_line_length": 21,
"alnum_prop": 0.6700680272108843,
"repo_name": "shirishgoyal/crowdsource-platform",
"id": "2aab0c46ab8bcc3403bb3a18146b790b623195d2",
"size": "364",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop2",
"path": "crowdsourcing/migrations/0155_merge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63075"
},
{
"name": "HTML",
"bytes": "229504"
},
{
"name": "JavaScript",
"bytes": "312581"
},
{
"name": "Python",
"bytes": "748797"
},
{
"name": "Shell",
"bytes": "838"
}
],
"symlink_target": ""
} |
import httplib
import json
from google.appengine.ext import webapp
from model import client as client_db
from model import product as product_db
class ProductHandler(webapp.RequestHandler):
"""A class to handle creating, reading, updating and deleting products.
Handles GET, POST and DELETE requests for /products/ and /products/<product>.
All functions have the same signature, even though they may not use all the
parameters, so that a single route can be used for the handler. Note that PUT
is not handled because a product has no extra information to update.
"""
def get(self, product_id):
"""Responds with information about all products or a specific product.
/products/
Responds with a JSON encoded object that contains a list of product IDs.
/products/<product>
Responds with a JSON encoded object of the product ID and its child client
IDs for the given product.
Args:
      product_id: The product ID. May be empty.
"""
if not product_id:
products = product_db.Product.all()
products_result = [{'product_id': p.key().name()} for p in products]
result = {'products': products_result}
else:
product = product_db.Product.get_by_key_name(product_id)
if not product:
self.error(httplib.NOT_FOUND)
return
client_keys = client_db.Client.all(keys_only=True)
client_keys.ancestor(product)
client_ids = [key.name() for key in client_keys]
result = {'product_id': product.key().name(),
'client_ids': client_ids}
self.response.headers['Content-Type'] = 'application/json'
json.dump(result, self.response.out)
def post(self, product_id):
"""Creates a new product.
/products/
Creates a new product. The product ID should be specified in the body of
the request.
/products/<product>
Unused.
Args:
product_id: The product ID. Must be empty.
"""
# Validate input.
if product_id:
self.error(httplib.BAD_REQUEST)
return
product_id = self.request.get('product_id')
if not product_id:
self.error(httplib.BAD_REQUEST)
return
# Perform DB lookups.
# Make sure that this product ID does not already exist.
if product_db.Product.get_by_key_name(product_id):
self.error(httplib.BAD_REQUEST)
return
# Create a new product.
product = product_db.Product(key_name=product_id)
product.put()
self.response.set_status(httplib.CREATED, message='ProductCreated')
def delete(self, product_id):
"""Deletes a product.
/products/
      Unused.
/products/<product>
Deletes the specified product.
Args:
product_id: The product ID. Must not be empty.
"""
# Validate input.
if not product_id:
self.error(httplib.BAD_REQUEST)
return
# Perform DB lookups.
product = product_db.Product.get_by_key_name(product_id)
if not product:
self.error(httplib.NOT_FOUND)
return
# Delete the product.
product.delete()
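# Minimal wiring sketch (an assumption -- the real route table lives elsewhere
# in this project): a single route whose capture group supplies `product_id`,
# which is why every handler method shares the same signature.
#
# application = webapp.WSGIApplication(
#     [('/products/?(.*)', ProductHandler)],
#     debug=True)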
| {
"content_hash": "9ae056bbb9a30dcc9082c6f9a573f8d4",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 80,
"avg_line_length": 29.123809523809523,
"alnum_prop": 0.6641595814257685,
"repo_name": "ericmckean/syzygy",
"id": "6bab4d66e90314981f95eb4c5003d98f8391bfb1",
"size": "3664",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "syzygy/dashboard/handler/product.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "13748"
},
{
"name": "C",
"bytes": "8422"
},
{
"name": "C++",
"bytes": "7598735"
},
{
"name": "CSS",
"bytes": "1333"
},
{
"name": "HTML",
"bytes": "3182"
},
{
"name": "Protocol Buffer",
"bytes": "6472"
},
{
"name": "Python",
"bytes": "841963"
},
{
"name": "Shell",
"bytes": "19040"
}
],
"symlink_target": ""
} |
"""All views for the extension."""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import CreateView, FormView, TemplateView
from django.shortcuts import redirect
from django.db.models import BooleanField, Q, Case, When
from django.utils.safestring import mark_safe
from django.contrib import messages
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from braces.views import LoginRequiredMixin
from geokey.core.decorators import handle_exceptions_for_ajax
from geokey.projects.models import Project
from geokey.projects.views import ProjectContext
from .helpers.context_helpers import does_not_exist_msg
from .helpers.url_helpers import check_url
from .base import STATUS
from .exceptions import URLError
from .models import WebResource
from .forms import WebResourceForm
from .serializers import WebResourceSerializer
# ###########################
# ADMIN PAGES
# ###########################
class IndexPage(LoginRequiredMixin, TemplateView):
"""Main index page."""
template_name = 'wr_index.html'
def get_context_data(self, *args, **kwargs):
"""
GET method for the template.
Return the context to render the view. Overwrite the method by adding
all projects (where user is an administrator) and available filters to
the context. It optionally filters projects by the filter provided on
the URL.
Returns
-------
dict
Context.
"""
projects = Project.objects.filter(admins=self.request.user).annotate(
with_webresources=Case(
When(
~Q(webresources__status='deleted') &
Q(webresources__isnull=False),
then=True
),
default=False,
output_field=BooleanField()
)
).distinct()
filters = {}
filter_for_projects = self.request.GET.get('filter')
filter_to_add = 'without-web-resources-only'
if filter_for_projects == filter_to_add:
projects = projects.filter(with_webresources=False)
filters[filter_to_add] = 'Without web resources'
filter_to_add = 'with-web-resources-only'
if filter_for_projects == filter_to_add:
projects = projects.filter(with_webresources=True)
filters[filter_to_add] = 'With web resources'
return super(IndexPage, self).get_context_data(
projects=projects,
filters=filters,
*args,
**kwargs
)
class AllWebResourcesPage(LoginRequiredMixin, ProjectContext, TemplateView):
"""All web resources page."""
template_name = 'wr_all_webresources.html'
class AddWebResourcePage(LoginRequiredMixin, ProjectContext, CreateView):
"""Add new web resource page."""
template_name = 'wr_add_webresource.html'
form_class = WebResourceForm
def get_context_data(self, *args, **kwargs):
"""
GET method for the template.
Return the context to render the view. Overwrite the method by adding
project ID to the context.
Returns
-------
dict
Context.
"""
project_id = self.kwargs['project_id']
return super(AddWebResourcePage, self).get_context_data(
project_id,
*args,
**kwargs
)
def form_valid(self, form):
"""
Add web resource when form data is valid.
Parameters
----------
form : geokey_webresource.forms.WebResourceForm
Represents the user input.
Returns
-------
django.http.HttpResponse
Rendered template.
"""
context = self.get_context_data(form=form)
project = context.get('project')
if project:
if project.islocked:
messages.error(
self.request,
'The project is locked. New web resources cannot be added.'
)
else:
form.instance.project = project
form.instance.creator = self.request.user
try:
form.instance.dataformat = check_url(form.instance.url)
add_another_url = reverse(
'geokey_webresources:webresource_add',
kwargs={
'project_id': project.id
}
)
messages.success(
self.request,
mark_safe(
'The web resource has been added. <a href="%s">'
'Add another web resource.</a>' % add_another_url
)
)
return super(AddWebResourcePage, self).form_valid(form)
except URLError, error:
messages.error(self.request, error.to_html())
return self.render_to_response(context)
def form_invalid(self, form):
"""
Display an error message when form data is invalid.
Parameters
----------
form : geokey_webresource.forms.WebResourceForm
Represents the user input.
Returns
-------
dict
Context.
"""
messages.error(self.request, 'An error occurred.')
return self.render_to_response(self.get_context_data(form=form))
def get_success_url(self):
"""
Set URL redirection when web resource created successfully.
Returns
-------
str
URL for redirection.
"""
return reverse(
'geokey_webresources:all_webresources',
kwargs={
'project_id': self.kwargs['project_id']
}
)
class WebResourceContext(LoginRequiredMixin, ProjectContext):
"""Get web resource mixin."""
def get_context_data(self, project_id, webresource_id, *args, **kwargs):
"""
GET method for the template.
Return the context to render the view. Overwrite the method by adding
a web resource and available status types to the context.
Parameters
----------
project_id : int
Identifies the project in the database.
webresource_id : int
Identifies the web resource in the database.
Returns
-------
dict
Context.
"""
context = super(WebResourceContext, self).get_context_data(
project_id,
*args,
**kwargs
)
context['status_types'] = STATUS
try:
context['webresource'] = WebResource.objects.get(
pk=webresource_id,
project=context.get('project')
)
return context
except WebResource.DoesNotExist:
return {
'error': 'Not found.',
'error_description': does_not_exist_msg('Web resource')
}
class SingleWebResourcePage(WebResourceContext, FormView):
"""Single web resource page."""
template_name = 'wr_single_webresource.html'
def get_object(self):
"""
Get and return web resource object.
Returns
-------
geokey_webresource.models.WebResource
Web resource object.
"""
try:
return WebResource.objects.get(
pk=self.kwargs['webresource_id']
)
except WebResource.DoesNotExist:
return None
def get_context_data(self, *args, **kwargs):
"""
GET method for the template.
Return the context to render the view. Overwrite the method by adding
project ID and web resource ID to the context.
Returns
-------
dict
Context.
"""
project_id = self.kwargs['project_id']
webresource_id = self.kwargs['webresource_id']
return super(SingleWebResourcePage, self).get_context_data(
project_id,
webresource_id,
*args,
**kwargs
)
def get_form(self, form_class=WebResourceForm):
"""Attach instance object to form data."""
return form_class(instance=self.get_object(), **self.get_form_kwargs())
def form_valid(self, form):
"""
Update web resource when form data is valid.
Parameters
----------
form : geokey_webresource.forms.WebResourceForm
Represents the user input.
Returns
-------
django.http.HttpResponse
Rendered template.
"""
context = self.get_context_data(form=form)
project = context.get('project')
if project:
if project.islocked:
messages.error(
self.request,
'The project is locked. Web resources cannot be updated.'
)
else:
try:
form.instance.dataformat = check_url(form.instance.url)
if self.request.POST.get('symbol_clear') == 'true':
form.instance.symbol = None
form.save()
messages.success(
self.request,
mark_safe('The web resource has been updated.')
)
return super(SingleWebResourcePage, self).form_valid(form)
except URLError, error:
messages.error(self.request, error.to_html())
return self.render_to_response(context)
def form_invalid(self, form):
"""
Display an error message when form data is invalid.
Parameters
----------
form : geokey_webresource.forms.WebResourceForm
Represents the user input.
Returns
-------
dict
Context.
"""
messages.error(self.request, 'An error occurred.')
return self.render_to_response(self.get_context_data(form=form))
def get_success_url(self):
"""
Set URL redirection when web resource updated successfully.
Returns
-------
str
URL for redirection.
"""
return reverse(
'geokey_webresources:all_webresources',
kwargs={
'project_id': self.kwargs['project_id']
}
)
class RemoveWebResourcePage(WebResourceContext, TemplateView):
"""Remove web resource page."""
template_name = 'base.html'
def get(self, request, project_id, webresource_id):
"""
GET method for removing web resource.
Parameters
----------
request : django.http.HttpRequest
Object representing the request.
project_id : int
Identifies the project in the database.
webresource_id : int
Identifies the web resource in the database.
Returns
-------
django.http.HttpResponseRedirect
Redirects to all web resources if web resource is removed, single
web resource page if project is locked.
django.http.HttpResponse
Rendered template if project or web resource does not exist.
"""
context = self.get_context_data(project_id, webresource_id)
webresource = context.get('webresource')
if webresource:
if webresource.project.islocked:
messages.error(
request,
'The project is locked. Web resource cannot be removed.'
)
return redirect(
'geokey_webresources:single_webresource',
project_id=project_id,
webresource_id=webresource_id
)
else:
webresource.delete()
messages.success(
request,
'The web resource has been removed.'
)
return redirect(
'geokey_webresources:all_webresources',
project_id=project_id
)
return self.render_to_response(context)
# ###########################
# ADMIN AJAX
# ###########################
class ReorderWebResourcesAjax(APIView):
"""Reorder web resources via Ajax."""
@handle_exceptions_for_ajax
def post(self, request, project_id):
"""
POST method for reordering web resources.
Parameters
----------
request : rest_framework.request.Request
Object representing the request.
project_id : int
Identifies the project in the database.
Returns
-------
rest_framework.response.Response
Response to the request.
"""
project = Project.objects.as_admin(request.user, project_id)
if project.islocked:
return Response(
{'error': 'Project is locked.'},
status=status.HTTP_403_FORBIDDEN
)
elif not project.webresources.exists():
return Response(
{'error': 'Project has no web resources.'},
status=status.HTTP_404_NOT_FOUND
)
try:
webresources = []
for order, webresource_id in enumerate(request.data.get('order')):
webresource = project.webresources.get(pk=webresource_id)
webresource.order = order
webresources.append(webresource)
for webresource in webresources:
webresource.save()
serializer = WebResourceSerializer(
project.webresources,
many=True
)
return Response(serializer.data)
except WebResource.DoesNotExist:
return Response(
{'error': 'One or more web resources were not found.'},
status=status.HTTP_400_BAD_REQUEST
)
class UpdateWebResourceAjax(APIView):
"""Update web resource via Ajax."""
@handle_exceptions_for_ajax
def put(self, request, project_id, webresource_id):
"""
PUT method for updating web resource.
Parameters
----------
request : rest_framework.request.Request
Object representing the request.
project_id : int
Identifies the project in the database.
webresource_id : int
Identifies the web resource in the database.
Returns
-------
rest_framework.response.Response
Response to the request.
"""
project = Project.objects.as_admin(request.user, project_id)
if project.islocked:
return Response(
{'error': 'Project is locked.'},
status=status.HTTP_403_FORBIDDEN
)
try:
webresource = project.webresources.get(pk=webresource_id)
serializer = WebResourceSerializer(
webresource,
data=request.data,
partial=True,
fields=('id', 'status')
)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
except WebResource.DoesNotExist, error:
return Response(
{'error': str(error)},
status=status.HTTP_404_NOT_FOUND
)
# ###########################
# PUBLIC API
# ###########################
class AllWebResourcesAPI(APIView):
"""All web resources via API."""
@handle_exceptions_for_ajax
def get(self, request, project_id):
"""
GET method for all web resources of a project.
Parameters
----------
request : rest_framework.request.Request
Object representing the request.
project_id : int
Identifies the project in the database.
Returns
-------
rest_framework.response.Response
Response to the request.
"""
project = Project.objects.get_single(request.user, project_id)
serializer = WebResourceSerializer(
project.webresources.filter(status=STATUS.active),
many=True
)
return Response(serializer.data)
class SingleWebResourceAPI(APIView):
"""Single web resource via API."""
@handle_exceptions_for_ajax
def get(self, request, project_id, webresource_id):
"""
GET method for a single web resource of a project.
Only active web resources are returned to anyone who has access to the
project.
Parameters
----------
request : rest_framework.request.Request
Object representing the request.
project_id : int
Identifies the project in the database.
webresource_id : int
Identifies the web resource in the database.
Returns
-------
rest_framework.response.Response
Response to the request.
"""
project = Project.objects.get_single(request.user, project_id)
try:
webresource = project.webresources.get(
pk=webresource_id,
status=STATUS.active
)
serializer = WebResourceSerializer(webresource)
return Response(serializer.data)
except WebResource.DoesNotExist:
return Response(
{'error': 'Web resource not found.'},
status=status.HTTP_404_NOT_FOUND
)
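# Illustrative client-side sketch (not part of this module): the reorder
# endpoint above expects a JSON body whose `order` key lists web resource
# primary keys in their new order. The URL and IDs below are hypothetical.
#
# import requests
# requests.post(
#     'https://example.com/ajax/projects/1/webresources/reorder/',
#     json={'order': [3, 1, 2]},
#     headers={'X-CSRFToken': csrf_token},
# )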
| {
"content_hash": "072b5c939f98f1f03721fe774b3f9d50",
"timestamp": "",
"source": "github",
"line_count": 607,
"max_line_length": 79,
"avg_line_length": 29.823723228995057,
"alnum_prop": 0.5449373032094128,
"repo_name": "ExCiteS/geokey-webresources",
"id": "2fc1ba7bc771a1019a21c7b59786046c17d69deb",
"size": "18103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geokey_webresources/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "26005"
},
{
"name": "Python",
"bytes": "128079"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.conf import settings
from django.utils.html import escape
from django.utils.translation import ugettext as _
from sentry.utils.imports import import_string
def get_interface(name):
try:
import_path = settings.SENTRY_INTERFACES[name]
except KeyError:
raise ValueError('Invalid interface name: %s' % (name,))
try:
interface = import_string(import_path)
except Exception:
raise ValueError('Unable to load interface: %s' % (name,))
return interface
class InterfaceValidationError(Exception):
pass
class Interface(object):
"""
An interface is a structured representation of data, which may
render differently than the default ``extra`` metadata in an event.
"""
_data = None
score = 0
display_score = None
__slots__ = ['_data']
def __init__(self, **data):
self._data = data or {}
def __eq__(self, other):
if type(self) != type(other):
return False
return self._data == other._data
def __getstate__(self):
return dict(
(slot, self.__dict__.get(slot))
for slot in self.__slots__
)
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, '_data'):
self._data = {}
def __getattr__(self, name):
return self._data[name]
def __setattr__(self, name, value):
if name == '_data':
self.__dict__['_data'] = value
else:
self._data[name] = value
@classmethod
def to_python(cls, data):
return cls(data)
def get_api_context(self, is_public=False):
return self.to_json()
def to_json(self):
# eliminate empty values for serialization to compress the keyspace
# and save (seriously) ridiculous amounts of bytes
# XXX(dcramer): its important that we keep zero values here, but empty
# lists and strings get discarded as we've deemed them not important
return dict(
(k, v) for k, v in self._data.iteritems() if (v == 0 or v)
)
def get_path(self):
cls = type(self)
return '%s.%s' % (cls.__module__, cls.__name__)
def get_alias(self):
return self.get_slug()
def get_hash(self):
return []
def compute_hashes(self, platform):
result = self.get_hash()
if not result:
return []
return [result]
def get_slug(self):
return type(self).__name__.lower()
def get_title(self):
return _(type(self).__name__)
def get_display_score(self):
return self.display_score or self.score
def get_score(self):
return self.score
def to_string(self, event, is_public=False, **kwargs):
return ''
def to_email_html(self, event, **kwargs):
body = self.to_string(event)
if not body:
return ''
return '<pre>%s</pre>' % (escape(body),)
| {
"content_hash": "3b963e0d023de2cbad69de653be313d7",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 78,
"avg_line_length": 25.394957983193276,
"alnum_prop": 0.5771012574454004,
"repo_name": "nicholasserra/sentry",
"id": "972e8bfa0985f1ebdf5fadef9ffba1f47188ce81",
"size": "3022",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/sentry/interfaces/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "174940"
},
{
"name": "HTML",
"bytes": "199996"
},
{
"name": "JavaScript",
"bytes": "609445"
},
{
"name": "Lua",
"bytes": "21966"
},
{
"name": "Makefile",
"bytes": "4816"
},
{
"name": "Python",
"bytes": "8613631"
}
],
"symlink_target": ""
} |
"""Home of estimator related functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import export as export_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import models
from tensorflow.python.keras._impl.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_module
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow.python.util.tf_export import tf_export
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def _cast_tensor_to_floatx(x):
"""Cast tensor to keras's floatx dtype if it is not already the same dtype."""
if x.dtype == K.floatx():
return x
else:
return math_ops.cast(x, K.floatx())
def _create_ordered_io(keras_model, estimator_io_dict, is_input=True):
"""Create a list of tensors from IO dictionary based on Keras IO order.
Args:
keras_model: an instance of compiled keras model.
estimator_io_dict: features or labels dictionary from model_fn.
is_input: True if dictionary is for inputs.
Returns:
a list of tensors based on Keras IO order.
Raises:
ValueError: if dictionary keys cannot be found in Keras model input_names
or output_names.
"""
if is_input:
keras_io_names = keras_model.input_names
else:
keras_io_names = keras_model.output_names
for key in estimator_io_dict:
if key not in keras_io_names:
raise ValueError(
'Cannot find %s with name "%s" in Keras Model. It needs to match '
'one of the following: %s' % ('input' if is_input else 'output', key,
', '.join(keras_io_names)))
tensors = []
for io_name in keras_io_names:
tensors.append(_cast_tensor_to_floatx(estimator_io_dict[io_name]))
return tensors
def _clone_and_build_model(mode,
keras_model,
custom_objects,
features=None,
labels=None):
"""Clone and build the given keras_model.
Args:
mode: training mode.
keras_model: an instance of compiled keras model.
custom_objects: Dictionary for custom objects.
    features: Dict mapping Keras input names to tensors, or None.
    labels: Dict mapping Keras output names to tensors, a single tensor, or
      None.
Returns:
The newly built model.
"""
# Set to True during training, False for inference.
K.set_learning_phase(mode == model_fn_lib.ModeKeys.TRAIN)
# Clone keras model.
input_tensors = None if features is None else _create_ordered_io(
keras_model, features)
if custom_objects:
with CustomObjectScope(custom_objects):
model = models.clone_model(keras_model, input_tensors=input_tensors)
else:
model = models.clone_model(keras_model, input_tensors=input_tensors)
# Compile/Build model
if mode is model_fn_lib.ModeKeys.PREDICT and not model.built:
model.build()
else:
optimizer_config = keras_model.optimizer.get_config()
optimizer = keras_model.optimizer.__class__.from_config(optimizer_config)
optimizer.iterations = training_util.get_or_create_global_step()
# Get list of outputs.
if labels is None:
target_tensors = None
elif isinstance(labels, dict):
target_tensors = _create_ordered_io(keras_model, labels, is_input=False)
else:
target_tensors = [
_cast_tensor_to_floatx(
sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(labels))
]
model.compile(
optimizer,
keras_model.loss,
metrics=keras_model.metrics,
loss_weights=keras_model.loss_weights,
sample_weight_mode=keras_model.sample_weight_mode,
weighted_metrics=keras_model.weighted_metrics,
target_tensors=target_tensors)
if isinstance(model, models.Sequential):
model = model.model
return model
def _create_keras_model_fn(keras_model, custom_objects=None):
"""Creates model_fn for keras Estimator.
Args:
keras_model: an instance of compiled keras model.
custom_objects: Dictionary for custom objects.
Returns:
The model_fn for a keras Estimator.
"""
def model_fn(features, labels, mode):
"""model_fn for keras Estimator."""
model = _clone_and_build_model(mode, keras_model, custom_objects, features,
labels)
# Get inputs to EstimatorSpec
predictions = dict(zip(model.output_names, model.outputs))
loss = None
train_op = None
eval_metric_ops = None
# Set loss and metric only during train and evaluate.
if mode is not model_fn_lib.ModeKeys.PREDICT:
model._make_train_function() # pylint: disable=protected-access
loss = model.total_loss
if model.metrics:
eval_metric_ops = {}
# When each metric maps to an output
if isinstance(model.metrics, dict):
for i, output_name in enumerate(model.metrics.keys()):
metric_name = model.metrics[output_name]
if callable(metric_name):
metric_name = metric_name.__name__
# When some outputs use the same metric
if list(model.metrics.values()).count(metric_name) > 1:
metric_name += '_' + output_name
eval_metric_ops[metric_name] = metrics_module.mean(
model.metrics_tensors[i - len(model.metrics)])
else:
for i, metric_name in enumerate(model.metrics):
if callable(metric_name):
metric_name = metric_name.__name__
eval_metric_ops[metric_name] = metrics_module.mean(
model.metrics_tensors[i])
# Set train_op only during train.
if mode is model_fn_lib.ModeKeys.TRAIN:
train_op = model.train_function.updates_op
return model_fn_lib.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs={
_DEFAULT_SERVING_KEY:
export_lib.export_output.PredictOutput(predictions)
})
return model_fn
def _save_first_checkpoint(keras_model, estimator, custom_objects,
keras_weights):
"""Save first checkpoint for the keras Estimator.
Args:
keras_model: an instance of compiled keras model.
estimator: keras estimator.
custom_objects: Dictionary for custom objects.
keras_weights: A flat list of Numpy arrays for weights of given keras_model.
"""
with ops.Graph().as_default() as g, g.device(estimator._device_fn):
random_seed.set_random_seed(estimator.config.tf_random_seed)
training_util.create_global_step()
model = _clone_and_build_model(model_fn_lib.ModeKeys.TRAIN, keras_model,
custom_objects)
if isinstance(model, models.Sequential):
model = model.model
# Load weights and save to checkpoint if there is no checkpoint
latest_path = saver_lib.latest_checkpoint(estimator.model_dir)
if not latest_path:
with session.Session() as sess:
model.set_weights(keras_weights)
# Make update ops and initialize all variables.
if not model.train_function:
# pylint: disable=protected-access
model._make_train_function()
K._initialize_variables(sess)
# pylint: enable=protected-access
saver = saver_lib.Saver()
saver.save(sess, os.path.join(estimator.model_dir, 'keras_model.ckpt'))
@tf_export('keras.estimator.model_to_estimator')
def model_to_estimator(keras_model=None,
keras_model_path=None,
custom_objects=None,
model_dir=None,
config=None):
"""Constructs an `Estimator` instance from given keras model.
For usage example, please see
@{$programmers_guide/estimators$creating_estimators_from_keras_models}.
Args:
keras_model: Keras model in memory.
keras_model_path: Directory to a keras model on disk.
custom_objects: Dictionary for custom objects.
model_dir: Directory to save Estimator model parameters, graph and etc.
config: Configuration object.
Returns:
An Estimator from given keras model.
Raises:
ValueError: if neither keras_model nor keras_model_path was given.
ValueError: if both keras_model and keras_model_path was given.
ValueError: if the keras_model_path is a GCS URI.
ValueError: if keras_model has not been compiled.
"""
if (not keras_model) and (not keras_model_path):
raise ValueError(
'Either keras_model or keras_model_path needs to be provided.')
if keras_model and keras_model_path:
raise ValueError(
        'Please specify either keras_model or keras_model_path but not both.')
if not keras_model:
if keras_model_path.startswith(
'gs://') or 'storage.googleapis.com' in keras_model_path:
raise ValueError(
'%s is not a local path. Please copy the model locally first.' %
keras_model_path)
logging.info('Loading models from %s', keras_model_path)
keras_model = models.load_model(keras_model_path)
else:
logging.info('Using the Keras model from memory.')
if not hasattr(keras_model, 'optimizer'):
raise ValueError(
'Given keras model has not been compiled yet. Please compile first '
'before creating the estimator.')
keras_weights = keras_model.get_weights()
keras_model_fn = _create_keras_model_fn(keras_model, custom_objects)
est = estimator_lib.Estimator(
keras_model_fn, model_dir=model_dir, config=config)
# TODO(yifeif): move checkpoint initialization to scaffold.init_fn
_save_first_checkpoint(keras_model, est, custom_objects, keras_weights)
return est
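# Minimal usage sketch (illustration only, not part of this module). It assumes
# a compiled tf.keras model; layer sizes, optimizer and model_dir are arbitrary
# examples.
#
# import tensorflow as tf
#
# model = tf.keras.models.Sequential([
#     tf.keras.layers.Dense(16, activation='relu', input_shape=(4,)),
#     tf.keras.layers.Dense(3, activation='softmax'),
# ])
# model.compile(optimizer='rmsprop',
#               loss='categorical_crossentropy',
#               metrics=['accuracy'])
# est = model_to_estimator(keras_model=model, model_dir='/tmp/keras_est')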
| {
"content_hash": "ff602e4a0eb7e2677a0037d83dd56693",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 85,
"avg_line_length": 36.03103448275862,
"alnum_prop": 0.6727916547037994,
"repo_name": "zasdfgbnm/tensorflow",
"id": "db0140c2df4d20f9e18e6c1401c6c6aa197bcf1f",
"size": "11173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/_impl/keras/estimator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9096"
},
{
"name": "C",
"bytes": "341181"
},
{
"name": "C++",
"bytes": "37811513"
},
{
"name": "CMake",
"bytes": "193934"
},
{
"name": "Go",
"bytes": "1061098"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "551109"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48122"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "1556"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "32936295"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425164"
}
],
"symlink_target": ""
} |
from distutils.core import setup
setup(
name = "Kamaelia.Community.RJL",
version = "0.1.0",
description = "Kamaelia - Multimedia & Server Development Kit",
author = "Ryan Lothian",
author_email = "[email protected]",
url = "http://kamaelia.sourceforge.net/",
packages = [
"Kamaelia",
"Kamaelia.Community",
"Kamaelia.Community.RJL",
"Kamaelia.Community.RJL.Kamaelia",
"Kamaelia.Community.RJL.Kamaelia.Protocol",
"Kamaelia.Community.RJL.Kamaelia.Protocol.Torrent",
"Kamaelia.Community.RJL.Kamaelia.Protocol.HTTP",
"Kamaelia.Community.RJL.Kamaelia.Protocol.HTTP.Handlers",
"Kamaelia.Community.RJL.Kamaelia.Util",
"Kamaelia.Community.RJL.Kamaelia.IPC",
"Kamaelia.Community.RJL.Kamaelia.File"
],
long_description = """
"""
)
| {
"content_hash": "3d0d36a48f962b89714092c4cc8e67e7",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 67,
"avg_line_length": 34.84,
"alnum_prop": 0.6463834672789897,
"repo_name": "bbc/kamaelia",
"id": "069fb21187c076759a0b17309efda88bab9cf075",
"size": "1777",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Sketches/RJL/Packages/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62985"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "Diff",
"bytes": "483"
},
{
"name": "Gettext Catalog",
"bytes": "3919909"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "31234"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896320"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "711244"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from mock import MagicMock
import bouncebox.core.component as bc
import bouncebox.core.event as be
import bouncebox.util.testing as testing
class TestPublishing(TestCase):
def setUp(self):
pass
def test_subscribe_event_none(self):
c1 = bc.Component()
c2 = bc.Component()
c2.event_handler = MagicMock()
c1.subscribe(c2.event_handler)
evt = be.Event()
c1.publish(evt)
c2.event_handler.assert_called_once_with(evt)
evt = testing.TestEventA()
c1.publish(evt)
c2.event_handler.assert_called_with(evt)
def test_subscribe_specific_event(self):
c1 = bc.Component()
c2 = bc.Component()
c2.event_handler = MagicMock()
c1.subscribe(c2.event_handler, testing.TestEventB) # bind event class
evt = testing.TestEventA()
c1.publish(evt)
assert not c2.event_handler.called # no match
evt = testing.TestEventB()
c1.publish(evt)
c2.event_handler.assert_called_with(evt)
def test_publish(self):
c1 = bc.Component()
c2 = bc.Component()
c2.event_handler = MagicMock()
c1.subscribe(c2.event_handler)
c2.add_event_listener(be.Event, 'handle_all_events')
c2.handle_all_events = MagicMock()
# make sure publish doesn't do regular broadcasting
c1.add_component(c2)
evt = testing.TestEventA()
c1.publish(evt)
c2.event_handler.assert_called_once_with(evt)
assert c2.handle_all_events.call_count == 0
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-s','-x','--pdb', '--pdb-failure'],exit=False)
| {
"content_hash": "46239614f1c5d66ef67550732325ecd8",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 85,
"avg_line_length": 28.951612903225808,
"alnum_prop": 0.5949860724233983,
"repo_name": "dalejung/bouncebox",
"id": "e7f199c0684a4a1e25dd8644553faa758d1389bc",
"size": "1795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bouncebox/core/test/test_element.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90980"
}
],
"symlink_target": ""
} |
from .config import * # NOQA
from .runtime import * # NOQA
| {
"content_hash": "b8b7bb146fbb4ae15372350ce706ae6c",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 30,
"avg_line_length": 30.5,
"alnum_prop": 0.6721311475409836,
"repo_name": "vicalloy/django-lb-workflow",
"id": "4fc274e1969c121078aeb29d16c9ba9068b77626",
"size": "61",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lbworkflow/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "Dockerfile",
"bytes": "452"
},
{
"name": "HTML",
"bytes": "32992"
},
{
"name": "JavaScript",
"bytes": "18"
},
{
"name": "Makefile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "194839"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from ..mode import Mode
from ...sprites.text import Text
from ...sprites.rectangle_sprite import RectangleSprite
class TitlePage(Mode):
"""Transition to a "real" mode"""
STATES = list(range(100))
SHOW_TIME = 1
_title_page = None
@classmethod
def title_page(cls, index, state_index):
return cls._title_page
def __init__(self, app, mode, index, state_index):
super(TitlePage, self).__init__(index=index, state_index=state_index)
self._app = app
self._mode = mode
type(self)._title_page = title_page = mode.title_page(index, state_index)
self._switch_at = time.time() + self.SHOW_TIME
self._mode_indicator = RectangleSprite(x=0, y=0, height=2, width=self.index, color=(0.5, 0.5, 0.5))
self._texts = [Text(x=0, y=i * 6 + 3, text=t) for i, t in enumerate(title_page[1:])]
def update(self, pixel_grid, dt):
if time.time() >= self._switch_at:
self._app.set_mode(self.mode, self.state_index, transition=False)
return
def render(self, pixel_grid):
self._mode_indicator.render(pixel_grid)
for text in self._texts:
text.render(pixel_grid)
@property
def mode(self):
return self._mode
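# Sketch of the contract assumed above (hypothetical mode, for illustration):
# `mode.title_page(index, state_index)` returns a sequence of strings; the
# first entry is kept but not rendered, and every following entry is drawn as
# one Text sprite per line.
#
# class SomeGameMode(Mode):
#     @classmethod
#     def title_page(cls, index, state_index):
#         return ["some_game", "SOME", "GAME"]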
| {
"content_hash": "e00ecf369d5bfc299d9060f23418c502",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 107,
"avg_line_length": 32.214285714285715,
"alnum_prop": 0.6223207686622321,
"repo_name": "Spooner/pixel-table",
"id": "cb5a39d7283dd380a6ac20e1787abd115c0332f5",
"size": "1353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pixel_table/modes/title_page/title_page.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3849"
},
{
"name": "C++",
"bytes": "8690"
},
{
"name": "Makefile",
"bytes": "169"
},
{
"name": "Python",
"bytes": "51762"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
# Authentication and Authorisation
from functools import wraps
from . import http
def permit(test_func):
'''Decorate a handler to control access'''
def decorator(view_func):
@wraps(view_func)
def _wrapped_view(self, *args, **kwargs):
if test_func(self, *args, **kwargs):
return view_func(self, *args, **kwargs)
return http.Forbidden()
return _wrapped_view
return decorator
permit_logged_in = permit(
lambda self, *args, **kwargs: self.request.user.is_authenticated()
)
permit_staff = permit(
lambda self, *args, **kwargs: self.request.user.is_staff
)
def permit_groups(*groups):
def in_groups(request, *args):
return request.user.groups.filter(name__in=groups).exists()
return permit(
lambda self, *args, **kwargs: in_groups(self.request, *groups)
)
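# Usage sketch (hypothetical handler class, for illustration only): the wrapped
# method runs only when the test passes; otherwise the decorator short-circuits
# with an http.Forbidden() response.
#
# class ReportPublisher(object):
#     @permit_logged_in
#     def get_object_list(self, *args, **kwargs):
#         ...
#
#     @permit_groups('editors', 'admins')
#     def delete_object(self, *args, **kwargs):
#         ...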
| {
"content_hash": "b3ec97d6958326d8a32da67f8d5ef3e0",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 70,
"avg_line_length": 27.545454545454547,
"alnum_prop": 0.6457645764576457,
"repo_name": "MarkusH/django-nap",
"id": "d5894cad22d64a70def2bfe8f38b883f5e8f4fb4",
"size": "909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nap/auth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "91846"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geography.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "78be681a6e1676b0ca9615ab84eb46a3",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 73,
"avg_line_length": 25.555555555555557,
"alnum_prop": 0.7130434782608696,
"repo_name": "adaptive-learning/geography",
"id": "b22cda9b173b50628c28b2e0b1d268d6f159807a",
"size": "252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "66639"
},
{
"name": "HTML",
"bytes": "50363"
},
{
"name": "JavaScript",
"bytes": "90291"
},
{
"name": "Python",
"bytes": "47693"
},
{
"name": "Shell",
"bytes": "702"
}
],
"symlink_target": ""
} |
from nipype.testing import assert_equal
from nipype.interfaces.fsl.maths import Threshold
def test_Threshold_inputs():
input_map = dict(args=dict(argstr='%s',
),
direction=dict(usedefault=True,
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=2,
),
internal_datatype=dict(argstr='-dt %s',
position=1,
),
nan2zeros=dict(argstr='-nan',
position=3,
),
out_file=dict(argstr='%s',
genfile=True,
hash_files=False,
position=-2,
),
output_datatype=dict(argstr='-odt %s',
position=-1,
),
output_type=dict(),
terminal_output=dict(mandatory=True,
nohash=True,
),
thresh=dict(argstr='%s',
mandatory=True,
position=4,
),
use_nonzero_voxels=dict(requires=['use_robust_range'],
),
use_robust_range=dict(),
)
inputs = Threshold.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Threshold_outputs():
output_map = dict(out_file=dict(),
)
outputs = Threshold.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| {
"content_hash": "b0f65f604537647c2ddf3a6a154e55ae",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 78,
"avg_line_length": 24.661016949152543,
"alnum_prop": 0.6178694158075602,
"repo_name": "mick-d/nipype_source",
"id": "78633dd9027eed542ae432deb40e4b7324f48587",
"size": "1509",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nipype/interfaces/fsl/tests/test_auto_Threshold.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9090"
},
{
"name": "Matlab",
"bytes": "5018"
},
{
"name": "Python",
"bytes": "3773780"
},
{
"name": "Shell",
"bytes": "2959"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
import argparse, json, os, sys, time
import base64
import copy
import gc
import subprocess
import hashlib
import pydeep
import magic
import pefile
import rethinkdb as r
from utils import red, blue
# Testing
from fv_fileoutput import file_compare
def _dump_data(name, data):
try:
with open(name, 'wb') as fh: fh.write(data)
print "Wrote: %s" % (red(name))
except Exception, e:
print "Error: could not write (%s), (%s)." % (name, str(e))
def _object_compare(obj1, obj2):
content1 = base64.b64decode(obj1)
content2 = base64.b64decode(obj2)
min_size = min(len(content1), len(content2))
max_size = max(len(content1), len(content2))
change_score = max_size - min_size
for i in xrange(min_size):
if content1[i] != content2[i]:
change_score += 1
return change_score
class Controller(object):
def command_list_fv(self, db, args):
ids = db.table("files").pluck("firmware_id").distinct().run()
for _id in ids:
info = db.table("updates").filter({"firmware_id": _id["firmware_id"]}).pluck("date", "machine", "name", "version").run()
print "%s:" % _id["firmware_id"],
for _machine in info:
print "%s, %s, %s, %s" % (_machine["date"], _machine["machine"], _machine["name"], _machine["version"])
pass
def command_list_files(self, db, args):
files = db.table("files").filter({"firmware_id": args.fv_id}).pluck("guid", "name", "attrs", "description").order_by(r.row["attrs"]["size"]).run()
for _file in files: print "%s %s %s (%s)" % (_file["guid"], _file["attrs"]["size"], _file["name"], _file["description"])
pass
def _compare_children(self, db, list1, list2, save= False):
change_score = 0
added_objects = []
added_objects_score = 0
### Assemble GUID pairs
children1, children2 = {}, {}
child_cursor = db.table("objects").get_all(*(list1 + list2)).\
pluck("id", "size", "object_id", "guid", "children", "order").\
order_by("order").order_by("size").run()
has_child = False
for i, child in enumerate(child_cursor):
if child["id"] == "4ae16769-cef1-44ec-97d7-13d6d59fdd21":
has_child = True
if "guid" not in child:
#print i, child["size"]
child["guid"] = min(len(children1.keys()), len(children2.keys()))
#print child["guid"], child["size"]
#print i, child["size"], child["guid"]
if child["id"] in list1:
if child["guid"] not in children1.keys():
children1[child["guid"]] = []
children1[child["guid"]].append(child)
if child["id"] in list2:
if child["guid"] not in children2:
children2[child["guid"]] = []
children2[child["guid"]].append(child)
#print children1.keys()
#print children2.keys()
objects1, objects2 = [], []
for guid in children2.keys():
if guid not in children1:
print "added guid %s" % guid
### This guid/object was added in the update
added_objects += [c["object_id"] for c in children2[guid]]
added_objects_score += sum([int(c["size"]) for c in children2[guid]])
### Todo: this does not account for nested children in a new update
continue
for i in xrange(len(children2[guid])):
if "children" in children2[guid][i] and len(children2[guid][i]["children"]) > 0:
### There are nested children, compare them individually.
if len(children1[guid]) <= i or "children" not in children1[guid][i]:
### There are less grandchildren in the previous update (for this child guid)
child_ids = db.table("objects").get_all(*children2[guid][i]["children"]).pluck("object_id").run()
nested_change = [
int(children2[guid][i]["size"]),
[child["object_id"] for child in child_ids],
int(children2[guid][i]["size"])
]
else:
#print red("will compare grandchildren lengths %d %d for guid %s, index %d" % (
# len(children1[guid][i]["children"]), len(children2[guid][i]["children"]), guid, i
# ))
nested_change = self._compare_children(db, children1[guid][i]["children"], children2[guid][i]["children"], save= save)
change_score += nested_change[0]
added_objects += nested_change[1]
added_objects_score += nested_change[2]
if save:
db.table("objects").get(children2[guid][i]["id"]).update({
"load_change": {
"change_score": nested_change[0],
"new_files": nested_change[1],
"new_files_score": nested_change[2]
}
}).run()
continue
elif len(children1[guid]) <= i:
added_objects.append(children2[guid][i]["object_id"])
added_objects_score += int(children2[guid][i]["size"])
change_score += int(children2[guid][i]["size"])
else:
objects1.append(children1[guid][i]) # ["object_id"]
objects2.append(children2[guid][i]) # ["object_id"]
### If there are objects, compare the content
content1, content2 = [], []
if len(objects1) + len(objects2) > 0:
content_cursor = db.table("content").\
get_all(*([o["object_id"] for o in objects1] + [o["object_id"] for o in objects2]),
index= "object_id").order_by("size").pluck("object_id", "content", "size", "children").run()
for content in content_cursor:
if content["object_id"] in [o["object_id"] for o in objects1]:
content1.append(content)
if content["object_id"] in [o["object_id"] for o in objects2]:
content2.append(content)
#print len(objects1), len(objects2), len(content1), len(content2)
ids1, ids2 = {o["object_id"]: o["id"] for o in objects1}, {o["object_id"]: o["id"] for o in objects2}
for i in xrange(len(content2)):
if len(content1) <= i:
content_change_score = int(content2[i]["size"])
content_added_objects = [content2[i]["object_id"]]
content_added_objects_score = int(content2[i]["size"])
else:
change = _object_compare(content1[i]["content"], content2[i]["content"])
content_added_objects = []
content_added_objects_score = 0
content_change_score = change
change_score += content_change_score
added_objects += content_added_objects
added_objects_score += content_added_objects_score
if save and ("children" not in content2[i] or len(content2[i]["children"]) == 0):
db.table("objects").get(ids2[content2[i]["object_id"]]).update({
"load_change": {
"change_score": content_change_score,
"new_files": content_added_objects,
"new_files_score": content_added_objects_score
}
}).run()
#print guid, change_score, len(added_objects)
return (change_score, added_objects, added_objects_score)
pass
def _compare_firmware(self, db, firmware1, firmware2, save= False):
### Query firmware objects
if len(firmware1[2]) == 0 or len(firmware2[2]) == 0:
print "Cannot compare versions (%d -> %d) without loaded firmware objects." % (firmware1[0], firmware2[0])
return
### This could be bad without guided-objects
if len(firmware1[2]) != len(firmware2[2]):
print "Firmware object count has changed between versions (%s -> %s)." % (firmware1[0], firmware2[0])
change = self._compare_children(db, firmware1[2], firmware2[2], save= True)
### Save changes to update
if save:
db.table("updates").get_all(firmware2[1], index= "firmware_id").update({
"load_change": {
"change_score": change[0],
"new_files": change[1],
"new_files_score": change[2],
"delta": firmware2[4] - firmware1[4]
}
}).run()
db.table("objects").get_all(firmware2[1], index= "object_id").update({
"load_change": {
"change_score": change[0],
"new_files": change[1],
"new_files_score": change[2],
"delta": firmware2[4] - firmware1[4]
}
}).run()
print "Firmware %s change: %s" % (firmware2[1], str(change))
pass
def _load_meta(self, db, _object):
content = base64.b64decode(_object["content"])
entry = {
"magic": magic.from_buffer(content),
"ssdeep": pydeep.hash_buf(content),
"md5": hashlib.md5(content).hexdigest(),
"sha1": hashlib.sha1(content).hexdigest(),
"sha256": hashlib.sha256(content).hexdigest()
}
if entry["magic"] == "MS-DOS executable":
### This is a weak application of magic
try:
pe_data = self._get_pe(content)
for k, v in pe_data.iteritems(): entry[k] = v
except Exception, e: print e; pass
pass
#entry_copy = copy.deepcopy(entry)
#del entry
#del content
#gc.collect()
db.table("content").get(_object["id"]).update({"load_meta": entry}).run()
print "Loaded meta for object (%s) %s." % (_object["firmware_id"], _object["id"])
pass
def _get_pe(self, content):
def section_name(s): return s.Name.replace("\x00", "").strip()
pe_entry = {}
pe = pefile.PE(data= content)
pe_entry["machine_type"] = pe.FILE_HEADER.Machine
pe_entry["compile_time"] = pe.FILE_HEADER.TimeDateStamp
pe_entry["sections"] = [section_name(s) for s in pe.sections if len(section_name(s)) > 0]
pe_entry["linker"] = "%d,%d" % (pe.OPTIONAL_HEADER.MajorLinkerVersion, pe.OPTIONAL_HEADER.MinorLinkerVersion)
pe_entry["os_version"] = "%d,%d" % (pe.OPTIONAL_HEADER.MajorOperatingSystemVersion, pe.OPTIONAL_HEADER.MinorOperatingSystemVersion)
pe_entry["image_version"] = "%d,%d" % (pe.OPTIONAL_HEADER.MajorImageVersion, pe.OPTIONAL_HEADER.MinorImageVersion)
pe_entry["subsystem"] = pe.OPTIONAL_HEADER.Subsystem
pe_entry["subsystem_version"] = "%d,%d" % (pe.OPTIONAL_HEADER.MajorSubsystemVersion, pe.OPTIONAL_HEADER.MinorSubsystemVersion)
del pe
return pe_entry
pass
def _load_children(self, db, children):
child_objects = db.table("objects").get_all(*children).pluck("id", "object_id", "load_meta", "children").run()
for child in child_objects:
if "children" in child and len(child["children"]) > 0:
self._load_children(db, child["children"])
continue
contents = db.table("content").get_all(child["object_id"], index= "object_id").\
filter(not r.row.contains("load_meta")).run()
num = 0
for content in contents:
print "%d/??" % (num),
num += 1
self._load_meta(db, content)
break
del contents
pass
def _get_product_updates(self, db, product):
updates = db.table("updates").order_by("date").filter(lambda update:
update["products"].contains(product) & update.has_fields("firmware_id")
).map(r.row.merge({ "object_id": r.row["firmware_id"] })).eq_join("object_id",
db.table("objects"), index= "object_id"
).zip().run()
return updates
pass
def command_load_meta(self, db, args):
if args.vendor:
vendor_products = []
products = db.table("updates").order_by("date").filter(lambda update:
update["vendor"].eq(args.vendor)
).pluck("products").run()
for product_list in products:
for product in product_list["products"]:
if product not in vendor_products:
vendor_products.append(product)
products = vendor_products
### In an effort to avoid memory exhaustion
for product in products:
print "Recalling load_meta for product %s" % product
subprocess.call("python %s load_meta --product \"%s\"" % (sys.argv[0], product), shell=True)
return
products = [args.product]
for product in products:
updates = self._get_product_updates(db, product)
for update in updates:
if "children" not in update or len(update["children"]) == 0:
continue
self._load_children(db, update["children"])
def command_load_change(self, db, args):
if args.vendor:
vendor_products = []
products = db.table("updates").order_by("date").filter(lambda update:
update["vendor"].eq(args.vendor)
).pluck("products").run()
for product_list in products:
for product in product_list["products"]:
if product not in vendor_products:
vendor_products.append(product)
products = vendor_products
else:
products = [args.product]
for product in products:
updates = self._get_product_updates(db, product)
firmware_objects = []
for update in updates:
firmware_objects.append((update["version"], update["firmware_id"], update["children"], "load_change" in update, update["date"]))
for i in xrange(len(firmware_objects)-1):
if not args.force and firmware_objects[i+1][3]:
print "Skipping change comparison (%s -> %s), already completed." % (firmware_objects[i][0], firmware_objects[i+1][0])
continue
self._compare_firmware(db, firmware_objects[i], firmware_objects[i+1], True)
def _add_lookup(self, db, guid, name, value, force= False):
if db.table("objects").get_all(guid, index= "guid").is_empty().run():
if force is False:
print "Cannot find any files matching GUID (%s), please use the force option." % guid
return
pass
if db.table("lookup").get_all(guid, index= "guid").is_empty().run():
db.table("lookup").insert({
"guid": guid,
"%s" % name: value
}).run()
print "Added lookup for GUID (%s), with (%s) = (%s)." % (guid, name, value)
else:
db.table("lookup").get_all(guid, index= "guid").update({"%s" % name: value}).run()
print "Updated lookup for GUID (%s), set (%s) = (%s)." % (guid, name, value)
pass
def command_add_lookup(self, db, args):
self._add_lookup(db, args.guid, args.name, args.value, force= args.force)
def command_load_guids(self, db, args):
from uefi_firmware.guids import GUID_TABLES
from uefi_firmware.utils import rfguid
for table in GUID_TABLES:
for name, r_guid in table.iteritems():
#print name, rfguid(r_guid)
self._add_lookup(db, rfguid(r_guid), "guid_name", name, True)
pass
def parse_extra(parser, namespace):
namespaces = []
extra = namespace.extra
while extra:
n = parser.parse_args(extra)
extra = n.extra
namespaces.append(n)
return namespaces
def main():
argparser = argparse.ArgumentParser()
subparsers = argparser.add_subparsers(help='Firmware Controls', dest='command')
parser_list_fv = subparsers.add_parser("list_fv", help= "List all FV IDs which have files in the DB")
parser_list_files = subparsers.add_parser("list_files", help= "List all files GUIDs for a given FV ID")
parser_list_files.add_argument("fv_id", help="Firmware ID.")
'''Simple loading/parsing commands.'''
parser_load_change = subparsers.add_parser("load_change", help= "Load change scores for objects and firmware.")
parser_load_change.add_argument("-f", "--force", action="store_true", default= False, help="Force recalculation.")
group = parser_load_change.add_mutually_exclusive_group(required= True)
group.add_argument("--product", help="Product to load.")
group.add_argument("--vendor", help="Vendor to load.")
parser_load_meta = subparsers.add_parser("load_meta", help= "Extract meta, hashes for a machine's firmware.")
parser_load_meta.add_argument("-f", "--force", action="store_true", default= False, help="Force recalculation.")
group = parser_load_meta.add_mutually_exclusive_group(required= True)
group.add_argument("--product", help="Product to load.")
group.add_argument("--vendor", help="Vendor to load.")
parser_add_lookup = subparsers.add_parser("add_lookup", help= "Add metadata about a file GUID.")
parser_add_lookup.add_argument("-f", "--force", default=False, action= "store_true", help= "Force the lookup insert.")
parser_add_lookup.add_argument("guid", help= "File GUID")
parser_add_lookup.add_argument("name", help="Key to add to the GUID.")
parser_add_lookup.add_argument("value", help= "Value")
parser_load_guids = subparsers.add_parser("load_guids", help= "Read in EFI GUID definitions.")
parser_load_guids.add_argument("-f", "--force", default= False, action= "store_true", help= "Override existing DB GUID definitions.")
args = argparser.parse_args()
controller = Controller()
command = "command_%s" % args.command
r.connect("localhost", 28015).repl()
db = r.db("uefi")
#objects_table = db.table("objects")
#updates_table = db.table("updates")
#content_table = db.table("content")
#lookup_table = db.table("lookup")
#stats_table = db.table("stats")
command_ptr = getattr(controller, command, None)
if command_ptr is not None:
command_ptr(db, args)
if __name__ == '__main__':
main()
| {
"content_hash": "87ef81adcf596c4e8bbfeb3b9492310b",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 154,
"avg_line_length": 44.99764705882353,
"alnum_prop": 0.5440284459318134,
"repo_name": "theopolis/subzero",
"id": "9f358aff221cff228ce89bfe43ff52ebbf46ddf8",
"size": "19124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/fv_dbcontrol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "48714"
},
{
"name": "JavaScript",
"bytes": "226673"
},
{
"name": "Python",
"bytes": "66449"
},
{
"name": "Ruby",
"bytes": "46743"
}
],
"symlink_target": ""
} |
from conans.model.conan_file import ConanFile
from conans import CMake
import os
class DefaultNameConan(ConanFile):
name = "DefaultName"
version = "0.1"
settings = "os", "compiler", "arch", "build_type"
generators = "cmake"
requires = "sfml/2015.8.12@TyRoXx/develop"
def build(self):
cmake = CMake(self.settings)
self.run('cmake . %s' % cmake.command_line)
self.run("cmake --build . %s" % cmake.build_config)
def test(self):
self.run(".%sbin%stest " % (os.sep, os.sep))
| {
"content_hash": "cdcb646aa46520b49f56df779832bb59",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 59,
"avg_line_length": 29.61111111111111,
"alnum_prop": 0.626641651031895,
"repo_name": "TyRoXx/conan-sfml",
"id": "46566557c22524b57f17298532958b09fca43eb8",
"size": "533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/conanfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "232"
},
{
"name": "CMake",
"bytes": "179"
},
{
"name": "Python",
"bytes": "3791"
}
],
"symlink_target": ""
} |
"""Module for handling IO in SALT software.
All calls to IO functions should go through this module.
**Note** this is a replacement for the `saltio` module.
"""
import os, glob, shutil, re, tempfile
from time import strftime
import smtplib
from email.mime.text import MIMEText
from astropy.io import fits
import numpy as np
from salterror import SaltIOError
def readlist(param):
"""Returns list from epar parameter.
It accepts the following for *param*
A filename prefixed by @, for example::
readlist('@images')
will read names from the file images.
A comma separated list of filenames
"""
# Ensure parameter is a string without leading or trailing whitespace
try:
param=str(param).strip()
except:
raise SaltIOError('Cannot convert input argument to string.')
if param[0]=='@':
try:
f=open(param[1:],'r')
output=[name.strip() for name in f]
except:
raise SaltIOError('Could not read from input file '+param[1:])
else:
output=[name.strip() for name in param.split(',')]
return output
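def _example_readlist():
    # Hedged sketch, not part of the original module: the comma-separated form
    # needs no '@' list file on disk; the file names are illustrative only.
    return readlist('img1.fits, img2.fits')   # -> ['img1.fits', 'img2.fits']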
def get_exposure(files, number=1):
"""Given a list of fits files returns exposure data as numpy array.
By default `get_exposure` will return the first exposure with data.
If *number* parameter is given it will browse the file list until
the requested exposure number is found.
    Because of this browsing `get_exposure` is only fast for small exposure
*number*. If the exposure numbers are always big or if you need to
access a lot of exposures you should use `build_exposure_index` and
`get_indexed_exposure` instead.
"""
try:
f=fits.open(files[0])
# Check if primary HDU contains data
if f[0].data is None:
N=len(f)-1
offset=1
else:
N=len(f)
offset=0
# Check if requested exposure number is in current file
if number<=N:
print 'Exposure found in file ',files[0]
output=np.asarray(f[number-1+offset].data)
else:
output=get_exposure(files[1:],number-N)
f.close()
except IOError:
raise SaltIOError('Cannot open FITS file '+str(files[0]))
return output
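def _example_get_exposure():
    # Hedged sketch, not part of the original module: the file names are
    # illustrative; get_exposure walks the list until the third exposure
    # with data is reached.
    files = ['obs1.fits', 'obs2.fits']
    return get_exposure(files, number=3)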
def build_exposure_index(files):
"""Given a list of fits files returns an index of exposures for use
with `get_indexed_exposure`.
"""
index=[]
for name in files:
try:
f=fits.open(name)
index+=[(name,number) for number in range(len(f)) if f[number].data is not None]
f.close()
except IOError:
raise SaltIOError('Cannot open FITS file '+str(name))
return index
def get_indexed_exposure(files,index,number=1):
"""Given a list of fits files and a index generated by
`build_exposure_index` it returns the requested exposure as a numpy array.
    By default `get_indexed_exposure` will return the first exposure with data.
    If the *number* parameter is given, the corresponding entry is looked up
    directly in the index.
The index consumes a lot of memory. Only use `get_indexed_exposure`
when you need to access a large number of exposures fast, or when the
exposure number is always big.
Otherwise you should use `get_exposure` instead.
"""
try:
f=fits.open(index[number-1][0])
output=np.asarray(f[index[number-1][1]].data)
f.close()
except IOError:
raise SaltIOError('Cannot open FITS file '+str(index[number-1]))
return output
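def _example_indexed_exposures(files):
    # Hedged sketch, not part of the original module: build the index once,
    # then pull each exposure by its 1-based number; this stays cheap even
    # for large exposure numbers.
    index = build_exposure_index(files)
    return [get_indexed_exposure(files, index, number=n + 1)
            for n in range(len(index))]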
def openbinary(file, type):
"""Open binary file."""
content=[]
try:
content = open(file,type)
except:
        raise SaltIOError('Cannot open binary file ' + file)
return content
def readbinary(content, size, format):
"""read data from a binary file."""
import struct
value=''
try:
value=content.read(size)
value=struct.unpack(format,value)[0]
except:
raise SaltIOError('Cannot read value')
return value
def openascii(file,type):
"""open ASCII file"""
content=[]
try:
content = open(file,type)
except:
raise SaltIOError('Cannot open ASCII file '+file)
return content
def closeascii(file):
"""close ASCII file"""
try:
file.close()
except:
raise SaltIOError('Cannot close ASCII file '+file)
def tmpfile(path):
"""create a temporary file name"""
try:
tempfile.tempdir = path
infile = tempfile.mktemp()
except Exception, e:
infile = ''
raise SaltIOError('Cannot create temporary file name because %s' % e)
return infile
def openfits(infile, mode='copyonwrite', memmap=False):
"""open FITS file"""
try:
struct = fits.open(infile, mode=mode, memmap=memmap)
except Exception, e:
msg='Cannot open %s as a FITS file because %s' % (infile, e)
raise SaltIOError(msg)
return struct
def openupdatefits(infile):
"""open FITS file for updating"""
try:
struct = fits.open(infile,mode='update')
except:
raise SaltIOError('Cannot open '+infile+' as a FITS file')
return struct
def updatefits(struct):
"""update existing FITS file"""
try:
struct.flush()
except:
raise SaltIOError('Cannot update FITS file')
def writefits(struct,outfile, clobber=True):
"""write to FITS file"""
if (os.path.isfile(outfile) and clobber):
delete(outfile)
try:
struct.writeto(outfile)
except Exception,e :
raise SaltIOError('Cannot write %s because %s' % (outfile, e))
def readimage(struct,hdu):
"""read image from HDU structure"""
imagedata=[]
try:
imagedata = struct[hdu].data
except:
raise SaltIOError('Cannot read image data from HDU '+str(hdu))
return imagedata
def readheader(struct,hdu):
"""read image from HDU structure"""
headerdata=[]
try:
headerdata = struct[hdu].header
except:
raise SaltIOError('Cannot read header data from HDU '+str(hdu))
return headerdata
def writeimage(struct,hdu,imagedata):
"""write image from HDU structure"""
try:
struct[hdu].data = imagedata
except Exception, e:
raise SaltIOError('Cannot write image data to HDU ' + str(hdu))
return struct
def readtab(hdu,infile=''):
"""read FITS table HDU"""
table=''
try:
table = hdu.data
except:
raise SaltIOError('could not extract table from '+infile)
return table
def fitscolumns(columns):
"""construct FITS table columns"""
table=''
try:
table = fits.ColDefs(columns)
except:
raise SaltIOError('Cannot define table columns')
return table
def newfitstable(table,infile=None):
"""write FITS table"""
struct=''
try:
struct = fits.BinTableHDU.from_columns(table)
except Exception, e:
raise SaltIOError('Cannot create new table because %s' % e)
return struct
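def _example_newfitstable():
    # Hedged sketch, not part of the original module: build a one-column
    # binary table HDU with the helpers above; the column name and format
    # ('E' = 32-bit float) are illustrative only.
    col = fits.Column(name='WAVELENGTH', format='E',
                      array=np.arange(10, dtype=np.float32))
    return newfitstable(fitscolumns([col]))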
def closefits(struct):
"""close HDU structure"""
try:
struct.close()
except:
raise SaltIOError('Cannot close HDU structure')
def logname(file):
"""test the log file exists"""
import string
newlog = string.join(file.split(),"")
if (len(newlog) == 0):
newlog = 'salt.log'
return newlog
def overwrite(infile,clobber):
"""clobber if file is to be overwritten"""
if (os.path.isfile(infile) and clobber):
delete(infile)
elif (os.path.isfile(infile) and not clobber):
raise SaltIOError('file '+infile+' exists. use clobber=y')
def fileexists(infile):
"""check that a file exists"""
if not os.path.isfile(infile):
raise SaltIOError('File '+infile+' does not exist')
def filedoesnotexist(infile):
"""check that a file does not exist"""
if os.path.isfile(infile):
raise SaltIOError('File '+infile+' already exists')
def delete(infile):
"""delete a file"""
try:
os.remove(infile)
except Exception, e:
raise SaltIOError('Could not delete '+infile+' because '+str(e))
def deletedir(path):
"""delete a directory"""
try:
shutil.rmtree(path)
except:
raise SaltIOError('Could not delete directory '+path)
def pathexists(path):
"""check that a path exists and name ends with a /"""
path = path.strip()
if (path[-1] != '/'): path += '/'
if (not os.path.exists(path)):
raise SaltIOError('Path '+path[:-1]+' does not exist')
return path
def abspath(path):
"""convert relative path to absolute path"""
try:
path=pathexists(path)
curpath = os.getcwd()
changedir(path)
path = os.getcwd() + '/'
changedir(curpath)
except:
raise SaltIOError('Could not determine absolute path to '+path)
return path
def createdir(path):
"""create a directory"""
path = path.strip()
if (path[-1] != '/'): path += '/'
if (not os.path.exists(path)):
try:
os.mkdir(path)
except:
raise SaltIOError('Could not create directory '+path)
def changedir(path):
"""change working directory"""
path = path.strip()
try:
os.chdir(path)
except:
raise SaltIOError('Could not move to directory '+path)
def copy(file1,file2):
"""copy file"""
try:
shutil.copy2(file1,file2)
except Exception, e:
raise SaltIOError('Could not copy %s to %s due to %s' % (file1, file2, e))
def copydir(file1,file2):
"""copy direcotry recursively"""
try:
shutil.copytree(file1,file2)
except:
raise SaltIOError('Could not copy ' + file1 + ' to ' + file2)
def move(file1,file2):
"""move file"""
try:
shutil.move(file1,file2)
except Exception,e :
raise SaltIOError('Could not move %s to %s due to %s' % (file1, file2, e))
def symlink(infile,linkfile,clobber):
"""create symbolic link"""
# delete file if one of the same name already exists
if (os.path.exists(linkfile) and not clobber):
raise SaltIOError('file ' + linkfile + ' exists, use clobber=y')
if clobber:
try:
os.remove(linkfile)
except:
pass
# create symbolic link
try:
os.symlink(infile,linkfile)
except:
raise SaltIOError('could not create symbolic link from '+infile+' to '+linkfile)
def filedefined(filetype,file):
"""has a file been defined?"""
file = file.strip()
if (len(file) == 0 or file.count(' ') > 0):
        raise SaltIOError('filetype '+filetype+' file(s) '+file+' not specified')
def argunpack(argument, value):
"""For arguments that might be a file or list, unpack to make a single list"""
try:
argdefined(argument, value)
if value[0] == '@':
listexists(argument, value)
return listparse(argument,value,'','','')
except:
raise SaltIOError('Unable to unpack ' + argument)
def argdefined(argument,value):
"""has an argument been defined?"""
value = value.strip()
if (len(value) == 0):
raise SaltIOError(argument + ' argument not defined')
def listexists(filetype,file):
"""does a list file exist, i.e. a parameter that begins with the '@' character"""
file = file.lstrip('@')
if not os.path.isfile(file):
raise SaltIOError(filetype + ' list '+file+' does not exist')
def readimages(filetype, images):
"""Read in and parse a list of input images
"""
infiles=[]
# check to see if the file exists
filedefined(filetype,images)
listexists(filetype,images)
infiles=listparse(filetype,images,'',infiles,'')
filesexist(infiles,'','r')
return infiles
def listparse(listtype,inlist,pref,altlist,path):
"""create a list from a file or parameter"""
outlist = []
# open the file and read in the arguements
if (len(inlist) > 0 and inlist[0] == '@' and len(pref) == 0):
line = ' '
infile = open(inlist.lstrip('@'))
while line:
line = infile.readline()
if (len(line.strip()) > 0) and not line.strip().startswith('#'):
outlist.append(line.rstrip('\r\n'))
# Include a single entry or a comma separated list of entries.
elif (len(inlist) > 0 and inlist[0] != '@' and inlist.count('*') == 0 and len(pref) == 0):
if (inlist.count(',') == 0):
outlist.append(inlist)
else:
list = inlist.split(',')
for listitem in list:
outlist.append(listitem)
# Include entries with a wildcard
elif (len(inlist) > 0 and inlist[0] != '@' and inlist.count('*') > 0 and len(pref) == 0):
globfiles = glob.glob(path+inlist)
for globitem in globfiles:
outlist.append(globitem.lstrip(path))
outlist.sort()
# Have an alternate or default list to include
elif (len(pref) > 0):
for infile in altlist:
basefile = os.path.basename(infile)
outlist.append(pref+basefile)
# If nothing is found, throw an error
if (len(outlist) == 0):
raise SaltIOError(listtype + ' list is empty')
return outlist
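def _example_listparse():
    # Hedged sketch, not part of the original module: the comma-separated and
    # prefix/alternate-list input modes handled above; file names are
    # illustrative only.
    names = listparse('Image', 'a.fits,b.fits', '', [], '')     # ['a.fits', 'b.fits']
    prefixed = listparse('Image', '', 'm', ['raw/a.fits'], '')  # ['ma.fits']
    return names, prefixed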
def filesexist(infiles,path,mode):
"""check files in list exist"""
if (path != ''):
if (path[len(path)-1] != '/'): path += '/'
for fileitem in infiles:
if (mode == 'r'):
if (not os.path.isfile(path+fileitem)):
raise SaltIOError('file '+path+fileitem+' does not exist')
elif (mode == 'w'):
if (os.path.isfile(path+fileitem)):
raise SaltIOError('file '+path+fileitem+' already exists')
def comparelists(list1,list2,name1,name2):
"""are two lists the same length?"""
if (len(list1) != len(list2)):
raise SaltIOError(name1+' and '+name2+' lists are of unequal length')
def cleanpropcode(pids, propids):
"""Create the list of appropriate proprosal codes"""
props = []
# Make a list of all the Propcodes that are observed
for pid in propids:
props.extend(pid.upper().split(','))
    # Check to make sure that the requested propcodes
    # are in that night's observations, or set the propcodes
    # to all of that night's observations
if (pids[0].upper() != 'ALL'):
for pid in pids:
for pid in pid.split(','):
if (pid.upper().strip() not in set(props)):
raise SaltIOError('Propcode ' + pid.upper()+' is not recorded in the observation log ')
else:
pids = set(props)
pids=removebadpids(pids)
if not pids:
raise SaltIOError('Propcode list is empty')
return pids
def removebadpids(pids):
"""Remove propcodes that you do not want --namely junk, bias, test"""
badnames=('BIAS','COMMON','JUNK','NONE','TEST','UNKNOWN')
original_pids=set(pids)
for pid in original_pids:
for bn in badnames:
if pid.upper().count(bn): pids.remove(pid)
return pids
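def _example_cleanpropcode():
    # Hedged sketch, not part of the original module: 'ALL' expands to every
    # observed propcode, and removebadpids then drops junk/calibration codes
    # such as the BIAS entry below. The propcodes are illustrative only.
    return cleanpropcode(['ALL'], ['2018-1-SCI-042', 'CAL_BIAS'])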
def removeengineeringpids(pids):
"""Removing propcodes that are associated with engineering and calibration proposals"""
new_pids=[]
for pid in pids:
if not pid.count('ENG_') and not pid.count('CAL_'):
new_pids.append(pid)
return new_pids
def readgaindb(gaindb):
"""read gain database file"""
dbspeed = []
dbrate = []
dbgain = []
dbnoise = []
dbbias = []
dbamp = []
try:
gainfile = open(gaindb,'r')
for line in gainfile:
if (len(line.strip()) > 0 and line[0] != '#'):
line = line.rstrip('\r\n')
line = re.sub("\s+",",",line)
                line = line.rstrip(',')
entries = line.split(',')
dbspeed.append(entries[0])
dbrate.append(entries[1])
dbgain.append(entries[2])
dbnoise.append(entries[3])
dbbias.append(entries[4])
dbamp.append(entries[5].strip('amp'))
except:
raise SaltIOError('Cannot read gain database file '+gaindb)
return dbspeed, dbrate, dbgain, dbnoise, dbbias, dbamp
def readxtalkcoeff(xtalkfile):
"""read crosstalk coefficent file"""
xdict = {}
try:
xfile = open(xtalkfile,'r')
for line in xfile:
if (len(line.strip()) > 0 and line[0] != '#'):
line = line.rstrip('\r\n')
line = line.rstrip()
line = re.sub("\s+",",",line)
                line = line.rstrip(',')
line = line.split(',')
xdict[int(line[0])]=line[1:]
except:
raise SaltIOError('Cannot read crosstalk coefficient file '+xtalkfile)
return xdict
def readccdgeom(geomfile):
"""read CCD geometry definition file"""
gap = 0.
xshift = [0., 0.]
yshift = [0., 0.]
rot = [0., 0.]
try:
gfile = open(geomfile,'r')
for line in gfile:
if (len(line.strip()) > 0 and line[0] != '#'):
line = line.rstrip('\r\n')
line = line.rstrip().lstrip()
line = re.sub("\s+",",",line)
pars = line.split(',')
gap = float(pars[1])
xshift[0] = float(pars[2])
yshift[0] = float(pars[3])
rot[0] = float(pars[4])
if (len(pars) == 8):
xshift[1] = float(pars[5])
yshift[1] = float(pars[6])
rot[1] = float(pars[7])
except Exception, e :
raise SaltIOError('Cannot read geometry definition parameters in file %s because %s'%(geomfile, e))
return gap, xshift, yshift, rot
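def _example_readccdgeom():
    # Hedged sketch, not part of the original module: writes a one-line
    # geometry file in the whitespace-separated layout parsed above
    # (a leading label, then gap, xshift, yshift, rot) and reads it back.
    # The label and numbers are illustrative only.
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.dat', delete=False)
    tmp.write('geom 1.50 -0.30 0.12 0.05\n')
    tmp.close()
    return readccdgeom(tmp.name)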
def checkfornone(inval):
if inval is None: return None
try:
inval=inval.strip().upper()
except:
pass
if inval in ['NONE','']: return None
try:
if not inval.strip(): return None
except:
pass
return inval
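def _example_checkfornone():
    # Hedged sketch, not part of the original module: 'none' (any case) and
    # empty strings normalise to None; any other value is returned upper-cased.
    return checkfornone('none'), checkfornone(''), checkfornone('flat.fits')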
def getSection(section, iraf_format=True):
"""Given an input string for a section in an image, it will
return the input as a list.
section: An input string given a section in the image
Set to None to return the whole image
iraf_format: It will invert the x and y values
"""
#return None if section is None
if section is None:
return None
#remove the brackets
section=section.replace('[','')
section=section.replace(']','')
#loop through the axis
sect_list=[]
for s in section.split(','):
for t in s.split(':'):
sect_list.append(int(t))
#flip things around for use with python
if iraf_format and len(sect_list)==4:
return [sect_list[2]-1, sect_list[3], sect_list[0]-1, sect_list[1]]
return sect_list
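def _example_getSection():
    # Hedged sketch, not part of the original module: the 1-indexed IRAF
    # section '[x1:x2,y1:y2]' becomes the 0-indexed [y1-1, y2, x1-1, x2]
    # list used for numpy slicing.
    return getSection('[1:100,1:50]')   # -> [0, 50, 0, 100]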
def ask(msg):
"""Ask for a user response
returns the response
"""
resp=''
try:
resp=raw_input(msg)
except Exception, e:
msg='Could not get response because %s' % e
raise SaltIOError(msg)
return resp
def yn_ask(msg):
"""Ask for a [y/n] user response
returns the response
"""
resp=ask(msg)
while not (resp=='y' or resp=='n'):
pmsg="Please respond with a 'y' or 'n':"
resp=ask(pmsg)
if resp=='y':
resp=True
else:
resp=False
return resp
def email(server,username,password,sender,recipient,bcc, subject,message):
"""send email"""
# connect to email server
try:
smtp = smtplib.SMTP()
smtp.connect(server)
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(username,password)
except Exception, e:
message = 'Cannot connect to %s because %s' % (server, e)
raise SaltIOError(message)
#set up to send to all recipients
recip = []
recip.append(recipient)
for bccobj in bcc.split(','):
recip.append(bccobj)
# send emails
msg = MIMEText(message)
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = recipient
msg['bcc'] = bcc
try:
smtp.sendmail(sender,recip,msg.as_string())
#print msg
except Exception, e:
raise SaltIOError('Failed to send email to %s because %s'% (recipient, e))
# disconnect from email server
try:
smtp.quit()
except:
raise SaltIOError('Cannot disconnect from email server '+server)
| {
"content_hash": "50b16a9104b781a83a27ceefab2ce16c",
"timestamp": "",
"source": "github",
"line_count": 742,
"max_line_length": 107,
"avg_line_length": 27.580862533692724,
"alnum_prop": 0.5942340581480576,
"repo_name": "crawfordsm/pysalt",
"id": "f5f476b2f4a1a775ffb739a5b6e883a7193c61a4",
"size": "20851",
"binary": false,
"copies": "2",
"ref": "refs/heads/placeholder",
"path": "lib/saltsafeio.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9334"
},
{
"name": "Common Lisp",
"bytes": "19932"
},
{
"name": "Makefile",
"bytes": "856"
},
{
"name": "Python",
"bytes": "1381161"
},
{
"name": "Smalltalk",
"bytes": "271"
}
],
"symlink_target": ""
} |
import os
import time
import numpy as np
from multiprocessing import Process
from retino.axon import Axon
from retino.plot import plot_axon
def produce_axon_growth_demo(id, target, iterations_count):
t = time.time()
axon = Axon(None, id=id, target=target)
for i in range(iterations_count):
axon.grow()
output_directory = "../Plots/AxonGrowth/AXON-id=" + str(axon.id) + "-target=" + str(axon.target)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
number = "%04d" % i
plot_axon(axon, output_directory + "/" + number + ".png", 100.0, 100.0)
print("Growth demo finished plotting for Axon " + str(id) + " in " + str(time.time() - t))
if __name__ == '__main__':
targets = np.mgrid[20:100:30, 20:100:30].reshape(2, -1).T
for i in range(len(targets)):
p = Process(target=produce_axon_growth_demo, args=(i, targets[i], 300))
p.start()
print("Started P", i, "for", targets[i])
| {
"content_hash": "4e5fd0c92884da2a604ac15e0f8cc304",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 104,
"avg_line_length": 33.266666666666666,
"alnum_prop": 0.6192384769539078,
"repo_name": "calben/retino",
"id": "b65a184de89668387c16bb88c4c231ca50ff5b41",
"size": "998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/experiment_growth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55809"
}
],
"symlink_target": ""
} |
"""
flask_security.passwordless
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Flask-Security passwordless module
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
from flask import current_app as app
from werkzeug.local import LocalProxy
from .signals import login_instructions_sent
from .utils import config_value, get_token_status, send_mail, url_for_security
# Convenient references
_security = LocalProxy(lambda: app.extensions['security'])
_datastore = LocalProxy(lambda: _security.datastore)
def send_login_instructions(user):
"""Sends the login instructions email for the specified user.
:param user: The user to send the instructions to
"""
token = generate_login_token(user)
login_link = url_for_security('token_login', token=token, _external=True)
send_mail(config_value('EMAIL_SUBJECT_PASSWORDLESS'), user.email,
'login_instructions', user=user, login_link=login_link)
login_instructions_sent.send(app._get_current_object(), user=user,
login_token=token)
def generate_login_token(user):
"""Generates a unique login token for the specified user.
:param user: The user the token belongs to
"""
return _security.login_serializer.dumps([str(user.id)])
def login_token_status(token):
"""Returns the expired status, invalid status, and user of a login token.
For example::
expired, invalid, user = login_token_status('...')
:param token: The login token
"""
return get_token_status(token, 'login', 'LOGIN')
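def _example_token_round_trip(user):
    """A minimal sketch, not part of Flask-Security: generate a login token
    for ``user`` with the helper above and immediately check its status.
    Requires an application context, since the serializer lives on the
    Flask-Security extension."""
    token = generate_login_token(user)
    expired, invalid, token_user = login_token_status(token)
    return expired, invalid, token_user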
| {
"content_hash": "42024d3f9ba0bdf1440f33ed50cc0b4e",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 78,
"avg_line_length": 29.472727272727273,
"alnum_prop": 0.6767427513880321,
"repo_name": "williamfeng323/py-web",
"id": "fef70ad7bd2ae6549cb17d07f4842b3c48981b4f",
"size": "1645",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "flask/lib/python3.6/site-packages/flask_security/passwordless.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "39957"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "HTML",
"bytes": "6046"
},
{
"name": "JavaScript",
"bytes": "6264"
},
{
"name": "Mako",
"bytes": "10018"
},
{
"name": "Python",
"bytes": "15554131"
},
{
"name": "Shell",
"bytes": "6007"
}
],
"symlink_target": ""
} |
from django.test import TestCase, Client, RequestFactory
from django.contrib.auth.models import User
from imager_profile.models import ImagerProfile
from imager_profile.views import ProfileView, HomeView
import factory
class UserFactory(factory.django.DjangoModelFactory):
"""Generate test users."""
class Meta:
model = User
username = factory.Sequence(lambda n: "User {}".format(n))
email = factory.LazyAttribute(
lambda x: "{}@imager.com".format(x.username.replace(" ", "")))
class ProfileTests(TestCase):
"""Run the tests."""
def setUp(self):
"""set up for tests."""
self.users = [UserFactory.create() for i in range(5)]
def test_profile_made(self):
"""Test that a profile has been made."""
self.assertTrue(ImagerProfile.objects.count() == 5)
def test_profile_associated_with_users(self):
"""Test that created profiles are actually assigned to users."""
profile = ImagerProfile.objects.first()
self.assertTrue(hasattr(profile, 'user'))
self.assertIsInstance(profile.user, User)
def test_user_has_profile_attached(self):
"""Test that a user is attached to a profile."""
user = self.users[0]
self.assertTrue(hasattr(user, 'profile'))
self.assertIsInstance(user.profile, ImagerProfile)
def test_update_profile_attribute(self):
"""Test that changing a attribute of a user works correctly."""
user = self.users[0]
user.profile.bio = 'bio'
user.profile.save()
query = User.objects.first()
self.assertTrue(query.profile.bio == 'bio')
def test_change_username_changes_profile(self):
"""Test that changing a attribute of a user works correctly."""
user = self.users[0]
profile = user.profile
user.profile.bio = 'bio'
user.profile.save()
self.assertEquals(user.profile.bio, profile.bio)
def test_change_profile_changes_user(self):
"""Test that changing a attribute of a user works correctly."""
user = self.users[0]
user.username = 'newname'
profile = user.profile
user.profile.save()
self.assertEquals(user.username, profile.user.username)
class ProfileFrontEndTests(TestCase):
def setUp(self):
self.users = [UserFactory.create() for i in range(5)]
self.client = Client()
self.request = RequestFactory()
def test_home_view_is_status_ok(self):
"""Test route to home view without client info or headers."""
from imager_profile.views import HomeView
req = self.request.get("/")
view = HomeView.as_view()
response = view(req)
self.assertTrue(response.status_code == 200)
def test_home_route_is_status_ok(self):
"""Test route using client's headers, etc."""
response = self.client.get("/")
self.assertTrue(response.status_code == 200)
def test_login_route_is_status_ok(self):
"""Test route using client's headers, etc."""
response = self.client.get("/login/")
self.assertTrue(response.status_code == 200)
def test_invalid_route_is_status_404(self):
"""Test that invalid route returns error."""
response = self.client.get("/bad")
self.assertTrue(response.status_code == 404)
def test_home_route_context_foo(self):
"""Test that home route has the right context info."""
response = self.client.get("/")
self.assertContains(response, 'Imager Site')
def test_home_route_uses_right_templates(self):
"""Check that home page is using the right templates."""
response = self.client.get("/")
self.assertTemplateUsed(response, "layout.html")
def test_login_route_redirects(self):
"""Test that login redirects users."""
new_user = UserFactory.create()
new_user.save()
new_user.username = "testname123"
new_user.set_password("testpassword123")
new_user.save()
response = self.client.post("/login/", {
"username": new_user.username,
"password": "testpassword123",
})
self.assertTrue(response.status_code == 302)
def test_login_route_redirects_to_homepage(self):
"""Test that login redirects users to homepage."""
new_user = UserFactory.create()
new_user.save()
new_user.username = "username123"
new_user.set_password("testing123")
new_user.save()
response = self.client.post("/login/", {
"username": new_user.username,
"password": "testing123",
}, follow=True)
self.assertTrue(response.redirect_chain[0][0] == "/")
def register_bob(self, follow=False):
"""Create a dummy user named russellszn."""
response = self.client.post("/registration/register/", {
"username": "russellszn",
"email": "[email protected]",
"password1": "testing123",
"password2": "testing123",
}, follow=follow)
return response
def add_testuser(self):
"""Make testuser and return his profile."""
user = UserFactory.create()
user.username = 'testuser'
user.set_password('testuser')
user.save()
return user.profile
def test_can_register_new_user(self):
"""Post request properly filled out creates new user."""
user_count = User.objects.count()
self.register_bob()
self.assertTrue(User.objects.count() == user_count + 1)
def test_registered_user_is_inactive(self):
"""Test that a newly registered user is not yet activated."""
self.register_bob()
the_user = User.objects.filter(username='russellszn')
self.assertFalse(the_user[0].is_active)
def test_successful_registration_redirects(self):
"""Test that registration redirects."""
response = self.register_bob()
self.assertTrue(response.status_code == 302)
def test_successful_registration_redirects_to_right_place(self):
"""Test that registration redirects to registration complete page."""
response = self.register_bob(follow=True)
self.assertTrue(
response.redirect_chain[0][0] == '/registration/register/complete/')
def test_profile_page_returns_correct_html(self):
"""Test that accessing test profile returns correct html."""
self.add_testuser()
response = self.client.get('/profile/testuser/')
self.assertContains(response, 'Album Count')
def test_profile_route_uses_right_templates(self):
"""Check that profile page is using the right templates."""
self.add_testuser()
response = self.client.get("/profile/testuser/")
self.assertTemplateUsed(response, "layout.html")
class EditProfileTest(TestCase):
"""Test edit profile."""
def setUp(self):
self.client = Client()
self.request = RequestFactory()
def add_testuser(self):
"""Make testuser and return his profile."""
user = UserFactory.create()
user.username = 'testuser'
user.set_password('testuser')
user.is_active = True
user.save()
return user.profile
def register_bob(self, follow=False):
"""Create a dummy user named russellszn."""
response = self.client.post("/registration/register/", {
"username": "russellszn",
"email": "[email protected]",
"password1": "testing123",
"password2": "testing123",
}, follow=follow)
return response
def test_view_status(self):
"""Test redirect if not logged in."""
self.add_testuser()
response = self.client.get("/profile/testuser/edit")
self.assertTrue(response.status_code == 301)
| {
"content_hash": "a8a20202742a92fce33f927acb0af6c2",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 80,
"avg_line_length": 36,
"alnum_prop": 0.6220192795535261,
"repo_name": "JSchatzman/django-imager",
"id": "06dc42c947d153d3fee928a1a213275b681af106",
"size": "7884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imager_profile/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8947"
},
{
"name": "HTML",
"bytes": "9148"
},
{
"name": "JavaScript",
"bytes": "1325"
},
{
"name": "Python",
"bytes": "48503"
}
],
"symlink_target": ""
} |