max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
tests/micropython/opt_level.py | sebi5361/micropython | 181 | 12788851 | <reponame>sebi5361/micropython
import micropython as micropython
# check we can get and set the level
micropython.opt_level(0)
print(micropython.opt_level())
micropython.opt_level(1)
print(micropython.opt_level())
# check that the optimisation levels actually differ
micropython.opt_level(0)
exec('print(__debug__)')
micropython.opt_level(1)
exec('print(__debug__)')
exec('assert 0')
| 2.453125 | 2 |
60145395-perspective-transform/perspective_transform.py | nathancy/stackoverflow | 3 | 12788852 | <gh_stars>1-10
from imutils.perspective import four_point_transform
import cv2
import numpy
# Load image, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread("1.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# Find contours and sort for largest contour
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
displayCnt = None
for c in cnts:
# Perform contour approximation
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * peri, True)
if len(approx) == 4:
displayCnt = approx
break
# Obtain bird's-eye view of image
warped = four_point_transform(image, displayCnt.reshape(4, 2))
cv2.imshow("thresh", thresh)
cv2.imshow("warped", warped)
cv2.imshow("image", image)
cv2.imwrite("thresh.png", thresh)
cv2.imwrite("warped.png", warped)
cv2.imwrite("image.png", image)
cv2.waitKey()
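# Rough sketch of what imutils' four_point_transform does internally, using the
# same contour found above (output size handling is simplified here).
from imutils.perspective import order_points
rect = order_points(displayCnt.reshape(4, 2).astype("float32"))
(tl, tr, br, bl) = rect
w = int(max(numpy.linalg.norm(br - bl), numpy.linalg.norm(tr - tl)))
h = int(max(numpy.linalg.norm(tr - br), numpy.linalg.norm(tl - bl)))
dst = numpy.array([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]], dtype="float32")
M = cv2.getPerspectiveTransform(rect, dst)
warped_manual = cv2.warpPerspective(image, M, (w, h))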
| 2.515625 | 3 |
count_sort.py | gmolnar1/Find-My-Game | 3 | 12788853 | import math
import os
import sys
import pprint
def count_sort_func(data,maxdata,index):
maxdata +=1
count_list = [0]*(maxdata)
    count_dict = list(data)  # shallow copy so the pops below do not disturb the in-place writes into data
for n in data:
count_list[n[index]] +=1
i = 0
count = 0
for n in range(len(count_list)):
print(n)
while(count_list[n]>0):
            for j, vals in enumerate(count_dict):
                if vals[index] == n:
                    pprint.pprint(count_dict)
                    print("---------------------------------")
                    data[i] = vals
                    count += 1
                    print(count)
                    count_dict.pop(j)
                    break
#data[i] = n
i+=1
print("Hi")
count_list[n] -= 1
#pprint(list(data))
return data
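# Usage sketch (illustrative values): sort records by the integer stored at
# position `index` of each record.
if __name__ == '__main__':
    games = [(3, 'Chess'), (1, 'Go'), (2, 'Tetris'), (1, 'Pong')]
    pprint.pprint(count_sort_func(list(games), maxdata=3, index=0))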
| 3.40625 | 3 |
nesta/core/routines/datasets/crunchbase/crunchbase_root_task.py | anniyanvr/nesta | 13 | 12788854 | '''
Root task (Crunchbase)
========================
Luigi routine to collect all data from the Crunchbase data dump and load it to MySQL.
'''
import luigi
import datetime
import logging
from nesta.core.routines.datasets.crunchbase.crunchbase_parent_id_collect_task import ParentIdCollectTask
from nesta.core.routines.datasets.crunchbase.crunchbase_geocode_task import CBGeocodeBatchTask
from nesta.core.luigihacks.misctools import find_filepath_from_pathstub as f3p
from nesta.core.orms.crunchbase_orm import Base
from nesta.core.orms.orm_utils import get_class_by_tablename
class RootTask(luigi.WrapperTask):
'''A dummy root task, which collects the database configurations
and executes the central task.
Args:
date (datetime): Date used to label the outputs
db_config_path (str): Path to the MySQL database configuration
production (bool): Flag indicating whether running in testing
mode (False, default), or production mode (True).
'''
date = luigi.DateParameter(default=datetime.date.today())
production = luigi.BoolParameter(default=False)
insert_batch_size = luigi.IntParameter(default=500)
db_config_path = luigi.Parameter(default=f3p("mysqldb.config"))
db_config_env = luigi.Parameter(default="MYSQLDB")
def requires(self):
'''Collects the database configurations and executes the central task.'''
_routine_id = "{}-{}".format(self.date, self.production)
logging.getLogger().setLevel(logging.INFO)
yield ParentIdCollectTask(date=self.date,
_routine_id=_routine_id,
test=not self.production,
insert_batch_size=self.insert_batch_size,
db_config_path=self.db_config_path,
db_config_env=self.db_config_env)
geocode_kwargs = dict(date=self.date,
_routine_id=_routine_id,
test=not self.production,
db_config_env="MYSQLDB",
insert_batch_size=self.insert_batch_size,
env_files=[f3p("nesta"),
f3p("config/mysqldb.config"),
f3p("config/crunchbase.config")],
job_def="py37_amzn2",
job_queue="HighPriority",
region_name="eu-west-2",
poll_time=10,
memory=4096,
max_live_jobs=2)
for tablename in ['organizations', 'funding_rounds', 'investors', 'people', 'ipos']:
_class = get_class_by_tablename(Base, f'crunchbase_{tablename}')
yield CBGeocodeBatchTask(city_col=_class.city,
country_col=_class.country,
location_key_col=_class.location_id,
job_name=f"Crunchbase-{tablename}-{_routine_id}",
**geocode_kwargs)
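# A typical local invocation of this wrapper task (module path and parameters are
# illustrative), either from Python,
#     luigi.build([RootTask(production=False)], local_scheduler=True)
# or from the command line,
#     luigi --module crunchbase_root_task RootTask --local-scheduler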
| 2.265625 | 2 |
lake/attributes/control_signal_attr.py | StanfordAHA/Lake | 11 | 12788855 | <reponame>StanfordAHA/Lake
import kratos as kts
from enum import Enum
class ControlSignalAttr(kts.Attribute):
def __init__(self,
is_control=False,
ignore=False,
doc_string=""):
super().__init__()
self.value = "control_signal"
self.is_control = is_control
self.ignore = ignore
self.documentation = doc_string
def set_documentation(self, new_doc):
self.documentation = new_doc
def get_documentation(self):
return self.documentation
def get_control(self):
return self.is_control
def get_ignore(self):
return self.ignore
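# Usage sketch: construction and the getters follow directly from the class above;
# the attach step assumes kratos' generic add_attribute() API.
#     attr = ControlSignalAttr(is_control=True, doc_string="valid-in handshake")
#     port.add_attribute(attr)
#     attr.get_control()  # True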
| 2.5 | 2 |
pandaf/000/mc.py | cpausmit/Kraken | 0 | 12788856 | import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
import re
import os
process = cms.Process("PandaNtupler")
cmssw_base = os.environ['CMSSW_BASE']
options = VarParsing.VarParsing ('analysis')
options.register('isData',
False,
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.bool,
"True if running on Data, False if running on MC")
options.register('isSignal',
True,
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.bool,
"True if running on MC signal samples")
options.parseArguments()
isData = options.isData
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 5000
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# ---- define the input file --------------------------------------------
process.source = cms.Source(
"PoolSource", fileNames = cms.untracked.vstring('XX-LFN-XX')
)
# ---- define the output file -------------------------------------------
process.TFileService = cms.Service(
"TFileService",
closeFileFast = cms.untracked.bool(True),
fileName = cms.string("kraken-output-file-tmp_000.root"),
)
##----------------GLOBAL TAG ---------------------------
# used by photon id and jets
process.load("Configuration.Geometry.GeometryIdeal_cff")
process.load('Configuration.StandardSequences.Services_cff')
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
if (isData):
process.GlobalTag.globaltag = '80X_dataRun2_2016SeptRepro_v3'
else:
process.GlobalTag.globaltag = '80X_mcRun2_asymptotic_2016_TrancheIV_v6'
### LOAD DATABASE
from CondCore.DBCommon.CondDBSetup_cfi import *
######## LUMI MASK
if isData and False:
import FWCore.PythonUtilities.LumiList as LumiList
process.source.lumisToProcess = LumiList.LumiList(filename = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions16/13TeV/ReReco/Final/Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON.txt').getVLuminosityBlockRange()
print "Using local JSON"
### LOAD CONFIGURATION
process.load('PandaProd.Filter.infoProducerSequence_cff')
process.load('PandaProd.Filter.MonoXFilterSequence_cff')
process.load('PandaProd.Ntupler.PandaProd_cfi')
### ##ISO
process.load("RecoEgamma/PhotonIdentification/PhotonIDValueMapProducer_cfi")
process.load("RecoEgamma/ElectronIdentification/ElectronIDValueMapProducer_cfi")
process.PandaNtupler.isData = isData
process.triggerFilterSequence = cms.Sequence() # let's turn this off for now
if options.isSignal:
process.PandaNtupler.nSystWeight = -1
#-----------------------JES/JER----------------------------------
from CondCore.DBCommon.CondDBSetup_cfi import *
if isData:
jeclabel = 'Summer16_23Sep2016AllV3_DATA'
else:
jeclabel = 'Summer16_23Sep2016V3_MC'
process.jec = cms.ESSource("PoolDBESSource",
DBParameters = cms.PSet(messageLevel = cms.untracked.int32(0)),
timetype = cms.string('runnumber'),
toGet = cms.VPSet(
cms.PSet(record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_'+jeclabel+'_AK4PFPuppi'),
label = cms.untracked.string('AK4PFPuppi')
),
cms.PSet(record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_'+jeclabel+'_AK8PFPuppi'),
label = cms.untracked.string('AK8PFPuppi')
),
cms.PSet(record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_'+jeclabel+'_AK4PFchs'),
label = cms.untracked.string('AK4PFchs')
),
cms.PSet(record = cms.string('JetCorrectionsRecord'),
tag = cms.string('JetCorrectorParametersCollection_'+jeclabel+'_AK8PFchs'),
label = cms.untracked.string('AK8PFchs')
),
),
)
process.jec.connect = cms.string('sqlite:jec/%s.db'%jeclabel)
process.es_prefer_jec = cms.ESPrefer('PoolDBESSource', 'jec')
if isData:
jerlabel = 'Spring16_25nsV6_DATA'
else:
jerlabel = 'Spring16_25nsV6_MC'
process.jer = cms.ESSource("PoolDBESSource",
DBParameters = cms.PSet(messageLevel = cms.untracked.int32(0)),
toGet = cms.VPSet(
cms.PSet(record = cms.string('JetResolutionRcd'),
tag = cms.string('JR_%s_PtResolution_AK4PFchs'%jerlabel),
label = cms.untracked.string('AK4PFchs_pt'),
),
cms.PSet(record = cms.string('JetResolutionRcd'),
tag = cms.string('JR_%s_PhiResolution_AK4PFchs'%jerlabel),
label = cms.untracked.string('AK4PFchs_phi'),
),
cms.PSet(record = cms.string('JetResolutionScaleFactorRcd'),
tag = cms.string('JR_%s_SF_AK4PFchs'%jerlabel),
label = cms.untracked.string('AK4PFchs'),
),
cms.PSet(record = cms.string('JetResolutionRcd'),
tag = cms.string('JR_%s_PtResolution_AK4PFPuppi'%jerlabel),
label = cms.untracked.string('AK4PFPuppi_pt'),
),
cms.PSet(record = cms.string('JetResolutionRcd'),
tag = cms.string('JR_%s_PhiResolution_AK4PFPuppi'%jerlabel),
label = cms.untracked.string('AK4PFPuppi_phi'),
),
cms.PSet(record = cms.string('JetResolutionScaleFactorRcd'),
tag = cms.string('JR_%s_SF_AK4PFPuppi'%jerlabel),
label = cms.untracked.string('AK4PFPuppi'),
),
)
)
process.jer.connect = cms.string('sqlite:jer/%s.db'%jerlabel)
process.es_prefer_jer = cms.ESPrefer('PoolDBESSource', 'jer')
#-----------------------ELECTRON ID-------------------------------
from PandaProd.Ntupler.egammavid_cfi import *
initEGammaVID(process,options)
#### RECOMPUTE JEC From GT ###
from PhysicsTools.PatAlgos.tools.jetTools import updateJetCollection
jecLevels= ['L1FastJet', 'L2Relative', 'L3Absolute']
if options.isData:
jecLevels.append('L2L3Residual')
updateJetCollection(
process,
jetSource = process.PandaNtupler.chsAK4,
labelName = 'UpdatedJEC',
jetCorrections = ('AK4PFchs', cms.vstring(jecLevels), 'None')
)
process.PandaNtupler.chsAK4=cms.InputTag('updatedPatJetsUpdatedJEC') # replace CHS with updated JEC-corrected
process.jecSequence = cms.Sequence( process.patJetCorrFactorsUpdatedJEC* process.updatedPatJetsUpdatedJEC)
########### MET Filter ################
process.load('RecoMET.METFilters.BadPFMuonFilter_cfi')
process.BadPFMuonFilter.muons = cms.InputTag("slimmedMuons")
process.BadPFMuonFilter.PFCandidates = cms.InputTag("packedPFCandidates")
process.BadPFMuonFilter.taggingMode = cms.bool(True)
process.load('RecoMET.METFilters.BadChargedCandidateFilter_cfi')
process.BadChargedCandidateFilter.muons = cms.InputTag("slimmedMuons")
process.BadChargedCandidateFilter.PFCandidates = cms.InputTag("packedPFCandidates")
process.BadChargedCandidateFilter.taggingMode = cms.bool(True)
process.metfilterSequence = cms.Sequence(process.BadPFMuonFilter
*process.BadChargedCandidateFilter)
if not options.isData:
process.PandaNtupler.metfilter = cms.InputTag('TriggerResults','','PAT')
############ RECOMPUTE PUPPI/MET #######################
from PhysicsTools.PatUtils.tools.runMETCorrectionsAndUncertainties import runMetCorAndUncFromMiniAOD
runMetCorAndUncFromMiniAOD(process, ## PF MET
isData=isData)
process.PandaNtupler.pfmet = cms.InputTag('slimmedMETs','','PandaNtupler')
process.MonoXFilter.met = cms.InputTag('slimmedMETs','','PandaNtupler')
from PhysicsTools.PatAlgos.slimming.puppiForMET_cff import makePuppiesFromMiniAOD
makePuppiesFromMiniAOD(process,True)
process.puppi.useExistingWeights = False # I still don't trust miniaod...
process.puppiNoLep.useExistingWeights = False
runMetCorAndUncFromMiniAOD(process, ## Puppi MET
isData=options.isData,
metType="Puppi",
pfCandColl=cms.InputTag("puppiForMET"),
recoMetFromPFCs=True,
jetFlavor="AK4PFPuppi",
postfix="Puppi")
process.puppiForMET.photonId = process.PandaNtupler.phoLooseIdMap
process.PandaNtupler.puppimet = cms.InputTag('slimmedMETsPuppi','','PandaNtupler')
process.MonoXFilter.puppimet = cms.InputTag('slimmedMETsPuppi','','PandaNtupler')
############ RUN CLUSTERING ##########################
process.jetSequence = cms.Sequence()
# btag and patify puppi AK4 jets
from RecoJets.JetProducers.ak4GenJets_cfi import ak4GenJets
from PhysicsTools.PatAlgos.tools.pfTools import *
if not isData:
process.packedGenParticlesForJetsNoNu = cms.EDFilter("CandPtrSelector",
src = cms.InputTag("packedGenParticles"),
cut = cms.string("abs(pdgId) != 12 && abs(pdgId) != 14 && abs(pdgId) != 16")
)
process.ak4GenJetsNoNu = ak4GenJets.clone(src = 'packedGenParticlesForJetsNoNu')
process.jetSequence += process.packedGenParticlesForJetsNoNu
process.jetSequence += process.ak4GenJetsNoNu
# btag and patify jets for access later
addJetCollection(
process,
labelName = 'PFAK4Puppi',
jetSource=cms.InputTag('ak4PFJetsPuppi'), # this is constructed in runMetCorAndUncFromMiniAOD
algo='AK4',
rParam=0.4,
pfCandidates = cms.InputTag("puppiForMET"),
pvSource = cms.InputTag('offlineSlimmedPrimaryVertices'),
svSource = cms.InputTag('slimmedSecondaryVertices'),
muSource = cms.InputTag('slimmedMuons'),
elSource = cms.InputTag('slimmedElectrons'),
btagInfos = ['pfImpactParameterTagInfos','pfInclusiveSecondaryVertexFinderTagInfos'],
btagDiscriminators = ['pfCombinedInclusiveSecondaryVertexV2BJetTags'],
genJetCollection = cms.InputTag('ak4GenJetsNoNu'),
genParticles = cms.InputTag('prunedGenParticles'),
getJetMCFlavour = False, # jet flavor disabled
)
if not isData:
process.jetSequence += process.patJetPartonMatchPFAK4Puppi
process.jetSequence += process.patJetGenJetMatchPFAK4Puppi
process.jetSequence += process.pfImpactParameterTagInfosPFAK4Puppi
process.jetSequence += process.pfInclusiveSecondaryVertexFinderTagInfosPFAK4Puppi
process.jetSequence += process.pfCombinedInclusiveSecondaryVertexV2BJetTagsPFAK4Puppi
process.jetSequence += process.patJetsPFAK4Puppi
##################### FAT JETS #############################
from PandaProd.Ntupler.makeFatJets_cff import initFatJets, makeFatJets
fatjetInitSequence = initFatJets(process,isData)
process.jetSequence += fatjetInitSequence
if process.PandaNtupler.doCHSAK8:
ak8CHSSequence = makeFatJets(process,
isData=isData,
pfCandidates='pfCHS',
algoLabel='AK',
jetRadius=0.8)
process.jetSequence += ak8CHSSequence
if process.PandaNtupler.doPuppiAK8:
ak8PuppiSequence = makeFatJets(process,
isData=isData,
pfCandidates='puppi',
algoLabel='AK',
jetRadius=0.8)
process.jetSequence += ak8PuppiSequence
if process.PandaNtupler.doCHSCA15:
ca15CHSSequence = makeFatJets(process,
isData=isData,
pfCandidates='pfCHS',
algoLabel='CA',
jetRadius=1.5)
process.jetSequence += ca15CHSSequence
if process.PandaNtupler.doPuppiCA15:
ca15PuppiSequence = makeFatJets(process,
isData=isData,
pfCandidates='puppi',
algoLabel='CA',
jetRadius=1.5)
process.jetSequence += ca15PuppiSequence
if not isData:
process.ak4GenJetsYesNu = ak4GenJets.clone(src = 'packedGenParticles')
process.jetSequence += process.ak4GenJetsYesNu
###############################
process.p = cms.Path(
process.infoProducerSequence *
process.triggerFilterSequence *
process.jecSequence *
process.egmGsfElectronIDSequence *
process.egmPhotonIDSequence *
process.photonIDValueMapProducer * # iso map for photons
    process.electronIDValueMapProducer * # iso map for electrons
process.fullPatMetSequence * # pf MET
process.puppiMETSequence * # builds all the puppi collections
process.egmPhotonIDSequence * # baseline photon ID for puppi correction
process.fullPatMetSequencePuppi * # puppi MET
process.monoXFilterSequence * # filter
process.jetSequence * # patify ak4puppi and do all fatjet stuff
process.metfilterSequence *
process.PandaNtupler
)
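# With the VarParsing options registered above, this configuration is typically
# executed as (arguments illustrative)
#     cmsRun mc.py isData=False isSignal=True
# after replacing the 'XX-LFN-XX' placeholder in process.source with a real input LFN.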
| 1.726563 | 2 |
src/server/blueprints/security.py | 1064CBread/1064Chat | 2 | 12788857 | from flask import Blueprint, Flask, render_template, request
blueprint = Blueprint(__name__, __name__, url_prefix='/auth')
@blueprint.route('/login', methods=['GET', 'POST'])
def login():
if request.method != 'POST':
return render_template("login_start.jinja")
print(request.form)
return 'You "logged" in. EMAIL: ' + request.form['email'] + '; PASS: ' + request.form['password']
@blueprint.route('/register', methods=['GET', 'POST'])
def register():
if request.method != 'POST':
return render_template("register_start.jinja")
return 'You "register" an account. EMAIL: ' + request.form['email'] + '; PASS: ' + request.form['password']
def registerself(app: Flask, prefix=''):
app.register_blueprint(blueprint, url_prefix=prefix + blueprint.url_prefix)
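# Usage sketch (application wiring assumed): registering the blueprint exposes
# /auth/login and /auth/register, or /api/auth/... when a prefix is supplied.
#     app = Flask(__name__)
#     registerself(app, prefix='/api')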
| 2.71875 | 3 |
ch04/ch04_04/ch04_04.py | z2x3c4v5bz/pybook_yehnan | 0 | 12788858 | if __name__ == '__main__':
s = 'abcde'
print(s[::-1])
print(s[-1::-1])
    print(s[-1:0:-1]) # stops before index 0, so the leading 'a' is dropped
'''
edcba
edcba
edcb
'''
| 2.640625 | 3 |
pvlibs/data_import/__init__.py | bfw930/pvlibs | 0 | 12788859 | <filename>pvlibs/data_import/__init__.py<gh_stars>0
''' Imports '''
# core import protocols
from .core import parse_data_file
| 1.1875 | 1 |
utils.py | ondrejba/monte_carlo | 14 | 12788860 | def update_mean(value, mean, count):
"""
Update value of a streaming mean.
:param value: New value.
:param mean: Mean value.
:param count: Number of values averaged.
    :return: Increment to add to the current mean (new_mean = mean + returned value).
"""
return (value - mean) / (count + 1) | 3.265625 | 3 |
src/6/reading_nested_and_variable_sized_binary_structures/writepolys.py | tuanavu/python-gitbook | 14 | 12788861 | import struct
import itertools
polys = [
[ (1.0, 2.5), (3.5, 4.0), (2.5, 1.5) ],
[ (7.0, 1.2), (5.1, 3.0), (0.5, 7.5), (0.8, 9.0) ],
[ (3.4, 6.3), (1.2, 0.5), (4.6, 9.2) ],
]
def write_polys(filename, polys):
# Determine bounding box
flattened = list(itertools.chain(*polys))
min_x = min(x for x, y in flattened)
max_x = max(x for x, y in flattened)
min_y = min(y for x, y in flattened)
max_y = max(y for x, y in flattened)
with open(filename, 'wb') as f:
f.write(struct.pack('<iddddi',
0x1234,
min_x, min_y,
max_x, max_y,
len(polys)))
for poly in polys:
size = len(poly) * struct.calcsize('<dd')
f.write(struct.pack('<i', size+4))
for pt in poly:
f.write(struct.pack('<dd', *pt))
# Call it with our polygon data
write_polys('polys.bin', polys)
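# Reading the file back, mirroring the layout written above (a '<iddddi' header,
# then per polygon a '<i' size prefix that counts itself plus '<dd' point records).
def read_polys(filename):
    with open(filename, 'rb') as f:
        header = f.read(struct.calcsize('<iddddi'))
        file_code, min_x, min_y, max_x, max_y, num_polys = struct.unpack('<iddddi', header)
        polys = []
        for _ in range(num_polys):
            pbytes, = struct.unpack('<i', f.read(4))   # size field counts its own 4 bytes
            poly = []
            for _ in range((pbytes - 4) // struct.calcsize('<dd')):
                poly.append(struct.unpack('<dd', f.read(16)))
            polys.append(poly)
        return polys

print(read_polys('polys.bin'))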
| 2.484375 | 2 |
src/TRAIN_MODELS/CLASSIFICATION/classificationModel_functions.py | CaT-zTools/Deep-CaT-z-software | 0 | 12788862 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
@author: DeepCaT_Z
"""
#%% ############################################
######### IMPORTS: DO NOT TOUCH ##############
################################################
import torch
from torch import nn
from sklearn.metrics import f1_score
from torch import optim
from time import time
import numpy as np
from skimage.io import imsave, imread
from skimage.transform import resize
from skimage.morphology import erosion, disk
from torch.utils.data import Dataset
from torchvision import transforms
from config_Classification_DeepCaT_Z import *
import os
#%% ############################### UTILS FOR DATASET CONSTRUCTION ############
# Data augmentation operations
class BrightnessTransform:
def __call__(self, sample):
brightness = np.random.rand()*0.3-0.15
if 'X' in sample:
sample['X'] = np.clip(sample['X'] + brightness, 0, 1)
if 'F' in sample:
sample['F'][0] = np.clip(sample['F'][0] + brightness, 0, 1)
return sample
class FlipTransform:
def __call__(self, sample):
if np.random.rand() > 0.5:
if 'X' in sample:
sample['X'] = np.flip(sample['X'], 2)
if 'S' in sample:
sample['S'] = np.flip(sample['S'], 1)
return sample
class Rot90Transform:
def __call__(self, sample):
k = np.random.randint(0, 4)
if 'X' in sample:
sample['X'] = np.rot90(sample['X'], k, (1, 2))
if 'S' in sample:
sample['S'] = np.rot90(sample['S'], k, (0, 1))
return sample
class CropTransform:
def __call__(self, sample):
dx = np.random.randint(0, IMG_LARGE_SIZE-IMG_SIZE)
dy = np.random.randint(0, IMG_LARGE_SIZE-IMG_SIZE)
if 'X' in sample:
sample['X'] = sample['X'][:, dy:dy+IMG_SIZE, dx:dx+IMG_SIZE]
if 'S' in sample:
sample['S'] = sample['S'][dy:dy+IMG_SIZE, dx:dx+IMG_SIZE]
return sample
class ShiftTransform:
def __call__(self, sample):
dx = np.random.randint(0, IMG_LARGE_SIZE-IMG_SIZE)
dy = np.random.randint(0, IMG_LARGE_SIZE-IMG_SIZE)
if dx and dy:
xdir = np.random.randint(0, 2)
ydir = np.random.randint(0, 2)
if 'X' in sample:
X = np.zeros_like(sample['X'])
if xdir == 0 and ydir == 0:
X[:, dy:, dx:] = sample['X'][:, :-dy, :-dx]
elif xdir == 1 and ydir == 0:
X[:, dy:, :-dx] = sample['X'][:, :-dy, dx:]
elif xdir == 0 and ydir == 1:
X[:, :-dy, dx:] = sample['X'][:, dy:, :-dx]
else:
X[:, :-dy, :-dx] = sample['X'][:, dy:, dx:]
sample['X'] = X
if 'S' in sample:
S = np.zeros_like(sample['S'])
if xdir == 0 and ydir == 0:
S[dy:, dx:] = sample['S'][:, :-dy, :-dx]
elif xdir == 1 and ydir == 0:
S[dy:, :-dx] = sample['S'][:, :-dy, dx:]
elif xdir == 0 and ydir == 1:
S[:-dy, dx:] = sample['S'][:, dy:, :-dx]
else:
S[:-dy, :-dx] = sample['S'][:, dy:, dx:]
sample['S'] = S
return sample
# Functions for loading the dataset + apply augmentation operations while training
class MiceDataset(Dataset):
def __init__(self, dirname, fold, steps, with_augment, with_sampling):
self.dirname = dirname
assert os.path.exists(f'{self.dirname}_{IMG_SIZE}'), f'{self.dirname}_{IMG_SIZE} does not exist'
assert os.path.exists(f'{self.dirname}_{IMG_LARGE_SIZE}'), f'{self.dirname}_{IMG_LARGE_SIZE} does not exist'
self.fold = fold
self.transform_big = None
self.transform_small = None
self.steps = steps
if with_augment:
self.transform_big = transforms.Compose([
CropTransform(),
BrightnessTransform(),
Rot90Transform(),
FlipTransform(),
])
self.transform_small = transforms.Compose([
BrightnessTransform(),
Rot90Transform(),
FlipTransform(),
])
files = sorted(os.listdir(f'{dirname}_{IMG_SIZE}/{fold}/frames'))
# structure files in contiguous video sequences
videos = []
for fname in files:
if len(videos) != 0 and int(fname.split('_')[1]) == int(videos[-1][-1].split('_')[1])+2:
videos[-1].append(fname[:-4])
else:
videos.append([fname[:-4]])
videos = [[video[i+step] for step in steps] for video in videos for i in range(np.max(np.abs(self.steps)), len(video))]
# oversample based on activities
if with_sampling:
A = np.array([np.loadtxt(f'{dirname}_{IMG_SIZE}/{fold}/labels/{video[-1]}.txt', np.int32) for video in videos]) - 1
# repeat all classes until they equalize the majority class
reps = np.round(np.max(np.bincount(A)) / np.bincount(A)).astype(int)
for video, a in zip(videos.copy(), A):
videos += [video] * (reps[a]-1)
self.videos = videos
def __len__(self):
return len(self.videos)
def __getitem__(self, i):
# sometimes we use the smaller or the bigger images
transform = self.transform_small
imgsize = IMG_SIZE
if self.transform_big:
if np.random.rand() < 0.5:
imgsize = IMG_LARGE_SIZE
transform = self.transform_big
video = self.videos[i]
sample = {}
sample['X'] = np.array([(imread(f'{self.dirname}_{imgsize}/{self.fold}/frames/{fname}.png', True)[..., np.newaxis]/255).astype(np.float32) for fname in video])
fname = video[-1]
sample['A'] = np.array(int(open(f'{self.dirname}_{imgsize}/{self.fold}/labels/{fname}.txt').read()) - 1, np.int64)
if transform:
sample = transform(sample)
# swap color axis: numpy (HWC), but torch (CHW)
sample['X'] = sample['X'].transpose((0, 3, 1, 2))
# numpy array -> torch tensor
for k in sample:
sample[k] = torch.from_numpy(np.ascontiguousarray(sample[k]))
return sample
#%% ############################### UTILS FOR NETWORKS ########################
class Reshape(nn.Module):
def __init__(self, *args):
super().__init__()
self.shape = args
def forward(self, x):
return (x.contiguous()).view(self.shape)
class SliceRNN(nn.Module):
def forward(self, x):
output, hn = x
return output[:, -1]
#%% ############################### CLASSIFICATION NETWORK ####################
class Model_X_A(nn.Module):
def __init__(self, timesteps):
super().__init__()
# 128 => 64, 8, 8 (4096)
# 256 => 16, 16, 16 (4096)
# 512 => 4, 32, 32 (4096)
last_kernel = {32: 1024, 64: 256, 128: 64, 256: 16, 512: 4}
self.net = nn.Sequential(
Reshape(-1, CHANNELS_SIZE, IMG_SIZE, IMG_SIZE),
nn.Conv2d(CHANNELS_SIZE, 64, 3, 2, padding=1), nn.ReLU(),
nn.Conv2d(64, 64, 3, 2, padding=1), nn.ReLU(),
nn.Conv2d(64, 64, 3, 2, padding=1), nn.ReLU(),
nn.Conv2d(64, last_kernel[IMG_SIZE], 3, 2, padding=1), nn.ReLU(),
Reshape(-1, timesteps, 64*8*8),
nn.Dropout(),
nn.RNN(64*8*8, 128, batch_first= True),
SliceRNN(),
nn.Linear(128, 64), nn.ReLU(),
nn.Linear(64, 32), nn.ReLU(),
nn.Linear(32, NCLASSES),
)
def forward(self, x):
return (self.net(x),)
#%% ############################### LOSSES DEFINITION ########################
def ce(preds, labels):
return nn.CrossEntropyLoss()(preds, labels[:, 0])
def mse(preds, labels):
return torch.mean((preds - labels)**2)
def acc(preds, labels):
return torch.sum(preds.argmax(1) == labels[:, 0]) / len(labels)
def bacc(preds, labels):
labels = labels[:, 0]
preds = preds.argmax(1)
return torch.mean(torch.tensor([
torch.sum((preds == k) & (labels == k)) / torch.sum(labels == k)
for k in torch.unique(labels)]))
def mae(preds, labels):
return torch.mean(torch.abs(preds - labels))
def dice(preds, labels):
smooth = 1.
num = 2 * (preds * labels).sum()
den = preds.sum() + labels.sum()
return 1 - (((num+smooth) / (den+smooth)) / len(labels))
def bce_dice(preds, labels):
return nn.BCELoss()(preds, labels) + dice(preds, labels)
def f1_k(k):
def f(preds, labels):
labels = (labels[:, 0] == k).numpy().astype(int)
preds = (preds.argmax(1) == k).numpy().astype(int)
return f1_score(labels, preds)
f.__name__ = f'f1-{k}'
return f
#%% ######################## TRAINING METHODS ################################
def predict(model, device, ds):
Y = []
preds = []
with torch.no_grad():
model.eval()
for batch in ds:
X = [batch['X'].to(device)]
Y.append([batch['A']])
ps = model(*X)
preds.append([p.cpu() for p in ps])
return [torch.cat([o[0] for o in preds])], [torch.cat([o[0] for o in Y])]
def compute_metrics(fold, predictions, labels, metrics):
ret = {}
with torch.no_grad():
for m in metrics['A']:
ret[f'{fold}_A_{m.__name__}'] = m(predictions[0], labels[0])
return ret
def train(model, device, epochs, tr, ts, metrics, losses, losses_weights, lr):
history = []
optimizer = optim.Adam(model.parameters(), lr)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience = 5)
for epoch in range(epochs):
print(f'Epoch {epoch+1}/{epochs}')
tic = time()
# Train
train_loss, valid_loss = 0.0, 0.0
model.train()
train_labels = []
train_preds = []
for bi, batch in enumerate(tr):
X = [batch['X'].to(device)]
Y = [batch['A'].to(device)]
if bi == 0:
X[0].requires_grad = True
optimizer.zero_grad()
preds = model(*X)
loss = 0
loss += losses['A'](preds[0], Y[0]) * losses_weights['A']
train_loss += loss / len(tr)
loss.backward()
optimizer.step()
train_labels.append([y.cpu() for y in Y])
train_preds.append([p.cpu() for p in preds])
# Calculate and save gradients to folder "grads"
# if bi == 0: # save gradients relative to X
# x = X[0]
# for xi in range(x.shape[0]):
# xx = x[xi].detach().cpu().numpy()
# xx = np.concatenate(list(xx), 2) # concat horizontally
# gg = x.grad[xi].cpu().numpy()
# gg = np.abs(gg) # normalize gradients
# gg = (1 - (gg-gg.min()) / (gg.max()-gg.min()))
# # gg = dilation(gg, disk(2))
# gg = np.concatenate(list(gg), 2) # concat horizontally
# gg = erosion(gg[0, :, :], disk(1))
# gg = np.expand_dims(gg, axis=0)
# xx = np.transpose(xx, (1, 2, 0)) * 0.4
# gg = np.transpose(gg, (1, 2, 0)) * 0.6
# fig = np.concatenate((xx+gg, xx+0.6, xx+gg), 2)
# fig = (fig*255).astype(np.uint8)
# imsave(f'grads\\grads-epoch{epoch}-image{xi}.png', fig[:, :, :3])
train_labels = [torch.cat([o[0] for o in train_labels])]
train_preds = [torch.cat([o[0] for o in train_preds])]
# Evaluate
avg_metrics = dict(
train_loss=train_loss.cpu().detach(),
**compute_metrics('train', train_preds, train_labels, metrics),
**compute_metrics('test', *predict(model, device, ts), metrics),
)
toc = time()
print('- %ds - %s' % (toc-tic, ' - '.join(f'%s: %f' % t for t in avg_metrics.items())))
history.append(avg_metrics)
return history
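# Wiring sketch (illustrative, not from the original file): how the pieces above fit
# together. DATASET_DIR, TIMESTEPS, EPOCHS and LR stand in for values normally taken
# from config_Classification_DeepCaT_Z.
#     from torch.utils.data import DataLoader
#     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#     steps = list(range(-TIMESTEPS + 1, 1))
#     tr = DataLoader(MiceDataset(DATASET_DIR, 'train', steps, True, True), batch_size=16, shuffle=True)
#     ts = DataLoader(MiceDataset(DATASET_DIR, 'test', steps, False, False), batch_size=16)
#     model = Model_X_A(TIMESTEPS).to(device)
#     history = train(model, device, EPOCHS, tr, ts, metrics={'A': [acc, bacc]},
#                     losses={'A': ce}, losses_weights={'A': 1.0}, lr=LR)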
| 2.078125 | 2 |
HW1_P4.py | ZhaoQii/Naive-Bayesian-Classifier-in-Spam-Detection | 0 | 12788863 | <reponame>ZhaoQii/Naive-Bayesian-Classifier-in-Spam-Detection
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 26 15:14:51 2017
@author: <NAME>
"""
from matplotlib import style
import matplotlib.pyplot as plt
import scipy as sp
import pandas as pd
import numpy as np
import os
os.chdir('/Users/ap/Dropbox/2017FALL/EECS E6720BayesianModelforML/HW1/EECS6720-hw1-data')
xtrain = pd.read_csv('X_train.csv', header = None)
ytrain = pd.read_csv('label_train.csv', header = None)
xtest = pd.read_csv('X_test.csv', header = None)
ytest = pd.read_csv('label_test.csv', header = None)
# set all hyperparameters
a = 1
b = 1
e = 1
f = 1
N = ytrain.shape[0]
N0 = np.sum(ytrain[0] == 0)
N1 = np.sum(ytrain[0] == 1)
ystar1_giveny = (e + np.sum(ytrain == 1)) / (N + e + f)
ystar0_giveny = (f + np.sum(ytrain == 0)) / (N + e + f)
col_sum0 = np.sum(xtrain.loc[ytrain.index[ytrain[0] == 0].tolist()], 0)
col_sum1 = np.sum(xtrain.loc[ytrain.index[ytrain[0] == 1].tolist()], 0)
def cal_log_negbin(x, alpha, beta):
log_p = sp.special.gammaln(x + alpha) - sp.special.gammaln(alpha) - \
sp.special.gammaln(x + 1) + np.log((beta / (beta + 1)) ** alpha) + \
np.log((1 / (beta + 1)) ** x)
return(log_p)
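# Note: cal_log_negbin is the log pmf of the posterior-predictive negative binomial
# obtained by integrating the Poisson rate over its Gamma(alpha, beta) posterior:
# NB(x) = Gamma(x+alpha) / (Gamma(alpha) * x!) * (beta/(beta+1))**alpha * (1/(beta+1))**x.
# Quick sanity check: np.exp(cal_log_negbin(0, 1, 1)) == 0.5, i.e. (beta/(beta+1))**alpha at x = 0.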
def pred_1prob(xstar):
log_p = 0
for i, v in xstar.iteritems():
temp = cal_log_negbin(v, a + col_sum1[i], b + N1)
log_p = log_p + temp
p = np.exp(log_p)*ystar1_giveny
return(p)
def pred_0prob(xstar):
log_p = 0
for i, v in xstar.iteritems():
temp = cal_log_negbin(v, a + col_sum0[i], b + N0)
log_p = log_p + temp
p = np.exp(log_p)*ystar0_giveny
return(p)
test0 = xtest.apply(pred_0prob, axis = 1)
test1 = xtest.apply(pred_1prob, axis = 1)
test0 = np.asarray(test0[0].tolist())
test1 = np.asarray(test1[0].tolist())
# Some emails cannot be classified: several of their word counts are large enough
# to drive log(predict_probability) to -inf, i.e. the predictive probability
# underflows to 0 for both y = 1 and y = 0
num_undetermined = np.count_nonzero((test1 == 0) & (test0 == 0))
pred_y = 1 * (test0 < test1)
# If we cannot decide, we regard them as spam, since they contain so many repeated words.
pred_y[np.where((test1 == 0) & (test0 == 0))] = 1
v11 = np.sum([a and b for a, b in zip(ytest[0].values == 0, pred_y == 0)])
v12 = np.sum([a and b for a, b in zip(ytest[0].values == 0, pred_y == 1)])
v21 = np.sum([a and b for a, b in zip(ytest[0].values == 1, pred_y == 0)])
v22 = np.sum([a and b for a, b in zip(ytest[0].values == 1, pred_y == 1)])
table = pd.DataFrame({'real_notspam':[v11, v12], 'real_spam':[v21, v22]},
index = ['predict_notspam', 'predict_spam'])
#### (c) ####
# Pick three mislabeled emails firstly
mis3 = ytest.index[ytest[0] != pred_y].tolist()
mis3 = mis3[:3]
temp = test0 + test1 # temp is the sum of both probabilities
temp[np.where(temp == 0)] = float('nan') # if temp = 0, set it to nan before normalizing the probabilities
test0 = np.divide(test0, temp)
test1 = np.divide(test1, temp)
# E(lambda1) = E(E(lambda1|xi:yi=1)), where lambda1|xi:yi=1 is the posterior of
# lambda1 given the data, specifically Gamma(1+sum(xi:yi=1), 1+N1); same for lambda0
Elambda1 = (col_sum1 + 1) / (N1 + 1)
Elambda0 = (col_sum0 + 1) / (N0 + 1)
with open('README.txt', 'r') as file:
xnames = file.read().split('\n')
def make_plots(index):
style.use('ggplot')
plt.xticks(range(53), xnames, rotation = 'vertical')
plt.plot(xtest.loc[index], 'b-', label = 'Features')
plt.plot(Elambda0, 'r-', label = r'$E(\vec{\lambda_{0}})$')
plt.plot(Elambda1, 'g-', label = r'$E(\vec{\lambda_{1}})$')
plt.legend(loc='best')
plt.title('The Features of {}th Sample VS '.format(index + 1) + \
r'$E(\vec{\lambda_{1}})$' + '&' r'$E(\vec{\lambda_{0}})$')
plt.show()
# Predictive Probability
def get_pred_prob(index):
print('P({}th Email is not Spam) ='.format(index + 1), test0[index])
print('P({}th Email is Spam) ='.format(index + 1), test1[index])
for each in mis3:
make_plots(each)
get_pred_prob(each)
#### (d) ####
cloest3 = abs(test0 - test1).argsort()[0:3]
for each in cloest3:
make_plots(each)
get_pred_prob(each)
| 2.421875 | 2 |
checkout/migrations/0013_auto_20211024_1504.py | kevin-ci/janeric2 | 1 | 12788864 | <reponame>kevin-ci/janeric2<filename>checkout/migrations/0013_auto_20211024_1504.py
# Generated by Django 3.1.5 on 2021-10-24 15:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('checkout', '0012_auto_20211024_1453'),
]
operations = [
migrations.AlterModelOptions(
name='productshippingdata',
options={'ordering': ['product__active', 'product__category__division', 'product__category__name', 'product__product_family__name', 'product__product_size__name'], 'verbose_name_plural': 'Product Shipping Data'},
),
]
| 1.5625 | 2 |
joke teller.py | naitikvaru/python_joke_teller | 1 | 12788865 | <reponame>naitikvaru/python_joke_teller<gh_stars>1-10
import pyjokes
print(pyjokes.get_joke())
| 1.59375 | 2 |
vergeml/sources/mnist.py | ss18/vergeml | 324 | 12788866 | <gh_stars>100-1000
from vergeml.img import INPUT_PATTERNS, open_image, fixext, ImageType
from vergeml.io import source, SourcePlugin, Sample
from vergeml.data import Labels
from vergeml.utils import VergeMLError
from vergeml.sources.labeled_image import LabeledImageSource
import random
import numpy as np
from PIL import Image
import os.path
import json
from operator import methodcaller
import io
from typing import List
import gzip
import hashlib
_FILES = ("train-images-idx3-ubyte.gz", "train-labels-idx1-ubyte.gz",
"t10k-images-idx3-ubyte.gz", "t10k-labels-idx1-ubyte.gz")
_MNIST_LABELS = ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")
_FASHION_MNIST_LABELS = ("tshirt_top",
"trouser",
"pullover",
"dress",
"coat",
"sandal",
"shirt",
"sneaker",
"sag",
"ankle_boot")
# we use the md5 to check for fashion mnist, so we can provide the labels
# automatically
_MD5_FASHION = "8d4fb7e6c68d591d4c3dfef9ec88bf0d"
def _md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
@source('image', descr="Load images in MNIST format.")
class InputMnist(SourcePlugin):
data = None
def num_samples(self, split: str) -> int:
return len(self.data[split])
def read_sample(self, split: str, index: int):
return self.data[split][index]
def _check_files(self):
self.data = dict(train=[], val=[], test=[])
samples_dir = self.config["samples_dir"]
files = [os.path.join(samples_dir, file) for file in _FILES]
for path in files:
if not os.path.exists(path):
raise VergeMLError("File not found in samples_dir: {}".format(
os.path.basename(path)))
if _md5(files[0]) == _MD5_FASHION:
self.meta['labels'] = _FASHION_MNIST_LABELS
else:
self.meta['labels'] = _MNIST_LABELS
# preload
for split, images, labels in (('train', files[0], files[1]), ('test', files[2], files[3])):
with gzip.open(images) as f:
# First 16 bytes are magic_number, n_imgs, n_rows, n_cols
pixels = np.frombuffer(f.read(), 'B', offset=16)
pixels = pixels.reshape(-1, 28, 28)
with gzip.open(labels) as f:
# First 8 bytes are magic_number, n_labels
integer_labels = np.frombuffer(f.read(), 'B', offset=8)
n_cols = integer_labels.max() + 1
for ix, imagearr in enumerate(pixels):
label = integer_labels[ix]
onehot = np.zeros((n_cols), dtype='float32')
onehot[label] = 1.0
self.data[split].append((Image.fromarray(imagearr), onehot,
dict(labels=self.meta['labels'],
filename=images,
split=split,
types=('pil', 'labels'))))
if split == 'train':
n = self.config['val_num']
if self.config['val_perc'] is not None:
n = int(len(self.data['train']) * self.config['val_perc'] // 100)
if n is not None:
if n > len(self.data['train']):
raise VergeMLError("number of test samples is greater than number of available samples.")
rng = random.Random(self.config['random_seed'])
count = len(self.data[split])
indices = rng.sample(range(count), count)
self.data['val'] = [self.data['train'][i] for i in indices[:n]]
self.data['train'] = [self.data['train'][i] for i in indices[n:]]
else:
if self.config['test_num']:
if self.config['test_num'] > len(self.data['test']):
raise VergeMLError("number of test samples is greater than number of available samples.")
rng = random.Random(self.config['random_seed'])
indices = rng.sample(range(len(self.data[split])), len(pixels))
self.data['test'] = [self.data['test'][i] for i in indices[:n]]
plugin = InputMnist
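# The preloading above relies on the raw IDX layout; the same data can be read
# standalone (paths illustrative) as:
#     with gzip.open('train-images-idx3-ubyte.gz') as f:
#         images = np.frombuffer(f.read(), 'B', offset=16).reshape(-1, 28, 28)
#     with gzip.open('train-labels-idx1-ubyte.gz') as f:
#         labels = np.frombuffer(f.read(), 'B', offset=8)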
| 2.140625 | 2 |
spellChecker/spellCheckerApp/apps.py | spell-checkers/web-spell-checker | 0 | 12788867 | from django.apps import AppConfig
class SpellcheckerappConfig(AppConfig):
name = 'spellCheckerApp'
| 1.070313 | 1 |
src/RIOT/tests/gnrc_sock_ip/tests/01-run.py | ARte-team/ARte | 2 | 12788868 | <gh_stars>1-10
#!/usr/bin/env python3
# Copyright (C) 2016 <NAME> <<EMAIL>>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
from testrunner import run
def testfunc(child):
child.expect_exact(u"Calling test_sock_ip_create__EAFNOSUPPORT()")
child.expect_exact(u"Calling test_sock_ip_create__EINVAL_addr()")
child.expect_exact(u"Calling test_sock_ip_create__EINVAL_netif()")
child.expect_exact(u"Calling test_sock_ip_create__no_endpoints()")
child.expect_exact(u"Calling test_sock_ip_create__only_local()")
child.expect_exact(u"Calling test_sock_ip_create__only_local_reuse_ep()")
child.expect_exact(u"Calling test_sock_ip_create__only_remote()")
child.expect_exact(u"Calling test_sock_ip_create__full()")
child.expect_exact(u"Calling test_sock_ip_recv__EADDRNOTAVAIL()")
child.expect_exact(u"Calling test_sock_ip_recv__ENOBUFS()")
child.expect_exact(u"Calling test_sock_ip_recv__EPROTO()")
child.expect_exact(u"Calling test_sock_ip_recv__ETIMEDOUT()")
child.expect_exact(u" * Calling sock_ip_recv()")
child.expect(r" \* \(timed out with timeout \d+\)")
child.expect_exact(u"Calling test_sock_ip_recv__socketed()")
child.expect_exact(u"Calling test_sock_ip_recv__socketed_with_remote()")
child.expect_exact(u"Calling test_sock_ip_recv__unsocketed()")
child.expect_exact(u"Calling test_sock_ip_recv__unsocketed_with_remote()")
child.expect_exact(u"Calling test_sock_ip_recv__with_timeout()")
child.expect_exact(u"Calling test_sock_ip_send__EAFNOSUPPORT()")
child.expect_exact(u"Calling test_sock_ip_send__EINVAL_addr()")
child.expect_exact(u"Calling test_sock_ip_send__EINVAL_netif()")
child.expect_exact(u"Calling test_sock_ip_send__ENOTCONN()")
child.expect_exact(u"Calling test_sock_ip_send__socketed_no_local_no_netif()")
child.expect_exact(u"Calling test_sock_ip_send__socketed_no_netif()")
child.expect_exact(u"Calling test_sock_ip_send__socketed_no_local()")
child.expect_exact(u"Calling test_sock_ip_send__socketed()")
child.expect_exact(u"Calling test_sock_ip_send__socketed_other_remote()")
child.expect_exact(u"Calling test_sock_ip_send__unsocketed_no_local_no_netif()")
child.expect_exact(u"Calling test_sock_ip_send__unsocketed_no_netif()")
child.expect_exact(u"Calling test_sock_ip_send__unsocketed_no_local()")
child.expect_exact(u"Calling test_sock_ip_send__unsocketed()")
child.expect_exact(u"Calling test_sock_ip_send__no_sock_no_netif()")
child.expect_exact(u"Calling test_sock_ip_send__no_sock()")
child.expect_exact(u"ALL TESTS SUCCESSFUL")
if __name__ == "__main__":
sys.exit(run(testfunc))
| 1.984375 | 2 |
loldib/getratings/models/NA/na_kalista/__init__.py | koliupy/loldib | 0 | 12788869 | <reponame>koliupy/loldib<filename>loldib/getratings/models/NA/na_kalista/__init__.py
from .na_kalista_top import *
from .na_kalista_jng import *
from .na_kalista_mid import *
from .na_kalista_bot import *
from .na_kalista_sup import *
| 1.078125 | 1 |
mne/tests/test_coreg.py | jdammers/mne-python | 0 | 12788870 | from glob import glob
import os
import os.path as op
from shutil import copyfile
from nose.tools import assert_raises
import numpy as np
from numpy.testing import assert_array_almost_equal
import mne
from mne.datasets import testing
from mne.transforms import (Transform, apply_trans, rotation, translation,
scaling)
from mne.coreg import (fit_matched_points, create_default_subject, scale_mri,
_is_mri_subject, scale_labels, scale_source_space,
coregister_fiducials)
from mne.io.constants import FIFF
from mne.utils import _TempDir, run_tests_if_main
from mne.source_space import write_source_spaces
from functools import reduce
def test_coregister_fiducials():
"""Test coreg.coregister_fiducials()"""
# prepare head and MRI fiducials
trans = Transform('head', 'mri',
rotation(.4, .1, 0).dot(translation(.1, -.1, .1)))
coords_orig = np.array([[-0.08061612, -0.02908875, -0.04131077],
[0.00146763, 0.08506715, -0.03483611],
[0.08436285, -0.02850276, -0.04127743]])
coords_trans = apply_trans(trans, coords_orig)
def make_dig(coords, cf):
return ({'coord_frame': cf, 'ident': 1, 'kind': 1, 'r': coords[0]},
{'coord_frame': cf, 'ident': 2, 'kind': 1, 'r': coords[1]},
{'coord_frame': cf, 'ident': 3, 'kind': 1, 'r': coords[2]})
mri_fiducials = make_dig(coords_trans, FIFF.FIFFV_COORD_MRI)
info = {'dig': make_dig(coords_orig, FIFF.FIFFV_COORD_HEAD)}
# test coregister_fiducials()
trans_est = coregister_fiducials(info, mri_fiducials)
assert trans_est.from_str == trans.from_str
assert trans_est.to_str == trans.to_str
assert_array_almost_equal(trans_est['trans'], trans['trans'])
@testing.requires_testing_data
def test_scale_mri():
"""Test creating fsaverage and scaling it."""
# create fsaverage using the testing "fsaverage" instead of the FreeSurfer
# one
tempdir = _TempDir()
fake_home = testing.data_path()
create_default_subject(subjects_dir=tempdir, fs_home=fake_home,
verbose=True)
assert _is_mri_subject('fsaverage', tempdir), "Creating fsaverage failed"
fid_path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif')
os.remove(fid_path)
create_default_subject(update=True, subjects_dir=tempdir,
fs_home=fake_home)
assert op.exists(fid_path), "Updating fsaverage"
# copy MRI file from sample data (shouldn't matter that it's incorrect,
# so here choose a small one)
path_from = op.join(testing.data_path(), 'subjects', 'sample', 'mri',
'T1.mgz')
path_to = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
copyfile(path_from, path_to)
# remove redundant label files
label_temp = op.join(tempdir, 'fsaverage', 'label', '*.label')
label_paths = glob(label_temp)
for label_path in label_paths[1:]:
os.remove(label_path)
# create source space
print('Creating surface source space')
path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-%s-src.fif')
src = mne.setup_source_space('fsaverage', 'ico0', subjects_dir=tempdir,
add_dist=False)
write_source_spaces(path % 'ico-0', src)
mri = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
print('Creating volume source space')
vsrc = mne.setup_volume_source_space(
'fsaverage', pos=50, mri=mri, subjects_dir=tempdir,
add_interpolator=False)
write_source_spaces(path % 'vol-50', vsrc)
# scale fsaverage
os.environ['_MNE_FEW_SURFACES'] = 'true'
scale = np.array([1, .2, .8])
scale_mri('fsaverage', 'flachkopf', scale, True, subjects_dir=tempdir,
verbose='debug')
del os.environ['_MNE_FEW_SURFACES']
assert _is_mri_subject('flachkopf', tempdir), "Scaling fsaverage failed"
spath = op.join(tempdir, 'flachkopf', 'bem', 'flachkopf-%s-src.fif')
assert op.exists(spath % 'ico-0'), "Source space ico-0 was not scaled"
assert os.path.isfile(os.path.join(tempdir, 'flachkopf', 'surf',
'lh.sphere.reg'))
vsrc_s = mne.read_source_spaces(spath % 'vol-50')
pt = np.array([0.12, 0.41, -0.22])
assert_array_almost_equal(apply_trans(vsrc_s[0]['src_mri_t'], pt * scale),
apply_trans(vsrc[0]['src_mri_t'], pt))
scale_labels('flachkopf', subjects_dir=tempdir)
# add distances to source space
mne.add_source_space_distances(src)
src.save(path % 'ico-0', overwrite=True)
# scale with distances
os.remove(spath % 'ico-0')
scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
ssrc = mne.read_source_spaces(spath % 'ico-0')
assert ssrc[0]['dist'] is not None
def test_fit_matched_points():
"""Test fit_matched_points: fitting two matching sets of points"""
tgt_pts = np.random.RandomState(42).uniform(size=(6, 3))
# rotation only
trans = rotation(2, 6, 3)
src_pts = apply_trans(trans, tgt_pts)
trans_est = fit_matched_points(src_pts, tgt_pts, translate=False,
out='trans')
est_pts = apply_trans(trans_est, src_pts)
assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
"rotation")
# rotation & translation
trans = np.dot(translation(2, -6, 3), rotation(2, 6, 3))
src_pts = apply_trans(trans, tgt_pts)
trans_est = fit_matched_points(src_pts, tgt_pts, out='trans')
est_pts = apply_trans(trans_est, src_pts)
assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
"rotation and translation.")
# rotation & translation & scaling
trans = reduce(np.dot, (translation(2, -6, 3), rotation(1.5, .3, 1.4),
scaling(.5, .5, .5)))
src_pts = apply_trans(trans, tgt_pts)
trans_est = fit_matched_points(src_pts, tgt_pts, scale=1, out='trans')
est_pts = apply_trans(trans_est, src_pts)
assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
"rotation, translation and scaling.")
# test exceeding tolerance
tgt_pts[0, :] += 20
assert_raises(RuntimeError, fit_matched_points, tgt_pts, src_pts, tol=10)
run_tests_if_main()
| 1.90625 | 2 |
surgame/src/particles.py | anokata/pythonPetProjects | 3 | 12788871 | import util
import pygame
import math
import images
class Particles(util.Block):
lifetime = 100
def __init__(self, x, y, n=10, lifetime=10, imgname=images.particleDefault):
super().__init__(x, y, imgname)
        self.particles_xyd = list() # particle coordinates and velocities (x, y, dx, dy)
spd = 2
self.lifetime = lifetime
self.n = n
for i in range(n):
p = {'x': x, 'y': y}
p['dx'] = spd * (math.cos(i*2*math.pi/n))
p['dy'] = spd * (math.sin(i*2*math.pi/n))
self.particles_xyd.append(p)
def step(self):
self.lifetime -= 1
if self.lifetime:
for p in self.particles_xyd:
p['x'] += p['dx']
p['y'] += p['dy']
return True
else:
return False
def draw(self, cam, screen):
if self.lifetime:
for p in self.particles_xyd:
super().draw(p['x'], p['y'], cam, screen)
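# Usage sketch (cam/screen are assumed from the surrounding project): spawn a burst
# and advance it each frame until step() reports that the particles have expired.
#     burst = Particles(x=100, y=100, n=20, lifetime=30)
#     while burst.step():
#         burst.draw(cam, screen)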
| 3.109375 | 3 |
tests/test_maphash.py | MoonVision/maphash | 3 | 12788872 | <filename>tests/test_maphash.py
import json
from pathlib import Path
from maphash import maphash
def test_hashes_int():
assert (
maphash(1) == "67b176705b46206614219f47a05aee7ae6a3edbe850bbbe214c536b989aea4d2"
)
def test_hashes_float():
assert (
maphash(0.1)
== "75bd59c8426679f3ef7b3a37184ee08e2e5ecee840e330acf6782d77cf2a2d1b"
)
def test_hashes_str():
assert (
maphash("string")
== "00ff5fd099f3820fa1196c77d97331caaec09301635641a113b1b81d268b26df"
)
def test_hashes_dict():
assert (
maphash(dict())
== "840eb7aa2a9935de63366bacbe9d97e978a859e93dc792a0334de60ed52f8e99"
)
def test_hashes_list():
assert (
maphash(list())
== "ca4510738395af1429224dd785675309c344b2b549632e20275c69b15ed1d210"
)
def test_hashes_none():
assert (
maphash(None)
== "3ea445410f608e6453cdcb7dbe42d57a89aca018993d7e87da85993cbccc6308"
)
def test_hashes_complex_document():
with Path("tests/json-schema-v7.json").open() as fp:
doc = json.load(fp)
assert (
maphash(doc)
== "37385bcbbdf1ea13531c53db2784b26ca7248c283b432c382c12e6f1e65d0249"
)
def test_different_key_order_same_hash():
assert maphash(dict(a=1, b=2, c=3)) == maphash(dict(b=2, c=3, a=1))
def test_additional_entry_different_hash():
d1 = dict(d=4)
d2 = d1.copy()
d2["e"] = 5
assert maphash(d1) != maphash(d2)
def test_different_list_item_order_different_hash():
assert maphash([1, 2, 3]) != maphash([2, 3, 1])
| 2.78125 | 3 |
frozenweb/__init__.py | rufrozen/frozenweb | 0 | 12788873 | <reponame>rufrozen/frozenweb
from .config import Config
from .builder import Builder
from .server import Server
| 1.0625 | 1 |
promise-types/http/http_promise_type.py | olehermanse/cfengine_basics | 0 | 12788874 | """HTTP module for CFEngine"""
import os
import urllib
import urllib.request
import ssl
import json
from cfengine import PromiseModule, ValidationError, Result
_SUPPORTED_METHODS = {"GET", "POST", "PUT", "DELETE", "PATCH"}
class HTTPPromiseModule(PromiseModule):
def __init__(self, *args, **kwargs):
super().__init__("http_promise_module", "1.0.0", *args, **kwargs)
def validate_promise(self, promiser, attributes):
if "url" in attributes:
url = attributes["url"]
if type(url) != str:
raise ValidationError("'url' must be a string")
if not url.startswith(("https://", "http://")):
raise ValidationError("Only HTTP(S) requests are supported")
if "method" in attributes:
method = attributes["method"]
if type(method) != str:
raise ValidationError("'method' must be a string")
if method not in _SUPPORTED_METHODS:
raise ValidationError("'method' must be one of %s" % ", ".join(_SUPPORTED_METHODS))
if "headers" in attributes:
headers = attributes["headers"]
headers_type = type(headers)
if headers_type == str:
headers_lines = headers.splitlines()
if any(line.count(":") != 1 for line in headers_lines):
raise ValidationError("'headers' must be string with 'name: value' pairs on separate lines")
elif headers_type == list:
if any(line.count(":") != 1 for line in headers):
raise ValidationError("'headers' must be a list of 'name: value' pairs")
elif headers_type == dict:
# nothing to check for dict?
pass
else:
raise ValidationError("'headers' must be a string, an slist or a data container" +
" value with 'name: value' pairs")
if "payload" in attributes:
payload = attributes["payload"]
if type(payload) not in (str, dict):
raise ValidationError("'payload' must be a string or a data container value")
if type(payload) == str and payload.startswith("@") and not os.path.isabs(payload[1:]):
raise ValidationError("File-based payload must be an absolute path")
if "file" in attributes:
file_ = attributes["file"]
if type(file_) != str or not os.path.isabs(file_):
raise ValidationError("'file' must be an absolute path to a file")
if "insecure" in attributes:
insecure = attributes["insecure"]
if type(insecure) != str or insecure not in ("true", "True", "false", "False"):
raise ValidationError("'insecure' must be either \"true\" or \"false\"")
def evaluate_promise(self, promiser, attributes):
url = attributes.get("url", promiser)
method = attributes.get("method", "GET")
headers = attributes.get("headers", dict())
payload = attributes.get("payload")
target = attributes.get("file")
insecure = attributes.get("insecure", False)
canonical_promiser = promiser.translate(str.maketrans({char: "_" for char in ("@", "/", ":", "?", "&", "%")}))
if headers and type(headers) != dict:
if type(headers) == str:
headers = {key: value for key, value in (line.split(":") for line in headers.splitlines())}
elif type(headers) == list:
headers = {key: value for key, value in (line.split(":") for line in headers)}
if payload:
if type(payload) == dict:
try:
payload = json.dumps(payload)
except TypeError:
self.log_error("Failed to convert 'payload' to text representation for request '%s'" % url)
return (Result.NOT_KEPT,
["%s_%s_request_failed" % (canonical_promiser, method),
"%s_%s_payload_failed" % (canonical_promiser, method),
"%s_%s_payload_conversion_failed" % (canonical_promiser, method)])
if "Content-Type" not in headers:
headers["Content-Type"] = "application/json"
elif payload.startswith("@"):
path = payload[1:]
try:
# Closed automatically when this variable gets out of
# scope. Thank you, Python!
payload = open(path, "rb")
except OSError as e:
self.log_error("Failed to open payload file '%s' for request '%s': %s" % (path, url, e))
return (Result.NOT_KEPT,
["%s_%s_request_failed" % (canonical_promiser, method),
"%s_%s_payload_failed" % (canonical_promiser, method),
"%s_%s_payload_file_failed" % (canonical_promiser, method)])
if "Content-Lenght" not in headers:
headers["Content-Length"] = os.path.getsize(path)
# must be 'None' or bytes or file object
if type(payload) == str:
payload = payload.encode("utf-8")
request = urllib.request.Request(url=url, data=payload, method=method, headers=headers)
SSL_context = None
if insecure:
# convert to a boolean
insecure = (insecure.lower() == "true")
if insecure:
SSL_context = ssl.SSLContext()
                SSL_context.verify_mode = ssl.CERT_NONE
try:
if target:
# TODO: create directories
with open(target, "wb") as target_file:
with urllib.request.urlopen(request, context=SSL_context) as url_req:
if not (200 <= url_req.status <= 300):
self.log_error("Request for '%s' failed with code %d" % (url, url_req.status))
return (Result.NOT_KEPT, ["%s_%s_request_failed" % (canonical_promiser, method)])
# TODO: log progress when url_req.headers["Content-length"] > REPORTING_THRESHOLD
done = False
while not done:
data = url_req.read(512 * 1024)
target_file.write(data)
                            done = not data  # stop once read() returns no more bytes
else:
with urllib.request.urlopen(request, context=SSL_context) as url_req:
if not (200 <= url_req.status <= 300):
self.log_error("Request for '%s' failed with code %d" % (url, url_req.status))
return (Result.NOT_KEPT, ["%s_%s_request_failed" % (canonical_promiser, method)])
done = False
while not done:
data = url_req.read(512 * 1024)
                        done = not data  # stop once read() returns no more bytes
except urllib.error.URLError as e:
self.log_error("Failed to request '%s': %s" % (url, e))
return (Result.NOT_KEPT, ["%s_%s_request_failed" % (canonical_promiser, method)])
except OSError as e:
self.log_error("Failed to store '%s' response to '%s': %s" % (url, target, e))
return (Result.NOT_KEPT,
["%s_%s_request_failed" % (canonical_promiser, method),
"%s_%s_file_failed" % (canonical_promiser, method)])
if target:
self.log_info("Saved request response from '%s' to '%s'" % (url, target))
else:
self.log_info("Successfully executed%s request to '%s'" % ((" " + method if method else ""),
url))
return (Result.REPAIRED, ["%s_%s_request_done" % (canonical_promiser, method)])
if __name__ == "__main__":
HTTPPromiseModule().start()
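# cf-agent drives this module over stdin/stdout, so it is not normally run by hand;
# a rough local check of just the validation logic (promiser and attributes
# illustrative) could look like:
#     module = HTTPPromiseModule()
#     module.validate_promise("https://example.com/data.json",
#                             {"method": "GET", "file": "/tmp/data.json"})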
| 2.734375 | 3 |
preprocessing/get_industry_sector.py | marwage/stock_prediction | 0 | 12788875 | <reponame>marwage/stock_prediction
import logging
import os
import pandas as pd
import sys
from pymongo import MongoClient
def get(output_path: str):
client = MongoClient()
info_db = client["companyinfodb"]
collection_names = info_db.list_collection_names()
industries = set()
sectors = set()
for company in collection_names:
info = info_db[company].find_one({})
industries.add(info["Industry"])
sectors.add(info["Sector"])
print(industries)
print(sectors)
industries_list = []
industries_values = []
for i, industry in enumerate(industries):
industries_list.append(industry)
industries_values.append(float(i + 1))
data_frame = pd.DataFrame({"industry": industries_list, "value": industries_values})
file_name = "industries.csv"
data_frame.to_csv(os.path.join(output_path, file_name), index=False)
sector_list = []
sector_values = []
for i, sector in enumerate(sectors):
sector_list.append(sector)
sector_values.append(float(i + 1))
data_frame = pd.DataFrame({"sector": sector_list, "value": sector_values})
file_name = "sectors.csv"
data_frame.to_csv(os.path.join(output_path, file_name), index=False)
def main():
output_path = os.path.join(".", "data")
os.makedirs(output_path, exist_ok=True)
log_path = os.path.join(".", "log/get_industry_sector.log")
os.makedirs(os.path.dirname(log_path), exist_ok=True)
logging.basicConfig(
filename=log_path,
level=logging.INFO,
format="%(asctime)s:%(levelname)s:%(message)s"
)
get(output_path)
if __name__ == "__main__":
main()
| 2.59375 | 3 |
main_website/migrations/0004_auto_20201228_1649.py | kiza054/woodhall-website | 2 | 12788876 | <reponame>kiza054/woodhall-website
# Generated by Django 3.1.4 on 2020-12-28 16:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_website', '0003_auto_20201228_1456'),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.TextField()),
('start_time', models.DateTimeField()),
('end_time', models.DateTimeField()),
],
),
migrations.AlterModelOptions(
name='waitinglist',
options={'verbose_name': 'Waiting List Entry', 'verbose_name_plural': 'Waiting List Entries'},
),
migrations.AlterField(
model_name='waitinglist',
name='parent_carer_email',
field=models.EmailField(max_length=254, verbose_name='email of parent/carer'),
),
]
| 1.898438 | 2 |
src/genie/libs/parser/iosxr/tests/test_show_ethernet_yang.py | nujo/genieparser | 4 | 12788877 | <filename>src/genie/libs/parser/iosxr/tests/test_show_ethernet_yang.py<gh_stars>1-10
import re
import unittest
from unittest.mock import Mock
import xml.etree.ElementTree as ET
from pyats.topology import Device
from genie.ops.base import Context
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.parser.iosxr.show_ethernet import ShowEthernetTrunkDetail, \
ShowEthernetTags
class test_show_ethernet_tags_yang(unittest.TestCase):
device = Device(name='aDevice')
device1 = Device(name='bDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {'interface': {'GigabitEthernet0/0/0/0': {'sub_interface': {'GigabitEthernet0/0/0/0.501': {'vlan_id': {'2': {'inner_encapsulation_type': 'dot1q',
'inner_encapsulation_vlan_id': '5',
'mtu': '1522',
'outer_encapsulation_type': 'dot1q'}}}}}}}
class etree_holder():
def __init__(self):
self.data = ET.fromstring('''
<data>
<interfaces xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-pfi-im-cmd-oper">
<interface-xr>
<interface>
<interface-name>GigabitEthernet0/0/0/0</interface-name>
<interface-handle>GigabitEthernet0/0/0/0</interface-handle>
<interface-type>IFT_GETHERNET</interface-type>
<hardware-type-string>GigabitEthernet</hardware-type-string>
<state>im-state-up</state>
<line-state>im-state-up</line-state>
<encapsulation>ether</encapsulation>
<encapsulation-type-string>ARPA</encapsulation-type-string>
<mtu>1514</mtu>
<is-l2-transport-enabled>false</is-l2-transport-enabled>
<state-transition-count>1</state-transition-count>
<last-state-transition-time>1100222</last-state-transition-time>
<is-dampening-enabled>false</is-dampening-enabled>
<speed>1000000</speed>
<duplexity>im-attr-duplex-full</duplexity>
<media-type>im-attr-media-other</media-type>
<link-type>im-attr-link-type-force</link-type>
<in-flow-control>im-attr-flow-control-off</in-flow-control>
<out-flow-control>im-attr-flow-control-off</out-flow-control>
<mac-address>
<address>52:54:00:ff:50:0e</address>
</mac-address>
<burned-in-address>
<address>52:54:00:ff:50:0e</address>
</burned-in-address>
<carrier-delay>
<carrier-delay-up>10</carrier-delay-up>
<carrier-delay-down>0</carrier-delay-down>
</carrier-delay>
<bandwidth>1000000</bandwidth>
<max-bandwidth>1000000</max-bandwidth>
<is-l2-looped>false</is-l2-looped>
<description></description>
<data-rates>
<input-data-rate>0</input-data-rate>
<input-packet-rate>0</input-packet-rate>
<output-data-rate>0</output-data-rate>
<output-packet-rate>0</output-packet-rate>
<peak-input-data-rate>0</peak-input-data-rate>
<peak-input-packet-rate>0</peak-input-packet-rate>
<peak-output-data-rate>0</peak-output-data-rate>
<peak-output-packet-rate>0</peak-output-packet-rate>
<bandwidth>1000000</bandwidth>
<load-interval>9</load-interval>
<output-load>0</output-load>
<input-load>0</input-load>
<reliability>255</reliability>
</data-rates>
<interface-statistics>
<stats-type>full</stats-type>
<full-interface-stats>
<packets-received>0</packets-received>
<bytes-received>0</bytes-received>
<packets-sent>0</packets-sent>
<bytes-sent>0</bytes-sent>
<multicast-packets-received>0</multicast-packets-received>
<broadcast-packets-received>0</broadcast-packets-received>
<multicast-packets-sent>0</multicast-packets-sent>
<broadcast-packets-sent>0</broadcast-packets-sent>
<output-drops>0</output-drops>
<output-queue-drops>0</output-queue-drops>
<input-drops>0</input-drops>
<input-queue-drops>0</input-queue-drops>
<runt-packets-received>0</runt-packets-received>
<giant-packets-received>0</giant-packets-received>
<throttled-packets-received>0</throttled-packets-received>
<parity-packets-received>0</parity-packets-received>
<unknown-protocol-packets-received>0</unknown-protocol-packets-received>
<input-errors>0</input-errors>
<crc-errors>0</crc-errors>
<input-overruns>0</input-overruns>
<framing-errors-received>0</framing-errors-received>
<input-ignored-packets>0</input-ignored-packets>
<input-aborts>0</input-aborts>
<output-errors>0</output-errors>
<output-underruns>0</output-underruns>
<output-buffer-failures>0</output-buffer-failures>
<output-buffers-swapped-out>0</output-buffers-swapped-out>
<applique>0</applique>
<resets>0</resets>
<carrier-transitions>1</carrier-transitions>
<availability-flag>0</availability-flag>
<last-data-time>1490888108</last-data-time>
<seconds-since-last-clear-counters>0</seconds-since-last-clear-counters>
<last-discontinuity-time>1489787869</last-discontinuity-time>
<seconds-since-packet-received>4294967295</seconds-since-packet-received>
<seconds-since-packet-sent>4294967295</seconds-since-packet-sent>
</full-interface-stats>
</interface-statistics>
<if-index>0</if-index>
</interface>
<interface>
<interface-name>GigabitEthernet0/0/0/0.501</interface-name>
<interface-handle>GigabitEthernet0/0/0/0.501</interface-handle>
<interface-type>IFT_VLAN_SUBIF</interface-type>
<hardware-type-string>VLAN sub-interface(s)</hardware-type-string>
<state>im-state-up</state>
<line-state>im-state-up</line-state>
<encapsulation>dot1q</encapsulation>
<encapsulation-type-string>802.1Q</encapsulation-type-string>
<mtu>1522</mtu>
<is-l2-transport-enabled>false</is-l2-transport-enabled>
<state-transition-count>1</state-transition-count>
<last-state-transition-time>1100222</last-state-transition-time>
<is-dampening-enabled>false</is-dampening-enabled>
<speed>1000000</speed>
<mac-address>
<address>52:54:00:ff:50:0e</address>
</mac-address>
<carrier-delay>
<carrier-delay-up>0</carrier-delay-up>
<carrier-delay-down>0</carrier-delay-down>
</carrier-delay>
<bandwidth>1000000</bandwidth>
<max-bandwidth>1000000</max-bandwidth>
<is-l2-looped>false</is-l2-looped>
<parent-interface-name>GigabitEthernet0/0/0/0</parent-interface-name>
<description></description>
<encapsulation-information>
<encapsulation-type>vlan</encapsulation-type>
<dot1q-information>
<encapsulation-details>
<vlan-encapsulation>qinq</vlan-encapsulation>
<stack>
<outer-tag>2</outer-tag>
<second-tag>5</second-tag>
</stack>
</encapsulation-details>
</dot1q-information>
</encapsulation-information>
<data-rates>
<input-data-rate>0</input-data-rate>
<input-packet-rate>0</input-packet-rate>
<output-data-rate>0</output-data-rate>
<output-packet-rate>0</output-packet-rate>
<peak-input-data-rate>0</peak-input-data-rate>
<peak-input-packet-rate>0</peak-input-packet-rate>
<peak-output-data-rate>0</peak-output-data-rate>
<peak-output-packet-rate>0</peak-output-packet-rate>
<bandwidth>1000000</bandwidth>
<load-interval>9</load-interval>
<output-load>0</output-load>
<input-load>0</input-load>
<reliability>255</reliability>
</data-rates>
<interface-statistics>
<stats-type>full</stats-type>
<full-interface-stats>
<packets-received>0</packets-received>
<bytes-received>0</bytes-received>
<packets-sent>0</packets-sent>
<bytes-sent>0</bytes-sent>
<multicast-packets-received>0</multicast-packets-received>
<broadcast-packets-received>0</broadcast-packets-received>
<multicast-packets-sent>0</multicast-packets-sent>
<broadcast-packets-sent>0</broadcast-packets-sent>
<output-drops>0</output-drops>
<output-queue-drops>0</output-queue-drops>
<input-drops>0</input-drops>
<input-queue-drops>0</input-queue-drops>
<runt-packets-received>0</runt-packets-received>
<giant-packets-received>0</giant-packets-received>
<throttled-packets-received>0</throttled-packets-received>
<parity-packets-received>0</parity-packets-received>
<unknown-protocol-packets-received>0</unknown-protocol-packets-received>
<input-errors>0</input-errors>
<crc-errors>0</crc-errors>
<input-overruns>0</input-overruns>
<framing-errors-received>0</framing-errors-received>
<input-ignored-packets>0</input-ignored-packets>
<input-aborts>0</input-aborts>
<output-errors>0</output-errors>
<output-underruns>0</output-underruns>
<output-buffer-failures>0</output-buffer-failures>
<output-buffers-swapped-out>0</output-buffers-swapped-out>
<applique>0</applique>
<resets>0</resets>
<carrier-transitions>0</carrier-transitions>
<availability-flag>0</availability-flag>
<last-data-time>1490888108</last-data-time>
<seconds-since-last-clear-counters>0</seconds-since-last-clear-counters>
<last-discontinuity-time>1489787915</last-discontinuity-time>
<seconds-since-packet-received>4294967295</seconds-since-packet-received>
<seconds-since-packet-sent>4294967295</seconds-since-packet-sent>
</full-interface-stats>
</interface-statistics>
<if-index>0</if-index>
</interface>
<interface>
<interface-name>GigabitEthernet0/0/0/1</interface-name>
<interface-handle>GigabitEthernet0/0/0/1</interface-handle>
<interface-type>IFT_GETHERNET</interface-type>
<hardware-type-string>GigabitEthernet</hardware-type-string>
<state>im-state-admin-down</state>
<line-state>im-state-admin-down</line-state>
<encapsulation>ether</encapsulation>
<encapsulation-type-string>ARPA</encapsulation-type-string>
<mtu>1514</mtu>
<is-l2-transport-enabled>false</is-l2-transport-enabled>
<state-transition-count>0</state-transition-count>
<last-state-transition-time>1100377</last-state-transition-time>
<is-dampening-enabled>false</is-dampening-enabled>
<speed>1000000</speed>
<duplexity>im-attr-duplex-full</duplexity>
<media-type>im-attr-media-other</media-type>
<link-type>im-attr-link-type-force</link-type>
<in-flow-control>im-attr-flow-control-off</in-flow-control>
<out-flow-control>im-attr-flow-control-off</out-flow-control>
<mac-address>
<address>52:54:00:ff:34:03</address>
</mac-address>
<burned-in-address>
<address>52:54:00:ff:34:03</address>
</burned-in-address>
<carrier-delay>
<carrier-delay-up>10</carrier-delay-up>
<carrier-delay-down>0</carrier-delay-down>
</carrier-delay>
<bandwidth>1000000</bandwidth>
<max-bandwidth>1000000</max-bandwidth>
<is-l2-looped>false</is-l2-looped>
<description></description>
<data-rates>
<input-data-rate>0</input-data-rate>
<input-packet-rate>0</input-packet-rate>
<output-data-rate>0</output-data-rate>
<output-packet-rate>0</output-packet-rate>
<peak-input-data-rate>0</peak-input-data-rate>
<peak-input-packet-rate>0</peak-input-packet-rate>
<peak-output-data-rate>0</peak-output-data-rate>
<peak-output-packet-rate>0</peak-output-packet-rate>
<bandwidth>1000000</bandwidth>
<load-interval>9</load-interval>
<output-load>0</output-load>
<input-load>0</input-load>
<reliability>255</reliability>
</data-rates>
<interface-statistics>
<stats-type>full</stats-type>
<full-interface-stats>
<packets-received>0</packets-received>
<bytes-received>0</bytes-received>
<packets-sent>0</packets-sent>
<bytes-sent>0</bytes-sent>
<multicast-packets-received>0</multicast-packets-received>
<broadcast-packets-received>0</broadcast-packets-received>
<multicast-packets-sent>0</multicast-packets-sent>
<broadcast-packets-sent>0</broadcast-packets-sent>
<output-drops>0</output-drops>
<output-queue-drops>0</output-queue-drops>
<input-drops>0</input-drops>
<input-queue-drops>0</input-queue-drops>
<runt-packets-received>0</runt-packets-received>
<giant-packets-received>0</giant-packets-received>
<throttled-packets-received>0</throttled-packets-received>
<parity-packets-received>0</parity-packets-received>
<unknown-protocol-packets-received>0</unknown-protocol-packets-received>
<input-errors>0</input-errors>
<crc-errors>0</crc-errors>
<input-overruns>0</input-overruns>
<framing-errors-received>0</framing-errors-received>
<input-ignored-packets>0</input-ignored-packets>
<input-aborts>0</input-aborts>
<output-errors>0</output-errors>
<output-underruns>0</output-underruns>
<output-buffer-failures>0</output-buffer-failures>
<output-buffers-swapped-out>0</output-buffers-swapped-out>
<applique>0</applique>
<resets>0</resets>
<carrier-transitions>0</carrier-transitions>
<availability-flag>0</availability-flag>
<last-data-time>1490888108</last-data-time>
<seconds-since-last-clear-counters>0</seconds-since-last-clear-counters>
<last-discontinuity-time>1489787869</last-discontinuity-time>
<seconds-since-packet-received>4294967295</seconds-since-packet-received>
<seconds-since-packet-sent>4294967295</seconds-since-packet-sent>
</full-interface-stats>
</interface-statistics>
<if-index>0</if-index>
</interface>
<interface>
<interface-name>GigabitEthernet0/0/0/2</interface-name>
<interface-handle>GigabitEthernet0/0/0/2</interface-handle>
<interface-type>IFT_GETHERNET</interface-type>
<hardware-type-string>GigabitEthernet</hardware-type-string>
<state>im-state-admin-down</state>
<line-state>im-state-admin-down</line-state>
<encapsulation>ether</encapsulation>
<encapsulation-type-string>ARPA</encapsulation-type-string>
<mtu>1514</mtu>
<is-l2-transport-enabled>false</is-l2-transport-enabled>
<state-transition-count>0</state-transition-count>
<last-state-transition-time>1100377</last-state-transition-time>
<is-dampening-enabled>false</is-dampening-enabled>
<speed>1000000</speed>
<duplexity>im-attr-duplex-full</duplexity>
<media-type>im-attr-media-other</media-type>
<link-type>im-attr-link-type-force</link-type>
<in-flow-control>im-attr-flow-control-off</in-flow-control>
<out-flow-control>im-attr-flow-control-off</out-flow-control>
<mac-address>
<address>52:54:00:ff:40:4e</address>
</mac-address>
<burned-in-address>
<address>52:54:00:ff:40:4e</address>
</burned-in-address>
<carrier-delay>
<carrier-delay-up>10</carrier-delay-up>
<carrier-delay-down>0</carrier-delay-down>
</carrier-delay>
<bandwidth>1000000</bandwidth>
<max-bandwidth>1000000</max-bandwidth>
<is-l2-looped>false</is-l2-looped>
<description></description>
<data-rates>
<input-data-rate>0</input-data-rate>
<input-packet-rate>0</input-packet-rate>
<output-data-rate>0</output-data-rate>
<output-packet-rate>0</output-packet-rate>
<peak-input-data-rate>0</peak-input-data-rate>
<peak-input-packet-rate>0</peak-input-packet-rate>
<peak-output-data-rate>0</peak-output-data-rate>
<peak-output-packet-rate>0</peak-output-packet-rate>
<bandwidth>1000000</bandwidth>
<load-interval>9</load-interval>
<output-load>0</output-load>
<input-load>0</input-load>
<reliability>255</reliability>
</data-rates>
<interface-statistics>
<stats-type>full</stats-type>
<full-interface-stats>
<packets-received>0</packets-received>
<bytes-received>0</bytes-received>
<packets-sent>0</packets-sent>
<bytes-sent>0</bytes-sent>
<multicast-packets-received>0</multicast-packets-received>
<broadcast-packets-received>0</broadcast-packets-received>
<multicast-packets-sent>0</multicast-packets-sent>
<broadcast-packets-sent>0</broadcast-packets-sent>
<output-drops>0</output-drops>
<output-queue-drops>0</output-queue-drops>
<input-drops>0</input-drops>
<input-queue-drops>0</input-queue-drops>
<runt-packets-received>0</runt-packets-received>
<giant-packets-received>0</giant-packets-received>
<throttled-packets-received>0</throttled-packets-received>
<parity-packets-received>0</parity-packets-received>
<unknown-protocol-packets-received>0</unknown-protocol-packets-received>
<input-errors>0</input-errors>
<crc-errors>0</crc-errors>
<input-overruns>0</input-overruns>
<framing-errors-received>0</framing-errors-received>
<input-ignored-packets>0</input-ignored-packets>
<input-aborts>0</input-aborts>
<output-errors>0</output-errors>
<output-underruns>0</output-underruns>
<output-buffer-failures>0</output-buffer-failures>
<output-buffers-swapped-out>0</output-buffers-swapped-out>
<applique>0</applique>
<resets>0</resets>
<carrier-transitions>0</carrier-transitions>
<availability-flag>0</availability-flag>
<last-data-time>1490888108</last-data-time>
<seconds-since-last-clear-counters>0</seconds-since-last-clear-counters>
<last-discontinuity-time>1489787869</last-discontinuity-time>
<seconds-since-packet-received>4294967295</seconds-since-packet-received>
<seconds-since-packet-sent>4294967295</seconds-since-packet-sent>
</full-interface-stats>
</interface-statistics>
<if-index>0</if-index>
</interface>
<interface>
<interface-name>GigabitEthernet0/0/0/3</interface-name>
<interface-handle>GigabitEthernet0/0/0/3</interface-handle>
<interface-type>IFT_GETHERNET</interface-type>
<hardware-type-string>GigabitEthernet</hardware-type-string>
<state>im-state-admin-down</state>
<line-state>im-state-admin-down</line-state>
<encapsulation>ether</encapsulation>
<encapsulation-type-string>ARPA</encapsulation-type-string>
<mtu>1514</mtu>
<is-l2-transport-enabled>false</is-l2-transport-enabled>
<state-transition-count>0</state-transition-count>
<last-state-transition-time>1100377</last-state-transition-time>
<is-dampening-enabled>false</is-dampening-enabled>
<speed>1000000</speed>
<duplexity>im-attr-duplex-full</duplexity>
<media-type>im-attr-media-other</media-type>
<link-type>im-attr-link-type-force</link-type>
<in-flow-control>im-attr-flow-control-off</in-flow-control>
<out-flow-control>im-attr-flow-control-off</out-flow-control>
<mac-address>
<address>52:54:00:ff:f9:5e</address>
</mac-address>
<burned-in-address>
<address>52:54:00:ff:f9:5e</address>
</burned-in-address>
<carrier-delay>
<carrier-delay-up>10</carrier-delay-up>
<carrier-delay-down>0</carrier-delay-down>
</carrier-delay>
<bandwidth>1000000</bandwidth>
<max-bandwidth>1000000</max-bandwidth>
<is-l2-looped>false</is-l2-looped>
<description></description>
<data-rates>
<input-data-rate>0</input-data-rate>
<input-packet-rate>0</input-packet-rate>
<output-data-rate>0</output-data-rate>
<output-packet-rate>0</output-packet-rate>
<peak-input-data-rate>0</peak-input-data-rate>
<peak-input-packet-rate>0</peak-input-packet-rate>
<peak-output-data-rate>0</peak-output-data-rate>
<peak-output-packet-rate>0</peak-output-packet-rate>
<bandwidth>1000000</bandwidth>
<load-interval>9</load-interval>
<output-load>0</output-load>
<input-load>0</input-load>
<reliability>255</reliability>
</data-rates>
<interface-statistics>
<stats-type>full</stats-type>
<full-interface-stats>
<packets-received>0</packets-received>
<bytes-received>0</bytes-received>
<packets-sent>0</packets-sent>
<bytes-sent>0</bytes-sent>
<multicast-packets-received>0</multicast-packets-received>
<broadcast-packets-received>0</broadcast-packets-received>
<multicast-packets-sent>0</multicast-packets-sent>
<broadcast-packets-sent>0</broadcast-packets-sent>
<output-drops>0</output-drops>
<output-queue-drops>0</output-queue-drops>
<input-drops>0</input-drops>
<input-queue-drops>0</input-queue-drops>
<runt-packets-received>0</runt-packets-received>
<giant-packets-received>0</giant-packets-received>
<throttled-packets-received>0</throttled-packets-received>
<parity-packets-received>0</parity-packets-received>
<unknown-protocol-packets-received>0</unknown-protocol-packets-received>
<input-errors>0</input-errors>
<crc-errors>0</crc-errors>
<input-overruns>0</input-overruns>
<framing-errors-received>0</framing-errors-received>
<input-ignored-packets>0</input-ignored-packets>
<input-aborts>0</input-aborts>
<output-errors>0</output-errors>
<output-underruns>0</output-underruns>
<output-buffer-failures>0</output-buffer-failures>
<output-buffers-swapped-out>0</output-buffers-swapped-out>
<applique>0</applique>
<resets>0</resets>
<carrier-transitions>0</carrier-transitions>
<availability-flag>0</availability-flag>
<last-data-time>1490888108</last-data-time>
<seconds-since-last-clear-counters>0</seconds-since-last-clear-counters>
<last-discontinuity-time>1489787869</last-discontinuity-time>
<seconds-since-packet-received>4294967295</seconds-since-packet-received>
<seconds-since-packet-sent>4294967295</seconds-since-packet-sent>
</full-interface-stats>
</interface-statistics>
<if-index>0</if-index>
</interface>
<interface>
<interface-name>GigabitEthernet0/0/0/4</interface-name>
<interface-handle>GigabitEthernet0/0/0/4</interface-handle>
<interface-type>IFT_GETHERNET</interface-type>
<hardware-type-string>GigabitEthernet</hardware-type-string>
<state>im-state-admin-down</state>
<line-state>im-state-admin-down</line-state>
<encapsulation>ether</encapsulation>
<encapsulation-type-string>ARPA</encapsulation-type-string>
<mtu>1514</mtu>
<is-l2-transport-enabled>false</is-l2-transport-enabled>
<state-transition-count>0</state-transition-count>
<last-state-transition-time>1100377</last-state-transition-time>
<is-dampening-enabled>false</is-dampening-enabled>
<speed>1000000</speed>
<duplexity>im-attr-duplex-full</duplexity>
<media-type>im-attr-media-other</media-type>
<link-type>im-attr-link-type-force</link-type>
<in-flow-control>im-attr-flow-control-off</in-flow-control>
<out-flow-control>im-attr-flow-control-off</out-flow-control>
<mac-address>
<address>52:54:00:ff:23:6b</address>
</mac-address>
<burned-in-address>
<address>52:54:00:ff:23:6b</address>
</burned-in-address>
<carrier-delay>
<carrier-delay-up>10</carrier-delay-up>
<carrier-delay-down>0</carrier-delay-down>
</carrier-delay>
<bandwidth>1000000</bandwidth>
<max-bandwidth>1000000</max-bandwidth>
<is-l2-looped>false</is-l2-looped>
<description></description>
<data-rates>
<input-data-rate>0</input-data-rate>
<input-packet-rate>0</input-packet-rate>
<output-data-rate>0</output-data-rate>
<output-packet-rate>0</output-packet-rate>
<peak-input-data-rate>0</peak-input-data-rate>
<peak-input-packet-rate>0</peak-input-packet-rate>
<peak-output-data-rate>0</peak-output-data-rate>
<peak-output-packet-rate>0</peak-output-packet-rate>
<bandwidth>1000000</bandwidth>
<load-interval>9</load-interval>
<output-load>0</output-load>
<input-load>0</input-load>
<reliability>255</reliability>
</data-rates>
<interface-statistics>
<stats-type>full</stats-type>
<full-interface-stats>
<packets-received>0</packets-received>
<bytes-received>0</bytes-received>
<packets-sent>0</packets-sent>
<bytes-sent>0</bytes-sent>
<multicast-packets-received>0</multicast-packets-received>
<broadcast-packets-received>0</broadcast-packets-received>
<multicast-packets-sent>0</multicast-packets-sent>
<broadcast-packets-sent>0</broadcast-packets-sent>
<output-drops>0</output-drops>
<output-queue-drops>0</output-queue-drops>
<input-drops>0</input-drops>
<input-queue-drops>0</input-queue-drops>
<runt-packets-received>0</runt-packets-received>
<giant-packets-received>0</giant-packets-received>
<throttled-packets-received>0</throttled-packets-received>
<parity-packets-received>0</parity-packets-received>
<unknown-protocol-packets-received>0</unknown-protocol-packets-received>
<input-errors>0</input-errors>
<crc-errors>0</crc-errors>
<input-overruns>0</input-overruns>
<framing-errors-received>0</framing-errors-received>
<input-ignored-packets>0</input-ignored-packets>
<input-aborts>0</input-aborts>
<output-errors>0</output-errors>
<output-underruns>0</output-underruns>
<output-buffer-failures>0</output-buffer-failures>
<output-buffers-swapped-out>0</output-buffers-swapped-out>
<applique>0</applique>
<resets>0</resets>
<carrier-transitions>0</carrier-transitions>
<availability-flag>0</availability-flag>
<last-data-time>1490888108</last-data-time>
<seconds-since-last-clear-counters>0</seconds-since-last-clear-counters>
<last-discontinuity-time>1489787869</last-discontinuity-time>
<seconds-since-packet-received>4294967295</seconds-since-packet-received>
<seconds-since-packet-sent>4294967295</seconds-since-packet-sent>
</full-interface-stats>
</interface-statistics>
<if-index>0</if-index>
</interface>
<interface>
<interface-name>GigabitEthernet0/0/0/5</interface-name>
<interface-handle>GigabitEthernet0/0/0/5</interface-handle>
<interface-type>IFT_GETHERNET</interface-type>
<hardware-type-string>GigabitEthernet</hardware-type-string>
<state>im-state-admin-down</state>
<line-state>im-state-admin-down</line-state>
<encapsulation>ether</encapsulation>
<encapsulation-type-string>ARPA</encapsulation-type-string>
<mtu>1514</mtu>
<is-l2-transport-enabled>false</is-l2-transport-enabled>
<state-transition-count>0</state-transition-count>
<last-state-transition-time>1100377</last-state-transition-time>
<is-dampening-enabled>false</is-dampening-enabled>
<speed>1000000</speed>
<duplexity>im-attr-duplex-full</duplexity>
<media-type>im-attr-media-other</media-type>
<link-type>im-attr-link-type-force</link-type>
<in-flow-control>im-attr-flow-control-off</in-flow-control>
<out-flow-control>im-attr-flow-control-off</out-flow-control>
<mac-address>
<address>52:54:00:ff:f5:54</address>
</mac-address>
<burned-in-address>
<address>52:54:00:ff:f5:54</address>
</burned-in-address>
<carrier-delay>
<carrier-delay-up>10</carrier-delay-up>
<carrier-delay-down>0</carrier-delay-down>
</carrier-delay>
<bandwidth>1000000</bandwidth>
<max-bandwidth>1000000</max-bandwidth>
<is-l2-looped>false</is-l2-looped>
<description></description>
<data-rates>
<input-data-rate>0</input-data-rate>
<input-packet-rate>0</input-packet-rate>
<output-data-rate>0</output-data-rate>
<output-packet-rate>0</output-packet-rate>
<peak-input-data-rate>0</peak-input-data-rate>
<peak-input-packet-rate>0</peak-input-packet-rate>
<peak-output-data-rate>0</peak-output-data-rate>
<peak-output-packet-rate>0</peak-output-packet-rate>
<bandwidth>1000000</bandwidth>
<load-interval>9</load-interval>
<output-load>0</output-load>
<input-load>0</input-load>
<reliability>255</reliability>
</data-rates>
<interface-statistics>
<stats-type>full</stats-type>
<full-interface-stats>
<packets-received>0</packets-received>
<bytes-received>0</bytes-received>
<packets-sent>0</packets-sent>
<bytes-sent>0</bytes-sent>
<multicast-packets-received>0</multicast-packets-received>
<broadcast-packets-received>0</broadcast-packets-received>
<multicast-packets-sent>0</multicast-packets-sent>
<broadcast-packets-sent>0</broadcast-packets-sent>
<output-drops>0</output-drops>
<output-queue-drops>0</output-queue-drops>
<input-drops>0</input-drops>
<input-queue-drops>0</input-queue-drops>
<runt-packets-received>0</runt-packets-received>
<giant-packets-received>0</giant-packets-received>
<throttled-packets-received>0</throttled-packets-received>
<parity-packets-received>0</parity-packets-received>
<unknown-protocol-packets-received>0</unknown-protocol-packets-received>
<input-errors>0</input-errors>
<crc-errors>0</crc-errors>
<input-overruns>0</input-overruns>
<framing-errors-received>0</framing-errors-received>
<input-ignored-packets>0</input-ignored-packets>
<input-aborts>0</input-aborts>
<output-errors>0</output-errors>
<output-underruns>0</output-underruns>
<output-buffer-failures>0</output-buffer-failures>
<output-buffers-swapped-out>0</output-buffers-swapped-out>
<applique>0</applique>
<resets>0</resets>
<carrier-transitions>0</carrier-transitions>
<availability-flag>0</availability-flag>
<last-data-time>1490888108</last-data-time>
<seconds-since-last-clear-counters>0</seconds-since-last-clear-counters>
<last-discontinuity-time>1489787869</last-discontinuity-time>
<seconds-since-packet-received>4294967295</seconds-since-packet-received>
<seconds-since-packet-sent>4294967295</seconds-since-packet-sent>
</full-interface-stats>
</interface-statistics>
<if-index>0</if-index>
</interface>
<interface>
<interface-name>GigabitEthernet0/0/0/6</interface-name>
<interface-handle>GigabitEthernet0/0/0/6</interface-handle>
<interface-type>IFT_GETHERNET</interface-type>
<hardware-type-string>GigabitEthernet</hardware-type-string>
<state>im-state-admin-down</state>
<line-state>im-state-admin-down</line-state>
<encapsulation>ether</encapsulation>
<encapsulation-type-string>ARPA</encapsulation-type-string>
<mtu>1514</mtu>
<is-l2-transport-enabled>false</is-l2-transport-enabled>
<state-transition-count>0</state-transition-count>
<last-state-transition-time>1100377</last-state-transition-time>
<is-dampening-enabled>false</is-dampening-enabled>
<speed>1000000</speed>
<duplexity>im-attr-duplex-full</duplexity>
<media-type>im-attr-media-other</media-type>
<link-type>im-attr-link-type-force</link-type>
<in-flow-control>im-attr-flow-control-off</in-flow-control>
<out-flow-control>im-attr-flow-control-off</out-flow-control>
<mac-address>
<address>52:54:00:ff:d2:b1</address>
</mac-address>
<burned-in-address>
<address>52:54:00:ff:d2:b1</address>
</burned-in-address>
<carrier-delay>
<carrier-delay-up>10</carrier-delay-up>
<carrier-delay-down>0</carrier-delay-down>
</carrier-delay>
<bandwidth>1000000</bandwidth>
<max-bandwidth>1000000</max-bandwidth>
<is-l2-looped>false</is-l2-looped>
<description></description>
<data-rates>
<input-data-rate>0</input-data-rate>
<input-packet-rate>0</input-packet-rate>
<output-data-rate>0</output-data-rate>
<output-packet-rate>0</output-packet-rate>
<peak-input-data-rate>0</peak-input-data-rate>
<peak-input-packet-rate>0</peak-input-packet-rate>
<peak-output-data-rate>0</peak-output-data-rate>
<peak-output-packet-rate>0</peak-output-packet-rate>
<bandwidth>1000000</bandwidth>
<load-interval>9</load-interval>
<output-load>0</output-load>
<input-load>0</input-load>
<reliability>255</reliability>
</data-rates>
<interface-statistics>
<stats-type>full</stats-type>
<full-interface-stats>
<packets-received>0</packets-received>
<bytes-received>0</bytes-received>
<packets-sent>0</packets-sent>
<bytes-sent>0</bytes-sent>
<multicast-packets-received>0</multicast-packets-received>
<broadcast-packets-received>0</broadcast-packets-received>
<multicast-packets-sent>0</multicast-packets-sent>
<broadcast-packets-sent>0</broadcast-packets-sent>
<output-drops>0</output-drops>
<output-queue-drops>0</output-queue-drops>
<input-drops>0</input-drops>
<input-queue-drops>0</input-queue-drops>
<runt-packets-received>0</runt-packets-received>
<giant-packets-received>0</giant-packets-received>
<throttled-packets-received>0</throttled-packets-received>
<parity-packets-received>0</parity-packets-received>
<unknown-protocol-packets-received>0</unknown-protocol-packets-received>
<input-errors>0</input-errors>
<crc-errors>0</crc-errors>
<input-overruns>0</input-overruns>
<framing-errors-received>0</framing-errors-received>
<input-ignored-packets>0</input-ignored-packets>
<input-aborts>0</input-aborts>
<output-errors>0</output-errors>
<output-underruns>0</output-underruns>
<output-buffer-failures>0</output-buffer-failures>
<output-buffers-swapped-out>0</output-buffers-swapped-out>
<applique>0</applique>
<resets>0</resets>
<carrier-transitions>0</carrier-transitions>
<availability-flag>0</availability-flag>
<last-data-time>1490888108</last-data-time>
<seconds-since-last-clear-counters>0</seconds-since-last-clear-counters>
<last-discontinuity-time>1489787869</last-discontinuity-time>
<seconds-since-packet-received>4294967295</seconds-since-packet-received>
<seconds-since-packet-sent>4294967295</seconds-since-packet-sent>
</full-interface-stats>
</interface-statistics>
<if-index>0</if-index>
</interface>
<interface>
<interface-name>MgmtEth0/0/CPU0/0</interface-name>
<interface-handle>MgmtEth0/0/CPU0/0</interface-handle>
<interface-type>IFT_ETHERNET</interface-type>
<hardware-type-string>Management Ethernet</hardware-type-string>
<state>im-state-up</state>
<line-state>im-state-up</line-state>
<encapsulation>ether</encapsulation>
<encapsulation-type-string>ARPA</encapsulation-type-string>
<mtu>1514</mtu>
<is-l2-transport-enabled>false</is-l2-transport-enabled>
<state-transition-count>1</state-transition-count>
<last-state-transition-time>1100222</last-state-transition-time>
<is-dampening-enabled>false</is-dampening-enabled>
<speed>1000000</speed>
<duplexity>im-attr-duplex-unknown</duplexity>
<media-type>im-attr-media-other</media-type>
<link-type>im-attr-link-type-auto</link-type>
<in-flow-control>im-attr-flow-control-off</in-flow-control>
<out-flow-control>im-attr-flow-control-off</out-flow-control>
<mac-address>
<address>52:54:00:ff:99:42</address>
</mac-address>
<burned-in-address>
<address>52:54:00:ff:99:42</address>
</burned-in-address>
<carrier-delay>
<carrier-delay-up>10</carrier-delay-up>
<carrier-delay-down>0</carrier-delay-down>
</carrier-delay>
<bandwidth>1000000</bandwidth>
<max-bandwidth>1000000</max-bandwidth>
<is-l2-looped>false</is-l2-looped>
<description></description>
<arp-information>
<arp-timeout>14400</arp-timeout>
<arp-type-name>ARPA</arp-type-name>
<arp-is-learning-disabled>false</arp-is-learning-disabled>
</arp-information>
<ip-information>
<ip-address>10.85.112.123</ip-address>
<subnet-mask-length>25</subnet-mask-length>
</ip-information>
<data-rates>
<input-data-rate>192</input-data-rate>
<input-packet-rate>392</input-packet-rate>
<output-data-rate>70</output-data-rate>
<output-packet-rate>105</output-packet-rate>
<peak-input-data-rate>0</peak-input-data-rate>
<peak-input-packet-rate>0</peak-input-packet-rate>
<peak-output-data-rate>0</peak-output-data-rate>
<peak-output-packet-rate>0</peak-output-packet-rate>
<bandwidth>1000000</bandwidth>
<load-interval>9</load-interval>
<output-load>0</output-load>
<input-load>0</input-load>
<reliability>255</reliability>
</data-rates>
<interface-statistics>
<stats-type>full</stats-type>
<full-interface-stats>
<packets-received>228836760</packets-received>
<bytes-received>13447429857</bytes-received>
<packets-sent>56486840</packets-sent>
<bytes-sent>4095136965</bytes-sent>
<multicast-packets-received>1042005</multicast-packets-received>
<broadcast-packets-received>174752</broadcast-packets-received>
<multicast-packets-sent>0</multicast-packets-sent>
<broadcast-packets-sent>21</broadcast-packets-sent>
<output-drops>0</output-drops>
<output-queue-drops>0</output-queue-drops>
<input-drops>0</input-drops>
<input-queue-drops>0</input-queue-drops>
<runt-packets-received>0</runt-packets-received>
<giant-packets-received>0</giant-packets-received>
<throttled-packets-received>0</throttled-packets-received>
<parity-packets-received>0</parity-packets-received>
<unknown-protocol-packets-received>0</unknown-protocol-packets-received>
<input-errors>0</input-errors>
<crc-errors>0</crc-errors>
<input-overruns>0</input-overruns>
<framing-errors-received>0</framing-errors-received>
<input-ignored-packets>0</input-ignored-packets>
<input-aborts>0</input-aborts>
<output-errors>0</output-errors>
<output-underruns>0</output-underruns>
<output-buffer-failures>0</output-buffer-failures>
<output-buffers-swapped-out>0</output-buffers-swapped-out>
<applique>0</applique>
<resets>0</resets>
<carrier-transitions>1</carrier-transitions>
<availability-flag>0</availability-flag>
<last-data-time>1490888108</last-data-time>
<seconds-since-last-clear-counters>0</seconds-since-last-clear-counters>
<last-discontinuity-time>1489787864</last-discontinuity-time>
<seconds-since-packet-received>0</seconds-since-packet-received>
<seconds-since-packet-sent>0</seconds-since-packet-sent>
</full-interface-stats>
</interface-statistics>
<if-index>0</if-index>
</interface>
<interface>
<interface-name>Null0</interface-name>
<interface-handle>Null0</interface-handle>
<interface-type>IFT_NULL</interface-type>
<hardware-type-string>Null interface</hardware-type-string>
<state>im-state-up</state>
<line-state>im-state-up</line-state>
<encapsulation>null</encapsulation>
<encapsulation-type-string>Null</encapsulation-type-string>
<mtu>1500</mtu>
<is-l2-transport-enabled>false</is-l2-transport-enabled>
<state-transition-count>1</state-transition-count>
<last-state-transition-time>1100254</last-state-transition-time>
<is-dampening-enabled>false</is-dampening-enabled>
<bandwidth>0</bandwidth>
<max-bandwidth>0</max-bandwidth>
<is-l2-looped>false</is-l2-looped>
<description></description>
<data-rates>
<input-data-rate>0</input-data-rate>
<input-packet-rate>0</input-packet-rate>
<output-data-rate>0</output-data-rate>
<output-packet-rate>0</output-packet-rate>
<peak-input-data-rate>0</peak-input-data-rate>
<peak-input-packet-rate>0</peak-input-packet-rate>
<peak-output-data-rate>0</peak-output-data-rate>
<peak-output-packet-rate>0</peak-output-packet-rate>
<bandwidth>0</bandwidth>
<load-interval>9</load-interval>
<output-load>0</output-load>
<input-load>0</input-load>
<reliability>255</reliability>
</data-rates>
<interface-statistics>
<stats-type>full</stats-type>
<full-interface-stats>
<packets-received>0</packets-received>
<bytes-received>0</bytes-received>
<packets-sent>0</packets-sent>
<bytes-sent>0</bytes-sent>
<multicast-packets-received>0</multicast-packets-received>
<broadcast-packets-received>0</broadcast-packets-received>
<multicast-packets-sent>0</multicast-packets-sent>
<broadcast-packets-sent>0</broadcast-packets-sent>
<output-drops>0</output-drops>
<output-queue-drops>0</output-queue-drops>
<input-drops>0</input-drops>
<input-queue-drops>0</input-queue-drops>
<runt-packets-received>0</runt-packets-received>
<giant-packets-received>0</giant-packets-received>
<throttled-packets-received>0</throttled-packets-received>
<parity-packets-received>0</parity-packets-received>
<unknown-protocol-packets-received>0</unknown-protocol-packets-received>
<input-errors>0</input-errors>
<crc-errors>0</crc-errors>
<input-overruns>0</input-overruns>
<framing-errors-received>0</framing-errors-received>
<input-ignored-packets>0</input-ignored-packets>
<input-aborts>0</input-aborts>
<output-errors>0</output-errors>
<output-underruns>0</output-underruns>
<output-buffer-failures>0</output-buffer-failures>
<output-buffers-swapped-out>0</output-buffers-swapped-out>
<applique>0</applique>
<resets>0</resets>
<carrier-transitions>0</carrier-transitions>
<availability-flag>0</availability-flag>
<last-data-time>1490888108</last-data-time>
<seconds-since-last-clear-counters>0</seconds-since-last-clear-counters>
<last-discontinuity-time>1489787884</last-discontinuity-time>
<seconds-since-packet-received>4294967295</seconds-since-packet-received>
<seconds-since-packet-sent>4294967295</seconds-since-packet-sent>
</full-interface-stats>
</interface-statistics>
<if-index>0</if-index>
</interface>
</interface-xr>
</interfaces>
</data>
''')
golden_output = {'get.return_value': etree_holder()}
def test_golden(self):
self.device = Mock(**self.golden_output)
intf_obj = ShowEthernetTags(device=self.device)
intf_obj.context = Context.yang.value.split()
parsed_output = intf_obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
empty_parsed_output = {'interface': {'GigabitEthernet0/0/0/0': {'sub_interface': {'GigabitEthernet0/0/0/0.501': {}}}}}
class empty_etree_holder():
def __init__(self):
self.data = ET.fromstring('''
<data>
<interfaces xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-pfi-im-cmd-oper">
<interface-xr>
<interface>
<interface-name>GigabitEthernet0/0/0/0.501</interface-name>
<interface-handle>GigabitEthernet0/0/0/0.501</interface-handle>
<interface-type>IFT_VLAN_SUBIF</interface-type>
<hardware-type-string>VLAN sub-interface(s)</hardware-type-string>
<state>im-state-up</state>
<line-state>im-state-up</line-state>
<encapsulation>dot1q</encapsulation>
<encapsulation-type-string>802.1Q</encapsulation-type-string>
<mtu>1522</mtu>
<is-l2-transport-enabled>false</is-l2-transport-enabled>
<state-transition-count>1</state-transition-count>
<last-state-transition-time>1100222</last-state-transition-time>
<is-dampening-enabled>false</is-dampening-enabled>
<speed>1000000</speed>
<mac-address>
<address>52:54:00:ff:50:0e</address>
</mac-address>
<carrier-delay>
<carrier-delay-up>0</carrier-delay-up>
<carrier-delay-down>0</carrier-delay-down>
</carrier-delay>
<bandwidth>1000000</bandwidth>
<max-bandwidth>1000000</max-bandwidth>
<is-l2-looped>false</is-l2-looped>
<parent-interface-name>GigabitEthernet0/0/0/0</parent-interface-name>
<description></description>
<encapsulation-information>
</encapsulation-information>
<data-rates>
<input-data-rate>0</input-data-rate>
<input-packet-rate>0</input-packet-rate>
<output-data-rate>0</output-data-rate>
<output-packet-rate>0</output-packet-rate>
<peak-input-data-rate>0</peak-input-data-rate>
<peak-input-packet-rate>0</peak-input-packet-rate>
<peak-output-data-rate>0</peak-output-data-rate>
<peak-output-packet-rate>0</peak-output-packet-rate>
<bandwidth>1000000</bandwidth>
<load-interval>9</load-interval>
<output-load>0</output-load>
<input-load>0</input-load>
<reliability>255</reliability>
</data-rates>
<interface-statistics>
<stats-type>full</stats-type>
<full-interface-stats>
<packets-received>0</packets-received>
<bytes-received>0</bytes-received>
<packets-sent>0</packets-sent>
<bytes-sent>0</bytes-sent>
<multicast-packets-received>0</multicast-packets-received>
<broadcast-packets-received>0</broadcast-packets-received>
<multicast-packets-sent>0</multicast-packets-sent>
<broadcast-packets-sent>0</broadcast-packets-sent>
<output-drops>0</output-drops>
<output-queue-drops>0</output-queue-drops>
<input-drops>0</input-drops>
<input-queue-drops>0</input-queue-drops>
<runt-packets-received>0</runt-packets-received>
<giant-packets-received>0</giant-packets-received>
<throttled-packets-received>0</throttled-packets-received>
<parity-packets-received>0</parity-packets-received>
<unknown-protocol-packets-received>0</unknown-protocol-packets-received>
<input-errors>0</input-errors>
<crc-errors>0</crc-errors>
<input-overruns>0</input-overruns>
<framing-errors-received>0</framing-errors-received>
<input-ignored-packets>0</input-ignored-packets>
<input-aborts>0</input-aborts>
<output-errors>0</output-errors>
<output-underruns>0</output-underruns>
<output-buffer-failures>0</output-buffer-failures>
<output-buffers-swapped-out>0</output-buffers-swapped-out>
<applique>0</applique>
<resets>0</resets>
<carrier-transitions>0</carrier-transitions>
<availability-flag>0</availability-flag>
<last-data-time>1490888108</last-data-time>
<seconds-since-last-clear-counters>0</seconds-since-last-clear-counters>
<last-discontinuity-time>1489787915</last-discontinuity-time>
<seconds-since-packet-received>4294967295</seconds-since-packet-received>
<seconds-since-packet-sent>4294967295</seconds-since-packet-sent>
</full-interface-stats>
</interface-statistics>
<if-index>0</if-index>
</interface>
</interface-xr>
</interfaces>
</data>
''')
empty_output = {'get.return_value': empty_etree_holder()}
def test_empty(self):
self.device1 = Mock(**self.empty_output)
intf_obj = ShowEthernetTags(device=self.device1)
intf_obj.context = Context.yang.value.split()
parsed_output = intf_obj.parse()
self.assertEqual(parsed_output,self.empty_parsed_output)
if __name__ == '__main__':
unittest.main() | 2.234375 | 2 |
pycket/prims/hash.py | namin/pycket | 129 | 12788878 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from pycket import impersonators as imp
from pycket import values, values_string
from pycket.hash.base import W_HashTable, W_ImmutableHashTable, w_missing
from pycket.hash.simple import (
W_EqvMutableHashTable, W_EqMutableHashTable,
W_EqvImmutableHashTable, W_EqImmutableHashTable,
make_simple_mutable_table, make_simple_mutable_table_assocs,
make_simple_immutable_table, make_simple_immutable_table_assocs)
from pycket.hash.equal import W_EqualHashTable
from pycket.impersonators.baseline import W_ImpHashTable, W_ChpHashTable
from pycket.cont import continuation, loop_label
from pycket.error import SchemeException
from pycket.prims.expose import default, expose, procedure, define_nyi
from rpython.rlib import jit, objectmodel
_KEY = 0
_VALUE = 1
_KEY_AND_VALUE = 2
_PAIR = 3
PREFIXES = ["unsafe-mutable", "unsafe-immutable"]
def prefix_hash_names(base):
result = [base]
for pre in PREFIXES:
result.append("%s-%s" % (pre, base))
return result
@expose(prefix_hash_names("hash-iterate-first"), [W_HashTable])
def hash_iterate_first(ht):
if ht.length() == 0:
return values.w_false
return values.W_Fixnum.ZERO
@expose(prefix_hash_names("hash-iterate-next"), [W_HashTable, values.W_Fixnum])
def hash_iterate_next(ht, pos):
return ht.hash_iterate_next(pos)
@objectmodel.specialize.arg(4)
def hash_iter_ref(ht, n, env, cont, returns):
from pycket.interpreter import return_value, return_multi_vals
try:
w_key, w_val = ht.get_item(n)
if returns == _KEY:
return return_value(w_key, env, cont)
if returns == _VALUE:
return return_value(w_val, env, cont)
if returns == _KEY_AND_VALUE:
vals = values.Values._make2(w_key, w_val)
return return_multi_vals(vals, env, cont)
if returns == _PAIR:
vals = values.W_Cons.make(w_key, w_val)
return return_value(vals, env, cont)
assert False, "unknown return code"
except KeyError:
raise SchemeException("hash-iterate-key: invalid position")
except IndexError:
raise SchemeException("hash-iterate-key: invalid position")
@expose(prefix_hash_names("hash-iterate-key"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_key(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_KEY)
@expose(prefix_hash_names("hash-iterate-value"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_value(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_VALUE)
@expose(prefix_hash_names("hash-iterate-key+value"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_key_value(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_KEY_AND_VALUE)
@expose(prefix_hash_names("hash-iterate-pair"),
[W_HashTable, values.W_Fixnum], simple=False)
def hash_iterate_pair(ht, pos, env, cont):
return hash_iter_ref(ht, pos.value, env, cont, returns=_PAIR)
@expose("hash-for-each", [W_HashTable, procedure, default(values.W_Object, values.w_false)], simple=False)
def hash_for_each(ht, f, try_order, env, cont):
# FIXME: implmeent try-order? -- see hash-map
return hash_for_each_loop(ht, f, 0, env, cont)
@loop_label
def hash_for_each_loop(ht, f, index, env, cont):
from pycket.interpreter import return_value
try:
w_key, w_value = ht.get_item(index)
except KeyError:
return hash_for_each_loop(ht, f, index + 1, env, cont)
except IndexError:
return return_value(values.w_void, env, cont)
return f.call([w_key, w_value], env,
hash_for_each_cont(ht, f, index, env, cont))
@continuation
def hash_for_each_cont(ht, f, index, env, cont, _vals):
return hash_for_each_loop(ht, f, index + 1, env, cont)
@expose("hash-map", [W_HashTable, procedure, default(values.W_Object, values.w_false)], simple=False)
def hash_map(h, f, try_order, env, cont):
# FIXME : If try-order? is true, then the order of keys and values
# passed to proc is normalized under certain circumstances, such
# as when the keys are all symbols and hash is not an
# impersonator.
from pycket.interpreter import return_value
acc = values.w_null
return hash_map_loop(f, h, 0, acc, env, cont)
# f.enable_jitting()
# return return_value(w_missing, env,
# hash_map_cont(f, h, 0, acc, env, cont))
@loop_label
def hash_map_loop(f, ht, index, w_acc, env, cont):
from pycket.interpreter import return_value
try:
w_key, w_value = ht.get_item(index)
except KeyError:
return hash_map_loop(f, ht, index + 1, w_acc, env, cont)
except IndexError:
return return_value(w_acc, env, cont)
after = hash_map_cont(f, ht, index, w_acc, env, cont)
return f.call([w_key, w_value], env, after)
@continuation
def hash_map_cont(f, ht, index, w_acc, env, cont, _vals):
from pycket.interpreter import check_one_val
w_val = check_one_val(_vals)
w_acc = values.W_Cons.make(w_val, w_acc)
return hash_map_loop(f, ht, index + 1, w_acc, env, cont)
@jit.elidable
def from_assocs(assocs, fname):
if not assocs.is_proper_list():
raise SchemeException("%s: expected proper list" % fname)
keys = []
vals = []
while isinstance(assocs, values.W_Cons):
val, assocs = assocs.car(), assocs.cdr()
if not isinstance(val, values.W_Cons):
raise SchemeException("%s: expected list of pairs" % fname)
keys.append(val.car())
vals.append(val.cdr())
return keys[:], vals[:]
@expose("make-weak-hasheq", [default(values.W_List, values.w_null)])
def make_weak_hasheq(assocs):
# FIXME: not actually weak
return make_simple_mutable_table_assocs(W_EqMutableHashTable, assocs, "make-weak-hasheq")
@expose("make-weak-hasheqv", [default(values.W_List, values.w_null)])
def make_weak_hasheqv(assocs):
# FIXME: not actually weak
return make_simple_mutable_table_assocs(W_EqvMutableHashTable, assocs, "make-weak-hasheqv")
@expose(["make-weak-hash", "make-late-weak-hasheq"], [default(values.W_List, None)])
def make_weak_hash(assocs):
if assocs is None:
return W_EqualHashTable([], [], immutable=False)
return W_EqualHashTable(*from_assocs(assocs, "make-weak-hash"), immutable=False)
@expose("make-immutable-hash", [default(values.W_List, values.w_null)])
def make_immutable_hash(assocs):
keys, vals = from_assocs(assocs, "make-immutable-hash")
return W_EqualHashTable(keys, vals, immutable=True)
@expose("make-immutable-hasheq", [default(values.W_List, values.w_null)])
def make_immutable_hasheq(assocs):
return make_simple_immutable_table_assocs(W_EqImmutableHashTable, assocs, "make-immutable-hasheq")
@expose("make-immutable-hasheqv", [default(values.W_List, values.w_null)])
def make_immutable_hasheqv(assocs):
return make_simple_immutable_table_assocs(W_EqvImmutableHashTable, assocs, "make-immutable-hasheq")
@expose("hash")
def hash(args):
if len(args) % 2 != 0:
raise SchemeException("hash: key does not have a corresponding value")
keys = [args[i] for i in range(0, len(args), 2)]
vals = [args[i] for i in range(1, len(args), 2)]
return W_EqualHashTable(keys, vals, immutable=True)
@expose("hasheq")
def hasheq(args):
if len(args) % 2 != 0:
raise SchemeException("hasheq: key does not have a corresponding value")
keys = [args[i] for i in range(0, len(args), 2)]
vals = [args[i] for i in range(1, len(args), 2)]
return make_simple_immutable_table(W_EqImmutableHashTable, keys, vals)
@expose("hasheqv")
def hasheqv(args):
if len(args) % 2 != 0:
raise SchemeException("hasheqv: key does not have a corresponding value")
keys = [args[i] for i in range(0, len(args), 2)]
vals = [args[i] for i in range(1, len(args), 2)]
return make_simple_immutable_table(W_EqvImmutableHashTable, keys, vals)
@expose("make-hash", [default(values.W_List, values.w_null)])
def make_hash(pairs):
return W_EqualHashTable(*from_assocs(pairs, "make-hash"))
@expose("make-hasheq", [default(values.W_List, values.w_null)])
def make_hasheq(pairs):
return make_simple_mutable_table_assocs(W_EqMutableHashTable, pairs, "make-hasheq")
@expose("make-hasheqv", [default(values.W_List, values.w_null)])
def make_hasheqv(pairs):
return make_simple_mutable_table_assocs(W_EqvMutableHashTable, pairs, "make-hasheqv")
@expose("hash-set!", [W_HashTable, values.W_Object, values.W_Object], simple=False)
def hash_set_bang(ht, k, v, env, cont):
if ht.immutable():
raise SchemeException("hash-set!: given immutable table")
return ht.hash_set(k, v, env, cont)
@continuation
def hash_set_cont(key, val, env, cont, _vals):
from pycket.interpreter import check_one_val
table = check_one_val(_vals)
return table.hash_set(key, val, env, return_table_cont(table, env, cont))
@continuation
def return_table_cont(table, env, cont, _vals):
from pycket.interpreter import return_value
return return_value(table, env, cont)
@expose("hash-set", [W_HashTable, values.W_Object, values.W_Object], simple=False)
def hash_set(table, key, val, env, cont):
from pycket.interpreter import return_value
if not table.immutable():
raise SchemeException("hash-set: not given an immutable table")
# Fast path
if isinstance(table, W_ImmutableHashTable):
new_table = table.assoc(key, val)
return return_value(new_table, env, cont)
return hash_copy(table, env,
hash_set_cont(key, val, env, cont))
@continuation
def hash_ref_cont(default, k, env, cont, _vals):
from pycket.interpreter import return_value, check_one_val
val = check_one_val(_vals)
if val is not w_missing:
return return_value(val, env, cont)
if default is None:
raise SchemeException("key %s not found"%k.tostring())
if default.iscallable():
return default.call([], env, cont)
return return_value(default, env, cont)
@expose("hash-ref", [W_HashTable, values.W_Object, default(values.W_Object, None)], simple=False)
def hash_ref(ht, k, default, env, cont):
return ht.hash_ref(k, env, hash_ref_cont(default, k, env, cont))
@expose("hash-remove!", [W_HashTable, values.W_Object], simple=False)
def hash_remove_bang(ht, k, env, cont):
if ht.immutable():
raise SchemeException("hash-remove!: expected mutable hash table")
return ht.hash_remove_inplace(k, env, cont)
@expose("hash-remove", [W_HashTable, values.W_Object], simple=False)
def hash_remove(ht, k, env, cont):
if not ht.immutable():
raise SchemeException("hash-remove: expected immutable hash table")
return ht.hash_remove(k, env, cont)
@continuation
def hash_clear_cont(ht, env, cont, _vals):
return hash_clear_loop(ht, env, cont)
def hash_clear_loop(ht, env, cont):
from pycket.interpreter import return_value
if ht.length() == 0:
return return_value(values.w_void, env, cont)
w_k, w_v = ht.get_item(0)
return ht.hash_remove_inplace(w_k, env, hash_clear_cont(ht, env, cont))
@expose("hash-clear!", [W_HashTable], simple=False)
def hash_clear_bang(ht, env, cont):
from pycket.interpreter import return_value
if ht.is_impersonator():
ht.hash_clear_proc(env, cont)
return hash_clear_loop(ht, env, cont)
else:
ht.hash_empty()
return return_value(values.w_void, env, cont)
define_nyi("hash-clear", [W_HashTable])
@expose("hash-count", [W_HashTable])
def hash_count(hash):
return values.W_Fixnum(hash.length())
@continuation
def hash_keys_subset_huh_cont(keys_vals, hash_2, idx, env, cont, _vals):
from pycket.interpreter import return_value, check_one_val
val = check_one_val(_vals)
if val is values.w_false:
return return_value(values.w_false, env, cont)
else:
return hash_keys_subset_huh_loop(keys_vals, hash_2, idx + 1, env, cont)
@loop_label
def hash_keys_subset_huh_loop(keys_vals, hash_2, idx, env, cont):
from pycket.interpreter import return_value
if idx >= len(keys_vals):
return return_value(values.w_true, env, cont)
else:
return hash_ref([hash_2, keys_vals[idx][0], values.w_false], env,
hash_keys_subset_huh_cont(keys_vals, hash_2, idx, env, cont))
@jit.elidable
def uses_same_eq_comparison(hash_1, hash_2):
h_1 = hash_1
h_2 = hash_2
if hash_1.is_impersonator() or hash_1.is_chaperone():
h_1 = hash_1.get_proxied()
if hash_2.is_impersonator() or hash_2.is_chaperone():
h_2 = hash_2.get_proxied()
if isinstance(h_1, W_EqualHashTable):
return isinstance(h_2, W_EqualHashTable)
elif isinstance(h_1, W_EqMutableHashTable) or isinstance(h_1, W_EqImmutableHashTable):
return isinstance(h_2, W_EqMutableHashTable) or isinstance(h_2, W_EqImmutableHashTable)
elif isinstance(h_1, W_EqvMutableHashTable) or isinstance(h_1, W_EqvImmutableHashTable):
return isinstance(h_2, W_EqvMutableHashTable) or isinstance(h_2, W_EqvImmutableHashTable)
else:
return False
@expose("hash-keys-subset?", [W_HashTable, W_HashTable], simple=False)
def hash_keys_subset_huh(hash_1, hash_2, env, cont):
if not uses_same_eq_comparison(hash_1, hash_2):
raise SchemeException("hash-keys-subset?: given hash tables do not use the same key comparison -- first table : %s - second table: %s" % (hash_1.tostring(), hash_2.tostring()))
return hash_keys_subset_huh_loop(hash_1.hash_items(), hash_2, 0, env, cont)
@continuation
def hash_copy_ref_cont(keys, idx, src, new, env, cont, _vals):
from pycket.interpreter import check_one_val
val = check_one_val(_vals)
return new.hash_set(keys[idx][0], val, env,
hash_copy_set_cont(keys, idx, src, new, env, cont))
@continuation
def hash_copy_set_cont(keys, idx, src, new, env, cont, _vals):
return hash_copy_loop(keys, idx + 1, src, new, env, cont)
@loop_label
def hash_copy_loop(keys, idx, src, new, env, cont):
from pycket.interpreter import return_value
if idx >= len(keys):
return return_value(new, env, cont)
return src.hash_ref(keys[idx][0], env,
hash_copy_ref_cont(keys, idx, src, new, env, cont))
def hash_copy(src, env, cont):
from pycket.interpreter import return_value
if isinstance(src, W_ImmutableHashTable):
new = src.make_copy()
return return_value(new, env, cont)
new = src.make_empty()
if src.length() == 0:
return return_value(new, env, cont)
return hash_copy_loop(src.hash_items(), 0, src, new, env, cont)
expose("hash-copy", [W_HashTable], simple=False)(hash_copy)
# FIXME: not implemented
@expose("equal-hash-code", [values.W_Object])
def equal_hash_code(v):
# only for improper path cache entries
if isinstance(v, values.W_Cons):
if v.is_proper_list():
return values.W_Fixnum.ZERO
nm = v.car()
p = v.cdr()
if isinstance(nm, values_string.W_String) and \
isinstance(p, values.W_Path) and \
isinstance(p.path, str):
return values.W_Fixnum(objectmodel.compute_hash((nm.tostring(), p.path)))
return values.W_Fixnum.ZERO
@expose("equal-secondary-hash-code", [values.W_Object])
def equal_secondary_hash_code(v):
return values.W_Fixnum.ZERO
@expose("eq-hash-code", [values.W_Object])
def eq_hash_code(v):
t = type(v)
if t is values.W_Fixnum:
return v
if t is values.W_Flonum:
hash = objectmodel.compute_hash(v.value)
elif t is values.W_Character:
hash = objectmodel.compute_hash(v.value)
else:
hash = objectmodel.compute_hash(v)
return values.W_Fixnum(hash)
@expose("eqv-hash-code", [values.W_Object])
def eqv_hash_code(v):
hash = v.hash_eqv()
return values.W_Fixnum(hash)
| 2.234375 | 2 |
bootloader/waflib/Tools/bison.py | BA7JCM/pyinstaller | 0 | 12788879 | <gh_stars>0
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
from waflib import Task
from waflib.TaskGen import extension
class bison(Task.Task):
color = 'BLUE'
run_str = '${BISON} ${BISONFLAGS} ${SRC[0].abspath()} -o ${TGT[0].name}'
ext_out = ['.h']
@extension('.y', '.yc', '.yy')
def big_bison(self, node):
has_h = '-d' in self.env.BISONFLAGS
outs = []
if node.name.endswith('.yc'):
outs.append(node.change_ext('.tab.cc'))
if has_h:
outs.append(node.change_ext('.tab.hh'))
else:
outs.append(node.change_ext('.tab.c'))
if has_h:
outs.append(node.change_ext('.tab.h'))
tsk = self.create_task('bison', node, outs)
tsk.cwd = node.parent.get_bld()
self.source.append(outs[0])
def configure(conf):
conf.find_program('bison', var='BISON')
conf.env.BISONFLAGS = ['-d']
| 2.140625 | 2 |
sharpen_image.py | danielskatz/parsl-example | 9 | 12788880 | import sys
try:
from PIL import Image, ImageFilter
except ImportError:
print("error:", sys.argv[0], "requires Pillow - install it via 'pip install Pillow'")
sys.exit(2)
if len(sys.argv) != 3:
print("error - usage:", sys.argv[0], "input_file output_file")
sys.exit(2)
input_filename = sys.argv[1]
output_filename = sys.argv[2]
#Read image
try:
im = Image.open(input_filename)
except OSError:
print("error - can't open file:", input_file)
sys.exit(2)
#Apply a filter to the image
im_sharp = im.filter(ImageFilter.SHARPEN)
#Save the filtered image to a new file
im_sharp.save(output_filename, 'JPEG')
| 3.328125 | 3 |
helbing_model.py | csebastiao/helbing_clogging | 0 | 12788881 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Model based on the Helbing model of "Simulation dynamical features of
escape panic" to simulate fish going through a hole in a wall.
"""
import numpy as np
import sys
import pandas as pd
from sklearn.neighbors import NearestNeighbors
class Fish():
"""
Fish that wants to swim to an objective out of the box.
Attributes
----------
size : float
Radius of the sphere representing the size of the fish
mass : float
Mass of the fish
position : numpy.array
Position of the fish as an array [x, y]
speed : numpy.array
Speed of the fish as an array [vx, vy]
desired_speed : float
Instant speed that the fish wants to have
objective : numpy.array
Actual position of the objective that the fish want to go to
first_objective : numpy.array
First objective of the fish, once attained, change to the second
second_objective : numpy.array
Second objective of the fish, once the first is attained
force : numpy.array
Force and social force on the fish as an array [fx, fy]
char_time : float
Characteristic time for a fish to get to his desired speed
color : str
Color of the fish, changes when the objective changes.
"""
def __init__(self, s, m, pos, tau, des_v, fobj, sobj):
self.size = s
self.mass = m
self.position = pos
self.speed = np.array([0., 0.])
self.desired_speed = des_v
self.objective = fobj
self.first_objective = fobj
self.second_objective = sobj
self.force = np.array([0., 0.])
self.char_time = tau
self.color = 'b'
def getCoords(self):
" Returns the position of the fish"
return self.position
def getColor(self):
" Returns the color of the fish"
return self.color
def getSize(self):
" Returns the radius of the fish"
return self.size
def objective_speed(self):
"""
        Returns the objective velocity of the fish, i.e. the velocity it wants
        to have: a vector whose magnitude is the desired speed and which points
        from the current position of the fish toward its objective.
Returns
-------
numpy.array
Objective speed of the fish
"""
return (self.desired_speed*
(self.objective - self.position)/
np.linalg.norm((self.objective - self.position)))
def get_neighbors(self, fish_list, N = 8):
"""
Finds the N neighbors of a fish
Parameters
----------
fish_list : list
List of every fish.
N : int, optional
            Number of neighbors for a fish. The default is 8.
Returns
-------
n_list : list
List of the N fish considered as the neighbors.
Notes
----------
See also get_positions()
"""
neigh = NearestNeighbors(n_neighbors = N)
pos_list, exc_list = get_positions(self, fish_list)
neigh.fit(pos_list) #fit the NN to the values
closest = neigh.kneighbors([self.position],
return_distance = False) #just take indices
n_list = []
for i in closest[0] :
n_list.append(exc_list[i])
return n_list
def force_friction(self, other_fish, A, B, k, kappa):
"""
Returns the force of friction of a fish onto another.
Parameters
----------
other_fish : Fish
Fish with who we measure the force of friction.
A : float
Repulsion constant.
B : float
Repulsion constant.
k : float
Body force constant.
kappa : float
Sliding friction force constant.
Returns
-------
numpy.array
Returns the force of friction of other_fish on self, as [fx, fy].
Notes
----------
See also gfunc()
"""
d = np.linalg.norm((self.position - other_fish.position)) #distance
n = np.array((self.position - other_fish.position) / d) #normalized
# vector going from the other fish to self
sum_r = self.size + other_fish.size #sum of the radius of both fish
t = np.array([-n[1], n[0]]) #normalized tangential vector
tvd = np.dot((self.speed - other_fish.speed), t) #tangential velocity
# difference
rep_f = A * np.exp((sum_r - d)/B) * n #repulsion force
bod_f = k * gfunc(sum_r - d, d, sum_r) * n #body force
sli_f = kappa * gfunc(sum_r - d, d, sum_r) * tvd * t #sliding
#friction force
return rep_f + bod_f + sli_f
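    # The three terms above follow the pairwise interaction force of
    # Helbing et al. (2000):
    #   f_ij = {A*exp[(r_ij - d_ij)/B] + k*g(r_ij - d_ij)} * n_ij
    #          + kappa*g(r_ij - d_ij) * (dv_ji . t_ij) * t_ij
    # where g(x) vanishes unless the two bodies are in contact (see gfunc
    # below).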
def force_wall(self, wall, A, B, k, kappa):
"""
Returns the force between the fish and the walls.
Parameters
----------
wall : list
Position of the upper and down coordinates of the hole in the
wall, as [[xu, yu], [xd, yd]]
A : float
Repulsion constant.
B : float
Repulsion constant.
k : float
Body force constant.
kappa : float
Sliding friction force constant.
Returns
-------
numpy.array
Returns the force of friction of the wall on self, as [fx, fy].
Notes
----------
See also gfunc()
"""
#find the closest position of the wall to the fish
if self.position[1] > wall[0][1] or self.position[1] < wall[1][1] :
if self.position[0] - wall[0][0] < 0:
wall_pos = [wall[0][0], self.position[1]]
#Make a thickness to the walls, as thick as the hole_size
            elif self.position[0] - wall[0][0] < wall[0][1] - wall[1][1]:
wall_pos = [self.position[0] + 0.1, self.position[1]]
else :
wall_pos = [wall[0][0], self.position[1]]
elif self.position[1] >= 0 :
wall_pos = wall[0]
elif self.position[1] < 0 :
wall_pos = wall[1]
d = np.linalg.norm((self.position - wall_pos)) #see force_friction
n = np.array((self.position - wall_pos) / d)
sum_r = self.size
t = np.array([-n[1], n[0]])
tvd = np.dot(self.speed, t)
rep_f = A * np.exp((sum_r - d)/B) * n
bod_f = k * gfunc(sum_r - d, d, sum_r) * n
sli_f = kappa * gfunc(sum_r - d, d, sum_r) * tvd * t
return rep_f + bod_f - sli_f
def total_force(self, fish_list, wall, A, B, k, kappa):
"""
Returns the total force exerted on a fish from other fish, the wall
and the urge to go to the objective.
Parameters
----------
other_fish : Fish
Fish with who we measure the force of friction.
wall : list
Position of the upper and down coordinates of the hole in the
wall, as [[xu, yu], [xd, yd]]
A : float
Repulsion constant.
B : float
Repulsion constant.
k : float
Body force constant.
kappa : float
Sliding friction force constant.
Returns
-------
numpy.array
Total force on the fish as an array [fx, fy].
Notes
----------
See also get_neighbors(), force_friction(), and force_wall()
"""
ff = np.array([0.,0.])
neighbors = self.get_neighbors(fish_list) #get every neighbors
for j in range(len(neighbors)): #add friction_force for each one
ff += self.force_friction(neighbors[j], A, B, k, kappa)
fw = self.force_wall(wall, A, B, k, kappa)
fs = self.mass * (self.objective_speed() - self.speed)/self.char_time
return fs + ff + fw
def update_force(self, fish_list, wall, A, B, k, kappa):
" Update the force on the fish, see total_force()"
self.force = self.total_force(fish_list, wall, A, B, k, kappa)
def update_status(self, dt):
" Update the position and speed of the fish, see verlet_alg()"
self.position, self.speed = verlet_alg(self.position, self.speed,
self.force, self.mass, dt)
# def update_objective(self, d = 0.2):
# " Update the position of the actual objective of the fish"
# if self.objective == self.first_objective:
# if (self.position[0] > self.objective[0]) or (
# angle_between(-(self.objective - self.position),
# [-1, 0]) < (np.pi/12)) or (
# np.linalg.norm((self.position -
# self.first_objective[0])) < d ) :
# self.objective = self.second_objective
# self.color = 'orange' #change the color with the objective
# elif self.objective == self.second_objective:
# if (angle_between(-(self.objective - self.position), [-1, 0]) >
# (np.pi/12)) and (np.linalg.norm((
# self.position - self.first_objective)) > d) and (
# self.position[0] < self.first_objective[0]):
# self.objective = self.first_objective
# self.color = 'b' #change the color with the objective
def update_objective(self, d = 0.35):
" Update the position of the actual objective of the fish"
if self.objective == self.first_objective:
if ((np.linalg.norm((self.position - self.first_objective)) < d)
or (self.position[0] > self.first_objective[0])) :
self.objective = self.second_objective
self.color = 'orange' #change the color with the objective
def evolveTimeStep(self, fish_list, wall, A, B, k, kappa, dt):
" Make one timestep with an update of the characteristics of the fish"
self.update_force(fish_list, wall, A, B, k, kappa)
self.update_status(dt)
self.update_objective()
def verlet_first(pos, vel, force, m, delta_t):
"First part of the Verlet algorithm, see verlet_alg"
r = np.array(pos)
v = np.array(vel)
a = np.array(force/m)
return r + delta_t * v + 0.5 * a * delta_t**2, v + 0.5 * delta_t * a
def verlet_second(pos, vel, force, m, delta_t):
"Second part of the Verlet algorithm, see verlet_alg"
r = np.array(pos)
v = np.array(vel)
a = np.array(force/m)
return r, v + 0.5 * delta_t * a
def verlet_alg(pos, vel, force, m, delta_t):
"""
Verlet algorithm for the evolution of the position of a particle. Need to
be on two steps because we need "half-time" values. Useful because energy
is conserved (with small oscillations around the value).
Parameters
----------
pos : numpy.array
Initial position as an array [x, y].
vel : numpy.array
Initial speed as an array [vx, vy].
force : numpy.array
Initial force as an array [fx, fy].
m : float
Mass of the particle.
delta_t : float
Time passed between the initial and final time.
Returns
-------
r : numpy.array
Updated position as an array [x, y].
v : numpy.array
Updated speed as an array [vx, vy].
"""
r, v = verlet_first(pos, vel, force, m, delta_t)
r, v = verlet_second(r, v, force, m, delta_t)
return r, v
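# Note: verlet_second reuses the force that verlet_first was given, so this is
# exact velocity-Verlet only if the force is constant over the step (here the
# forces are recomputed once per timestep in the main loop below).
# Illustrative single step for a unit mass under a constant force
# (hypothetical values):
#     r1, v1 = verlet_alg(np.array([0., 0.]), np.array([1., 0.]),
#                         np.array([0., -9.81]), 1.0, 0.01)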
def gfunc(x, d, r):
"g function from Helbing, useful for contact forces."
if d > r :
return 0
else :
return x
def get_positions(target, fish_list):
"""
We want to get the positions (and the list of corresponding fish) of
every fish except the target fish.
Parameters
----------
target : Fish
Target fish.
fish_list : list
List of every fish.
Returns
-------
pos_list : list
List of positions of every fish except the target fish.
exc_list : list
List of every fish except the target fish.
"""
pos_list = []
exc_list = []
for fish in fish_list:
if fish == target :
pass
else :
pos_list.append([fish.position[0], fish.position[1]])
exc_list.append(fish)
return pos_list, exc_list
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'. """
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def crystal_init_pos(xmin, xmax, ymin, ymax, n):
"""
Initialized ordered positions of n**2 object in a box between xmin and
xmax and ymin and ymax such that they are not on top of each other. We use
n such that we know that there is an integer as a square root of the
number of object to get the same number of full rows and columns.
Parameters
----------
xmin : float
Minimal x value.
xmax : float
Maximal x value.
ymin : float
Minimal y value.
ymax : float
Maximal y value.
n : int
Square root of the number of fish.
Returns
-------
xp : list
x coordinates of every ordered object.
yp : list
y coordinates of every ordered object.
"""
xp = []
yp = []
if n == 1: #avoid division by 0, put the only object in the middle
xp.append((xmax - xmin)/2 + xmin)
yp.append((ymax - ymin)/2 + ymin)
else :
for i in range(n): #variation of y then x, so columns after columns
for j in range(n):
xp.append(xmin + (i/(n-1))*(xmax-xmin))
yp.append(ymin + (j/(n-1))*(ymax-ymin))
return xp, yp
def no_overlap(x, y, s, others):
"""
Verifies that there is no overlap between the target and other objects,
they're all circles but with different sizes.
Parameters
----------
x : float
x-position of the target.
y : float
y-position of the target.
s : float
Radius of the target.
others : list
Other object listed as [[x1, y1, s1], [x2, y2, s2], ...].
Returns
-------
bool
True if there is no overlap, False otherwise.
"""
for o in others :
if np.linalg.norm(np.array([x,y]) - np.array(o[:2])) < s + o[2]:
return False
else :
pass
return True
def random_init_pos(C, R, size, others):
"""
Returns a random initial position within a circle of center C and radius
R of an circle with a radius size, with no overlap with other objects.
Parameters
----------
C : list
Position of the center of the circle, as [xc, yc].
R : float
Radius of the circle.
size : float
Radius of the randomly initialized object.
others : list
Other object listed as [[x1, y1, s1], [x2, y2, s2], ...].
Returns
-------
list
Position and raidus of the object as [x, y, size].
Notes
----------
See also no_overlap().
"""
pos = []
while len(pos) < 1:
r = R * np.sqrt(np.random.random()) #random polar coordinates
theta = np.random.random() * 2 * np.pi
x = C[0] + r * np.cos(theta) #swith to cartesian coordinates
y = C[1] + r * np.sin(theta)
if no_overlap(x, y, size, others) == True:
pos.append([x, y, size]) #add if there is no overlap with others
else :
pass
return pos[0]
def no_overlap_random_pos(C, R, size_list, N):
"""
Returns random initial position of circle object of various radii within
a circle of center C and radius R.
Parameters
----------
C : list
Position of the center of the circle, as [xc, yc].
R : float
Radius of the circle.
size_list : list
List of the radii of the objects we want to randomly initialize.
N : int
Number of objects.
Raises
------
ValueError
If there is not as much radii as the number of objects wanted.
Returns
-------
numpy.array
Array of the position of every object, as [[x1, y1], ..., [xN, yN]].
Notes
----------
See also random_init_pos().
"""
pos_list = []
if len(size_list) != N :
raise ValueError('Not as much radii as object')
for i in range(N):
pos_list.append(random_init_pos(C, R, size_list[i], pos_list))
return np.array(pos_list)[:,:2]
def make_fish(L, N, rmin, rmax, C, R, m, tau,
desired_speed, first_obj, second_obj, init = 'random'):
"""
Makes a list of fish with various parameters
Parameters
----------
L : float
Size of the aquarium.
N : int
Number of fish.
rmin : float
Minimal radius.
rmax : float
Maximal radius.
C : list
Position of the center of the circle, as [xc, yc].
R : float
Radius of the circle.
m : float
Mass of the fish.
tau : float
Characteristic time of acceleration.
desired_speed : float
Desired maximal speed of the fish.
first_obj : list
Position of the first objective, as [x, y].
second_obj : list
Position of the second objective, as [x, y].
init : str, optional
How the initialization of the positions is made. Can either be
'crystal', where they are ordered as a square with a fixed distance
between each fish, or with 'random' as random positions of the fish
within a circle. The default is 'random'.
Raises
------
ValueError
If the init option is not used correctly, with either 'random' or
'crystal'.
Returns
-------
fish_list : list
List of fish.
Notes
----------
See also Fish(), no_overlap_random_pos(), and crystal_init_pos().
"""
    fish_list = []
    if init == 'random':
        size_list = []
        for i in range(N):
            size_list.append(np.random.uniform(rmin, rmax))
        pos_list = no_overlap_random_pos(C, R, size_list, N)
        for i in range(N):
            fish_list.append(Fish(size_list[i], m, pos_list[i],
                                  tau, desired_speed, first_obj, second_obj))
    elif init == 'crystal':
        n = int(round(np.sqrt(N)))
        xx, yy = crystal_init_pos(-2*L + L/3, L/2 - L/3,
                                  -L + L/3, L - L/3, n)
        for i in range(N):
            radius = np.random.uniform(rmin, rmax)
            fish_list.append(Fish(radius, m, np.array([xx[i], yy[i]]),
                                  tau, desired_speed, first_obj, second_obj))
else :
raise ValueError('Wrong init option')
return fish_list
def helbing_constant():
"Return constant from the original article Helbing et. al, 2000"
m = 80. #mass
A = 2. * 10**3 #amplitude of long-range repulsion
B = 0.08 #characteristic distance for long-range repulsion
ds = 0.8 #desired speed
k = 1.2 * 10**5 #body force constant
kappa = 2.4 * 10**5 #friction force constant
rmin = 0.25 #minimum radius
rmax = 0.35 #maximum radius
tau = 0.5 #characteristic time for acceleration
return m, A, B, ds, k, kappa, rmin, rmax, tau
def adaptative_timestep(fish_list, default_dt = 0.01,
v_changelimit = 0.01, tms_mul = 0.95):
"""
    Returns a timestep adapted to the largest acceleration, so that no fish's
    velocity changes by more than v_changelimit in a single update, as
    described in Helbing et al., 2000.
Parameters
----------
fish_list : list
List of every fish.
default_dt : float, optional
Initial timestep. The default is 0.01.
v_changelimit : float, optional
Maximum velocity change in a timestep. The default is 0.01.
tms_mul : float, optional
Multiplier used to reduce the timestep. The default is 0.95.
Returns
-------
t : float
Adaptative timestep.
"""
max_acc = 0
for fish in fish_list : #find the highest acceleration
if max_acc < np.linalg.norm(fish.force)/fish.mass :
max_acc = np.linalg.norm(fish.force)/fish.mass
t = default_dt
while t * max_acc > v_changelimit: #while velocity change superior to limit
t *= tms_mul #reduce the timestep to reduce the velocity change
return t
L = 10
m, A, B, ds, k, kappa, rmin, rmax, tau = helbing_constant()
ds = 1.2
kappa = 4.8 * 10**5
uw = [L/2, 2 * rmin]
dw = [L/2, -(2 * rmin)]
#Place first objective on the hole
fobj = [(1/2) * L + (uw[1] - dw[1])/2 , 0]
#Place second objective further away so there is no jam near the exit
sobj = [(11/4) * L, 0]
#Number of individual is N, with an integer square root n
n = 7
N = n**2
#Position and radius of the circle of random initial position with no overlap
C = [-L/3, 0]
R = L/2
for i in range(10):
fish_list = make_fish(L, N, rmin, rmax, C, R, m, tau, ds, fobj, sobj)
#Put this way, we get timestep, fish id, x, y, radius
tmax = 150
hist = []
f_hist = []
ts = 0
timer = 0.01
while ts < tmax:
f_count = 0
for fish in fish_list :
fish.update_force(fish_list, [uw, dw], A, B, k, kappa)
dt = adaptative_timestep(fish_list, default_dt = 0.01,
v_changelimit = 0.01)
ts += dt
for fish in fish_list :
fish.update_status(dt)
fish.update_objective()
hist.append([ts, f_count, fish.getCoords()[0], fish.getCoords()[1],
fish.getSize(), fish.getColor()])
if ts >= timer :
f_hist.append([ts, f_count, fish.getCoords()[0],
fish.getCoords()[1], fish.getSize(),
fish.getColor()])
f_count += 1
if ts >= timer :
timer += 0.01
sys.stdout.write("\r{0}%".format(round((ts-dt)/tmax*100,2)))
sys.stdout.flush()
#We create the corresponding pandas DataFrame
df = pd.DataFrame(hist, columns =['Time', 'FishID', 'X', 'Y', 'R', 'C'])
#We save it into a csv file
title = 'history_49_ds12_ka48_{}.csv'.format(i)
df.to_csv(title, index=False)
#We create the corresponding pandas DataFrame
df2 = pd.DataFrame(f_hist, columns =['Time', 'FishID', 'X', 'Y', 'R', 'C'])
#We save it into a csv file
title = 'fixed_history_49_ds12_ka48_{}.csv'.format(i)
df2.to_csv(title, index=False) | 3.703125 | 4 |
desafiosCursoEmVideo/ex056.py | gomesGabriel/Pythonicos | 1 | 12788882 | print('\033[33m-=-\033[m' * 20)
print('\033[33m************* Analisador completo *************\033[m')
print('\033[33m-=-\033[m' * 20)
si = 0
iv = 0
nv = 0
tm = 0
for c in range(1, 5):
print('---- {}ª Pessoa ----' .format(c))
n = str(input('Nome: ')).strip()
i = int(input('Idade: '))
    s = str(input('Sexo [M/F]: ')).strip()
si += i
if c == 1 and s in 'Mm':
iv = i
nv = n
    if s in 'Mm' and i > iv:
iv = i
nv = n
if s in 'Ff' and i < 20:
tm += 1
print('A média de idade do grupo é: {}' .format(si / 4))
print('O homem mais velho tem {} e se chama: {}' .format(iv, nv))
print('Ao todo são {} mulheres com menos de 20 anos.' .format(tm))
| 3.09375 | 3 |
visualise/colormap.py | TimoWilken/scworldedit | 0 | 12788883 | #!/usr/bin/python3
"""Colormaps map data to colors for visualisation.
To use a colormap, call one of its color_* methods with data in a suitable
format.
"""
from abc import ABCMeta, abstractmethod
from array import array
from itertools import chain
class ColorMap(metaclass=ABCMeta):
"""The base color map.
Custom color maps should inherit from this class and override the
color_heatmap(self, dataset) method.
"""
@staticmethod
def _parse_html_color(color):
r"""Parse a color conforming to the regex #?\d\d?\d\d?\d\d?\d?\d?.
The parsed color may be in one of the following formats, each with an
optional hash ("#") character in front:
["#RRGGBB", "#RGB", "#RRGGBBAA", "#RGBA"].
"""
color = color.translate({ord('#'): None})
cl = len(color) // 3 # len of one RGBA component
r, g, b, a = color[:cl], color[cl:2*cl], color[2*cl:3*cl], color[3*cl:]
if cl == 1:
r, g, b, a = map(lambda c: 2*c, (r, g, b, a))
return int(r, 16), int(g, 16), int(b, 16), int(a, 16) if a else 255
@abstractmethod
def color_heatmap(self, dataset):
"""Transform heatmap data into pixel rows to write to a PNG file."""
return NotImplemented
class AbsoluteColorMap(ColorMap):
"""A user-defined colormap mapping absolute values to colors."""
def __init__(self, colors):
"""Initialise a new color map."""
self.default = self._parse_html_color(colors.get('default', '#0000'))
self.colormap = {int(k): self._parse_html_color(c)
for k, c in colors.items() if k.isdigit()}
def color_heatmap(self, dataset):
"""Transform heatmap data into pixel rows to write to a PNG file."""
coord_data = dataset.by_coordinates(relative=False)
for y in range(dataset.bounds.height):
yield array('B', chain.from_iterable(
self.colormap.get(coord_data[(x, y)], self.default)
if (x, y) in coord_data else (0, 0, 0, 0)
for x in range(dataset.bounds.width)
))
class DefaultColorMap(ColorMap):
"""The default greyscale colormap to use if no user-provided one exists."""
def color_heatmap(self, dataset):
"""Transform heatmap data into pixel rows to write to a PNG file."""
coord_data = dataset.by_coordinates(relative=True)
for y in range(dataset.bounds.height):
yield array('B', map(round, chain.from_iterable(
(*((255 * coord_data[(x, y)],) * 3), 255)
if (x, y) in coord_data else (0, 0, 0, 0)
for x in range(dataset.bounds.width)
)))
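
# Minimal usage sketch (illustrative only). It assumes a `dataset` object with
# the interface the classes above rely on (`bounds.width`, `bounds.height`,
# `by_coordinates()`) and the third-party `png` (pypng) module for output:
#
#     import png
#     cmap = DefaultColorMap()
#     rows = cmap.color_heatmap(dataset)
#     with open("heatmap.png", "wb") as out:
#         png.Writer(dataset.bounds.width, dataset.bounds.height,
#                    alpha=True).write(out, rows)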
| 3.796875 | 4 |
_unittests/ut_sql/test_database_join.py | mohamedelkansouli/Ensae_py2 | 0 | 12788884 | <reponame>mohamedelkansouli/Ensae_py2<filename>_unittests/ut_sql/test_database_join.py
"""
@brief test log(time=13s)
"""
import sys
import os
import unittest
from pyquickhelper.loghelper import fLOG, unzip
from pyquickhelper.pycode import get_temp_folder
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.pyensae.sql import Database
class TestDatabaseJoin (unittest.TestCase):
_memo_SQL1 = """SELECT query AS query,
profile_QSSH.pos AS profile_QSSH_pos,
type AS type,
bucket AS bucket,
max_nb AS max_nb,
sum_difftime AS sum_difftime,
profile_QSSH.url AS url,
url_QSSH.pos AS url_QSSH_pos,
co AS co,
nb_view AS nb_view,
sum_nb_view AS sum_nb_view,
sum_difftime_view AS sum_difftime_view,
nb_click AS nb_click,
sum_nb_click AS sum_nb_click,
sum_difftime_click AS sum_difftime_click
FROM profile_QSSH
JOIN url_QSSH
ON profile_QSSH.url == url_QSSH.url
"""
_memo_SQL2 = """SELECT query AS query,
profile_QSSH.pos AS pos,
type AS type,
bucket AS bucket,
max_nb AS max_nb,
sum_difftime AS sum_difftime,
profile_QSSH.url AS url,
co AS co,
nb_view AS nb_view,
sum_nb_view AS sum_nb_view,
sum_difftime_view AS sum_difftime_view,
nb_click AS nb_click,
sum_nb_click AS sum_nb_click,
sum_difftime_click AS sum_difftime_click
FROM profile_QSSH
INNER JOIN url_QSSH
ON profile_QSSH.url == url_QSSH.url
AND profile_QSSH.pos == url_QSSH.pos
WHERE bucket == 'bu###1'
"""
def test_join_bis(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
filename = os.path.join(os.path.split(
__file__)[0], "data", "database_linked.zip")
temp = get_temp_folder(__file__, "temp_join_bis")
filename = unzip(filename, temp)
assert os.path.exists(filename)
db = Database(filename, LOG=fLOG)
db.connect()
sql = "SELECT COUNT(*) FROM profile_QSSH"
exe = db.execute_view(sql)
assert exe[0][0] == 16
sql, fields = db.inner_join("profile_QSSH", "url_QSSH",
"url", None,
execute=False,
create_index=False,
unique=False)
sql = sql.strip(" \n\r\t")
tep = TestDatabaseJoin._memo_SQL1.strip(" \n\r\t")
if sql.replace(" ", "") != tep.replace(" ", ""):
print(sql)
raise Exception("sql queries should be identifical")
assert fields == [('query', 'query'), ('profile_QSSH.pos', 'profile_QSSH_pos'), ('type', 'type'),
('bucket', 'bucket'), ('max_nb',
'max_nb'), ('sum_difftime', 'sum_difftime'),
('profile_QSSH.url', 'url'), ('url_QSSH.pos', 'url_QSSH_pos'),
('co', 'co'), ('nb_view',
'nb_view'), ('sum_nb_view', 'sum_nb_view'),
('sum_difftime_view', 'sum_difftime_view'),
('nb_click', 'nb_click'), ('sum_nb_click', 'sum_nb_click'), ('sum_difftime_click', 'sum_difftime_click')]
view = db.execute_view(sql)
assert len(view) == 2
sql, fields = db.inner_join("profile_QSSH", "url_QSSH",
("url", "pos"), None,
execute=False,
create_index=False,
where="bucket == 'bu###1'")
sql = sql.strip(" \n\r\t")
tep = TestDatabaseJoin._memo_SQL2.strip(" \n\r\t")
if sql.replace(" ", "") != tep.replace(" ", ""):
for a, b in zip(sql.split("\n"), tep.split("\n")):
print("res", a)
print("exp", b)
print(a == b)
assert sql.replace(" ", "") == tep.replace(" ", "")
assert fields == [('query', 'query'), ('profile_QSSH.pos', 'pos'), ('type', 'type'),
('bucket', 'bucket'), ('max_nb',
'max_nb'), ('sum_difftime', 'sum_difftime'),
('profile_QSSH.url', 'url'), ('co',
'co'), ('nb_view', 'nb_view'),
('sum_nb_view', 'sum_nb_view'), ('sum_difftime_view',
'sum_difftime_view'),
('nb_click', 'nb_click'), ('sum_nb_click', 'sum_nb_click'),
('sum_difftime_click', 'sum_difftime_click')]
view = db.execute_view(sql)
assert len(view) == 1
db.close()
def test_histogram(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
filename = os.path.join(os.path.split(
__file__)[0], "data", "database_linked.zip")
temp = get_temp_folder(__file__, "temp_histogram")
filename = unzip(filename, temp)
assert os.path.exists(filename)
db = Database(filename, LOG=fLOG)
db.connect()
sql = db.histogram("url_QRW2",
col_sums=["sum_nb_click"],
columns=("pos", "url"))
view = db.execute_view(sql)
assert len(view) == 38216
sql = db.histogram("url_QRW2",
col_sums=["sum_nb_click"],
columns="url")
view = db.execute_view(sql)
assert len(view) == 28436
sql = db.histogram("url_QRW2",
col_sums=["sum_nb_click"],
columns="pos",
values=[1, 2, 3, 4, 5])
view = db.execute_view(sql)
assert view == [(1, 2370, 87049), (2, 5734, 11522),
(3, 4009, 5383), (4, 4304, 1778), (5, 21799, 3588)]
sql = db.histogram("url_QRW2",
col_sums=["sum_nb_click"],
columns="pos",
values={"pos123": [1, 2, 3], "others": [4, 5, 6, 7, 8, 9, 10]})
view = db.execute_view(sql)
assert view == [('none', 21, 0), ('others', 26082,
5366), ('pos123', 12113, 103954)]
db.close()
def test_histogram2(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
filename = os.path.join(os.path.split(
__file__)[0], "data", "database_linked.zip")
temp = get_temp_folder(__file__, "temp_histogram2")
filename = unzip(filename, temp)
assert os.path.exists(filename)
db = Database(filename, LOG=fLOG)
db.connect()
sql = db.histogram("url_QRW2",
values={"cat1": [(1, 1), (1, 0)], "cat2": [
(1, 10), (2, 10), (2, 1)]},
col_sums=["sum_nb_click"],
columns=("pos", "co"))
view = db.execute_view(sql)
assert view == [('cat1', 1115, 15), ('cat2', 3792,
411), ('none', 33309, 108894)]
db.close()
if __name__ == "__main__":
unittest.main()
| 2.0625 | 2 |
bootstrap/act-bootstrap.py | geirskjo/act-bootstrap | 0 | 12788885 | <reponame>geirskjo/act-bootstrap<gh_stars>0
#!/usr/bin/env python3
import argparse
import json
import os
import sys
from logging import critical, warning
import act
def parseargs():
""" Parse arguments """
parser = argparse.ArgumentParser(description="ACT Bootstrap data model")
parser.add_argument(
"--userid",
type=int,
dest="user_id",
required=True,
help="User ID")
parser.add_argument(
"--object-types",
dest="object_types_filename",
required=True,
help="Object type defintions (json)")
parser.add_argument(
"--fact-types",
dest="fact_types_filename",
required=True,
help="Fact type defintions (json)")
parser.add_argument(
"--meta-fact-types",
dest="meta_fact_types_filename",
required=True,
help="Meta Fact type defintions (json)")
parser.add_argument(
"--logfile",
dest="log_file",
help="Log to file (default = stdout)")
parser.add_argument(
"--loglevel",
dest="log_level",
default="info",
help="Loglevel (default = info)")
parser.add_argument(
"--act-baseurl",
dest="act_baseurl",
required=True,
help="API URI")
return parser.parse_args()
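# Example invocation (file names and URL are illustrative):
#   python3 act-bootstrap.py --userid 1 --act-baseurl http://localhost:8888 \
#       --object-types object-types.json --fact-types fact-types.json \
#       --meta-fact-types meta-fact-types.json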
def create_object_types(client, object_types_filename):
if not os.path.isfile(object_types_filename):
critical("Object defintion file not found: %s" % object_types_filename)
sys.exit(1)
try:
object_types = json.loads(open(object_types_filename).read())
except json.decoder.JSONDecodeError:
critical("Unable to parse file as json: %s" % object_types_filename)
sys.exit(1)
existing_object_types = [object_type.name
for object_type in client.get_object_types()]
# Create all objects
for object_type in object_types:
name = object_type["name"]
validator = object_type.get("validator", act.DEFAULT_VALIDATOR)
if name in existing_object_types:
warning("Object type %s already exists" % name)
continue
client.object_type(name=name, validator_parameter=validator).add()
def create_fact_types(client, fact_types_filename):
# Create fact type with allowed bindings to ALL objects
# We want to change this later, but keep it like this to make it simpler
# when evaluating the data model
if not os.path.isfile(fact_types_filename):
critical("Facts defintion file not found: %s" % fact_types_filename)
try:
fact_types = json.loads(open(fact_types_filename).read())
except json.decoder.JSONDecodeError:
critical("Unable to parse file as json: %s" % fact_types_filename)
sys.exit(1)
for fact_type in fact_types:
name = fact_type["name"]
validator = fact_type.get("validator", act.DEFAULT_VALIDATOR)
object_bindings = fact_type.get("objectBindings", [])
if not object_bindings:
client.create_fact_type_all_bindings(
name, validator_parameter=validator)
else:
client.create_fact_type(name, validator=validator, object_bindings=object_bindings)
def create_meta_fact_types(client, meta_fact_types_filename):
# Create fact type with allowed bindings to ALL objects
# We want to change this later, but keep it like this to make it simpler
# when evaluating the data model
if not os.path.isfile(meta_fact_types_filename):
critical("Meta Fact defintions file not found: %s" % meta_fact_types_filename)
try:
meta_fact_types = json.loads(open(meta_fact_types_filename).read())
except json.decoder.JSONDecodeError:
critical("Unable to parse file as json: %s" % meta_fact_types_filename)
sys.exit(1)
for meta_fact_type in meta_fact_types:
name = meta_fact_type["name"]
validator = meta_fact_type.get("validator", act.DEFAULT_VALIDATOR)
fact_bindings = meta_fact_type.get("factBindings", [])
if not fact_bindings:
client.create_meta_fact_type_all_bindings(name, validator_parameter=validator)
else:
client.create_meta_fact_type(name, fact_bindings=fact_bindings, validator=validator)
if __name__ == "__main__":
args = parseargs()
client = act.Act(
args.act_baseurl,
args.user_id,
args.log_level,
args.log_file,
"act-types")
create_object_types(
client, object_types_filename=args.object_types_filename)
create_fact_types(client, fact_types_filename=args.fact_types_filename)
create_meta_fact_types(client, meta_fact_types_filename=args.meta_fact_types_filename)
| 2.515625 | 3 |
app/contracts/migrations/0025_auto_20180211_1442.py | snakrani/discovery | 0 | 12788886 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vendors', '0028_auto_20180211_1350'),
('contracts', '0024_auto_20180205_0342'),
]
operations = [
migrations.RunSQL("UPDATE django_content_type SET app_label = 'contracts' WHERE app_label = 'contract';"),
migrations.RunSQL("ALTER TABLE IF EXISTS contract_contract RENAME TO contracts_contract;"),
migrations.RunSQL("ALTER TABLE IF EXISTS contract_fpdsload RENAME TO contracts_fpdsload;"),
migrations.RunSQL("ALTER TABLE IF EXISTS contract_placeofperformance RENAME TO contracts_placeofperformance;"),
]
| 1.617188 | 2 |
setup.py | numpde/bugs | 0 | 12788887 | <reponame>numpde/bugs
import setuptools
# python setup.py sdist bdist_wheel
# twine upload dist/* && rm -rf build dist *.egg-info
setuptools.setup(
name="bugs",
version="0.0.2",
author="RA",
author_email="<EMAIL>",
keywords="python essentials",
description="Python essential imports.",
long_description="Python essential imports. [Info](https://github.com/numpde/bugs).",
long_description_content_type="text/markdown",
url="https://github.com/numpde/bugs",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=['pandas', 'plox', 'inclusive', 'tcga', 'more_itertools'],
# Required for includes in MANIFEST.in
#include_package_data=True,
test_suite="nose.collector",
tests_require=["nose"],
)
| 1.367188 | 1 |
library/library.py | jatinmg97/library | 0 | 12788888 | <filename>library/library.py
import pandas as pd
import uuid
import sqlite3
import os
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn import model_selection
import numpy as np
from sklearn.metrics import balanced_accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
from sklearn.metrics import auc
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from scipy.spatial.distance import cdist
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_mutual_info_score
from sklearn.metrics import adjusted_rand_score
from sklearn.cluster import DBSCAN
from sqlalchemy import create_engine
from urllib import parse
import time
from sklearn.linear_model import SGDClassifier
import traceback
parameters={}
PK={}
# saves the dataset name and assigns it a Data_ID (DID)
def data(name):
PK={}
Data_Table = pd.DataFrame(columns=['Data_Name','Data_ID'])
if not os.path.isfile('Data_Table.csv'):
Data_Table.to_csv('Data_Table.csv',header='column_names',index=False)
else:
Data_Table=pd.read_csv('Data_Table.csv')
#aaa2=(Data_Table.Data_Name=='name').all() #name
A=Data_Table[Data_Table['Data_Name'].astype(str).str[:].str.contains(name)]
b=A.empty
ID=A['Data_ID']
if b==True:
PK={}
PK[name] = uuid.uuid4()
PK2=pd.DataFrame.from_dict(PK, orient='index')
PK2 = PK2.reset_index()
Data_Table2=PK2
Data_Table2.columns = ['Data_Name', 'Data_ID']
Data_Table2['Data_Name'] = Data_Table2['Data_Name'].astype(str)
Data_Table2['Data_ID'] = Data_Table2['Data_ID'].astype(str)
# column_names=["Data_Name","Data_ID"
if not os.path.isfile('Data_Table.csv'):
Data_Table2.to_csv('Data_Table.csv',header='column_names',index=False)
else:
Data_Table2.to_csv('Data_Table.csv',mode='a',header=False, index=False)
ID=Data_Table2['Data_ID']
else:
ID=A['Data_ID']
return ID
def func(lr):
#parameters[x]=x.get_params()
#PK={}
#global Model_ID
parameters={}
PK={}
#PK3={}
#PK[lr]=time.time()
import datetime
now = datetime.datetime.now()
idz=now.strftime('%Y-%m-%dT%H:%M:%S') + ('-%02d' % (now.microsecond / 10000))
PK[lr] = uuid.uuid4()
PK2=pd.DataFrame.from_dict(PK, orient='index')
PK2 = PK2.reset_index()
Model_ID=PK2
Model_ID.columns = ['Model_Detail', 'Model_ID']
Model_ID['Model_Detail'] = Model_ID['Model_Detail'].astype(str)
Model_ID['Model_ID'] = Model_ID['Model_ID'].astype(str)
Model_ID['Time_Stamp']=idz
sep='('
x=Model_ID['Model_Detail'][0]
x1=x.split(sep,1)[0]
Model_ID['Model_Name']=x1
#Model_ID['Model_SF']= Model_ID['x']
return Model_ID
k={}
def func1(x,d):
#ID=d.loc[0,'Model_ID']
try:
ID=d
#name=x
print (x)
print(ID)
parameters={}
k={}
global Parameters3
parameters[x]=x.get_params()
Parameters2=pd.DataFrame.from_dict(parameters, orient='index')
Parameters3 = Parameters2.reset_index()
Parameters3=Parameters3.melt(id_vars=["index"],
var_name=["Param_Name"],
value_name="Value")
Parameters3.columns = ['Model_Name','Param_Name','Value']
print (Parameters3)
Parameters3['Model_Name'] = Parameters3['Model_Name'].astype(str)
Parameters3['Value'] = Parameters3['Value'].astype(str)
Parameters3 = Parameters3[Parameters3.Value != "nan"]
Parameters3['Model_ID']=ID
#Parameters3.loc[Parameters3.Model_Name==name, 'Model_ID']=ID
#Parameters4['Model_ID']= np.where(Parameters3['Model_Name']== name, ID,'NA')
#Parameters3['Model_ID']= np.where(Parameters3['Model_ID']== 'NA', ID,'NA')
#Parameters3.loc[:,"Model_ID"] = ID
l=Parameters3['Param_Name']
l=l.tolist()
# l=[1,2,3,4]
k={}
for i in l:
k[i]=uuid.uuid4()
#Parameters3.loc[:,"TuningID"]= k[i]
PK2=pd.DataFrame.from_dict(k, orient='index')
PK2 = PK2.reset_index()
PK2.columns = ['Model_Name', 'Tuning_ID']
PK2=PK2.drop(['Model_Name'],axis=1)
#Tuning_ID=PK2
Parameters3['Tuning_ID']=PK2['Tuning_ID']
Parameters3=Parameters3.drop(['Model_Name'],axis=1)
except:
var = traceback.format_exc()
with open("error.txt", "a") as myfile:
myfile.write(var)
return Parameters3
def func3(x,X_train,y_train,y):
#ID=y.loc[0,'Model_ID']
try:
global Goodness_of_fit
global var
ID=y
scoring = 'r2'
scoring2 = 'neg_mean_squared_error'
scoring3 = 'explained_variance'
#scoring4 = 'balanced_accuracy'
scoring5 = 'neg_mean_absolute_error'
seed = 7
kfold = model_selection.KFold(n_splits=2, random_state=seed)
r2={}
MSE={}
EV={}
ME={}
MAE={}
results={}
results2={}
results = model_selection.cross_val_score(x, X_train, y_train,cv=kfold, scoring=scoring)
results2 = model_selection.cross_val_score(x, X_train, y_train, cv=kfold, scoring=scoring2)
results3 = model_selection.cross_val_score(x, X_train, y_train, cv=kfold, scoring=scoring3)
# results4 = model_selection.cross_val_score(x, X_train, y_train, cv=kfold, scoring=scoring4)
results5 = model_selection.cross_val_score(x, X_train, y_train, cv=kfold, scoring=scoring5)
r2[x]=results.mean()
MSE[x]=results2.mean()
EV[x]=results3.mean()
# ME[x]=results4.mean()
MAE[x]=results5.mean()
r_squared=pd.DataFrame.from_dict(r2, orient='index')
r_squared = r_squared.reset_index()
mse=pd.DataFrame.from_dict(MSE, orient='index')
mse = mse.reset_index()
EV=pd.DataFrame.from_dict(EV, orient='index')
EV = EV.reset_index()
MAE=pd.DataFrame.from_dict(MAE, orient='index')
MAE = MAE.reset_index()
r_squared["g_fit"] = "R_SQUARED"
r_squared.columns=['Model_Name', 'Value',"Goodness_of_fit"]
mse["g_fit"] = "MSE"
mse.columns=['Model_Name', 'Value',"Goodness_of_fit"]
EV["g_fit"] = "EV"
EV.columns=['Model_Name', 'Value',"Goodness_of_fit"]
MAE["g_fit"] = "MAE"
MAE.columns=['Model_Name', 'Value',"Goodness_of_fit"]
Goodness_of_fit=r_squared
Goodness_of_fit['Model_Name'] = Goodness_of_fit['Model_Name'].astype(str)
Goodness_of_fit=r_squared.append(mse)
Goodness_of_fit=Goodness_of_fit.append(EV)
Goodness_of_fit=Goodness_of_fit.append(MAE)
Goodness_of_fit['Model_Name'] = Goodness_of_fit['Model_Name'].astype(str)
#Model_ID=ID
Goodness_of_fit['Model_ID']=ID
Goodness_of_fit = Goodness_of_fit.reset_index()
Goodness_of_fit= Goodness_of_fit.drop(['index'],axis=1)
m=Goodness_of_fit['Value']
m=m.tolist()
m=[1,2,3,4]
d={}
for i in m:
d[i]=uuid.uuid4()
# Parameters3.loc[:,"TuningID"]= k[i]
PK3=pd.DataFrame.from_dict(d, orient='index')
PK3 = PK3.reset_index()
PK3.columns = ['Model_Name','GF_ID2']
#Tuning_ID=PK2
Goodness_of_fit['GF_ID']=PK3['GF_ID2']
#Goodness_of_fit.loc[:,"Model_ID"] = ID
Goodness_of_fit=Goodness_of_fit.drop(['Model_Name'],axis=1)
except:
var = traceback.format_exc()
with open("error.txt", "a") as myfile:
myfile.write(var)
return Goodness_of_fit
def func4(x,X_train,y_train,y):
#ID=y.loc[0,'Model_ID']
global Goodness_of_fit
global var
try:
ID=y
#seed = 7
sep='('
ad=str(x)
ad=ad.split(sep,1)[0]
if ad=="XGBClassifier":
pred = x.predict(X_train)
#pred = [round(value) for value in pred]
pred = np.asarray([np.argmax(line) for line in pred])
else:
pred=x.predict(X_train)
Accuracy={}
AUC={}
Precision={}
f1_score2={}
recall_score2={}
results={}
results2={}
results = accuracy_score(y_train,pred,normalize=True)
#results2 = auc(y_train,pred)
results2= balanced_accuracy_score(y_train, pred)
results3 = f1_score(y_train,pred, average='weighted')
results4 = precision_score(y_train,pred,average='weighted')
results5 = recall_score(y_train,pred,average='weighted')
Accuracy[x]=results
AUC[x]=results2
Precision[x]=results3
f1_score2[x]=results4
recall_score2[x]=results5
Accuracy=pd.DataFrame.from_dict(Accuracy, orient='index')
Accuracy = Accuracy.reset_index()
Balanced_Accuracy=pd.DataFrame.from_dict(AUC, orient='index')
Balanced_Accuracy = Balanced_Accuracy.reset_index()
Precision=pd.DataFrame.from_dict(Precision, orient='index')
Precision = Precision.reset_index()
f1_score2=pd.DataFrame.from_dict(f1_score2, orient='index')
f1_score2 = f1_score2.reset_index()
recall_score2=pd.DataFrame.from_dict(recall_score2, orient='index')
recall_score2 = recall_score2.reset_index()
Accuracy["g_fit"] = "Accuracy"
Accuracy.columns=['Model_Name', 'Value',"Goodness_of_fit"]
Balanced_Accuracy["g_fit"] = "Balanced_Accuracy"
Balanced_Accuracy.columns=['Model_Name', 'Value',"Goodness_of_fit"]
Precision["g_fit"] = "Precision"
Precision.columns=['Model_Name', 'Value',"Goodness_of_fit"]
f1_score2["g_fit"] = "f1_score"
f1_score2.columns=['Model_Name', 'Value',"Goodness_of_fit"]
recall_score2["g_fit"] = "recall_score"
recall_score2.columns=['Model_Name', 'Value',"Goodness_of_fit"]
Goodness_of_fit=Accuracy
Goodness_of_fit['Model_Name'] = Goodness_of_fit['Model_Name'].astype(str)
Goodness_of_fit=Accuracy.append(Balanced_Accuracy)
Goodness_of_fit=Goodness_of_fit.append(Precision)
Goodness_of_fit=Goodness_of_fit.append(f1_score2)
Goodness_of_fit=Goodness_of_fit.append(recall_score2)
Goodness_of_fit['Model_Name'] = Goodness_of_fit['Model_Name'].astype(str)
#Model_ID=ID
Goodness_of_fit['Model_ID']=ID
Goodness_of_fit = Goodness_of_fit.reset_index()
Goodness_of_fit= Goodness_of_fit.drop(['index'],axis=1)
m=Goodness_of_fit['Value']
m=[1,2,3,4,5]
d={}
for i in m:
d[i]=uuid.uuid4()
# Parameters3.loc[:,"TuningID"]= k[i]
PK3=pd.DataFrame.from_dict(d, orient='index')
PK3 = PK3.reset_index()
PK3.columns = ['Model_Name','GF_ID2']
#Tuning_ID=PK2
Goodness_of_fit['GF_ID']=PK3['GF_ID2']
#Goodness_of_fit.loc[:,"Model_ID"] = ID
Goodness_of_fit=Goodness_of_fit.drop(['Model_Name'],axis=1)
# Goodness_of_fit=Goodness_of_fit[['Goodness_of_fit', 'Value', 'Model_Id', 'GF_ID']]
except:
var = traceback.format_exc()
with open("error.txt", "a") as myfile:
myfile.write(var)
return Goodness_of_fit
def cluster(kmeans,X_test,y_test,y):
x=str(kmeans)
#sep='('
#x1=x.split(sep,1)[0]
if "SpectralClustering" in x:
pred=kmeans.fit_predict(X_test)
elif "AgglomerativeClustering" in x:
pred=kmeans.fit_predict(X_test)
elif "DBSCAN" in x:
pred=kmeans.fit_predict(X_test)
else:
pred=kmeans.predict(X_test)
score2=adjusted_mutual_info_score(y_test,pred)
score3=adjusted_rand_score(y_test,pred)
if "AgglomerativeClustering" in x:
score4="0"
elif "DBSCAN" in x:
score4="0"
else:
score4=sum(np.min(cdist(X_test, kmeans.cluster_centers_, 'euclidean'), axis=1)) / X_test.shape[0]
ID=y
Score={}
rand={}
wss={}
results={}
results2={}
Score[kmeans]=score2
rand[kmeans]=score3
wss[kmeans]=score4
Adjusted_mutual_info=pd.DataFrame.from_dict(Score, orient='index')
Adjusted_mutual_info = Adjusted_mutual_info.reset_index()
Adjusted_rand_info=pd.DataFrame.from_dict(rand, orient='index')
Adjusted_rand_info = Adjusted_rand_info.reset_index()
WSS=pd.DataFrame.from_dict(wss, orient='index')
WSS = WSS.reset_index()
Adjusted_mutual_info["g_fit"] = "Adjusted_mutual_info"
Adjusted_mutual_info.columns=['Model_Name', 'Value',"Goodness_of_fit"]
Adjusted_rand_info["g_fit"] = "Adjusted_rand_info"
Adjusted_rand_info.columns=['Model_Name', 'Value',"Goodness_of_fit"]
WSS["g_fit"] = "WSS"
WSS.columns=['Model_Name', 'Value',"Goodness_of_fit"]
Goodness_of_fit=Adjusted_mutual_info
Goodness_of_fit['Model_Name'] = Goodness_of_fit['Model_Name'].astype(str)
Goodness_of_fit=Goodness_of_fit.append(Adjusted_rand_info)
Goodness_of_fit=Goodness_of_fit.append(WSS)
Goodness_of_fit['Model_Name'] = Goodness_of_fit['Model_Name'].astype(str)
#Model_ID=ID
Goodness_of_fit['Model_ID']=ID
Goodness_of_fit = Goodness_of_fit.reset_index()
Goodness_of_fit= Goodness_of_fit.drop(['index'],axis=1)
m=Goodness_of_fit['Value']
m=[1,2,3]
d={}
for i in m:
d[i]=uuid.uuid4()
# Parameters3.loc[:,"TuningID"]= k[i]
PK3=pd.DataFrame.from_dict(d, orient='index')
PK3 = PK3.reset_index()
PK3.columns = ['Model_Name','GF_ID2']
#Tuning_ID=PK2
Goodness_of_fit['GF_ID']=PK3['GF_ID2']
#Goodness_of_fit.loc[:,"Model_ID"] = ID
Goodness_of_fit=Goodness_of_fit.drop(['Model_Name'],axis=1)
return Goodness_of_fit
def final(lr,X_train,y_train):
Model_Table=func(lr)
ID=Model_Table.loc[0,'Model_ID']
HP_Table=func1(lr,ID)
GF_Table=func3(lr,X_train,y_train,ID)
aaa=[Model_Table,HP_Table,GF_Table]
Model_Table=aaa[0]
HP_Table=aaa[1]
GF_Table=aaa[2]
if not os.path.isfile('Model_Table.csv'):
Model_Table.to_csv('Model_Table.csv',header='column_names')
else:
Model_Table.to_csv('Model_Table.csv',mode='a',header=False)
if not os.path.isfile('HP_Table.csv'):
HP_Table.to_csv('HP_Table.csv',header='column_names')
else:
HP_Table.to_csv('HP_Table.csv',mode='a',header=False)
if not os.path.isfile('GF_Table.csv'):
GF_Table.to_csv('GF_Table.csv',header='column_names')
else:
GF_Table.to_csv('GF_Table.csv',mode='a',header=False)
return [Model_Table,HP_Table,GF_Table]
def csv(aaa):
Model_Table=aaa[0]
HP_Table=aaa[1]
GF_Table=aaa[2]
if not os.path.isfile('Model_Table.csv'):
Model_Table.to_csv('Model_Table.csv',header='column_names')
else:
Model_Table.to_csv('Model_Table.csv',mode='a',header=False,index=False)
if not os.path.isfile('HP_Table.csv'):
HP_Table.to_csv('HP_Table.csv',header='column_names')
else:
HP_Table.to_csv('HP_Table.csv',mode='a',header=False,index=False)
if not os.path.isfile('GF_Table.csv'):
GF_Table.to_csv('GF_Table.csv',header='column_names')
else:
GF_Table.to_csv('GF_Table.csv',mode='a',header=False,index=False)
return Model_Table
class data:
def __init__(self,path):
self.path=path
def save(self):
path=self.path
PK={}
Data_Table = pd.DataFrame(columns=['Data_Name','Data_ID'])
if not os.path.isfile('Data_Table.csv'):
Data_Table.to_csv('Data_Table.csv',header='column_names',index=False)
else:
Data_Table=pd.read_csv('Data_Table.csv')
#aaa2=(Data_Table.Data_Name=='name').all() #name
A=Data_Table[Data_Table['Data_Name'].astype(str).str[:].str.contains(path)]
b=A.empty
ID=A['Data_ID']
if b==True:
PK={}
PK[path] = uuid.uuid4()
PK2=pd.DataFrame.from_dict(PK, orient='index')
PK2 = PK2.reset_index()
Data_Table2=PK2
Data_Table2.columns = ['Data_Name', 'Data_ID']
Data_Table2['Data_Name'] = Data_Table2['Data_Name'].astype(str)
Data_Table2['Data_ID'] = Data_Table2['Data_ID'].astype(str)
# column_names=["Data_Name","Data_ID"
if not os.path.isfile('Data_Table.csv'):
Data_Table2.to_csv('Data_Table.csv',header='column_names',index=False)
else:
Data_Table2.to_csv('Data_Table.csv',mode='a',header=False, index=False)
ID=Data_Table2['Data_ID']
print("Generated a new DID")
else:
ID=A['Data_ID']
print("Found your DID")
return print("DID: " +ID )
class tracker:
def __init__(self,lr,X_test,y_test):
self.lr=lr
self.X_test=X_test
self.y_test=y_test
def save(self):#,lr=None,X_test=None,y_test=None):
lr=self.lr
X_test=self.X_test
y_test=self.y_test
#k=self.k
Model_Table=func(lr)
ID=Model_Table.loc[0,'Model_ID']
HP_Table=func1(lr,ID)
from sklearn.utils.testing import all_estimators
from sklearn import base
estimators = all_estimators()
x=[]
for name, class_ in estimators:
if issubclass(class_, base.ClassifierMixin):
x.append(name)
x.append("XGBClassifier")
y=[]
for name, class_ in estimators:
if issubclass(class_, base.RegressorMixin):
y.append(name)
y.append("XGBRegressor")
z=[]
for name, class_ in estimators:
if issubclass(class_, base.ClusterMixin):
z.append(name)
sep='('
ad=str(lr)
ad=ad.split(sep,1)[0]
if ad in y:
GF_Table=func3(lr,X_test,y_test,ID)
elif ad in x:
GF_Table=func4(lr,X_test,y_test,ID)
else:
GF_Table=cluster(lr,X_test,y_test,ID)
#GF_Table=func3(lr,X_test,y_test,ID)
aaa=[Model_Table,HP_Table,GF_Table]
Model_Table=aaa[0]
HP_Table=aaa[1]
GF_Table=aaa[2]
if not os.path.isfile('Model_Table.csv'):
Model_Table.to_csv('Model_Table.csv',header='column_names',index=False)
else:
Model_Table.to_csv('Model_Table.csv',mode='a',header=False,index=False)
if not os.path.isfile('HP_Table.csv'):
HP_Table.to_csv('HP_Table.csv',header='column_names',index=False)
else:
HP_Table.to_csv('HP_Table.csv',mode='a',header=False,index=False)
if not os.path.isfile('GF_Table.csv'):
GF_Table.to_csv('GF_Table.csv',header='column_names',index=False)
else:
GF_Table.to_csv('GF_Table.csv',mode='a',header=False,index=False)
engine = create_engine('mssql+pyodbc://admin_login:%s@<EMAIL>/IICS_Logs?driver=SQL+Server+Native+Client+11.0'% parse.unquote_plus('Miracle@123'))
connection = engine.connect()
Model_Table.to_sql('Model_Table', con = engine, if_exists = 'append', chunksize = 1000,index=False)
HP_Table.to_sql('HP_Table', con = engine, if_exists = 'append', chunksize = 1000,index=False)
GF_Table.to_sql('GF_Table', con = engine, if_exists = 'append', chunksize = 1000,index=False)
nice=[Model_Table,HP_Table,GF_Table]
nice2="Tables updated !"
return print(nice, nice2)
def classification(self):#,lr=None,X_test=None,y_test=None):
lr=self.lr
X_test=self.X_test
y_test=self.y_test
Model_Table=func(lr)
ID=Model_Table.loc[0,'Model_ID']
HP_Table=func1(lr,ID)
GF_Table=func4(lr,X_test,y_test,ID)
aaa=[Model_Table,HP_Table,GF_Table]
Model_Table=aaa[0]
HP_Table=aaa[1]
GF_Table=aaa[2]
if not os.path.isfile('Model_Table.csv'):
Model_Table.to_csv('Model_Table.csv',header='column_names')
else:
Model_Table.to_csv('Model_Table.csv',mode='a',header=False)
if not os.path.isfile('HP_Table.csv'):
HP_Table.to_csv('HP_Table.csv',header='column_names')
else:
HP_Table.to_csv('HP_Table.csv',mode='a',header=False)
if not os.path.isfile('GF_Table.csv'):
GF_Table.to_csv('GF_Table.csv',header='column_names')
else:
GF_Table.to_csv('GF_Table.csv',mode='a',header=False)
return [Model_Table,HP_Table,GF_Table]
def clustering(self):#,lr=None,X_test=None,y_test=None):
lr=self.lr
X_test=self.X_test
y_test=self.y_test
Model_Table=func(lr)
ID=Model_Table.loc[0,'Model_ID']
HP_Table=func1(lr,ID)
GF_Table=cluster(lr,X_test,y_test,ID)
aaa=[Model_Table,HP_Table,GF_Table]
Model_Table=aaa[0]
HP_Table=aaa[1]
GF_Table=aaa[2]
if not os.path.isfile('Model_Table.csv'):
Model_Table.to_csv('Model_Table.csv',header='column_names')
else:
Model_Table.to_csv('Model_Table.csv',mode='a',header=False)
if not os.path.isfile('HP_Table.csv'):
HP_Table.to_csv('HP_Table.csv',header='column_names')
else:
HP_Table.to_csv('HP_Table.csv',mode='a',header=False)
if not os.path.isfile('GF_Table.csv'):
GF_Table.to_csv('GF_Table.csv',header='column_names')
else:
GF_Table.to_csv('GF_Table.csv',mode='a',header=False)
        return print([Model_Table, HP_Table, GF_Table], "Tables updated!")
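
# Minimal usage sketch (illustrative only; assumes a fitted scikit-learn
# estimator and a held-out test split; tracker.save() also appends to the
# hard-coded SQL Server connection configured above):
#
#     from sklearn.linear_model import LinearRegression
#     lr = LinearRegression().fit(X_train, y_train)
#     data("train.csv").save()             # register the dataset, get a DID
#     tracker(lr, X_test, y_test).save()   # log model, params and fit metrics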
| 2.265625 | 2 |
Lesson_7/Task_13.py | AlexHarf/Selenium_training | 0 | 12788889 | <gh_stars>0
import pytest
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
request.addfinalizer(wd.quit)
return wd
def test(driver):
driver.implicitly_wait(5)
driver.get("http://localhost/litecart/en/")
for i in range(0, 3):
menu = driver.find_elements_by_css_selector("div#box-most-popular.box a:not([data-fancybox-group])")
menu[i].click()
size = driver.find_elements_by_name("options[Size]")
if len(size) > 0:
size_select = Select(driver.find_element_by_name('options[Size]'))
size_select.select_by_value('Small')
driver.find_element_by_name("add_cart_product").click()
WebDriverWait(driver, 10).until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, "div#cart span.quantity"), str(i+1)))
driver.get("http://localhost/litecart/en/")
driver.find_element_by_css_selector("div#cart a:last-child").click()
product_number = driver.find_elements_by_css_selector("td.item")
for c in product_number:
table_item=driver.find_element_by_class_name("item")
driver.find_element_by_name("remove_cart_item").click()
WebDriverWait(driver, 10).until(EC.staleness_of(table_item))
| 2.5625 | 3 |
skills/dff_friendship_skill/dialogflows/flows/starter_states.py | oserikov/dream | 34 | 12788890 | from enum import Enum, auto
class State(Enum):
USR_START = auto()
#
# SYS_GENRE = auto()
# USR_GENRE = auto()
# SYS_WEEKDAY = auto()
USR_WHAT_FAV = auto()
SYS_CHECK_POSITIVE = auto()
SYS_CHECK_NEGATIVE = auto()
SYS_CHECK_NEUTRAL = auto()
SYS_GET_REASON = auto()
USR_REPEAT = auto()
SYS_AGREED = auto()
SYS_DISAGREED = auto()
USR_ASSENT_YES = auto()
USR_ASSENT_NO = auto()
USR_MY_FAV = auto()
SYS_YES = auto()
SYS_NO = auto()
USR_WHY = auto()
USR_MY_FAV_STORY = auto()
# USR_WEEKDAY = auto()
SYS_FRIDAY = auto()
USR_FRIDAY = auto()
SYS_SMTH = auto()
USR_MY_FAV_DAY = auto()
#
SYS_ERR = auto()
USR_ERR = auto()
| 2.96875 | 3 |
common_configs/logging/stdlib.py | nigma/django-common-configs | 5 | 12788891 | <filename>common_configs/logging/stdlib.py
#-*- coding: utf-8 -*-
"""
Django logging configuration
Defined loggers:
- <catch all>
- django
- django.startup
- django.request
- django.db.backends
- django.commands
- django.security.DisallowedHost
- app.* - for project app loggers
- boto
- celery
- requests
- raven
- sentry.errors
Defined handlers:
- mail_admins
- console
- console_celery
- sentry
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import logging
from configurations import values
logging.captureWarnings(True)
class Logging(object):
#: Default handler
LOGGING_DEFAULT_HANDLER = values.Value("console")
#: Default handler for celery
LOGGING_CELERY_HANDLER = values.Value("console_celery")
#: Default formatter
LOGGING_DEFAULT_FORMATTER = values.Value("console")
#: Add request-id to each log line (requires https://github.com/dabapps/django-log-request-id)
LOGGING_ADD_REQUEST_ID = values.BooleanValue(True)
#: Use sentry for error logging
LOGGING_USE_SENTRY = values.BooleanValue(True)
def get_request_id_filters(self):
return ["request_id"] if self.LOGGING_ADD_REQUEST_ID else []
def get_sentry_handlers(self):
return ["sentry"] if self.LOGGING_USE_SENTRY else []
def get_logging_filters(self):
filters = {
"require_debug_false": {
"()": "django.utils.log.RequireDebugFalse"
}
}
if self.LOGGING_ADD_REQUEST_ID:
filters["request_id"] = {
"()": "log_request_id.filters.RequestIDFilter"
}
return filters
def get_logging_formatters(self):
return {
"standard": {
"format": "%(asctime)s - %(levelname)-5s [%(name)s:%(lineno)s] %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S"
},
"verbose": {
"format": "%(asctime)s - %(levelname)-5s %(module)s [%(name)s:%(lineno)s] %(process)d %(thread)d %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S"
},
"console": {
"format": "%(asctime)s - %(levelname)-5s [%(name)s:%(lineno)s] %(message)s",
"datefmt": "%H:%M:%S"
},
"heroku": {
"format": "%(levelname)-5s request_id=%(request_id)s [%(name)s:%(lineno)s] %(message)s"
},
"celery": {
"format": "%(levelname)-5s [%(processName)s:%(name)s:%(lineno)s] [%(task_name)s(%(task_id)s)] %(message)s"
}
}
def get_logging_handlers(self):
stream = sys.stdout
handlers = {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"] + self.get_request_id_filters(),
"class": "django.utils.log.AdminEmailHandler",
"include_html": False,
},
"console": {
"level": "DEBUG",
"filters": self.get_request_id_filters(),
"class": "logging.StreamHandler",
"formatter": "heroku",
"stream": stream
},
"console_celery": {
"level": "INFO",
"class": "logging.StreamHandler",
"formatter": "celery",
"stream": stream
}
}
if self.LOGGING_USE_SENTRY:
handlers["sentry"] = {
"level": "ERROR",
"filters": self.get_request_id_filters(),
"class": "raven.contrib.django.raven_compat.handlers.SentryHandler",
}
return handlers
def get_loggers(self):
handlers = [self.LOGGING_DEFAULT_HANDLER] + self.get_sentry_handlers()
return {
"": {
"handlers": handlers,
"level": "WARNING",
},
"boto": {
"handlers": handlers,
"level": "INFO",
"propagate": True
},
"django": {
"handlers": handlers,
"level": "WARNING",
"propagate": False,
},
"django.startup": {
"handlers": handlers,
"level": "INFO",
"propagate": False
},
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True
},
"django.db.backends": {
"level": "ERROR",
"handlers": handlers,
"propagate": False
},
"django.commands": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True
},
"django.security.DisallowedHost": {
"handlers": [],
"propagate": False,
},
"app": {
"handlers": handlers,
"level": "DEBUG",
"propagate": False
},
"celery": {
"handlers": handlers,
"level": "INFO",
"propagate": False
},
"requests": {
"handlers": handlers,
"level": "WARNING",
"propagate": False
},
"oauthlib": {
"handlers": handlers,
"level": "INFO",
"propagate": False
},
"raven": {
"level": "DEBUG",
"handlers": [self.LOGGING_DEFAULT_HANDLER],
"propagate": False
},
"sentry.errors": {
"level": "DEBUG",
"handlers": [self.LOGGING_DEFAULT_HANDLER],
"propagate": False
},
}
def LOGGING(self):
"""
Fully configured Django logging
"""
return {
"version": 1,
"disable_existing_loggers": False,
"root": {
"level": "WARNING",
"handlers": [self.LOGGING_DEFAULT_HANDLER] + self.get_sentry_handlers(),
},
"formatters": self.get_logging_formatters(),
"filters": self.get_logging_filters(),
"handlers": self.get_logging_handlers(),
"loggers": self.get_loggers()
}
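
# Usage sketch (the class name and import path below are illustrative
# assumptions): a project would normally mix ``Logging`` into its
# django-configurations settings class and override the class-level values:
#
#     from configurations import Configuration
#
#     class Production(Logging, Configuration):
#         LOGGING_USE_SENTRY = values.BooleanValue(False)
#         LOGGING_DEFAULT_HANDLER = values.Value("console")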
| 1.96875 | 2 |
tests/test_sia_client.py | fcoach66/pysiaalarm | 0 | 12788892 | # -*- coding: utf-8 -*-
"""Class for tests of pysiaalarm."""
import json
import logging
import random
import socket
import threading
import time
import pytest
from mock import patch
from pysiaalarm import InvalidAccountFormatError
from pysiaalarm import InvalidAccountLengthError
from pysiaalarm import InvalidKeyFormatError
from pysiaalarm import InvalidKeyLengthError
from pysiaalarm import SIAAccount
from pysiaalarm import SIAClient
from pysiaalarm import SIAEvent
from tests.test_client import client_program
from tests.test_utils import create_test_items
_LOGGER = logging.getLogger(__name__)
KEY = "<KEY>"
ACCOUNT = "1111"
HOST = "localhost"
PORT = 7777
def func(event: SIAEvent):
"""Pass for testing."""
pass
class testSIA(object):
"""Class for pysiaalarm tests."""
@pytest.mark.parametrize(
"line, account, type, code",
[
(
'98100078"*SIA-DCS"5994L0#AAA[5AB718E008C616BF16F6468033A11326B0F7546CAB230910BCA10E4DEBA42283C436E4F8EFF50931070DDE36D5BB5F0C',
"AAA",
"",
"",
),
(
'2E680078"SIA-DCS"6002L0#AAA[|Nri1/CL501]_14:12:04,09-25-2019',
"AAA",
"Closing Report",
"CL",
),
],
)
def test_event_parsing(self, line, account, type, code):
"""Test event parsing methods."""
event = SIAEvent(line)
assert event.code == code
assert event.type == type
assert event.account == account
@pytest.mark.parametrize(
"key, account, port, error",
[
("ZZZZZZZZZZZZZZZZ", ACCOUNT, 7777, InvalidKeyFormatError),
("158888888888888", ACCOUNT, 7777, InvalidKeyLengthError),
("1688888888888888", ACCOUNT, 7777, None),
("23888888888888888888888", ACCOUNT, 7777, InvalidKeyLengthError),
("248888888888888888888888", ACCOUNT, 7777, None),
("3188888888888888888888888888888", ACCOUNT, 7777, InvalidKeyLengthError),
("32888888888888888888888888888888", ACCOUNT, 7777, None),
(KEY, "22", 7777, InvalidAccountLengthError),
(KEY, "ZZZ", 7777, InvalidAccountFormatError),
],
)
def test_sia_key_account_errors(self, key, account, port, error):
"""Test sia client behaviour."""
try:
SIAClient(
host="",
port=port,
accounts=[SIAAccount(account_id=account, key=key)],
function=func,
)
assert False if error else True
except Exception as exp:
assert isinstance(exp, error)
@pytest.mark.parametrize("config_file", [("tests\\unencrypted_config.json")])
def test_client(self, config_file):
"""Test the client.
Arguments:
config_file {str} -- Filename of the config.
"""
try:
with open(config_file, "r") as f:
config = json.load(f)
except: # noqa: E722
config = {"host": HOST, "port": PORT, "account_id": ACCOUNT, "key": None}
events = []
def func_append(event: SIAEvent):
events.append(event)
siac = SIAClient(
host="",
port=config["port"],
accounts=[SIAAccount(account_id=config["account_id"], key=config["key"])],
function=func_append,
)
siac.start()
tests = [
{"code": False, "crc": False, "account": False, "time": False},
{"code": True, "crc": False, "account": False, "time": False},
{"code": False, "crc": True, "account": False, "time": False},
{"code": False, "crc": False, "account": True, "time": False},
{"code": False, "crc": False, "account": False, "time": True},
]
t = threading.Thread(
target=client_program, name="test_client", args=(config, 1, tests)
)
t.daemon = True
t.start() # stops after the five events have been sent.
# run for 30 seconds
time.sleep(30)
siac.stop()
assert siac.counts == {
"events": 5,
"valid_events": 1,
"errors": {
"crc": 1,
"timestamp": 1,
"account": 1,
"code": 1,
"format": 0,
"user_code": 0,
},
}
assert len(events) == 1
| 2.109375 | 2 |
third/imagelib/06_dlib.py | gottaegbert/penter | 13 | 12788893 | <filename>third/imagelib/06_dlib.py
"""
# https://github.com/vipstone/faceai
# https://www.cnblogs.com/vipstone/p/8964656.html
Downloading the trained model
The trained model is the key to face recognition: it is used to locate the landmark points in an image.
Download page: http://dlib.net/files/
Download file: shape_predictor_68_face_landmarks.dat.bz2
You can also train your own facial-landmark model; that will be covered later on.
I keep the downloaded model file at: C:\Python36\Lib\site-packages\dlib-data\shape_predictor_68_face_landmarks.dat.bz2
Unpacking shape_predictor_68_face_landmarks.dat.bz2 yields the file shape_predictor_68_face_landmarks.dat
"""
def demo1():
import cv2
import dlib
path = "lenna.jpg"
img = cv2.imread(path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Face detector
    detector = dlib.get_frontal_face_detector()
    # Facial-landmark predictor (shape predictor); available model files:
# r"E:\bigdata\ai\dlib\data\shape_predictor_68_face_landmarks.dat"
# r"E:\bigdata\ai\dlib\data\shape_predictor_5_face_landmarks.dat"
predictor = dlib.shape_predictor(
r"E:\bigdata\ai\dlib\data\shape_predictor_68_face_landmarks.dat"
)
dets = detector(gray, 1)
for face in dets:
        shape = predictor(img, face)  # locate the 68 facial landmarks
        # Iterate over every landmark and draw a small circle at its coordinates
for pt in shape.parts():
pt_pos = (pt.x, pt.y)
cv2.circle(img, pt_pos, 2, (0, 255, 0), 1)
cv2.imshow("image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Face detection on video
def demo2():
import cv2
import dlib
    detector = dlib.get_frontal_face_detector()  # use the default frontal face detector model
def discern(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
dets = detector(gray, 1)
for face in dets:
left = face.left()
top = face.top()
right = face.right()
bottom = face.bottom()
cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
cv2.imshow("image", img)
else:
cv2.imshow("image", img)
cap = cv2.VideoCapture(r"E:\bigdata\ai\video\1.mp4")
while (1):
ret, img = cap.read()
discern(img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
# CNN-based dog face detection
def demo3():
import cv2
import dlib
cnn_face_detector = dlib.cnn_face_detection_model_v1(r"E:\bigdata\ai\dlib\data\mmod_dog_hipsterizer.dat")
for f in ["../imagelib/dog1.png"]:
        # Read the image with OpenCV (it is displayed further below)
img = cv2.imread(f, cv2.IMREAD_COLOR)
        # Convert the OpenCV BGR image to RGB
b, g, r = cv2.split(img)
        img2 = cv2.merge([r, g, b])
        # Run detection on the RGB image (the conversion above is otherwise unused)
        dets = cnn_face_detector(img2, 1)
        # Print the number of detected faces
print("Number of faces detected: {}".format(len(dets)))
        # Iterate over the returned results
        # The result is an mmod_rectangles object; each entry has two members: a dlib.rectangle giving the detection's position, and a confidence value.
for i, d in enumerate(dets):
face = d.rect
print(
"Detection {}: Left: {} Top: {} Right: {} Bottom: {} Confidence: {}".format(i, face.left(), face.top(),
face.right(),
d.rect.bottom(),
d.confidence))
            # Mark the detected face in the image
left = face.left()
top = face.top()
right = face.right()
bottom = face.bottom()
cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 3)
cv2.namedWindow(f, cv2.WINDOW_AUTOSIZE)
cv2.imshow(f, img)
k = cv2.waitKey(0)
cv2.destroyAllWindows()
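
# Minimal entry-point sketch: run one of the demos directly. demo1 only needs
# "lenna.jpg" next to this script and the 68-landmark model at the hard-coded
# path above; adjust those paths for your machine before running.
if __name__ == "__main__":
    demo1()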
| 3.296875 | 3 |
src/run.py | iWeya/palestra-web-scraping-para-cacar-pontos-na-escola | 1 | 12788894 | import json
from requests_html import HTMLSession
from helpers import compare_trees, get_content_tree
MAIN_URL = r'http://docente.ifrn.edu.br/abrahaolopes/2017.1-integrado/2.02401.1v-poo'
def main():
session = HTMLSession()
current_tree = get_content_tree(MAIN_URL, session)
with open('storage/tree.json', 'r') as stored_tree_file:
stored_tree = json.load(stored_tree_file)
difference = compare_trees(
stored_tree,
current_tree
)
if difference:
for item in difference:
category = item['category'].upper()
category = category.rjust(8)
path = item['path']
url = item['url']
print(
f'{category} | {path}'
)
print(
f'{url}\n'
)
with open('storage/tree.json', 'w') as stored_tree_file:
stored_tree_file.write(
json.dumps(current_tree)
)
if __name__ == "__main__":
main()
| 2.984375 | 3 |
linAlgVis.py | testinggg-art/Linear_Algebra_With_Python | 1,719 | 12788895 | <filename>linAlgVis.py
import matplotlib.pyplot as plt
import numpy as np
def linearCombo(a, b, c):
'''This function is for visualizing linear combination of standard basis in 3D.
Function syntax: linearCombo(a, b, c), where a, b, c are the scalar multiplier,
also the elements of the vector.
'''
fig = plt.figure(figsize = (10,10))
ax = fig.add_subplot(projection='3d')
######################## Standard basis and Scalar Multiplid Vectors#########################
vec = np.array([[[0, 0, 0, 1, 0, 0]], # e1
[[0, 0, 0, 0, 1, 0]], # e2
[[0, 0, 0, 0, 0, 1]], # e3
[[0, 0, 0, a, 0, 0]], # a* e1
[[0, 0, 0, 0, b, 0]], # b* e2
[[0, 0, 0, 0, 0, c]], # c* e3
[[0, 0, 0, a, b, c]]]) # ae1 + be2 + ce3
colors = ['b','b','b','r','r','r','g']
for i in range(vec.shape[0]):
X, Y, Z, U, V, W = zip(*vec[i,:,:])
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False,
color = colors[i] ,arrow_length_ratio = .08, pivot = 'tail',
linestyles = 'solid',linewidths = 3, alpha =.6)
#################################Plot Rectangle Boxes##############################
dlines = np.array([[[a, 0, 0],[a, b, 0]],
[[0, b, 0],[a, b, 0]],
[[0, 0, c],[a, b, c]],
[[0, 0, c],[a, 0, c]],
[[a, 0, c],[a, b, c]],
[[0, 0, c],[0, b, c]],
[[0, b, c],[a, b, c]],
[[a, 0, 0],[a, 0, c]],
[[0, b, 0],[0, b, c]],
[[a, b, 0],[a, b, c]]])
colors = ['k','k','g','k','k','k','k','k','k']
for i in range(dlines.shape[0]):
ax.plot(dlines[i,:,0], dlines[i,:,1], dlines[i,:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
#################################Annotation########################################
ax.text(x = a, y = b, z = c, s= ' $(%0.d, %0.d, %.0d)$'% (a, b, c), size = 18)
ax.text(x = a, y = 0, z = 0, s= ' $%0.d e_1 = (%0.d, 0, 0)$'% (a, a), size = 15)
ax.text(x = 0, y = b, z = 0, s= ' $%0.d e_2 = (0, %0.d, 0)$'% (b, b), size = 15)
ax.text(x = 0, y = 0, z = c, s= ' $%0.d e_3 = (0, 0, %0.d)$' %(c, c), size = 15)
#################################Axis Setting######################################
ax.grid()
ax.set_xlim([0, a+1])
ax.set_ylim([0, b+1])
ax.set_zlim([0, c+1])
ax.set_xlabel('x-axis', size = 18)
ax.set_ylabel('y-axis', size = 18)
ax.set_zlabel('z-axis', size = 18)
ax.set_title('Vector $(%0.d, %0.d, %.0d)$ Visualization' %(a, b, c), size = 20)
ax.view_init(elev=20., azim=15)
if __name__ == '__main__':
a = 7
b = 4
c = 9
linearCombo(a, b, c)
def linearComboNonStd(a, b, c, vec1, vec2, vec3):
'''This function is for visualizing linear combination of non-standard basis in 3D.
Function syntax: linearCombo(a, b, c, vec1, vec2, vec3), where a, b, c are the scalar multiplier,
ve1, vec2 and vec3 are the basis.
'''
fig = plt.figure(figsize = (10,10))
ax = fig.add_subplot(projection='3d')
########################Plot basis##############################
vec1 = np.array([[0, 0, 0, vec1[0], vec1[1], vec1[2]]])
X, Y, Z, U, V, W = zip(*vec1)
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False, color = 'blue',arrow_length_ratio = .08, pivot = 'tail',
linestyles = 'solid',linewidths = 3)
vec2 = np.array([[0, 0, 0, vec2[0], vec2[1], vec2[2]]])
X, Y, Z, U, V, W = zip(*vec2)
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False, color = 'blue',arrow_length_ratio = .08, pivot = 'tail',
linestyles = 'solid',linewidths = 3)
vec3 = np.array([[0, 0, 0, vec3[0], vec3[1], vec3[2]]])
X, Y, Z, U, V, W = zip(*vec3)
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False, color = 'blue',arrow_length_ratio = .08, pivot = 'tail',
linestyles = 'solid',linewidths = 3)
###########################Plot Scalar Muliplied Vectors####################
avec1 = a * vec1
X, Y, Z, U, V, W = zip(*avec1)
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False, color = 'red', alpha = .6,arrow_length_ratio = a/100, pivot = 'tail',
linestyles = 'solid',linewidths = 3)
bvec2 = b * vec2
X, Y, Z, U, V, W = zip(*bvec2)
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False, color = 'red', alpha = .6,arrow_length_ratio = b/100, pivot = 'tail',
linestyles = 'solid',linewidths = 3)
cvec3 = c * vec3
X, Y, Z, U, V, W = zip(*cvec3)
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False, color = 'red', alpha = .6,arrow_length_ratio = c/100, pivot = 'tail',
linestyles = 'solid',linewidths = 3)
combo = avec1 + bvec2 + cvec3
X, Y, Z, U, V, W = zip(*combo)
ax.quiver(X, Y, Z, U, V, W, length=1, normalize=False, color = 'green', alpha = .7,arrow_length_ratio = np.linalg.norm(combo)/300, pivot = 'tail',
linestyles = 'solid',linewidths = 3)
#################################Plot Rectangle Boxes##############################
point1 = [avec1[0, 3], avec1[0, 4], avec1[0, 5]]
point2 = [avec1[0, 3]+bvec2[0, 3], avec1[0, 4]+bvec2[0, 4], avec1[0, 5]+bvec2[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
point1 = [bvec2[0, 3], bvec2[0, 4], bvec2[0, 5]]
point2 = [avec1[0, 3]+bvec2[0, 3], avec1[0, 4]+bvec2[0, 4], avec1[0, 5]+bvec2[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
point1 = [bvec2[0, 3], bvec2[0, 4], bvec2[0, 5]]
point2 = [cvec3[0, 3]+bvec2[0, 3], cvec3[0, 4]+bvec2[0, 4], cvec3[0, 5]+bvec2[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
point1 = [cvec3[0, 3], cvec3[0, 4], cvec3[0, 5]]
point2 = [cvec3[0, 3]+bvec2[0, 3], cvec3[0, 4]+bvec2[0, 4], cvec3[0, 5]+bvec2[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
point1 = [cvec3[0, 3], cvec3[0, 4], cvec3[0, 5]]
point2 = [cvec3[0, 3]+avec1[0, 3], cvec3[0, 4]+avec1[0, 4], cvec3[0, 5]+avec1[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
point1 = [avec1[0, 3], avec1[0, 4], avec1[0, 5]]
point2 = [cvec3[0, 3]+avec1[0, 3], cvec3[0, 4]+avec1[0, 4], cvec3[0, 5]+avec1[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
##
point1 = [avec1[0, 3]+bvec2[0, 3]+cvec3[0, 3],
avec1[0, 4]+bvec2[0, 4]+cvec3[0, 4],
avec1[0, 5]+bvec2[0, 5]+cvec3[0, 5]]
point2 = [cvec3[0, 3]+avec1[0, 3],
cvec3[0, 4]+avec1[0, 4],
cvec3[0, 5]+avec1[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
##
point1 = [avec1[0, 3]+bvec2[0, 3]+cvec3[0, 3],
avec1[0, 4]+bvec2[0, 4]+cvec3[0, 4],
avec1[0, 5]+bvec2[0, 5]+cvec3[0, 5]]
point2 = [cvec3[0, 3]+bvec2[0, 3],
cvec3[0, 4]+bvec2[0, 4],
cvec3[0, 5]+bvec2[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
##
point1 = [avec1[0, 3]+bvec2[0, 3]+cvec3[0, 3],
avec1[0, 4]+bvec2[0, 4]+cvec3[0, 4],
avec1[0, 5]+bvec2[0, 5]+cvec3[0, 5]]
point2 = [bvec2[0, 3]+avec1[0, 3],
bvec2[0, 4]+avec1[0, 4],
bvec2[0, 5]+avec1[0, 5]]
line1 = np.array([point1, point2])
ax.plot(line1[:,0], line1[:,1], line1[:,2], lw =3, ls = '--', color = 'black', alpha=0.5)
#################################Annotation########################################
    ax.text(x = vec1[0,3], y = vec1[0,4], z = vec1[0,5], s= ' $v_1 =(%0.d, %0.d, %.0d)$'% (vec1[0,3], vec1[0,4], vec1[0,5]), size = 8)
    ax.text(x = vec2[0,3], y = vec2[0,4], z = vec2[0,5], s= ' $v_2 =(%0.d, %0.d, %.0d)$'% (vec2[0,3], vec2[0,4], vec2[0,5]), size = 8)
    ax.text(x = vec3[0,3], y = vec3[0,4], z = vec3[0,5], s= ' $v_3= (%0.d, %0.d, %.0d)$'% (vec3[0,3], vec3[0,4], vec3[0,5]), size = 8)
    ax.text(x = avec1[0,3], y = avec1[0,4], z = avec1[0,5], s= ' $%.0d v_1 =(%0.d, %0.d, %.0d)$'% (a, avec1[0,3], avec1[0,4], avec1[0,5]), size = 8)
    ax.text(x = bvec2[0,3], y = bvec2[0,4], z = bvec2[0,5], s= ' $%.0d v_2 =(%0.d, %0.d, %.0d)$'% (b, bvec2[0,3], bvec2[0,4], bvec2[0,5]), size = 8)
    ax.text(x = cvec3[0,3], y = cvec3[0,4], z = cvec3[0,5], s= ' $%.0d v_3= (%0.d, %0.d, %.0d)$'% (c, cvec3[0,3], cvec3[0,4], cvec3[0,5]), size = 8)
# ax.text(x = 0, y = b, z = 0, s= ' $%0.d e_2 = (0, %0.d, 0)$'% (b, b), size = 15)
# ax.text(x = 0, y = 0, z = c, s= ' $%0.d e_3 = (0, 0, %0.d)$' %(c, c), size = 15)
#################################Axis Setting######################################
ax.grid()
ax.set_xlim([0, 15])
ax.set_ylim([0, 15])
ax.set_zlim([0, 15])
ax.set_xlabel('x-axis', size = 18)
ax.set_ylabel('y-axis', size = 18)
ax.set_zlabel('z-axis', size = 18)
#ax.set_title('Vector $(%0.d, %0.d, %.0d)$ Visualization' %(a, b, c), size = 20)
ax.view_init(elev=20., azim=15)
if __name__ == '__main__':
a = 2
b = 3
c = 4
vec1 = np.array([2,1,0])
vec2 = np.array([0,3,1])
vec3 = np.array([1,2,3])
linearComboNonStd(a, b, c, vec1,vec2,vec3)
| 3.765625 | 4 |
cosmoz/joysticks.py | T-K-233/arduino-python | 2 | 12788896 | <gh_stars>1-10
'''
Adapted from Xbox-360-Controller-for-Python
https://github.com/r4dian/Xbox-360-Controller-for-Python
Modified by -T.K.- Aug 2018
'''
import ctypes
import time
import sys
from operator import itemgetter, attrgetter
from itertools import count, starmap
from pyglet import event
class XINPUT_GAMEPAD(ctypes.Structure):
_fields_ = [
('buttons', ctypes.c_ushort), # wButtons
('4', ctypes.c_ubyte), # bLeftTrigger
('5', ctypes.c_ubyte), # bLeftTrigger
('0', ctypes.c_short), # sThumbLX
('1', ctypes.c_short), # sThumbLY
('2', ctypes.c_short), # sThumbRx
('3', ctypes.c_short), # sThumbRy
]
class XINPUT_STATE(ctypes.Structure):
_fields_ = [
('packet_number', ctypes.c_ulong), # dwPacketNumber
('gamepad', XINPUT_GAMEPAD), # Gamepad
]
class XINPUT_VIBRATION(ctypes.Structure):
_fields_ = [("l_motor", ctypes.c_ushort),
("r_motor", ctypes.c_ushort)]
class XINPUT_BATTERY_INFORMATION(ctypes.Structure):
_fields_ = [("BatteryType", ctypes.c_ubyte),
("BatteryLevel", ctypes.c_ubyte)]
xinput = ctypes.windll.xinput1_4
def struct_dict(struct):
'''
take a ctypes.Structure and return its field/value pairs as a dict.
>>> 'buttons' in struct_dict(XINPUT_GAMEPAD)
True
>>> struct_dict(XINPUT_GAMEPAD)['buttons'].__class__.__name__
'CField'
'''
get_pair = lambda field_type: (
field_type[0], getattr(struct, field_type[0]))
return dict(list(map(get_pair, struct._fields_)))
def get_bit_values(number, size=32):
'''
Get bit values as a list for a given number
>>> get_bit_values(1) == [0]*31 + [1]
True
>>> get_bit_values(0xDEADBEEF)
[1L, 1L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 1L]
You may override the default word size of 32-bits to match your actual
application.
>>> get_bit_values(0x3, 2)
[1L, 1L]
>>> get_bit_values(0x3, 4)
[0L, 0L, 1L, 1L]
'''
res = list(gen_bit_values(number))
res.reverse()
# 0-pad the most significant bit
res = [0] * (size - len(res)) + res
return res
def gen_bit_values(number):
'''
Return a zero or one for each bit of a numeric value up to the most significant 1 bit, beginning with the least significant bit.
'''
number = int(number)
while number:
yield number & 0x1
number >>= 1
class XBoxController(event.EventDispatcher):
'''
A stateful wrapper, using pyglet event model, that binds to one XInput device and dispatches events when states change.
'''
max_devices = 4
def __init__(self, device_number, normalize_axes=True):
self.device_number = device_number
values = vars()
del values['self']
self.__dict__.update(values)
super(XBoxController, self).__init__()
self._last_state = self.get_state()
self.axis = [0.0] * 6
self.button = [0] * 16
# Set the method that will be called to normalize the values for analog axis.
choices = [self.translate_identity, self.translate_using_data_size]
self.translate = choices[normalize_axes]
self.get_state()
def translate_using_data_size(self, value, data_size):
# normalizes analog data to [0,1] for unsigned data and [-0.5,0.5] for signed data
data_bits = 8 * data_size
return float(value) / (2 ** data_bits - 1)
def translate_identity(self, value, data_size=None):
return value
def get_state(self):
'Get the state of the controller represented by this object'
state = XINPUT_STATE()
res = xinput.XInputGetState(self.device_number, ctypes.byref(state))
if res == 0: # SUCCESS
self.state = 1
return state
if res == 1167: # DEVICE_NOT_CONNECTED
self.state = 0
else:
raise RuntimeError('Unknown error %d attempting to get state of device %d' % (res, self.device_number))
def is_connected(self):
return self._last_state is not None
def set_vibration(self, left_motor, right_motor):
'Control the speed of both motors seperately'
XInputSetState = xinput.XInputSetState
XInputSetState.argtypes = [ctypes.c_uint, ctypes.POINTER(XINPUT_VIBRATION)]
XInputSetState.restype = ctypes.c_uint
vibration = XINPUT_VIBRATION(int(left_motor * 65535), int(right_motor * 65535))
XInputSetState(self.device_number, ctypes.byref(vibration))
def get_battery_information(self):
'Get battery type & charge level'
BATTERY_DEVTYPE_GAMEPAD = 0x00
BATTERY_DEVTYPE_HEADSET = 0x01
XInputGetBatteryInformation = xinput.XInputGetBatteryInformation
XInputGetBatteryInformation.argtypes = [ctypes.c_uint, ctypes.c_ubyte, ctypes.POINTER(XINPUT_BATTERY_INFORMATION)]
XInputGetBatteryInformation.restype = ctypes.c_uint
battery = XINPUT_BATTERY_INFORMATION(0,0)
XInputGetBatteryInformation(self.device_number, BATTERY_DEVTYPE_GAMEPAD, ctypes.byref(battery))
'''
define BATTERY_TYPE_DISCONNECTED 0x00
define BATTERY_TYPE_WIRED 0x01
define BATTERY_TYPE_ALKALINE 0x02
define BATTERY_TYPE_NIMH 0x03
define BATTERY_TYPE_UNKNOWN 0xFF
define BATTERY_LEVEL_EMPTY 0x00
define BATTERY_LEVEL_LOW 0x01
define BATTERY_LEVEL_MEDIUM 0x02
define BATTERY_LEVEL_FULL 0x03
'''
batt_type = 'Unknown' if battery.BatteryType == 0xFF else ['Disconnected', 'Wired', 'Alkaline', 'Nimh'][battery.BatteryType]
level = ['Empty', 'Low', 'Medium', 'Full'][battery.BatteryLevel]
return batt_type, level
def handle_changed_state(self, state):
'Dispatch various events as a result of the state changing'
self.dispatch_event('on_state_changed', state)
self.dispatch_axis_events(state)
self.dispatch_button_events(state)
def dispatch_axis_events(self, state):
axis_fields = dict(XINPUT_GAMEPAD._fields_)
axis_fields.pop('buttons')
for axis, type in list(axis_fields.items()):
old_val = getattr(self._last_state.gamepad, axis)
new_val = getattr(state.gamepad, axis)
data_size = ctypes.sizeof(type)
old_val = self.translate(old_val, data_size)
new_val = self.translate(new_val, data_size)
# an attempt to add deadzones and dampen noise
if ((old_val!=new_val and (new_val>0.08000000000000000 or new_val<-0.08000000000000000) and abs(old_val-new_val) > 0.00000000500000000) or (axis=='4' or axis=='5') and new_val==0 and abs(old_val-new_val) > 0.00000000500000000):
self.dispatch_event('on_axis', axis, new_val)
def dispatch_button_events(self, state):
changed = state.gamepad.buttons ^ self._last_state.gamepad.buttons
changed = get_bit_values(changed, 16)
buttons_state = get_bit_values(state.gamepad.buttons, 16)
changed.reverse()
buttons_state.reverse()
button_numbers = count(1)
changed_buttons = list(filter(itemgetter(0), list(zip(changed, button_numbers, buttons_state))))
tuple(starmap(self.dispatch_button_event, changed_buttons))
def dispatch_button_event(self, changed, number, pressed):
self.dispatch_event('on_button', number-1, pressed) # -1 to restore index to 0
def on_axis(self, axis, value):
self.axis[int(axis)] = value * 2
def on_button(self, button, pressed):
self.button[button] = pressed
def refresh(self):
state = self.get_state()
try:
if state.packet_number != self._last_state.packet_number:
self.handle_changed_state(state)
except:
pass
self._last_state = state
@staticmethod
def init_all():
devices = list(map(XBoxController, list(range(XBoxController.max_devices))))
devices = [d for d in devices if d.is_connected()]
print('%d joysticks found.' % len(devices))
return devices
list(map(XBoxController.register_event_type, [
'on_state_changed',
'on_axis',
'on_button',
]))
import keyboard
class Keyboard:
    def __init__(self, suppress=False):
        self.keys = []
        self.state = 1
        self._suppress = suppress
        # Register the key hook once here; hooking on every refresh() call
        # would stack duplicate callbacks.
        keyboard.hook(self._process_keys, suppress=self._suppress)
def _process_keys(self, e):
if e.event_type == 'down':
e = e.scan_code
if e not in self.keys:
self.keys.append(e)
elif e.event_type == 'up':
e = e.scan_code
if e in self.keys:
self.keys.remove(e)
def key(self, key):
if type(key) == int:
return key in self.keys
    def refresh(self):
        # The keyboard hook is event-driven, so there is nothing to poll;
        # kept so the interface matches XBoxController.refresh().
        pass
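
# Usage sketch (illustrative values; assumes at least one controller is
# connected): poll every connected pad and print button/axis events.
if __name__ == "__main__":
    pads = XBoxController.init_all()
    pad = pads[0]

    @pad.event
    def on_button(button, pressed):
        print("button", button, "pressed" if pressed else "released")

    @pad.event
    def on_axis(axis, value):
        print("axis", axis, value)

    while True:
        for p in pads:
            p.refresh()
        time.sleep(0.01)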
| 2.5625 | 3 |
nn/neuron.py | prabhatnagarajan/rl_lib | 1 | 12788897 | <gh_stars>1-10
from activation import *
import numpy as np
from pdb import set_trace
class Neuron:
# takes in initialized weights with the final term
    def __init__(self, init_weights, init_bias, activation=Activation.sigmoid):
        self.weights = init_weights
        self.bias = init_bias
        obj = Activation()
        # Use the activation passed by the caller; default to the instance-bound sigmoid.
        if activation is Activation.sigmoid:
            self.activation = obj.sigmoid
        else:
            self.activation = activation
def apply(self, input):
return self.activation(np.dot(input, self.weights) + self.bias)
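
# Usage sketch (illustrative numbers): a single neuron applied to a 3-feature input.
if __name__ == "__main__":
    neuron = Neuron(init_weights=np.array([0.5, -0.25, 0.1]), init_bias=0.2)
    print(neuron.apply(np.array([1.0, 2.0, 3.0])))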
| 3.09375 | 3 |
app/models/keyword.py | Simple2B/twitter-bot | 0 | 12788898 | <reponame>Simple2B/twitter-bot<filename>app/models/keyword.py<gh_stars>0
from app import db
from app.models.utils import ModelMixin
class Keyword(db.Model, ModelMixin):
__tablename__ = 'keywords'
id = db.Column(db.Integer, primary_key=True)
word = db.Column(db.String(60), unique=True, nullable=False)
| 1.96875 | 2 |
src/upbgui/frame_button.py | SIGSEGV-666/UPBGUI | 1 | 12788899 | from .widget import Widget, BGUI_DEFAULT, BGUI_NO_THEME, BGUI_CENTERED
from .frame import Frame
from .label import Label
FBSTYLE_CLASSIC = 0
FBSTYLE_SOLID = 1
class FrameButton(Widget):
"""A clickable frame-based button."""
theme_section = 'FrameButton'
theme_options = {
'Color': (0.4, 0.4, 0.4, 1),
'BorderSize': 1,
'BorderColor': (0, 0, 0, 1),
'LabelSubTheme': '',
'SolidBaseColor': (0.4, 0.4, 0.4, 1.0),
'SolidHoverColor': (0.6, 0.6, 0.6, 1.0),
'SolidClickColor': (0.9, 0.9, 0.9, 1.0)
}
def __init__(self, parent, name=None, base_color=None, text="", font=None,
pt_size=None, aspect=None, size=[1, 1], pos=[0, 0], sub_theme='', text_color=None, border_size=None, border_color=None, style=FBSTYLE_CLASSIC, hover_color=None, click_color=None, options=BGUI_DEFAULT):
"""
:param parent: the widget's parent
:param name: the name of the widget
:param base_color: the color of the button
:param text: the text to display (this can be changed later via the text property)
:param font: the font to use
:param pt_size: the point size of the text to draw (defaults to 30 if None)
:param aspect: constrain the widget size to a specified aspect ratio
:param size: a tuple containing the width and height
:param pos: a tuple containing the x and y position
:param sub_theme: name of a sub_theme defined in the theme file (similar to CSS classes)
:param options: various other options
"""
Widget.__init__(self, parent, name, aspect, size, pos, sub_theme, options)
self.style = style
self.frame = Frame(self, size=[1, 1], pos=[0, 0], options=BGUI_NO_THEME)
self.label = Label(self, text=text, font=font, pt_size=pt_size, pos=[0, 0], sub_theme=self.theme['LabelSubTheme'], options=BGUI_DEFAULT | BGUI_CENTERED)
if self.style == FBSTYLE_SOLID:
self.solid_basecolor = (self.theme['SolidBaseColor'] if not base_color else (base_color))
self.solid_hovercolor = (self.theme['SolidHoverColor'] if not hover_color else (hover_color))
self.solid_clickcolor = (self.theme['SolidClickColor'] if not click_color else (click_color))
if not base_color:
base_color = self.theme['Color']
self.base_color = base_color
self.frame.border = (border_size if border_size is not None else self.theme['BorderSize'])
self.frame.border_color = (border_color if border_color else self.theme['BorderColor'])
self.light = [
self.base_color[0] + 0.15,
self.base_color[1] + 0.15,
self.base_color[2] + 0.15,
self.base_color[3]]
self.dark = [
self.base_color[0] - 0.15,
self.base_color[1] - 0.15,
self.base_color[2] - 0.15,
self.base_color[3]]
if text_color:
self.label.color = text_color
if self.style == FBSTYLE_CLASSIC:
self.frame.colors = (self.dark, self.dark, self.light, self.light)
elif self.style == FBSTYLE_SOLID:
self.frame.colors = (self.solid_basecolor,)*4
@property
def text(self):
return self.label.text
@text.setter
def text(self, value):
self.label.text = value
@property
def color(self):
if self.style == FBSTYLE_CLASSIC:
return self.base_color
elif self.style == FBSTYLE_SOLID:
return self.solid_basecolor
@color.setter
def color(self, value):
if self.style == FBSTYLE_CLASSIC:
self.base_color = value
self.light = (
self.base_color[0] + 0.15,
self.base_color[1] + 0.15,
self.base_color[2] + 0.15,
self.base_color[3])
self.dark = (
self.base_color[0] - 0.15,
self.base_color[1] - 0.15,
self.base_color[2] - 0.15,
self.base_color[3])
self.frame.colors = (self.dark, self.dark, self.light, self.light)
elif self.style == FBSTYLE_SOLID:
self.solid_basecolor = value
def _handle_hover(self):
if self.style == FBSTYLE_CLASSIC:
light = self.light[:]
dark = self.dark[:]
# Lighten button when hovered over.
for n in range(3):
light[n] += .1
dark[n] += .1
self.frame.colors = (dark, dark, light, light)
elif self.style == FBSTYLE_SOLID:
self.frame.colors = (self.solid_hovercolor,)*4
def _handle_active(self):
if self.style == FBSTYLE_CLASSIC:
light = self.light[:]
dark = self.dark[:]
# Darken button when clicked.
for n in range(3):
light[n] -= .1
dark[n] -= .1
self.frame.colors = (light, light, dark, dark)
elif self.style == FBSTYLE_SOLID:
self.frame.colors = (self.solid_clickcolor,)*4
def _draw(self):
"""Draw the button"""
# Draw the children before drawing an additional outline
Widget._draw(self)
# Reset the button's color
if self.style == FBSTYLE_CLASSIC:
self.frame.colors = (self.dark, self.dark, self.light, self.light)
elif self.style == FBSTYLE_SOLID:
self.frame.colors = (self.solid_basecolor,)*4
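
# Usage sketch (illustrative; ``gui`` stands for whichever widget acts as the
# parent, e.g. the root System, and the callback wiring is an assumption about
# the surrounding bgui-style API):
#
#     btn = FrameButton(gui, name="ok", text="OK", size=[0.2, 0.1],
#                       pos=[0.4, 0.45], style=FBSTYLE_SOLID)
#     btn.on_click = lambda widget: print("clicked", widget.name)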
| 3.09375 | 3 |
pybites_tools/zen.py | Timfrazer/pybites-tools | 0 | 12788900 | import sys
from io import StringIO
def zen_of_python() -> list[str]:
"""
Dump the Zen of Python into a variable
https://stackoverflow.com/a/23794519
"""
zen = StringIO()
old_stdout = sys.stdout
sys.stdout = zen
import this # noqa F401
sys.stdout = old_stdout
return zen.getvalue().splitlines()
def main():
import pyperclip
zen = "\n".join(zen_of_python())
pyperclip.copy(zen)
print("The Zen of Python has been copied to your clipboard")
if __name__ == "__main__":
main()
| 3.28125 | 3 |
betfair/exceptions.py | mkapuza/betfair.py | 94 | 12788901 | # -*- coding: utf-8 -*-
class BetfairError(Exception):
pass
class NotLoggedIn(BetfairError):
pass
class LoginError(BetfairError):
def __init__(self, response, data):
self.response = response
self.status_code = response.status_code
self.message = data.get('loginStatus', 'UNKNOWN')
super(LoginError, self).__init__(self.message)
class AuthError(BetfairError):
def __init__(self, response, data):
self.response = response
self.status_code = response.status_code
self.message = data.get('error', 'UNKNOWN')
super(AuthError, self).__init__(self.message)
class ApiError(BetfairError):
def __init__(self, response, data):
self.response = response
self.status_code = response.status_code
try:
error_data = data['error']['data']['APINGException']
self.message = error_data.get('errorCode', 'UNKNOWN')
self.details = error_data.get('errorDetails')
except KeyError:
self.message = 'UNKNOWN'
self.details = None
super(ApiError, self).__init__(self.message)
| 2.5625 | 3 |
test/test_weight_calculator.py | canvas-gamification/canvas-weight-calculator | 0 | 12788902 | import unittest
from exceptions import RangeValidationException
from weight_calculator import calculate_weights, validate_rages, validate_grades_and_ranges
class WeightCalculatorTest(unittest.TestCase):
    def test_range_validation(self):
self.assertFalse(validate_rages({
"Final": [50, 60],
"Midterms": [25, 30],
"Assignments": [0, 5]
}))
self.assertFalse(validate_rages({
"Final": [50, 60],
"Midterms": [50, 60],
"Assignments": [10, 15]
}))
self.assertFalse(validate_rages({
"Final": [50, 60],
"Midterms": [25, 30],
"Assignments": [15, 10]
}))
self.assertTrue(validate_rages({
"Final": [50, 60],
"Midterms": [25, 30],
"Assignments": [5, 15]
}))
    def test_range_and_weights_validation(self):
self.assertFalse(validate_grades_and_ranges(
{
"Final": [100, 50],
"Midterms": [50, 100],
"Assignments": [100, 0, 100]
},
{
"Final": [50, 60],
"Midterms": [25, 30],
"Assignments": [15, 10]
}))
self.assertFalse(validate_grades_and_ranges(
{
"Finals": [100, 50],
"Midterms": [50, 100],
"Assignments": [100, 0, 100]
},
{
"Final": [50, 60],
"Midterms": [25, 30],
"Assignments": [10, 15]
}))
self.assertFalse(validate_grades_and_ranges(
{
"Final": [100, 50],
"Midterms": [50, 100],
"Assignments": [100, 0, 100]
},
{
"Finals": [50, 60],
"Midterms": [25, 30],
"Assignments": [10, 15]
}))
self.assertTrue(validate_grades_and_ranges(
{
"Final": [100, 50],
"Midterms": [50, 100],
"Assignments": [100, 0, 100]
},
{
"Final": [50, 60],
"Midterms": [25, 30],
"Assignments": [10, 15]
}))
def test(self):
ranges = {
"Final": [50, 60],
"Midterms": [30, 40],
"Assignments": [5, 30],
}
bad_ranges = {
"Finals": [50, 60],
"Midterms": [30, 40],
"Assignments": [5, 30],
}
grades = {
"Final": [100, 100, 99],
"Midterms": [50, 60],
"Assignments": [0, 0, 2, 5, 10]
}
weights = calculate_weights(grades, ranges)
self.assertEqual(weights, {
"Final": 60,
"Midterms": 35,
"Assignments": 5,
})
self.assertRaises(RangeValidationException, calculate_weights, bad_ranges, ranges)
| 3.015625 | 3 |
torchnmf/nmf.py | akashpalrecha/pytorch-NMF | 0 | 12788903 | import torch
import torch.nn.functional as F
from torch.nn import Parameter
from .metrics import Beta_divergence
from .base import Base
from tqdm import tqdm
def _mu_update(param, pos, gamma, l1_reg, l2_reg, constant_rows=None):
if param.grad is None:
return
# prevent negative term, very likely to happen with kl divergence
    multiplier: torch.Tensor = F.relu(pos - param.grad, inplace=True)
if l1_reg > 0:
pos.add_(l1_reg)
if l2_reg > 0:
if pos.shape != param.shape:
pos = pos + l2_reg * param
else:
pos.add_(l2_reg * param)
multiplier.div_(pos)
if gamma != 1:
multiplier.pow_(gamma)
# Fill the first `constant_rows` of the multiplier with 1s
# to leave them unchanged
if constant_rows is not None:
multiplier[:constant_rows,:].fill_(1.0)
param.mul_(multiplier)
class _NMF(Base):
def __init__(self, W_size, H_size, rank):
super().__init__()
self.rank = rank
self.W = Parameter(torch.rand(*W_size).double())
self.H = Parameter(torch.rand(*H_size).double())
def forward(self, H=None, W=None):
if H is None:
H = self.H
if W is None:
W = self.W
return self.reconstruct(H, W)
def reconstruct(self, H, W):
raise NotImplementedError
def get_W_positive(self, WH, beta, H_sum) -> (torch.Tensor, None or torch.Tensor):
raise NotImplementedError
def get_H_positive(self, WH, beta, W_sum) -> (torch.Tensor, None or torch.Tensor):
raise NotImplementedError
def fit(self,
V,
W=None,
H=None,
fix_h_rows=None,
update_W=True,
update_H=True,
update_H_after_iter=None,
beta=1,
tol=1e-5,
min_loss=None,
max_iter=200,
min_iter=20,
verbose=0,
initial='random',
alpha=0,
l1_ratio=0,
lower_thresh=1e-8,
):
self.fix_neg.value = lower_thresh
V = self.fix_neg(V)
if W is None:
pass # will do special initialization in thre future
else:
self.W.data.copy_(W)
self.W.requires_grad = update_W
if H is None:
pass
else:
self.H.data.copy_(H)
self.H.requires_grad = update_H
if update_H_after_iter is None:
update_H_after_iter = max_iter
if beta < 1:
gamma = 1 / (2 - beta)
elif beta > 2:
gamma = 1 / (beta - 1)
else:
gamma = 1
l1_reg = alpha * l1_ratio
l2_reg = alpha * (1 - l1_ratio)
loss_scale = torch.prod(torch.tensor(V.shape)).float()
H_sum, W_sum = None, None
with tqdm(total=max_iter, disable=not verbose) as pbar:
for n_iter in range(max_iter):
if n_iter >= update_H_after_iter:
update_H = True
self.H.requires_grad = True
if self.W.requires_grad:
self.zero_grad()
WH = self.reconstruct(self.H.detach(), self.W)
loss = Beta_divergence(self.fix_neg(WH), V, beta)
loss.backward()
with torch.no_grad():
positive_comps, H_sum = self.get_W_positive(WH, beta, H_sum)
_mu_update(self.W, positive_comps, gamma, l1_reg, l2_reg)
W_sum = None
if self.H.requires_grad:
self.zero_grad()
WH = self.reconstruct(self.H, self.W.detach())
loss = Beta_divergence(self.fix_neg(WH), V, beta)
loss.backward()
with torch.no_grad():
positive_comps, W_sum = self.get_H_positive(WH, beta, W_sum)
_mu_update(self.H, positive_comps, gamma, l1_reg, l2_reg, fix_h_rows)
H_sum = None
loss = loss.div_(loss_scale).item()
pbar.set_postfix(loss=loss)
# pbar.set_description('Beta loss=%.4f' % error)
pbar.update()
if not n_iter:
loss_init = loss
elif (previous_loss - loss) / loss_init < tol and n_iter >= min_iter:
                    if min_loss is None or loss <= min_loss:
                        break
previous_loss = loss
return n_iter
def fit_transform(self, *args, **kwargs):
n_iter = self.fit(*args, **kwargs)
return n_iter, self.forward()
class NMF(_NMF):
def __init__(self, Vshape, rank=None):
self.K, self.M = Vshape
if not rank:
rank = self.K
super().__init__((self.K, rank), (rank, self.M), rank)
def reconstruct(self, H, W):
return W @ H
def get_W_positive(self, WH, beta, H_sum):
H = self.H
if beta == 1:
if H_sum is None:
H_sum = H.sum(1)
denominator = H_sum[None, :]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WHHt = WH @ H.t()
denominator = WHHt
return denominator, H_sum
def get_H_positive(self, WH, beta, W_sum):
W = self.W
if beta == 1:
if W_sum is None:
W_sum = W.sum(0) # shape(n_components, )
denominator = W_sum[:, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WtWH = W.t() @ WH
denominator = WtWH
return denominator, W_sum
def sort(self):
_, maxidx = self.W.data.max(0)
_, idx = maxidx.sort()
self.W.data = self.W.data[:, idx]
self.H.data = self.H.data[idx]
class NMFD(_NMF):
def __init__(self, Vshape, T=1, rank=None):
self.K, self.M = Vshape
if not rank:
rank = self.K
self.pad_size = T - 1
super().__init__((self.K, rank, T), (rank, self.M - T + 1), rank)
def reconstruct(self, H, W):
return F.conv1d(H[None, :], W.flip(2), padding=self.pad_size)[0]
def get_W_positive(self, WH, beta, H_sum):
H = self.H
if beta == 1:
if H_sum is None:
H_sum = H.sum(1)
denominator = H_sum[None, :, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WHHt = F.conv1d(WH[:, None], H[:, None])
denominator = WHHt
return denominator, H_sum
def get_H_positive(self, WH, beta, W_sum):
W = self.W
if beta == 1:
if W_sum is None:
W_sum = W.sum((0, 2))
denominator = W_sum[:, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WtWH = F.conv1d(WH[None, :], W.transpose(0, 1))[0]
denominator = WtWH
return denominator, W_sum
def sort(self):
_, maxidx = self.W.data.sum(2).max(0)
_, idx = maxidx.sort()
self.W.data = self.W.data[:, idx]
self.H.data = self.H.data[idx]
class NMF2D(_NMF):
def __init__(self, Vshape, win=1, rank=None):
try:
F, T = win
except:
F = T = win
if len(Vshape) == 3:
self.channel, self.K, self.M = Vshape
else:
self.K, self.M = Vshape
self.channel = 1
self.pad_size = (F - 1, T - 1)
super().__init__((self.channel, rank, F, T), (rank, self.K - F + 1, self.M - T + 1), rank)
def reconstruct(self, H, W):
out = F.conv2d(H[None, ...], W.flip((2, 3)), padding=self.pad_size)[0]
if self.channel == 1:
return out[0]
return out
def get_W_positive(self, WH, beta, H_sum):
H = self.H
if beta == 1:
if H_sum is None:
H_sum = H.sum((1, 2))
denominator = H_sum[None, :, None, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WH = WH.view(self.channel, 1, self.K, self.M)
WHHt = F.conv2d(WH, H[:, None])
denominator = WHHt
return denominator, H_sum
def get_H_positive(self, WH, beta, W_sum):
W = self.W
if beta == 1:
if W_sum is None:
W_sum = W.sum((0, 2, 3))
denominator = W_sum[:, None, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WH = WH.view(1, self.channel, self.K, self.M)
WtWH = F.conv2d(WH, W.transpose(0, 1))[0]
denominator = WtWH
return denominator, W_sum
def sort(self):
raise NotImplementedError
class NMF3D(_NMF):
def __init__(self, Vshape: tuple, rank: int = None, win=1):
try:
T, H, W = win
except:
T = H = W = win
if len(Vshape) == 4:
self.channel, self.N, self.K, self.M = Vshape
else:
self.N, self.K, self.M = Vshape
self.channel = 1
self.pad_size = (T - 1, H - 1, W - 1)
if not rank:
rank = self.K
super().__init__((self.channel, rank, T, H, W), (rank, self.N - T + 1, self.K - H + 1, self.M - W + 1), rank)
def reconstruct(self, H, W):
out = F.conv3d(H[None, ...], W.flip((2, 3, 4)), padding=self.pad_size)[0]
if self.channel == 1:
return out[0]
return out
def get_W_positive(self, WH, beta, H_sum):
H = self.H
if beta == 1:
if H_sum is None:
H_sum = H.sum((1, 2, 3))
denominator = H_sum[None, :, None, None, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WH = WH.view(self.channel, 1, self.N, self.K, self.M)
WHHt = F.conv3d(WH, H[:, None])
denominator = WHHt
return denominator, H_sum
def get_H_positive(self, WH, beta, W_sum):
W = self.W
if beta == 1:
if W_sum is None:
W_sum = W.sum((0, 2, 3, 4))
denominator = W_sum[:, None, None, None]
else:
if beta != 2:
WH = WH.pow(beta - 1)
WH = WH.view(1, self.channel, self.N, self.K, self.M)
WtWH = F.conv3d(WH, W.transpose(0, 1))[0]
denominator = WtWH
return denominator, W_sum
def sort(self):
raise NotImplementedError
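
# Usage sketch (illustrative shapes and hyper-parameters): factorise a random
# non-negative matrix V (K x M) into W (K x rank) @ H (rank x M) with the
# multiplicative-update solver above. beta=2 is the Frobenius objective,
# beta=1 would be KL divergence.
if __name__ == "__main__":
    V = torch.rand(64, 100).double()
    model = NMF(V.shape, rank=8)
    n_iter, WH = model.fit_transform(V, beta=2, max_iter=200, verbose=0)
    print(n_iter, float(Beta_divergence(WH, V, 2)))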
| 2.34375 | 2 |
src/utils.py | yoichi1484/sake_embedding | 2 | 12788904 | from gensim.models import KeyedVectors
import pprint
import json
PATH_DATA = '../data/sake_dataset_v1.json'
def preprocessing(sake_data):
return sake_data.strip().replace(' ', '_')
def fix_data(data):
fixed_data = []
for k, v in sorted(data.items(), key=lambda x:x[0]):
if 'mean' in v:
fixed_data.append('{}:{}'.format(k, v['mean']))
elif type(v) == list:
for _v in v:
_v = preprocessing(_v)
fixed_data.append('{}:{}'.format(k, _v))
else:
v = preprocessing(v)
fixed_data.append('{}:{}'.format(k, v))
return fixed_data
def load_dataset(path = PATH_DATA):
with open(path) as f:
dataset = json.load(f)
return dataset
def load_sake_embedding(path):
return KeyedVectors.load_word2vec_format(path)
class SearchAPI():
def __init__(self, path = PATH_DATA):
self.dataset = load_dataset(path)['dataset']
def and_search(self, *args):
""" This function returns sake data that contain the queries
Args:
queries
Return:
data (list) that contain the queries
Example:
>>> api = SearchAPI()
>>> results = api.and_search("brand:英勲", "rice:祝")
>>> pprint.pprint(results[0], width=40)
{'alcohol_rate': {'max': '15.00', 'mean': '15.00', 'min': '15.00'},
'amino_acid_content': {'max': '', 'mean': '', 'min': ''},
'brand': '英勲',
...
}
"""
result = self.dataset
for query in args:
result = self._filtering(query, result)
return result
def _filtering(self, query, dataset):
return [d for d in dataset if query in fix_data(d)]
| 2.921875 | 3 |
visigoth/stimuli/elementarray.py | mwaskom/visigoth | 2 | 12788905 | """Psychopy ElementArrayStim with flexible pedestal luminance.
Psychopy authors have said on record that this functionality should exist in
Psychopy itself. Future users of this code should double check as to whether
that has been implemented and if this code can be excised.
Note however that we have also added some functinoality to set the contrast in
a way that depends on the pedestal, which may not get added.
This module is adapted from a similar extension to GratingStim
Original credit to https://github.com/nwilming/PedestalGrating/
Covered under the PsychoPy license, as it is a simple extension of prior code:
Copyright (C) 2015 <NAME>
Distributed under the terms of the GNU General Public License (GPL).
"""
from __future__ import division
import pyglet
pyglet.options['debug_gl'] = False
import ctypes  # noqa: E402
GL = pyglet.gl
from psychopy.visual.elementarray import ElementArrayStim  # noqa: E402
from psychopy.visual.basevisual import MinimalStim, TextureMixin  # noqa: E402
try:
from psychopy.visual import shaders
except ImportError:
from psychopy import _shadersPyglet as shaders
# Framgent shader for the gabor stimulus. This is needed to add the pedestal to
# the color values for each location. I'm keeping it in this file to make the
# stimulus fairly self contained and to avoid messing with anything else.
# Almost a one to one copy of the original psychopy shader.
fragSignedColorTexMask = '''
uniform sampler2D texture, mask;
uniform float pedestal;
void main() {
vec4 textureFrag = texture2D(texture,gl_TexCoord[0].st);
vec4 maskFrag = texture2D(mask,gl_TexCoord[1].st);
gl_FragColor.a = gl_Color.a*maskFrag.a*textureFrag.a;
gl_FragColor.rgb = ((pedestal+1.0)/2.0)
+ ((textureFrag.rgb
* (gl_Color.rgb*2.0-1.0)+1.0)/2.0) -0.5;
}
'''
class ElementArray(ElementArrayStim, MinimalStim, TextureMixin):
"""Field of elements that are independently controlled and rapidly drawn.
This stimulus class defines a field of elements whose behaviour can be
independently controlled. Suitable for creating 'global form' stimuli or
more detailed random dot stimuli.
This stimulus can draw thousands of elements without dropping a frame, but
in order to achieve this performance, uses several OpenGL extensions only
available on modern graphics cards (supporting OpenGL2.0). See the
ElementArray demo.
"""
def __init__(self,
win,
units=None,
fieldPos=(0.0, 0.0),
fieldSize=(1.0, 1.0),
fieldShape='circle',
nElements=100,
sizes=2.0,
xys=None,
rgbs=None,
colors=(1.0, 1.0, 1.0),
colorSpace='rgb',
opacities=None,
depths=0,
fieldDepth=0,
oris=0,
sfs=1.0,
contrs=1,
phases=0,
elementTex='sin',
elementMask='gauss',
texRes=48,
interpolate=True,
name=None,
autoLog=False,
maskParams=None,
pedestal=None):
super(ElementArray, self).__init__(
win, units=units, fieldPos=fieldPos, fieldSize=fieldSize,
fieldShape=fieldShape, nElements=nElements, sizes=sizes, xys=xys,
rgbs=rgbs, colors=colors, colorSpace=colorSpace,
opacities=opacities, depths=depths, fieldDepth=fieldDepth,
oris=oris, sfs=sfs, contrs=contrs, phases=phases,
elementTex=elementTex, elementMask=elementMask, texRes=texRes,
interpolate=interpolate, name=name, autoLog=autoLog,
maskParams=maskParams)
# Set the default pedestal assuming a gray window color
pedestal = win.background_color if pedestal is None else pedestal
self.pedestal = pedestal
self._progSignedTexMask = shaders.compileProgram(
shaders.vertSimple, fragSignedColorTexMask)
@property
def pedestal_contrs(self):
"""Stimulus contrast, accounting for pedestal"""
return self.contrs / (self.pedestal + 1)
@pedestal_contrs.setter
def pedestal_contrs(self, values):
"""Stimulus contrast, accounting for pedestal."""
adjusted_values = values * (self.pedestal + 1)
self.contrs = adjusted_values
def draw(self, win=None):
"""Draw the stimulus in its relevant window.
You must call this method after every win.update() if you want the
stimulus to appear on that frame and then update the screen again.
"""
if win is None:
win = self.win
self._selectWindow(win)
if self._needVertexUpdate:
self._updateVertices()
if self._needColorUpdate:
self.updateElementColors()
if self._needTexCoordUpdate:
self.updateTextureCoords()
# scale the drawing frame and get to centre of field
GL.glPushMatrix() # push before drawing, pop after
# push the data for client attributes
GL.glPushClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS)
# GL.glLoadIdentity()
self.win.setScale('pix')
cpcd = ctypes.POINTER(ctypes.c_double)
GL.glColorPointer(4, GL.GL_DOUBLE, 0,
self._RGBAs.ctypes.data_as(cpcd))
GL.glVertexPointer(3, GL.GL_DOUBLE, 0,
self.verticesPix.ctypes.data_as(cpcd))
# setup the shaderprogram
_prog = self._progSignedTexMask
GL.glUseProgram(_prog)
# set the texture to be texture unit 0
GL.glUniform1i(GL.glGetUniformLocation(_prog, b"texture"), 0)
# mask is texture unit 1
GL.glUniform1i(GL.glGetUniformLocation(_prog, b"mask"), 1)
# BEGIN ADDED CODE
GL.glUniform1f(GL.glGetUniformLocation(_prog, b"pedestal"), self.pedestal)
# END ADDED CODE
# bind textures
GL.glActiveTexture(GL.GL_TEXTURE1)
GL.glBindTexture(GL.GL_TEXTURE_2D, self._maskID)
GL.glEnable(GL.GL_TEXTURE_2D)
GL.glActiveTexture(GL.GL_TEXTURE0)
GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
GL.glEnable(GL.GL_TEXTURE_2D)
# setup client texture coordinates first
GL.glClientActiveTexture(GL.GL_TEXTURE0)
GL.glTexCoordPointer(2, GL.GL_DOUBLE, 0, self._texCoords.ctypes)
GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)
GL.glClientActiveTexture(GL.GL_TEXTURE1)
GL.glTexCoordPointer(2, GL.GL_DOUBLE, 0, self._maskCoords.ctypes)
GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)
GL.glEnableClientState(GL.GL_COLOR_ARRAY)
GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
GL.glDrawArrays(GL.GL_QUADS, 0, self.verticesPix.shape[0] * 4)
# unbind the textures
GL.glActiveTexture(GL.GL_TEXTURE1)
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
GL.glDisable(GL.GL_TEXTURE_2D)
# main texture
GL.glActiveTexture(GL.GL_TEXTURE0)
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
GL.glDisable(GL.GL_TEXTURE_2D)
# disable states
GL.glDisableClientState(GL.GL_COLOR_ARRAY)
GL.glDisableClientState(GL.GL_VERTEX_ARRAY)
GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)
GL.glUseProgram(0)
GL.glPopClientAttrib()
GL.glPopMatrix() | 2.046875 | 2 |
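
# Usage sketch (illustrative parameter values; the pedestal is passed
# explicitly so any psychopy-style Window will do):
#
#     from psychopy import visual
#     win = visual.Window(units="deg", color=0)
#     dots = ElementArray(win, nElements=64, sizes=1.5, sfs=2,
#                         fieldSize=(10, 10), pedestal=0)
#     dots.pedestal_contrs = 0.2   # contrast defined relative to the pedestal
#     dots.draw()
#     win.flip()
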
ansible-simple-exports-demo.py | j-sims/isilon-ansible-demos | 0 | 12788906 | #!/usr/bin/env python
import json
import yaml
sharesFilename = 'simple-exports.json'
with open(sharesFilename, 'r') as f:
shares = json.load(f)
### For Loop to write out playbook for each cluster
for cluster in shares['clusters']:
playbookFilename = 'playbook-simple-exports-%s.yml' % cluster['name']
with open(playbookFilename, 'w') as playbook:
play = [
{
'hosts': 'localhost',
'name': 'Isilon New NFS Export with URI module',
'tasks': [],
}
]
startsession = {
'name': 'get isilon API session IDs',
'register': 'results_login',
'uri': {
'body': {'password': cluster['password'],
'services': ['platform', 'namespace'],
'username': cluster['username']},
'body_format': 'json',
'method': 'POST',
'status_code': 201,
'url': 'https://' + cluster['name'] +':8080/session/1/session',
'validate_certs': False }
}
play[0]['tasks'].append(startsession)
for export in cluster['exports']:
createexport = {
'name': 'make NFS Export',
'uri': {
'body': {
'description': export['description'],
'paths': export['paths'],
'zone': export['zone']},
'body_format': 'json',
'headers': {'Cookie': 'isisessid={{ results_login.cookies.isisessid }}',
'X-CSRF-Token': '{{ results_login.cookies.isicsrf }}',
'referer': 'https://'+cluster['name']+':8080'},
'method': 'POST',
'status_code': 201,
'url': 'https://'+cluster['name']+':8080/platform/4/protocols/nfs/exports',
'validate_certs': False,
}
}
play[0]['tasks'].append(createexport)
endsession = {
'name': 'Delete isilon API session IDs',
'register': 'results_DEL_cookie',
'uri': {
'headers': {
'Cookie': 'isisessid={{ results_login.cookies.isisessid }}',
'X-CSRF-Token': '{{ results_login.cookies.isicsrf }}',
'referer': 'https://'+cluster['name']+':8080',
},
'method': 'DELETE',
'status_code': 204,
'url': 'https://'+cluster['name']+':8080/session/1/session',
'validate_certs': False,
}
}
play[0]['tasks'].append(endsession)
yaml.safe_dump(play, playbook, default_flow_style=False)
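
# Input sketch: simple-exports.json is expected to hold a structure like the
# one below (hostnames and credentials are illustrative placeholders; the keys
# match what the loop above reads).
#
# {
#   "clusters": [
#     {
#       "name": "isilon1.example.com",
#       "username": "root",
#       "password": "changeme",
#       "exports": [
#         {"description": "demo export",
#          "paths": ["/ifs/data/demo"],
#          "zone": "System"}
#       ]
#     }
#   ]
# }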
| 2.109375 | 2 |
app.py | eocode/Queens | 0 | 12788907 | <gh_stars>0
"""
Start app
"""
from app import queen
if __name__ == "__main__":
"""Main function for run application"""
queen.run()
| 1.46875 | 1 |
core/utils/k8s.py | kubesys/kubevm | 0 | 12788908 | <reponame>kubesys/kubevm
import socket
import time
import traceback
import operator
from json import dumps
import os, sys
from sys import exit
from kubernetes import client, config
from kubernetes.client import V1DeleteOptions
from kubernetes.client.rest import ApiException
import logging
import logging.handlers
try:
from utils import constants
from utils.exception import BadRequest
except:
import constants
from exception import BadRequest
TOKEN = constants.KUBERNETES_TOKEN_FILE
VM_PLURAL = constants.KUBERNETES_PLURAL_VM
VMP_PLURAL = constants.KUBERNETES_PLURAL_VMP
VMD_PLURAL = constants.KUBERNETES_PLURAL_VMD
VMDI_PLURAL = constants.KUBERNETES_PLURAL_VMDI
VMDSN_PLURAL = constants.KUBERNETES_PLURAL_VMDSN
VM_KIND = constants.KUBERNETES_KIND_VM
VMP_KIND = constants.KUBERNETES_KIND_VMP
VMD_KIND = constants.KUBERNETES_KIND_VMD
VMDI_KIND = constants.KUBERNETES_KIND_VMDI
VMDSN_KIND = constants.KUBERNETES_KIND_VMDSN
VERSION = constants.KUBERNETES_API_VERSION
GROUP = constants.KUBERNETES_GROUP
config.load_kube_config(config_file=TOKEN)
LOG = '/var/log/virtctl.log'
RETRY_TIMES = 15
def set_logger(header, fn):
logger = logging.getLogger(header)
handler1 = logging.StreamHandler()
handler2 = logging.handlers.RotatingFileHandler(filename=fn, maxBytes=10000000, backupCount=10)
logger.setLevel(logging.DEBUG)
handler1.setLevel(logging.ERROR)
handler2.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(lineno)s %(levelname)s %(message)s")
handler1.setFormatter(formatter)
handler2.setFormatter(formatter)
logger.addHandler(handler1)
logger.addHandler(handler2)
return logger
k8s_logger = set_logger(os.path.basename(__file__), LOG)
resources = {}
kind_plural = {VM_KIND:VM_PLURAL, VMP_KIND:VMP_PLURAL, VMD_KIND:VMD_PLURAL,
VMDI_KIND:VMDI_PLURAL, VMDSN_KIND:VMDSN_PLURAL}
for kind,plural in kind_plural.items():
resource = {}
resource['version'] = VERSION
resource['group'] = GROUP
resource['plural'] = plural
resources[kind] = resource
def get(name, kind):
jsondict = client.CustomObjectsApi().get_namespaced_custom_object(group=resources[kind]['group'],
version=resources[kind]['version'],
namespace='default',
plural=resources[kind]['plural'],
name=name)
return jsondict
def create(name, data, kind):
hostname = get_hostname_in_lower_case()
jsondict = {'spec': {'volume': {}, 'nodeName': hostname, 'status': {}},
'kind': kind, 'metadata': {'labels': {'host': hostname}, 'name': name},
'apiVersion': '%s/%s' % (resources[kind]['group'], resources[kind]['version'])}
jsondict = updateJsonRemoveLifecycle(jsondict, data)
body = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
return client.CustomObjectsApi().create_namespaced_custom_object(
group=resources[kind]['group'], version=resources[kind]['version'], namespace='default',
plural=resources[kind]['plural'], body=body)
def update(name, data, kind):
return client.CustomObjectsApi().replace_namespaced_custom_object(
group=resources[kind]['group'], version=resources[kind]['version'], namespace='default',
plural=resources[kind]['plural'], name=name, body=data)
def delete(name, data, kind):
    k8s_logger.debug('delete %s %s' % (kind, name))
return client.CustomObjectsApi().delete_namespaced_custom_object(
group=resources[kind]['group'], version=resources[kind]['version'], namespace='default',
plural=resources[kind]['plural'], name=name, body=data)
def addPowerStatusMessage(jsondict, reason, message):
if jsondict:
status = {'conditions': {'state': {'waiting': {'message': message, 'reason': reason}}}}
spec = get_spec(jsondict)
if spec:
spec['status'] = status
return jsondict
def get_spec(jsondict):
spec = jsondict.get('spec')
if not spec:
raw_object = jsondict.get('raw_object')
if raw_object:
spec = raw_object.get('spec')
return spec
def deleteLifecycleInJson(jsondict):
if jsondict:
spec = get_spec(jsondict)
if spec:
lifecycle = spec.get('lifecycle')
if lifecycle:
del spec['lifecycle']
return jsondict
def updateJsonRemoveLifecycle(jsondict, body):
if jsondict:
spec = get_spec(jsondict)
if spec:
lifecycle = spec.get('lifecycle')
if lifecycle:
del spec['lifecycle']
spec.update(body)
return jsondict
def hasLifeCycle(jsondict):
if jsondict:
spec = get_spec(jsondict)
if spec:
lifecycle = spec.get('lifecycle')
if lifecycle:
return True
return False
def removeLifecycle(jsondict):
if jsondict:
spec = get_spec(jsondict)
if spec:
lifecycle = spec.get('lifecycle')
if lifecycle:
del spec['lifecycle']
return jsondict
def get_hostname_in_lower_case():
return 'vm.%s' % socket.gethostname().lower()
def changeNode(jsondict, newNodeName):
if jsondict:
jsondict['metadata']['labels']['host'] = newNodeName
spec = get_spec(jsondict)
if spec:
nodeName = spec.get('nodeName')
if nodeName:
spec['nodeName'] = newNodeName
return jsondict
def replaceData(jsondict):
all_kind = {'VirtualMachine': 'domain',
'VirtualMachinePool': 'pool',
'VirtualMachineDisk': 'volume',
'VirtualMachineDiskImage': 'volume',
'VirtualMachineDiskSnapshot': 'volume',
'VirtualMachineBackup': 'backup'}
mkind = jsondict['kind']
mn = jsondict['metadata']['name']
k8s = K8sHelper(mkind)
current = k8s.get(mn)
host = jsondict['metadata']['labels']['host']
# nodename = jsondicts[i]['metadata']['labels']['host']
changeNode(current, host)
if jsondict:
key = all_kind[mkind]
if 'spec' in jsondict.keys() and isinstance(jsondict['spec'], dict) and key in jsondict['spec'].keys():
data = jsondict['spec'][key]
if current:
current['spec'][key] = data
return current
def get_node_name(jsondict):
if jsondict:
return jsondict['metadata']['labels']['host']
return None
def list_node():
for i in range(RETRY_TIMES):
try:
config.load_kube_config(TOKEN)
jsondict = client.CoreV1Api().list_node().to_dict()
return jsondict
except ApiException as e:
if e.reason == 'Not Found':
return False
else:
time.sleep(3)
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(TOKEN)
k8s_logger.debug(traceback.format_exc())
k8s_logger.debug("sleep 3 sec")
time.sleep(3)
raise BadRequest('can not get node info from k8s.')
class K8sHelper(object):
def __init__(self, kind):
self.kind = kind
def exist(self, name):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(TOKEN)
jsondict = client.CustomObjectsApi().get_namespaced_custom_object(group=resources[self.kind]['group'],
version=resources[self.kind][
'version'],
namespace='default',
plural=resources[self.kind]['plural'],
name=name)
return True
except ApiException as e:
if e.reason == 'Not Found':
return False
else:
time.sleep(3)
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(TOKEN)
k8s_logger.debug(traceback.format_exc())
k8s_logger.debug("sleep 3 sec")
time.sleep(3)
raise BadRequest('can not get %s %s response from k8s.' % (self.kind, name))
def get(self, name):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(TOKEN)
jsondict = client.CustomObjectsApi().get_namespaced_custom_object(group=resources[self.kind]['group'],
version=resources[self.kind][
'version'],
namespace='default',
plural=resources[self.kind]['plural'],
name=name)
return jsondict
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(TOKEN)
k8s_logger.debug(traceback.format_exc())
k8s_logger.debug("sleep 3 sec")
time.sleep(3)
raise BadRequest('can not get %s %s on k8s.' % (self.kind, name))
def get_data(self, name, key):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(TOKEN)
jsondict = client.CustomObjectsApi().get_namespaced_custom_object(group=resources[self.kind]['group'],
version=resources[self.kind][
'version'],
namespace='default',
plural=resources[self.kind]['plural'],
name=name)
if 'spec' in jsondict.keys() and isinstance(jsondict['spec'], dict) and key in jsondict['spec'].keys():
return jsondict['spec'][key]
return None
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(TOKEN)
time.sleep(3)
raise BadRequest('can not get %s %s on k8s.' % (self.kind, name))
def get_create_jsondict(self, name, key, data):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(TOKEN)
hostname = get_hostname_in_lower_case()
jsondict = {'spec': {'volume': {}, 'nodeName': hostname, 'status': {}},
'kind': self.kind, 'metadata': {'labels': {'host': hostname}, 'name': name},
'apiVersion': '%s/%s' % (resources[self.kind]['group'], resources[self.kind]['version'])}
jsondict = updateJsonRemoveLifecycle(jsondict, {key: data})
body = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
return body
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(TOKEN)
time.sleep(3)
raise BadRequest('can not get %s %s data on k8s.' % (self.kind, name))
def create(self, name, key, data):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(TOKEN)
if self.exist(name):
return
hostname = get_hostname_in_lower_case()
jsondict = {'spec': {'volume': {}, 'nodeName': hostname, 'status': {}},
'kind': self.kind, 'metadata': {'labels': {'host': hostname}, 'name': name},
'apiVersion': '%s/%s' % (resources[self.kind]['group'], resources[self.kind]['version'])}
jsondict = updateJsonRemoveLifecycle(jsondict, {key: data})
body = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
return client.CustomObjectsApi().create_namespaced_custom_object(
group=resources[self.kind]['group'], version=resources[self.kind]['version'], namespace='default',
plural=resources[self.kind]['plural'], body=body)
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(TOKEN)
k8s_logger.debug(traceback.format_exc())
k8s_logger.debug("sleep 3 sec")
time.sleep(3)
error_print(500, 'can not create %s %s on k8s.' % (self.kind, name))
def add_label(self, name, domain):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(TOKEN)
if not self.exist(name):
return
jsondict = self.get(name)
jsondict['metadata']['labels']['domain'] = domain
# jsondict = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
# jsondict = updateJsonRemoveLifecycle(jsondict, {key: data})
return client.CustomObjectsApi().replace_namespaced_custom_object(
group=resources[self.kind]['group'], version=resources[self.kind]['version'], namespace='default',
plural=resources[self.kind]['plural'], name=name, body=jsondict)
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(TOKEN)
k8s_logger.debug(traceback.format_exc())
k8s_logger.debug("sleep 3 sec")
time.sleep(3)
raise BadRequest('can not modify %s %s on k8s.' % (self.kind, name))
def update(self, name, key, data):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(TOKEN)
if not self.exist(name):
return
jsondict = self.get(name)
if 'spec' in jsondict.keys() and isinstance(jsondict['spec'], dict) and key in jsondict['spec'].keys() \
and operator.eq(jsondict['spec'][key], data) == 0:
return
jsondict = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
jsondict = updateJsonRemoveLifecycle(jsondict, {key: data})
return client.CustomObjectsApi().replace_namespaced_custom_object(
group=resources[self.kind]['group'], version=resources[self.kind]['version'], namespace='default',
plural=resources[self.kind]['plural'], name=name, body=jsondict)
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(TOKEN)
k8s_logger.debug(traceback.format_exc())
k8s_logger.debug("sleep 3 sec")
time.sleep(3)
raise BadRequest('can not modify %s %s on k8s.' % (self.kind, name))
def updateAll(self, name, jsondict):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(TOKEN)
if not self.exist(name):
return
jsondict = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
jsondict = deleteLifecycleInJson(jsondict)
return client.CustomObjectsApi().replace_namespaced_custom_object(
group=resources[self.kind]['group'], version=resources[self.kind]['version'], namespace='default',
plural=resources[self.kind]['plural'], name=name, body=jsondict)
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(TOKEN)
k8s_logger.debug(traceback.format_exc())
k8s_logger.debug("sleep 3 sec")
time.sleep(3)
raise BadRequest('can not modify %s %s on k8s.' % (self.kind, name))
def createAll(self, name, jsondict):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(TOKEN)
if self.exist(name):
return
jsondict = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
jsondict = deleteLifecycleInJson(jsondict)
return client.CustomObjectsApi().create_namespaced_custom_object(
group=resources[self.kind]['group'], version=resources[self.kind]['version'], namespace='default',
plural=resources[self.kind]['plural'], body=jsondict)
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(TOKEN)
k8s_logger.debug(traceback.format_exc())
k8s_logger.debug("sleep 3 sec")
time.sleep(3)
raise BadRequest('can not modify %s %s on k8s.' % (self.kind, name))
def delete(self, name):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(TOKEN)
return client.CustomObjectsApi().delete_namespaced_custom_object(
group=resources[self.kind]['group'], version=resources[self.kind]['version'], namespace='default',
plural=resources[self.kind]['plural'], name=name, body=V1DeleteOptions())
except ApiException as e:
if e.reason == 'Not Found':
return
else:
time.sleep(3)
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(TOKEN)
k8s_logger.debug(traceback.format_exc())
k8s_logger.debug("sleep 3 sec")
time.sleep(3)
raise BadRequest('can not delete %s %s on k8s.' % (self.kind, name))
def delete_lifecycle(self, name):
for i in range(RETRY_TIMES):
try:
config.load_kube_config(TOKEN)
if not self.exist(name):
return
jsondict = self.get(name)
if hasLifeCycle(jsondict):
jsondict = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
jsondict = removeLifecycle(jsondict)
return client.CustomObjectsApi().replace_namespaced_custom_object(
group=resources[self.kind]['group'], version=resources[self.kind]['version'],
namespace='default',
plural=resources[self.kind]['plural'], name=name, body=jsondict)
else:
return
except Exception as e:
if repr(e).find('Connection refused') != -1 or repr(e).find('No route to host') != -1 or repr(e).find(
'ApiException') != -1:
config.load_kube_config(TOKEN)
k8s_logger.debug(traceback.format_exc())
k8s_logger.debug("sleep 3 sec")
time.sleep(3)
raise BadRequest('can not delete lifecycle %s %s on k8s.' % (self.kind, name))
def change_node(self, name, newNodeName):
if not self.exist(name):
return
jsondict = self.get(name)
if jsondict:
jsondict = addPowerStatusMessage(jsondict, 'Ready', 'The resource is ready.')
jsondict['metadata']['labels']['host'] = newNodeName
spec = get_spec(jsondict)
if spec:
nodeName = spec.get('nodeName')
if nodeName:
spec['nodeName'] = newNodeName
self.updateAll(name, jsondict)
def error_print(code, msg, data=None):
if data is None:
print(dumps({"result": {"code": code, "msg": msg}, "data": {}}))
exit(1)
else:
print(dumps({"result": {"code": code, "msg": msg}, "data": data}))
exit(1)
if __name__ == '__main__':
# data = {
# 'domain': 'cloudinit',
# 'pool': 'migratepoolnodepool22'
# }
helper = K8sHelper(VMP_KIND)
# backup_helper.create('backup1', 'backup', data)
# print(backup_helper.add_label('vmbackup2', 'cloudinit'))
# print get_all_node_ip()
# get_pools_by_path('/var/lib/libvirt/cstor/1709accf174vccaced76b0dbfccdev/1709accf174vccaced76b0dbfccdev')
# k8s = K8sHelper('VirtualMachineDisk')
# disk1 = k8s.get('disk33333clone')
# print dumps(disk1)
# k8s.delete('disk33333clone1')
# k8s.create('disk33333clone1', 'volume', disk1['spec']['volume'])
# disk1['spec']['volume']['filename'] = 'lalalalalalala'
# k8s.update('disk33333clone1', 'volume', disk1['spec']['volume'])
| 1.679688 | 2 |
upload/views.py | travelgeezer/sasukekun-flask | 0 | 12788909 | from flask import request, Blueprint, send_file
from sasukekun_flask.utils import v1, format_response
from sasukekun_flask.config import API_IMAGE
from .models import PasteFile
ONE_MONTH = 60 * 60 * 24 * 30
upload = Blueprint('upload', __name__)
@upload.route(v1('/upload/'), methods=['GET', 'POST'])
def upload_file():
if request.method == 'GET':
paste_files = PasteFile.objects.all()
data = [paste_file.json for paste_file in paste_files]
return format_response(data=data)
elif request.method == 'POST':
uploaded_file = request.files['file']
w = request.form.get('w')
h = request.form.get('h')
if not uploaded_file:
            return format_response(code=400, info='no file provided')
if False and w and h:
paste_file = PasteFile.rsize(uploaded_file, w, h) # TODO: fix issues
else:
paste_file = PasteFile.create_by_uploaded_file(uploaded_file)
paste_file.save()
return format_response(data=paste_file.to_dict())
@upload.route(v1('/upload/<filehash>/', base=API_IMAGE),
methods=['GET'])
def download(filehash):
paste_file = PasteFile.get_by_filehash(filehash)
return send_file(
open(paste_file.path, 'rb'),
mimetype='application/octet-stream',
cache_timeout=ONE_MONTH,
as_attachment=True,
attachment_filename=paste_file.filename.encode('utf-8'))
| 2.390625 | 2 |
storages/backends/s3refreshablesession.py | Techainer/django-storages | 2 | 12788910 | from uuid import uuid4
from datetime import datetime
from time import time
import boto3
from boto3 import Session
from botocore.credentials import RefreshableCredentials
from botocore.session import get_session
from botocore.credentials import InstanceMetadataFetcher
from storages.utils import setting
import logging
class InstanceMetadataBotoSession:
METHOD = 'iam-role'
CANONICAL_NAME = 'Ec2InstanceMetadata'
"""
    Boto Helper class which lets us create a refreshable session, so that we can cache the client or resource.
Usage
-----
    session = InstanceMetadataBotoSession().refreshable_session()
client = session.client("s3") # we now can cache this client object without worrying about expiring credentials
"""
def __init__(
self,
region_name: str = None,
session_name: str = None,
):
"""
        Initialize `InstanceMetadataBotoSession`
Parameters
----------
region_name : str (optional)
Default region when creating new connection.
session_name : str (optional)
An identifier for the assumed role session. (required when `sts_arn` is given)
"""
self.region_name = region_name
# read why RoleSessionName is important https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sts.html
self.session_name = session_name or uuid4().hex
self._role_fetcher = InstanceMetadataFetcher(timeout=setting("S3_CREDENTIALS_TIMEOUT", 1000), num_attempts=3)
self.access_key = None
self.secret_key = None
self.security_token = None
def __get_session_credentials(self):
"""
Get session credentials
"""
fetcher = self._role_fetcher
# We do the first request, to see if we get useful data back.
# If not, we'll pass & move on to whatever's next in the credential
# chain.
metadata = fetcher.retrieve_iam_role_credentials()
if not metadata:
return None
logging.debug('Found credentials from IAM Role: %s',
metadata['role_name'])
# We manually set the data here, since we already made the request &
# have it. When the expiry is hit, the credentials will auto-refresh
# themselves.
credentials = RefreshableCredentials.create_from_metadata(
metadata,
method=self.METHOD,
refresh_using=fetcher.retrieve_iam_role_credentials,
)
self.access_key = credentials.access_key
self.secret_key = credentials.secret_key
self.security_token = credentials.token
return credentials
def refreshable_session(self) -> Session:
"""
Get refreshable boto3 session.
"""
try:
# get refreshable credentials
refreshable_credentials = RefreshableCredentials.create_from_metadata(
metadata=self.__get_session_credentials(),
refresh_using=self._role_fetcher.retrieve_iam_role_credentials,
method=self.METHOD,
)
# attach refreshable credentials current session
session = get_session()
session._credentials = refreshable_credentials
session.set_config_variable("region", self.region_name)
autorefresh_session = Session(botocore_session=session)
return autorefresh_session
        except Exception:
return boto3.session.Session() | 2.34375 | 2 |
cellpainting2/reporting.py | apahl/cellpainting2 | 0 | 12788911 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
#########
Reporting
#########
*Created on Thu Jun 8 14:40 2017 by <NAME>*
Tools for creating HTML Reports."""
import time
import base64
import os
import gc
import os.path as op
from string import Template
from io import BytesIO as IO
import pandas as pd
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import Draw
import numpy as np
from PIL import Image, ImageChops
import matplotlib.pyplot as plt
from cellpainting2 import tools as cpt
from cellpainting2 import report_templ as cprt
from cellpainting2 import processing as cpp
cp_config = cpt.load_config("config")
# cp_plates = cpt.load_config("plates")
IPYTHON = cpt.is_interactive_ipython()
if IPYTHON:
from IPython.core.display import HTML
ACT_PROF_PARAMETERS = cp_config["Parameters"]
ACT_CUTOFF_PERC = cp_config["Cutoffs"]["ActCutoffPerc"]
ACT_CUTOFF_PERC_H = cp_config["Cutoffs"]["ActCutoffPercH"]
ACT_CUTOFF_PERC_REF = cp_config["Cutoffs"]["ActCutoffPercRef"]
OVERACT_H = cp_config["Cutoffs"]["OverActH"]
LIMIT_ACTIVITY_H = cp_config["Cutoffs"]["LimitActivityH"]
LIMIT_ACTIVITY_L = cp_config["Cutoffs"]["LimitActivityL"]
LIMIT_CELL_COUNT_H = cp_config["Cutoffs"]["LimitCellCountH"]
LIMIT_CELL_COUNT_L = cp_config["Cutoffs"]["LimitCellCountL"]
LIMIT_SIMILARITY_H = cp_config["Cutoffs"]["LimitSimilarityH"]
LIMIT_SIMILARITY_L = cp_config["Cutoffs"]["LimitSimilarityL"]
PARAMETER_HELP = cp_config["ParameterHelp"]
# get positions of the compartments in the list of parameters
x = 1
XTICKS = [x]
for comp in ["Median_Cytoplasm", "Median_Nuclei"]:
for idx, p in enumerate(ACT_PROF_PARAMETERS[x:], 1):
if p.startswith(comp):
XTICKS.append(idx + x)
x += idx
break
XTICKS.append(len(ACT_PROF_PARAMETERS))
Draw.DrawingOptions.atomLabelFontFace = "DejaVu Sans"
Draw.DrawingOptions.atomLabelFontSize = 18
try:
from misc_tools import apl_tools
AP_TOOLS = True
# Library version
VERSION = apl_tools.get_commit(__file__)
# I use this to keep track of the library versions I use in my project notebooks
print("{:45s} ({})".format(__name__, VERSION))
except ImportError:
AP_TOOLS = False
print("{:45s} ({})".format(__name__, time.strftime(
"%y%m%d-%H:%M", time.localtime(op.getmtime(__file__)))))
try:
# Try to import Avalon so it can be used for generation of 2d coordinates.
from rdkit.Avalon import pyAvalonTools as pyAv
USE_AVALON_2D = True
except ImportError:
print(" * Avalon not available. Using RDKit for 2d coordinate generation.")
USE_AVALON_2D = False
try:
import holoviews as hv
hv.extension("bokeh")
HOLOVIEWS = True
except ImportError:
HOLOVIEWS = False
print("* holoviews could not be import. heat_hv is not available.")
def check_2d_coords(mol, force=False):
"""Check if a mol has 2D coordinates and if not, calculate them."""
if not force:
try:
mol.GetConformer()
except ValueError:
force = True # no 2D coords... calculate them
if force:
if USE_AVALON_2D:
pyAv.Generate2DCoords(mol)
else:
mol.Compute2DCoords()
def mol_from_smiles(smi, calc_2d=True):
mol = Chem.MolFromSmiles(smi)
if not mol:
mol = Chem.MolFromSmiles("*")
else:
if calc_2d:
check_2d_coords(mol)
return mol
def autocrop(im, bgcolor="white"):
if im.mode != "RGB":
im = im.convert("RGB")
bg = Image.new("RGB", im.size, bgcolor)
diff = ImageChops.difference(im, bg)
bbox = diff.getbbox()
if bbox:
return im.crop(bbox)
return None # no contents
def get_value(str_val):
if not str_val:
return ""
try:
val = float(str_val)
if "." not in str_val:
val = int(val)
except ValueError:
val = str_val
return val
def isnumber(x):
"""Returns True, if x is a number (i.e. can be converted to float)."""
try:
float(x)
return True
except ValueError:
return False
def convert_bool(dict, dkey, true="Yes", false="No", default="n.d."):
if dkey in dict:
if dict[dkey]:
dict[dkey] = true
else:
dict[dkey] = false
else:
dict[dkey] = default
def load_image(path, well, channel):
image_fn = "{}/{}_w{}.jpg".format(path, well, channel)
im = Image.open(image_fn)
return im
def b64_mol(mol, size=300):
img_file = IO()
try:
img = autocrop(Draw.MolToImage(mol, size=(size, size)))
except UnicodeEncodeError:
print(Chem.MolToSmiles(mol))
mol = Chem.MolFromSmiles("C")
img = autocrop(Draw.MolToImage(mol, size=(size, size)))
img.save(img_file, format='PNG')
b64 = base64.b64encode(img_file.getvalue())
b64 = b64.decode()
img_file.close()
return b64
def b64_img(im, format="JPEG"):
if isinstance(im, IO):
needs_close = False
img_file = im
else:
needs_close = True
img_file = IO()
im.save(img_file, format=format)
b64 = base64.b64encode(img_file.getvalue())
b64 = b64.decode()
if needs_close:
img_file.close()
return b64
def mol_img_tag(mol, options=None):
tag = """<img {} src="data:image/png;base64,{}" alt="Mol"/>"""
if options is None:
options = ""
img_tag = tag.format(options, b64_mol(mol))
return img_tag
def img_tag(im, format="jpeg", options=None):
tag = """<img {} src="data:image/{};base64,{}" alt="Image"/>"""
if options is None:
options = ""
b = b64_img(im, format=format)
img_tag = tag.format(options, format.lower(), b)
return img_tag
def load_control_images(src_dir):
image_dir = op.join(src_dir, "images")
ctrl_images = {}
for ch in range(1, 6):
im = load_image(image_dir, "H11", ch)
ctrl_images[ch] = img_tag(im, options='style="width: 250px;"')
return ctrl_images
def sanitize_filename(fn):
result = fn.replace(":", "_").replace(",", "_").replace(".", "_")
return result
def write(text, fn):
with open(fn, "w") as f:
f.write(text)
def write_page(page, title="Report", fn="index.html", templ=cprt.HTML_INTRO):
t = Template(templ + page + cprt.HTML_EXTRO)
result = t.substitute(title=title)
write(result, fn=fn)
def assign_colors(rec):
act_cutoff_high = ACT_CUTOFF_PERC_H
if "Toxic" in rec:
if rec["Toxic"]:
rec["Col_Toxic"] = cprt.COL_RED
else:
rec["Col_Toxic"] = cprt.COL_GREEN
else:
rec["Col_Toxic"] = cprt.COL_WHITE
if "Pure_Flag" in rec:
if rec["Pure_Flag"] == "Ok":
rec["Col_Purity"] = cprt.COL_GREEN
elif rec["Pure_Flag"] == "Warn":
rec["Col_Purity"] = cprt.COL_YELLOW
elif rec["Pure_Flag"] == "Fail":
rec["Col_Purity"] = cprt.COL_RED
else:
rec["Col_Purity"] = cprt.COL_WHITE
else:
rec["Col_Purity"] = cprt.COL_WHITE
if rec["Rel_Cell_Count"] >= LIMIT_CELL_COUNT_H:
rec["Col_Cell_Count"] = cprt.COL_GREEN
elif rec["Rel_Cell_Count"] >= LIMIT_CELL_COUNT_L:
rec["Col_Cell_Count"] = cprt.COL_YELLOW
else:
rec["Col_Cell_Count"] = cprt.COL_RED
if rec["Activity"] > act_cutoff_high:
rec["Col_Act"] = cprt.COL_RED
elif rec["Activity"] >= LIMIT_ACTIVITY_H:
rec["Col_Act"] = cprt.COL_GREEN
elif rec["Activity"] >= LIMIT_ACTIVITY_L:
rec["Col_Act"] = cprt.COL_YELLOW
else:
rec["Col_Act"] = cprt.COL_RED
if rec["Act_Flag"] == "active":
rec["Col_Act_Flag"] = cprt.COL_GREEN
else:
rec["Col_Act_Flag"] = cprt.COL_RED
def remove_colors(rec):
for k in rec.keys():
if k.startswith("Col_"):
rec[k] = cprt.COL_WHITE
def overview_report(df, cutoff=LIMIT_SIMILARITY_L / 100,
highlight=False, mode="cpd"):
"""mode `int` displays similarities not to references but to other internal compounds
(just displays the `Similarity` column)."""
cpp.load_resource("SIM_REFS")
sim_refs = cpp.SIM_REFS
detailed_cpds = []
if isinstance(df, cpp.DataSet):
df = df.data
t = Template(cprt.OVERVIEW_TABLE_HEADER)
if "int" in mode:
tbl_header = t.substitute(sim_entity="to another Test Compound")
else:
tbl_header = t.substitute(sim_entity="to a Reference")
report = [cprt.OVERVIEW_TABLE_INTRO, tbl_header]
row_templ = Template(cprt.OVERVIEW_TABLE_ROW)
idx = 0
for _, rec in df.iterrows():
act_cutoff_low = ACT_CUTOFF_PERC
act_cutoff_high = ACT_CUTOFF_PERC_H
idx += 1
well_id = rec["Well_Id"]
mol = mol_from_smiles(rec.get("Smiles", "*"))
rec["mol_img"] = mol_img_tag(mol)
rec["idx"] = idx
if "Pure_Flag" not in rec:
rec["Pure_Flag"] = "n.d."
rec["Act_Flag"] = "active"
rec["Max_Sim"] = ""
rec["Link"] = ""
rec["Col_Sim"] = cprt.COL_WHITE
has_details = True
if rec["Activity"] < act_cutoff_low:
has_details = False
rec["Act_Flag"] = "inactive"
# print(rec)
# similar references are searched for non-toxic compounds with an activity >= LIMIT_ACTIVITY_L
if rec["Activity"] < LIMIT_ACTIVITY_L or rec["Activity"] > act_cutoff_high or rec["Toxic"] or rec["OverAct"] > OVERACT_H:
similars_determined = False
if rec["OverAct"] > OVERACT_H:
rec["Max_Sim"] = "Overact."
rec["Col_Sim"] = cprt.COL_RED
else:
similars_determined = True
assign_colors(rec)
convert_bool(rec, "Toxic")
if has_details:
detailed_cpds.append(well_id)
details_fn = sanitize_filename(well_id)
plate = rec["Plate"]
rec["Link"] = '<a href="../{}/details/{}.html">Detailed<br>Report</a>'.format(
plate, details_fn)
if similars_determined:
if "int" in mode:
# similar = {"Similarity": [rec["Similarity"]]}
similar = pd.DataFrame(
{"Well_Id": [well_id], "Similarity": [rec["Similarity"]]})
else:
similar = sim_refs[sim_refs["Well_Id"] == well_id].compute()
similar = similar.sort_values("Similarity",
ascending=False).reset_index()
if len(similar) > 0:
max_sim = round(
similar["Similarity"][0] * 100, 1) # first in the list has the highest similarity
rec["Max_Sim"] = max_sim
if max_sim >= LIMIT_SIMILARITY_H:
rec["Col_Sim"] = cprt.COL_GREEN
elif max_sim >= LIMIT_SIMILARITY_L:
rec["Col_Sim"] = cprt.COL_YELLOW
else:
rec["Col_Sim"] = cprt.COL_WHITE
print("ERROR: This should not happen (Max_Sim).")
else:
rec["Max_Sim"] = "< {}".format(LIMIT_SIMILARITY_L)
rec["Col_Sim"] = cprt.COL_RED
if not highlight:
# remove all coloring again:
remove_colors(rec)
report.append(row_templ.substitute(rec))
report.append(cprt.TABLE_EXTRO)
return "\n".join(report), detailed_cpds
def sim_ref_table(similar):
cpp.load_resource("REFERENCES")
df_refs = cpp.REFERENCES
table = [cprt.TABLE_INTRO, cprt.REF_TABLE_HEADER]
templ = Template(cprt.REF_TABLE_ROW)
for idx, rec in similar.iterrows():
rec = rec.to_dict()
ref_id = rec["Ref_Id"]
ref_data = df_refs[df_refs["Well_Id"] == ref_id]
if cpp.is_dask(ref_data):
ref_data = ref_data.compute()
if len(ref_data) == 0:
print(rec)
raise ValueError("BUG: ref_data should not be empty.")
ref_data = ref_data.copy()
ref_data = ref_data.fillna("—")
rec.update(ref_data.to_dict("records")[0])
mol = mol_from_smiles(rec.get("Smiles", "*"))
rec["Sim_Format"] = "{:.1f}".format(rec["Similarity"] * 100)
rec["Tan_Format"] = "{:.1f}".format(rec["Tanimoto"] * 100)
if rec["Tan_Format"] == np.nan:
rec["Tan_Format"] = "—"
rec["mol_img"] = mol_img_tag(mol)
rec["idx"] = idx + 1
link = "../../{}/details/{}.html".format(rec["Plate"],
sanitize_filename(rec["Well_Id"]))
rec["link"] = link
row = templ.substitute(rec)
table.append(row)
table.append(cprt.TABLE_EXTRO)
return "\n".join(table)
def changed_parameters_table(act_prof, val, parameters=ACT_PROF_PARAMETERS):
changed = cpt.parameters_from_act_profile_by_val(
act_prof, val, parameters=parameters)
table = []
templ = Template(cprt.PARM_TABLE_ROW)
for idx, p in enumerate(changed, 1):
p_elmnts = p.split("_")
p_module = p_elmnts[2]
p_name = "_".join(p_elmnts[1:])
rec = {
"idx": idx,
"Parameter": p_name,
"Help_Page": PARAMETER_HELP[p_module]
}
row = templ.substitute(rec)
table.append(row)
return "\n".join(table), changed
def parm_stats(parameters):
result = []
channels = ["_Mito", "_Ph_golgi", "_Syto", "_ER", "Hoechst"]
for ch in channels:
cnt = len([p for p in parameters if ch in p])
result.append(cnt)
return result
def parm_hist(increased, decreased, hist_cache):
# try to load histogram from cache:
if op.isfile(hist_cache):
result = open(hist_cache).read()
return result
labels = [
"Mito",
"Golgi / Membrane",
"RNA / Nucleoli",
"ER",
"Nuclei"
]
inc_max = max(increased)
dec_max = max(decreased)
max_total = max([inc_max, dec_max])
if max_total == 0:
result = "No compartment-specific parameters were changed."
return result
inc_norm = [v / max_total for v in increased]
dec_norm = [v / max_total for v in decreased]
n_groups = 5
dpi = 96
# plt.rcParams['axes.titlesize'] = 25
plt.style.use("seaborn-white")
plt.style.use("seaborn-pastel")
plt.style.use("seaborn-talk")
plt.rcParams['axes.labelsize'] = 25
plt.rcParams['xtick.labelsize'] = 20
plt.rcParams['ytick.labelsize'] = 20
plt.rcParams['legend.fontsize'] = 20
size = (1500, 1000)
figsize = (size[0] / dpi, size[1] / dpi)
fig, ax = plt.subplots(figsize=figsize)
index = np.arange(n_groups)
bar_width = 0.25
plt.bar(index, inc_norm, bar_width,
color='#94caef',
label='Inc')
plt.bar(index + bar_width, dec_norm, bar_width,
color='#ffdd1a',
label='Dec')
plt.xlabel('Cell Compartment')
plt.ylabel('rel. Occurrence')
plt.xticks(index + bar_width / 2, labels, rotation=45)
plt.legend()
plt.tight_layout()
img_file = IO()
plt.savefig(img_file, bbox_inches='tight', format="jpg")
result = img_tag(img_file, format="jpg", options='style="width: 800px;"')
img_file.close()
# important, otherwise the plots will accumulate and fill up memory:
plt.close()
open(hist_cache, "w").write(result) # cache the histogram
return result
def heat_mpl(df, id_prop="Compound_Id", cmap="bwr",
show=True, colorbar=True, biosim=False, chemsim=False, method="dist_corr",
sort_parm=False, parm_dict=None,
plot_cache=None):
# try to load heatmap from cache:
if plot_cache is not None and op.isfile(plot_cache):
result = open(plot_cache).read()
return result
if "dist" in method.lower():
profile_sim = cpt.profile_sim_dist_corr
else:
profile_sim = cpt.profile_sim_tanimoto
df_len = len(df)
img_size = 15 if show else 17
plt.style.use("seaborn-white")
plt.style.use("seaborn-pastel")
plt.style.use("seaborn-talk")
plt.rcParams['axes.labelsize'] = 25
# plt.rcParams['legend.fontsize'] = 20
plt.rcParams['figure.figsize'] = (img_size, 1.1 + 0.47 * (df_len - 1))
plt.rcParams['axes.labelsize'] = 25
plt.rcParams['ytick.labelsize'] = 20
plt.rcParams['xtick.labelsize'] = 15
fs_text = 18
y_labels = []
fp_list = []
max_val = 3 # using a fixed color range now
min_val = -3
ylabel_templ = "{}{}{}"
ylabel_cs = ""
ylabel_bs = ""
id_prop_list = []
for ctr, (_, rec) in enumerate(df.iterrows()):
if sort_parm:
if ctr == 0:
compartments = ["Median_Cells", "Median_Cytoplasm", "Median_Nuclei"]
parm_list = []
for comp in compartments:
parm_comp = [x for x in ACT_PROF_PARAMETERS if x.startswith(comp)]
val_list = [rec[x] for x in parm_comp]
parm_sorted = [x for _, x in sorted(zip(val_list, parm_comp))]
parm_list.extend(parm_sorted)
else:
parm_list = ACT_PROF_PARAMETERS
fp = [rec[x] for x in ACT_PROF_PARAMETERS]
fp_view = [rec[x] for x in parm_list]
fp_list.append(fp_view)
id_prop_list.append(rec[id_prop])
if chemsim:
if ctr == 0:
mol = mol_from_smiles(rec.get("Smiles", "*"))
if len(mol.GetAtoms()) > 1:
ylabel_cs = "Chem | "
mol_fp = Chem.GetMorganFingerprint(mol, 2) # ECFC4
else: # no Smiles present in the DataFrame
ylabel_cs = ""
chemsim = False
else:
q = rec.get("Smiles", "*")
if len(q) < 2:
ylabel_cs = " | "
else:
sim = cpt.chem_sim(mol_fp, q) * 100
ylabel_cs = "{:3.0f}% | ".format(sim)
if biosim:
if ctr == 0:
prof_ref = fp
ylabel_bs = " Bio | "
else:
sim = profile_sim(prof_ref, fp) * 100
ylabel_bs = "{:3.0f}% | ".format(sim)
ylabel = ylabel_templ.format(ylabel_cs, ylabel_bs, rec[id_prop])
y_labels.append(ylabel)
# m_val = max(fp) # this was the calculation of the color range
# if m_val > max_val:
# max_val = m_val
# m_val = min(fp)
# if m_val < min_val:
# min_val = m_val
if isinstance(parm_dict, dict):
parm_dict["Parameter"] = parm_list
for i in range(len(id_prop_list)):
parm_dict[str(id_prop_list[i])] = fp_list[i].copy()
# calc the colorbar range
max_val = max(abs(min_val), max_val)
# invert y axis:
y_labels = y_labels[::-1]
fp_list = fp_list[::-1]
Z = np.asarray(fp_list)
plt.xticks(XTICKS)
plt.yticks(np.arange(df_len) + 0.5, y_labels)
plt.pcolor(Z, vmin=-max_val, vmax=max_val, cmap=cmap)
plt.text(XTICKS[1] // 2, -1.1, "Cells",
horizontalalignment='center', fontsize=fs_text)
plt.text(XTICKS[1] + ((XTICKS[2] - XTICKS[1]) // 2), -1.1,
"Cytoplasm", horizontalalignment='center', fontsize=fs_text)
plt.text(XTICKS[2] + ((XTICKS[3] - XTICKS[2]) // 2), -1.1,
"Nuclei", horizontalalignment='center', fontsize=fs_text)
if colorbar and len(df) > 3:
plt.colorbar()
plt.tight_layout()
if show:
plt.show()
else:
img_file = IO()
plt.savefig(img_file, bbox_inches='tight', format="jpg")
result = img_tag(img_file, format="jpg",
options='style="width: 900px;"')
img_file.close()
# important, otherwise the plots will accumulate and fill up memory:
plt.clf()
plt.close()
gc.collect()
if plot_cache is not None: # cache the plot
open(plot_cache, "w").write(result)
return result
def heat_hv(df, id_prop="Compound_Id", cmap="bwr", invert_y=False):
if not HOLOVIEWS:
raise ImportError("# holoviews library could not be imported")
df_parm = df[[id_prop] + ACT_PROF_PARAMETERS].copy()
df_len = len(df_parm)
col_bar = False if df_len < 3 else True
values = list(df_parm.drop(id_prop, axis=1).values.flatten())
max_val = max(values)
min_val = min(values)
max_val = max(abs(min_val), max_val)
hm_opts = dict(width=950, height=40 + 30 * df_len, tools=['hover'], invert_yaxis=invert_y,
xrotation=90, labelled=[], toolbar='above', colorbar=col_bar, xaxis=None,
colorbar_opts={"width": 10})
hm_style = {"cmap": cmap}
opts = {'HeatMap': {'plot': hm_opts, "style": hm_style}}
df_heat = cpt.melt(df_parm, id_prop=id_prop)
heatmap = hv.HeatMap(df_heat).redim.range(Value=(-max_val, max_val))
return heatmap(opts)
def show_images(plate_full_name, well):
"""For interactive viewing in the notebook."""
if not IPYTHON:
return
src_dir = op.join(cp_config["Paths"]["SrcPath"], plate_full_name)
ctrl_images = load_control_images(src_dir)
image_dir = op.join(src_dir, "images")
templ_dict = {}
for ch in range(1, 6):
im = load_image(image_dir, well, ch)
templ_dict["Img_{}_Cpd".format(ch)] = img_tag(
im, options='style="width: 250px;"')
templ_dict["Img_{}_Ctrl".format(ch)] = ctrl_images[ch]
tbody_templ = Template(cprt.IMAGES_TABLE)
table = cprt.TABLE_INTRO + \
tbody_templ.substitute(templ_dict) + cprt.HTML_EXTRO
return HTML(table)
def get_data_for_wells(well_ids):
cpp.load_resource("DATASTORE")
data = cpp.DATASTORE
result = data[data["Well_Id"].isin(well_ids)]
if cpp.is_dask(result):
result = result.compute()
result["_sort"] = pd.Categorical(
result["Well_Id"], categories=well_ids, ordered=True)
result = result.sort_values("_sort")
    result = result.drop("_sort", axis=1)
return result
def detailed_report(rec, src_dir, ctrl_images):
# print(rec)
cpp.load_resource("SIM_REFS")
sim_refs = cpp.SIM_REFS
date = time.strftime("%d-%m-%Y %H:%M", time.localtime())
image_dir = op.join(src_dir, "images")
well_id = rec["Well_Id"]
# act_prof = [rec[x] for x in ACT_PROF_PARAMETERS]
mol = mol_from_smiles(rec.get("Smiles", "*"))
if "Pure_Flag" not in rec:
rec["Pure_Flag"] = "n.d."
templ_dict = rec.copy()
log2_vals = [(x, rec[x]) for x in ACT_PROF_PARAMETERS]
parm_table = []
for idx, x in enumerate(log2_vals, 1):
parm_table.extend(["<tr><td>", str(idx), "</td>",
# omit the "Median_" head of each parameter
"<td>", x[0][7:], "</td>",
'<td align="right">', "{:.2f}".format(x[1]), "</td></tr>\n"])
templ_dict["Parm_Table"] = "".join(parm_table)
df_heat = pd.DataFrame([rec])
templ_dict["Date"] = date
templ_dict["mol_img"] = mol_img_tag(mol, options='class="cpd_image"')
if templ_dict["Is_Ref"]:
if not isinstance(templ_dict["Trivial_Name"], str) or templ_dict["Trivial_Name"] == "":
templ_dict["Trivial_Name"] = "—"
if not isinstance(templ_dict["Known_Act"], str) or templ_dict["Known_Act"] == "":
templ_dict["Known_Act"] = "—"
t = Template(cprt.DETAILS_REF_ROW)
templ_dict["Reference"] = t.substitute(templ_dict)
else:
templ_dict["Reference"] = ""
well = rec["Metadata_Well"]
for ch in range(1, 6):
im = load_image(image_dir, well, ch)
templ_dict["Img_{}_Cpd".format(ch)] = img_tag(
im, options='style="width: 250px;"')
templ_dict["Img_{}_Ctrl".format(ch)] = ctrl_images[ch]
act_cutoff_high = ACT_CUTOFF_PERC_H
if rec["Rel_Cell_Count"] < LIMIT_CELL_COUNT_L:
templ_dict["Ref_Table"] = "Because of compound toxicity, no similarity was determined."
elif rec["Activity"] < LIMIT_ACTIVITY_L:
templ_dict["Ref_Table"] = "Because of low induction (< {}%), no similarity was determined.".format(LIMIT_ACTIVITY_L)
elif rec["Activity"] > act_cutoff_high:
templ_dict["Ref_Table"] = "Because of high induction (> {}%), no similarity was determined.".format(act_cutoff_high)
elif rec["OverAct"] > OVERACT_H:
templ_dict["Ref_Table"] = "Because of high similarity to the overactivation profile (> {}%), no similarity was determined.".format(OVERACT_H)
else:
similar = sim_refs[sim_refs["Well_Id"] == well_id].compute()
if len(similar) > 0:
similar = similar.sort_values("Similarity",
ascending=False).reset_index().head(5)
ref_tbl = sim_ref_table(similar)
templ_dict["Ref_Table"] = ref_tbl
sim_data = get_data_for_wells(similar["Ref_Id"].values)
df_heat = pd.concat([df_heat, sim_data])
else:
templ_dict["Ref_Table"] = "No similar references found."
cache_path = op.join(cp_config["Dirs"]["DataDir"], "plots", rec["Plate"])
if not op.isdir(cache_path):
os.makedirs(cache_path, exist_ok=True)
hm_fn = sanitize_filename(rec["Well_Id"] + ".txt")
hm_cache = op.join(cache_path, hm_fn)
templ_dict["Heatmap"] = heat_mpl(df_heat, id_prop="Compound_Id", cmap="bwr",
show=False, colorbar=True, plot_cache=hm_cache)
t = Template(cprt.DETAILS_TEMPL)
report = t.substitute(templ_dict)
return report
def full_report(df, src_dir, report_name="report", plate=None,
cutoff=0.6, highlight=False):
report_full_path = op.join(cp_config["Dirs"]["ReportDir"], report_name)
overview_fn = op.join(report_full_path, "index.html")
date = time.strftime("%d-%m-%Y %H:%M", time.localtime())
cpt.create_dirs(op.join(report_full_path, "details"))
if isinstance(df, cpp.DataSet):
df = df.data
print("* creating overview...")
header = "{}\n<h2>Cell Painting Overview Report</h2>\n".format(cprt.LOGO)
title = "Overview"
if plate is not None:
title = plate
header += "<h3>Plate {}</h3>\n".format(plate)
header += "<p>({})</p>\n".format(date)
if highlight:
highlight_legend = cprt.HIGHLIGHT_LEGEND
else:
highlight_legend = ""
overview, detailed_cpds = overview_report(df, cutoff=cutoff, highlight=highlight)
overview = header + overview + highlight_legend
write_page(overview, title=title, fn=overview_fn,
templ=cprt.OVERVIEW_HTML_INTRO)
# print(detailed_cpds)
print("* creating detailed reports...")
print(" * loading control images...")
ctrl_images = load_control_images(src_dir)
print(" * writing individual reports...")
df_detailed = df[df["Well_Id"].isin(detailed_cpds)]
ctr = 0
df_len = len(df_detailed)
for _, rec in df_detailed.iterrows():
ctr += 1
if not IPYTHON and ctr % 10 == 0:
print(" ({:3d}%)\r".format(int(100 * ctr / df_len)), end="")
well_id = rec["Well_Id"]
fn = op.join(report_full_path, "details",
"{}.html".format(sanitize_filename(well_id)))
title = "{} Details".format(well_id)
# similar = detailed_cpds[well_id]
details = detailed_report(rec, src_dir, ctrl_images)
write_page(details, title=title, fn=fn, templ=cprt.DETAILS_HTML_INTRO)
print("* done. ")
if IPYTHON:
return HTML('<a href="{}">{}</a>'.format(overview_fn, "Overview"))
| 2.1875 | 2 |
post/models.py | Yash1256/Django-Intern | 1 | 12788912 | <reponame>Yash1256/Django-Intern
from django.db import models
from registration.models import Author
# Create your models here.
class Post(models.Model):
author_id = models.IntegerField(null=False)
title = models.CharField(max_length=255, null=False)
description = models.CharField(max_length=500, null=False)
content = models.TextField(null=False)
date = models.DateField(null=False)
class Meta:
db_table = "posts"
verbose_name = "Post"
verbose_name_plural = "Posts"
@property
def author(self):
return Author.objects.get(pk=self.author_id)
def __str__(self):
return f"{self.title}({self.author.name})"
| 2.6875 | 3 |
backend/api_v2/migrations/0008_auto_20170101_0105.py | AstroMatt/subjective-time-perception | 0 | 12788913 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-01 01:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api_v2', '0007_auto_20170101_0101'),
]
operations = [
migrations.AlterField(
model_name='trial',
name='percentage_all',
field=models.FloatField(blank=True, help_text='Percentage Coefficient - all', null=True, verbose_name='P'),
),
migrations.AlterField(
model_name='trial',
name='percentage_blue',
field=models.FloatField(blank=True, help_text='Percentage Coefficient - blue', null=True, verbose_name='PB'),
),
migrations.AlterField(
model_name='trial',
name='percentage_red',
field=models.FloatField(blank=True, help_text='Percentage Coefficient - red', null=True, verbose_name='PR'),
),
migrations.AlterField(
model_name='trial',
name='percentage_white',
field=models.FloatField(blank=True, help_text='Percentage Coefficient - white', null=True, verbose_name='PW'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_all',
field=models.FloatField(blank=True, help_text='Time Coefficient Mean - all', null=True, verbose_name='TM'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_blue',
field=models.FloatField(blank=True, help_text='Time Coefficient Mean - blue', null=True, verbose_name='TMB'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_red',
field=models.FloatField(blank=True, help_text='Time Coefficient Mean - red', null=True, verbose_name='TMR'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_white',
field=models.FloatField(blank=True, help_text='Time Coefficient Mean - white', null=True, verbose_name='TMW'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_all',
field=models.FloatField(blank=True, help_text='Time Coefficient Standard Deviation - all', null=True, verbose_name='TSD'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_blue',
field=models.FloatField(blank=True, help_text='Time Coefficient Standard Deviation - blue', null=True, verbose_name='TSDB'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_red',
field=models.FloatField(blank=True, help_text='Time Coefficient Standard Deviation - red', null=True, verbose_name='TSDR'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_white',
field=models.FloatField(blank=True, help_text='Time Coefficient Standard Deviation - white', null=True, verbose_name='TSDW'),
),
migrations.AlterField(
model_name='trial',
name='timeout',
field=models.FloatField(help_text='Seconds per color', verbose_name='Timeout'),
),
]
| 1.5625 | 2 |
lib/utils/meta_manager.py | kbase/sample_search_api | 0 | 12788914 | <reponame>kbase/sample_search_api<filename>lib/utils/meta_manager.py
from utils.re_utils import execute_query
SAMPLE_NODE_COLLECTION = "samples_nodes"
SAMPLE_SAMPLE_COLLECTION = "samples_sample"
META_AQL_TEMPLATE = f"""
let version_ids = (for sample_id in @sample_ids
let doc = DOCUMENT({SAMPLE_SAMPLE_COLLECTION}, sample_id.id)
RETURN {{
'id': doc.id,
'version_id': doc.vers[sample_id.version - 1],
'version': sample_id.version
}}
)
let node_metas = (for version_id in version_ids
for node in {SAMPLE_NODE_COLLECTION}
FILTER node.id == version_id.id AND node.uuidver == version_id.version_id
LIMIT @num_sample_ids
let cmeta_keys = (FOR meta IN node.cmeta
RETURN meta.ok
)
let ucmeta_keys = (FOR meta IN node.ucmeta
RETURN CONCAT("custom:", meta.ok)
)
RETURN APPEND(cmeta_keys, ucmeta_keys)
)
RETURN UNIQUE(FLATTEN(node_metas))
"""
class MetadataManager:
def __init__(cls, re_api_url, re_admin_token=None):
cls.re_api_url = re_api_url
cls.re_admin_token = re_admin_token
def get_sampleset_meta(self, sample_ids, user_token):
# use the user token if an admin token is not provided
query_params = {"sample_ids": sample_ids, 'num_sample_ids': len(sample_ids)}
run_token = self.re_admin_token if self.re_admin_token else user_token
ret = execute_query(
META_AQL_TEMPLATE,
self.re_api_url,
run_token,
query_params
)
return ret['results'][0]
| 2.09375 | 2 |
utils/replay.py | xuzhiyuan1528/KTM-DRL | 10 | 12788915 | import numpy as np
import torch
# https://github.com/sfujim/TD3/blob/ade6260da88864d1ab0ed592588e090d3d97d679/utils.py
class ReplayBuffer(object):
def __init__(self, state_dim, action_dim, max_size=int(1e6)):
self.max_size = max_size
self.ptr = 0
self.size = 0
self.state = np.zeros((max_size, state_dim))
self.action = np.zeros((max_size, action_dim))
self.next_state = np.zeros((max_size, state_dim))
self.reward = np.zeros((max_size, 1))
self.not_done = np.zeros((max_size, 1))
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def add(self, state, action, next_state, reward, done):
self.state[self.ptr] = state
self.action[self.ptr] = action
self.next_state[self.ptr] = next_state
self.reward[self.ptr] = reward
self.not_done[self.ptr] = 1. - done
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample(self, batch_size):
ind = np.random.randint(0, self.size, size=batch_size)
return (
torch.from_numpy(self.state[ind]).float().to(self.device),
torch.from_numpy(self.action[ind]).float().to(self.device),
torch.from_numpy(self.next_state[ind]).float().to(self.device),
torch.from_numpy(self.reward[ind]).float().to(self.device),
torch.from_numpy(self.not_done[ind]).float().to(self.device)
)
def sample_np(self, batch_size):
ind = np.random.randint(0, self.size, size=batch_size)
return (
np.float32(self.state[ind]),
np.float32(self.action[ind]),
np.float32(self.next_state[ind]),
np.float32(self.reward[ind]),
np.float32(self.not_done[ind])
)
def save(self, fdir):
np.save(fdir + '/sample-state', self.state[:self.size])
np.save(fdir + '/sample-action', self.action[:self.size])
np.save(fdir + '/sample-nstate', self.next_state[:self.size])
np.save(fdir + '/sample-reward', self.reward[:self.size])
np.save(fdir + '/sample-ndone', self.not_done[:self.size])
def load(self, fdir):
state = np.load(fdir + '/sample-state.npy', allow_pickle=True)
action = np.load(fdir + '/sample-action.npy', allow_pickle=True)
nstate = np.load(fdir + '/sample-nstate.npy', allow_pickle=True)
reward = np.load(fdir + '/sample-reward.npy', allow_pickle=True)
ndone = np.load(fdir + '/sample-ndone.npy', allow_pickle=True)
for s, a, ns, r, nd in zip(state, action, nstate, reward, ndone):
self.add(s, a, ns, r, 1. - nd)
def reset(self):
self.ptr = 0
self.size = 0
| 2.40625 | 2 |
scripts/codepost/codepostAbet.py | cbourke/ComputerScienceII | 20 | 12788916 | """
This script interfaces with the codepost.io API to produce exemplar reports
for ABET accreditation.
For a particular assignment, a report includes an assignment summary
(basic info and stats) as well as the full assessment of 3 student
examples of an A, B, and C submission. The report includes all line-by-line
grader comments (and point deductions) as well as source files. Source files
are formatted in markdown. In the codepost.io web client the comments would
be embedded directly in the source files but for this report they are collected
in the summary.
A, B, and C examples are automatically chosen from all graded submissions.
The top submission is chosen for the A example while the B/C are chosen
to be the closest to an 85%/75% score based on the total number of points of
the assignment.
The report is written to both a markdown-formatted output file and a PDF
version (which is produced from the markdown using pandoc/latex via a system
call, so these are expected to be installed and available).
You can run this script in one of two modes: you can provide either a single
assignment ID which will produce a single report for that assignment only, or
you can provide a course ID which will produce (separate) reports for all
assignments in the course. In either case, the IDs must be valid codepost.io
IDs. Optionally, you can provide your own codepost API key via the command
line, otherwise it must be specified in the config.py file.
"""
import argparse
import os
import codepost
from config import config
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--codePostApiKey',
type=str,
default=config.codePostApiKey,
help='Optionally provide a codepost API key to use. By default the API key in the config.py file is used.')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--codePostCourseId',
type=int,
help='Generates ABET reports for *every* assignment in the provided codepost course.')
group.add_argument('--codePostAssignmentId',
type=int,
help='Generates a single ABET report for the provided codepost assignment.')
args = parser.parse_args()
def submissionToMarkdown(submission,title,assignmentPts):
"""
Returns both a summary and source files of the provided submission as
markdown-formatted strings.
"""
details = ""
result = f"""
## {title} Example
* Student(s): {submission.students}
* Score: {submission.grade:.1f} / {assignmentPts:.1f} = {(100*submission.grade/assignmentPts):.2f}%
"""
for fileId in submission.files:
f = codepost.file.retrieve(id=fileId.id)
fileName = f.name
# fools be puttin unicode shite in their source, so...
fileContents = f.code.encode('utf-8').decode('ascii','ignore')
fileExtension = f.extension
fileGraderCommentIds = [x.id for x in f.comments]
result += f" * Source File: `{fileName}`\n"
details += f"## {title} Example - `{fileName}`\n"
details += f"```{fileExtension}\n{fileContents}\n```\n"
for commentId in fileGraderCommentIds:
c = codepost.comment.retrieve(id=commentId)
cleanText = c.text.replace("\n\n", "\n")
result += f" * Lines {c.startLine:d} - {c.endLine:d} (-{c.pointDelta:.1f}): {cleanText:s}\n"
return result, details
def getAssignmentReport(assignment):
"""
Produces an ABET assignment report (as a markdown-formatted string)
for the given assignment (which is expected to be a codepost API
object) by pulling all relevant data as well as source
    code files (and grader comments) for automatically selected A, B, and C samples
"""
courseId = assignment.course
course = codepost.course.retrieve(id=courseId)
courseName = course.name
coursePeriod = course.period
assignmentName = assignment.name
assignmentPts = assignment.points
assignmentMean = assignment.mean
assignmentMedian = assignment.median
summary = f"""
# {courseName} - {coursePeriod}
## {assignmentName}
* Points: {assignmentPts}
* Mean: {assignmentMean}
* Median: {assignmentMedian}\n\n"""
# find ideal A, B, C samples
submissions = assignment.list_submissions()
aSubmission = submissions[0]
bSubmission = submissions[0]
cSubmission = submissions[0]
# we only expect 1 submission per student since submissions are via our
# scripts, but in any case, find the 3 closest to A=max%, B = 85%, C = 75%
for submission in submissions:
if submission.grade > aSubmission.grade:
aSubmission = submission
if abs(submission.grade / assignmentPts - .85) < abs(bSubmission.grade / assignmentPts - .85):
bSubmission = submission
if abs(submission.grade / assignmentPts - .75) < abs(cSubmission.grade / assignmentPts - .75):
cSubmission = submission
aSummary, aDetail = submissionToMarkdown(aSubmission,"A",assignmentPts)
bSummary, bDetail = submissionToMarkdown(bSubmission,"B",assignmentPts)
cSummary, cDetail = submissionToMarkdown(cSubmission,"C",assignmentPts)
return summary + aSummary + bSummary + cSummary + "\n\n" + aDetail + bDetail + cDetail
def produceCourseReports(courseId):
"""
Produces ABET reports (as both md and pdf files) for all assignments in
the specified course
"""
course = codepost.course.retrieve(id=courseId)
for a in course.assignments:
assignmentId = a.id
produceAssignmentReport(assignmentId)
def produceAssignmentReport(assignmentId):
"""
Produces a single report (as an md and pdf file) for the specified assignment
"""
a = codepost.assignment.retrieve(id=assignmentId)
assignmentName = a.name
baseFileName = assignmentName.replace(" ", "_")
assignmentId = a.id
report = getAssignmentReport(a)
fileNameMd = baseFileName + ".md"
fileNamePdf = baseFileName + ".pdf"
f = open(fileNameMd, "w")
f.write(report)
f.close()
os.system("pandoc -s -V geometry:margin=1in -o "+fileNamePdf+" "+fileNameMd)
return None
codePostApiKey = args.codePostApiKey
codepost.configure_api_key(codePostApiKey)
if args.codePostCourseId:
produceCourseReports(args.codePostCourseId)
elif args.codePostAssignmentId:
produceAssignmentReport(args.codePostAssignmentId)
else:
print("ERROR: neither course ID nor assignment ID specified")
| 2.90625 | 3 |
integrations/tensorflow/bindings/python/iree/tf/support/tf_utils_test.py | schoppmp/iree | 0 | 12788917 | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for iree.tf.support.tf_utils."""
from absl.testing import parameterized
from iree.tf.support import tf_utils
import numpy as np
import tensorflow as tf
class UtilsTests(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters([('int8_to_i8', np.int8, 'i8'),
('int32_to_i32', np.int32, 'i32'),
('float32_to_f32', np.float32, 'f32'),
('float64_to_f64', np.float64, 'f64')])
def test_to_mlir_type(self, numpy_type, mlir_type):
self.assertEqual(tf_utils.to_mlir_type(numpy_type), mlir_type)
@parameterized.named_parameters([
('single_i32', [np.array([1, 2], dtype=np.int32)], '2xi32=1 2'),
('single_f32', [np.array([1, 2], dtype=np.float32)], '2xf32=1.0 2.0'),
])
def test_save_input_values(self, inputs, inputs_str):
self.assertEqual(tf_utils.save_input_values(inputs), inputs_str)
def test_apply_function(self):
inputs = [1, [2, 3], (4, 5), {'6': 6, '78': [7, 8]}]
expected = [0, [1, 2], (3, 4), {'6': 5, '78': [6, 7]}]
result = tf_utils.apply_function(inputs, lambda x: x - 1)
self.assertEqual(result, expected)
self.assertNotEqual(inputs, expected)
@parameterized.named_parameters([
{
'testcase_name': 'all the same',
'array_c': np.array([0, 1, 2]),
'array_d': np.array(['0', '1', '2']),
'array_e': np.array([0.0, 0.1, 0.2]),
'tar_same': True,
},
{
'testcase_name': 'wrong int',
'array_c': np.array([1, 1, 2]),
'array_d': np.array(['0', '1', '2']),
'array_e': np.array([0.0, 0.1, 0.2]),
'tar_same': False,
},
{
'testcase_name': 'wrong string',
'array_c': np.array([0, 1, 2]),
'array_d': np.array(['a', '1', '2']),
'array_e': np.array([0.0, 0.1, 0.2]),
'tar_same': False,
},
{
'testcase_name': 'wrong float',
'array_c': np.array([0, 1, 2]),
'array_d': np.array(['0', '1', '2']),
'array_e': np.array([1.0, 0.1, 0.2]),
'tar_same': False,
},
])
def test_recursive_check_same(self, array_c, array_d, array_e, tar_same):
# yapf: disable
ref = {
'a': 1,
'b': [
{'c': np.array([0, 1, 2])},
{'d': np.array(['0', '1', '2'])},
{'e': np.array([0.0, 0.1, 0.2])}
],
}
tar = {
'a': 1,
'b': [
{'c': array_c},
{'d': array_d},
{'e': array_e}
],
}
# yapf: enable
same, _ = tf_utils.check_same(ref, tar, rtol=1e-6, atol=1e-6)
self.assertEqual(tar_same, same)
if __name__ == '__main__':
tf.test.main()
| 1.914063 | 2 |
imbedding/parameter.py | bjih1999/Uhzzuda_project | 0 | 12788918 | import pandas as pd
import numpy as np
texts = []
with open('preprocess/jull_review.csv', 'r') as f:
    for line in f.readlines():
        oneline = line.replace("\n", "").split(",")
        oneline = list(filter(None, oneline))
        texts.append(oneline)
print(len(texts))
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
documents = [TaggedDocument(doc, [i]) for i, doc in enumerate(texts)]
doc2vec_model = Doc2Vec(documents, vector_size=100, window=10, min_count=30, workers=4, dm=0)
doc2vec_model.save('doc2vec_v=100_dm0.model')
doc2vec_model = Doc2Vec(documents, vector_size=100, window=10, min_count=30, workers=4, dm=1)
doc2vec_model.save('doc2vec_v=100_dm1.model') | 2.484375 | 2 |
run.py | iTecAI/roomdash | 0 | 12788919 | import subprocess, json, sys, time
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
with open('config.json', 'r') as f:
CONFIG = json.load(f)
proc = subprocess.Popen([sys.executable, 'server.py'], stdout=sys.stdout)
print('Waiting for server to start.')
time.sleep(4)
options = Options()
options.add_argument(f'--kiosk http://{CONFIG["host"]}:{str(CONFIG["port"])}')
driver = webdriver.Firefox(firefox_options=options)
driver.get(f'http://{CONFIG["host"]}:{str(CONFIG["port"])}')
driver.fullscreen_window()
proc.wait() | 2.390625 | 2 |
variantgrid/settings/env/_settings_template.py | SACGF/variantgrid | 5 | 12788920 | from variantgrid.settings.components.celery_settings import * # pylint: disable=wildcard-import, unused-wildcard-import
from variantgrid.settings.components.default_settings import * # pylint: disable=wildcard-import, unused-wildcard-import
from variantgrid.settings.components.seqauto_settings import * # pylint: disable=wildcard-import, unused-wildcard-import
# ANNOTATION_ENTREZ_EMAIL = '<EMAIL>'
WEB_HOSTNAME = 'yourdomain.com'
WEB_IP = '127.0.0.1'
ALLOWED_HOSTS = ["localhost", WEB_HOSTNAME, WEB_IP]
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# PEDIGREE_MADELINE2_COMMAND = "madeline2"
| 1.296875 | 1 |
two_sum/two_sum_test.py | kevinzen/learning | 0 | 12788921 | import unittest
from two_sum.solution import Solution
class MyTestCase(unittest.TestCase):
def test_two_sum(self):
s = Solution()
nums = [2,7,11,15]
target = 9
result = s.twoSum(nums, target)
self.assertEqual(result, [0,1])
nums = [-1,-2,-3,-4,-5]
target = -8
result = s.twoSum(nums, target)
self.assertEqual(result, [2,4])
def test_two_sum_two_pass_hash(self):
s = Solution()
nums = [2,7,11,15]
target = 9
result = s.twoSumTwoPassHash(nums, target)
self.assertEqual(result, [0,1])
nums = [-1,-2,-3,-4,-5]
target = -8
result = s.twoSumTwoPassHash(nums, target)
self.assertEqual(result, [2,4])
def test_two_sum_one_pass_hash(self):
s = Solution()
# nums = [2,7,11,15]
# target = 9
# result = s.twoSumOnePassHash(nums, target)
# self.assertEqual(result, [0,1])
#
#
# nums = [-1,-2,-3,-4,-5]
# target = -8
# result = s.twoSumOnePassHash(nums, target)
# self.assertEqual(result, [2,4])
#
nums = [3,3]
target = 6
result = s.twoSumOnePassHash(nums, target)
self.assertEqual(result, [0,1])
| 3.59375 | 4 |
spyder/plugins/editor/fallback/tests/conftest.py | seryj/spyder | 0 | 12788922 | # -*- coding: utf-8 -*-
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
import pytest
from spyder.plugins.editor.fallback.actor import FallbackActor
from spyder.plugins.editor.lsp.tests.conftest import qtbot_module
@pytest.fixture(scope='module')
def fallback(qtbot_module, request):
fallback = FallbackActor(None)
qtbot_module.addWidget(fallback)
with qtbot_module.waitSignal(fallback.sig_fallback_ready, timeout=30000):
fallback.start()
def teardown():
fallback.stop()
request.addfinalizer(teardown)
return fallback
| 2.109375 | 2 |
resnet.py | geodekid/resnet1 | 12 | 12788923 | <reponame>geodekid/resnet1
# You will learn how to build very deep convolutional networks, using Residual Networks (ResNets)
# In theory, very deep networks can represent very complex functions; but in practice, they are hard to train. Residual Networks, introduced by He et al., allow you to train much deeper networks than were previously practically feasible.
# Let's import packages
import numpy as np
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from resnets_utils import *
from keras.initializers import glorot_uniform
import scipy.misc
from matplotlib.pyplot import imshow
# %matplotlib inline  (IPython magic, only valid inside a Jupyter notebook)
import tensorflow as tf  # needed below for tf.reset_default_graph, tf.Session and tf.placeholder
import keras.backend as K
K.set_image_data_format('channels_last')
K.set_learning_phase(1)
# Identity block
def identity_block(X, f, filters, stage, block):
"""
Implementation of the identity block as defined in Figure 3
Arguments:
X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
f -- integer, specifying the shape of the middle CONV's window for the main path
filters -- python list of integers, defining the number of filters in the CONV layers of the main path
stage -- integer, used to name the layers, depending on their position in the network
block -- string/character, used to name the layers, depending on their position in the network
Returns:
X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
"""
# defining name basis
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# Retrieve Filters
F1, F2, F3 = filters
# Save the input value. You'll need this later to add back to the main path.
X_shortcut = X
# First component of main path
X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)
X = Activation('relu')(X)
# Second component of main path
X = Conv2D(filters = F2, kernel_size=(f,f), strides = (1,1), padding='same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name = bn_name_base + '2b')(X)
X = Activation('relu')(X)
# Third component of main path
X = Conv2D(filters = F3, kernel_size=(1,1), strides = (1,1), padding="valid", name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
# Final step: Add shortcut value to main path, and pass it through a RELU activation
X = Add()([X,X_shortcut])
X = Activation('relu')(X)
return X
# Creating a TF graph and session
tf.reset_default_graph()
with tf.Session() as test:
np.random.seed(1)
A_prev = tf.placeholder("float", [3, 4, 4, 6])
X = np.random.randn(3, 4, 4, 6)
A = identity_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')
test.run(tf.global_variables_initializer())
out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
print("out = " + str(out[0][1][1][0]))
# The convolutional block
# GRADED FUNCTION: convolutional_block
def convolutional_block(X, f, filters, stage, block, s = 2):
"""
Implementation of the convolutional block as defined in Figure 4
Arguments:
X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
f -- integer, specifying the shape of the middle CONV's window for the main path
filters -- python list of integers, defining the number of filters in the CONV layers of the main path
stage -- integer, used to name the layers, depending on their position in the network
block -- string/character, used to name the layers, depending on their position in the network
s -- Integer, specifying the stride to be used
Returns:
X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)
"""
# defining name basis
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# Retrieve Filters
F1, F2, F3 = filters
# Save the input value
X_shortcut = X
##### MAIN PATH #####
# First component of main path
X = Conv2D(filters = F1, kernel_size= (1, 1), strides = (s,s),padding="valid", name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)
X = Activation('relu')(X)
# Second component of main path
X = Conv2D(filters = F2, kernel_size=(f,f), strides=(1,1), name = conv_name_base + '2b', padding="same",kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name= bn_name_base + '2b')(X)
X = Activation('relu')(X)
# Third component of main path
X = Conv2D(filters = F3, kernel_size=(1,1), strides = (1,1), name= conv_name_base + '2c',padding="valid", kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
##### SHORTCUT PATH ####
X_shortcut = Conv2D(filters = F3, kernel_size= (1,1), strides=(s,s), name=conv_name_base + '1', padding="valid", kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
X_shortcut = BatchNormalization(axis=3, name=bn_name_base+'1')(X_shortcut)
# Final step: Add shortcut value to main path, and pass it through a RELU activation
X = Add()([X_shortcut,X])
X = Activation("relu")(X)
return X
tf.reset_default_graph()
with tf.Session() as test:
np.random.seed(1)
A_prev = tf.placeholder("float", [3, 4, 4, 6])
X = np.random.randn(3, 4, 4, 6)
A = convolutional_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')
test.run(tf.global_variables_initializer())
out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
print("out = " + str(out[0][1][1][0]))
# ResNet 50
def ResNet50(input_shape = (64, 64, 3), classes = 6):
"""
    Implementation of the popular ResNet50 with the following architecture:
CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
-> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER
Arguments:
input_shape -- shape of the images of the dataset
classes -- integer, number of classes
Returns:
model -- a Model() instance in Keras
"""
# Define the input as a tensor with shape input_shape
X_input = Input(input_shape)
# Zero-Padding
X = ZeroPadding2D((3, 3))(X_input)
# Stage 1
X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
X = Activation('relu')(X)
X = MaxPooling2D((3, 3), strides=(2, 2))(X)
# Stage 2
X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1)
X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')
X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')
# Stage 3
X = convolutional_block(X, f=3, filters = [128,128,512], stage = 3, block='a', s=2)
X = identity_block(X, 3, filters = [128,128,512],stage=3, block='b')
X = identity_block(X, 3, filters = [128,128,512], stage=3, block='c')
X = identity_block(X, 3, filters = [128,128,512], stage =3, block='d')
# Stage 4
X = convolutional_block(X, f=3, filters = [256,256,1024],stage=4, block='a', s=2)
X = identity_block(X, 3, filters = [256,256,1024], stage=4, block='b')
X = identity_block(X, 3, filters = [256, 256, 1024], stage=4, block='c')
X = identity_block(X, 3, filters= [256,256,1024], stage=4, block='d')
X = identity_block(X, 3, filters=[256,256,1024], stage=4, block='e')
X = identity_block(X, 3, filters=[256,256,1024], stage=4, block='f')
# Stage 5
X = convolutional_block(X, f=3, filters=[256,256,2048], stage=5,block='a', s=3)
X = identity_block(X, 3, filters=[256,256,2048], stage=5, block='b')
X = identity_block(X,3, filters=[256,256,2048], stage=5, block='c')
# AVGPOOL (≈1 line). Use "X = AveragePooling2D(...)(X)"
X = AveragePooling2D((2,2), name='avg_pool')(X)
# output layer
X = Flatten()(X)
X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)
# Create model
model = Model(inputs = X_input, outputs = X, name='ResNet50')
return model
# Run the following code to build the model's graph. If your implementation is not correct you will know it by checking your accuracy when running model.fit(...) below.
model = ResNet50(input_shape = (64, 64, 3), classes = 6)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# The model is ready to be trained. The only thing you need is a dataset.
# Use any dataset of your choice.
# You're on your own now: you have the required tools to build ResNet. Below is a sample of what the training pipeline may look like (a hedged sketch of a dataset loader follows).
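# A hedged sketch of what a `load_dataset`-style helper typically looks like for the SIGNS
# dataset often used with this exercise. The h5 file paths and key names are assumptions;
# keep using the `load_dataset` imported from resnets_utils if you have it.
import h5py

def load_signs_dataset_example():
    train_dataset = h5py.File('datasets/train_signs.h5', 'r')
    test_dataset = h5py.File('datasets/test_signs.h5', 'r')
    train_set_x_orig = np.array(train_dataset['train_set_x'][:])   # images, shape (m_train, 64, 64, 3)
    train_set_y_orig = np.array(train_dataset['train_set_y'][:]).reshape(1, -1)
    test_set_x_orig = np.array(test_dataset['test_set_x'][:])      # images, shape (m_test, 64, 64, 3)
    test_set_y_orig = np.array(test_dataset['test_set_y'][:]).reshape(1, -1)
    classes = np.array(test_dataset['list_classes'][:])
    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes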
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Convert training and test labels to one hot matrices
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
model.fit(X_train, Y_train, epochs = 2, batch_size = 32)
preds = model.evaluate(X_test, Y_test)
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
# Transfer Learning, you can use a pretrained ResNet 50 here, available online
model = load_model('ResNet50.h5')
preds = model.evaluate(X_test, Y_test)
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
# ResNet50 is a powerful model for image classification when it is trained for an adequate number of iterations.
# That's it. Congratulations on learning Residual Networks. Thank you for watching. | 3.421875 | 3
utilities.py | tdrmk/pyklotski | 0 | 12788924 | from pygame.draw import rect as draw_rect
def darken_color(color, factor):
return tuple(int(c * factor) for c in color)
def draw_piece(surf, color, left, top, width, height, size):
padding_factor = 0.025
shadow_factor = 0.085
margin_factor = 0.05
base_color = color
margin_color = darken_color(color, 0.8)
bottom_color = darken_color(color, 0.4)
# Applying padding
padding = int(size * padding_factor)
left, top = left + padding, top + padding
width, height = width - 2 * padding, height - 2 * padding
size = size - 2 * padding
# Applying shadow effect
shadow = int(size * shadow_factor)
top_rect = (left, top, width - shadow, height - shadow)
bottom_rect = (left + shadow, top + shadow, width - shadow, height - shadow)
draw_rect(surf, bottom_color, bottom_rect)
draw_rect(surf, base_color, top_rect)
# Draw margins
draw_rect(surf, margin_color, top_rect, int(size * margin_factor))
| 3.703125 | 4 |
{{cookiecutter.project_slug}}/src/{{cookiecutter.package_name}}/exceptions.py | pcrespov/cookiecutter-simcore-pyservice | 0 | 12788925 | <reponame>pcrespov/cookiecutter-simcore-pyservice<filename>{{cookiecutter.project_slug}}/src/{{cookiecutter.package_name}}/exceptions.py
""" All exceptions used in the {{ cookiecutter.package_name }} code base are defined here.
"""
class ServiceException(Exception):
"""
Base exception class. All service-specific exceptions should subclass
this class.
"""
| 1.421875 | 1 |
api/reports/migrations/0001_initial.py | Egor4ik325/rankrise | 0 | 12788926 | <gh_stars>0
# Generated by Django 3.2.9 on 2022-01-02 17:46
import uuid
import django.db.models.deletion
import model_utils.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("contenttypes", "0002_remove_content_type_name"),
]
operations = [
migrations.CreateModel(
name="Report",
fields=[
(
"id",
model_utils.fields.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("title", models.CharField(max_length=100, verbose_name="title")),
("description", models.TextField(verbose_name="description")),
(
"object_pk",
models.CharField(max_length=100, verbose_name="object ID"),
),
(
"created",
models.DateTimeField(auto_now_add=True, verbose_name="Created"),
),
(
"content_type",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="reports",
to="contenttypes.contenttype",
verbose_name="content type",
),
),
(
"reporter",
models.ForeignKey(
default=None,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="reports",
to=settings.AUTH_USER_MODEL,
verbose_name="Reporter",
),
),
],
options={
"verbose_name": "Report",
"verbose_name_plural": "Reports",
"ordering": ["created"],
},
),
]
| 1.820313 | 2 |
python/paddle/trainer_config_helpers/tests/configs/test_gated_unit_layer.py | shenchaohua/Paddle | 3 | 12788927 | <gh_stars>1-10
from paddle.trainer_config_helpers import *
data = data_layer(name='input', size=256)
glu = gated_unit_layer(
size=512,
input=data,
act=TanhActivation(),
gate_attr=ExtraLayerAttribute(error_clipping_threshold=100.0),
gate_param_attr=ParamAttr(initial_std=1e-4),
gate_bias_attr=ParamAttr(initial_std=1),
inproj_attr=ExtraLayerAttribute(error_clipping_threshold=100.0),
inproj_param_attr=ParamAttr(initial_std=1e-4),
inproj_bias_attr=ParamAttr(initial_std=1),
layer_attr=ExtraLayerAttribute(error_clipping_threshold=100.0))
outputs(glu)
| 2.03125 | 2 |
ModularER_2D/gym_rem2D/morph/circular_module.py | FrankVeenstra/gym_rem2D | 27 | 12788928 |
#!/usr/bin/env python
"""
Standard BOX 2D module with single joint
"""
import gym_rem2D.morph.module_utility as mu
from gym_rem.utils import Rot
from enum import Enum
import numpy as np
from Controller import m_controller
import random
import math
from gym_rem2D.morph import abstract_module
from gym_rem2D.morph import simple_module as sm
import Box2D as B2D
from Box2D.b2 import (edgeShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)
class Connection(Enum):
"""Available connections for standard 2D module"""
left = (1.,0.,0.)
right = (-1.,0.,0.)
top = (0.,1.0,0.)
class Circular2D(abstract_module.Module):
"""Standard 2D module"""
def __init__(self, theta=0, size=(0.1,0.1, 0.0)):
self.theta = theta % 2 # double check
self.size = np.array(size)
assert self.size.shape == (3,), "Size must be a 3 element vector! : this is a 2D module but takes in a three dimensional size vector for now. Third entry is ignored"
self.connection_axis = np.array([0., 0., 1.])
self.orientation = Rot.from_axis(self.connection_axis,
-self.theta * (np.pi / 2.))
# NOTE: The fudge factor is to avoid colliding with the plane once
# spawned
self.position = np.array([0., self.size[2] / 2. + 0.002, 0.]) # uses only x and y
self._children = {}
self.controller = m_controller.Controller()
# relative scales
self.radius = 0.25
self.angle = math.pi/2
self.type = "CIRCLE"
self.MIN_RADIUS = 0.25
self.MAX_RADIUS = 0.5
self.MIN_ANGLE = math.pi/4
self.MAX_ANGLE = math.pi*2
self.torque = 50
#self.joint = None # needs joint
def limitWH(self):
"""Limit morphology to bounds"""
if self.radius > self.MAX_RADIUS:
self.radius = self.MAX_RADIUS
elif self.radius < self.MIN_RADIUS:
self.radius = self.MIN_RADIUS
if self.angle >self.MAX_ANGLE:
self.angle = self.MAX_ANGLE
elif self.angle < self.MIN_ANGLE:
self.angle = self.MIN_ANGLE
def mutate(self, MORPH_MUTATION_RATE,MUTATION_RATE,MUT_SIGMA):
"""
To mutate the shape and controller stored in the modules.
"""
#return
if random.uniform(0,1) < MORPH_MUTATION_RATE:
self.radius = random.gauss(self.radius, MUT_SIGMA)
if random.uniform(0,1) < MORPH_MUTATION_RATE:
self.angle = random.gauss(self.angle,MUT_SIGMA * math.pi)
self.limitWH()
if self.controller is not None:
self.controller.mutate(MUTATION_RATE,MUT_SIGMA, self.angle)
def setMorph(self,val1, val2, val3):
# values are between -1 and 1
self.radius = val1 + 1.5
# val2 is not used since radius
self.angle = self.MIN_ANGLE +(((val3 + 1.0)*0.5) * (self.MAX_ANGLE-self.MIN_ANGLE))
# limit values
self.limitWH()
def __setitem__(self, key, module):
if not isinstance(key, Connection):
raise TypeError("Key: '{}' is not a Connection type".format(key))
if key in self._children:
raise ModuleAttached()
if key not in self.available:
raise ConnectionObstructed()
# Add module as a child
self._children[key] = module
# Calculate connection point
direction = self.orientation.rotate(np.array(key.value))
position = self.position + (direction * self.size) / 2.
# Update parent pointer of module
module.update(self, position, direction)
def update(self, parent=None, pos=None, direction=None):
# Update own orientation first in case we have been previously
# connected
self.orientation = Rot.from_axis(self.connection_axis,
-self.theta * (np.pi / 2.))
# Update position in case parent is None
self.position = np.array([0., 0., self.size[2] / 2. + 0.002])
# Reset connection in case parent is None
self.connection = None
# Call super to update orientation
super().update(parent, pos, direction)
# If parent is not None we need to update position and connection point
if self.parent is not None:
# Update center position for self
# NOTE: We add a little fudge factor to avoid overlap
self.position = pos + (direction * self.size * 1.01) / 2.
# Calculate connection points for joint
conn = np.array([0., 0., -self.size[2] / 2.])
parent_conn = parent.orientation.T.rotate(pos - parent.position)
self.connection = (parent_conn, conn)
# Update potential children
self.update_children()
def update_children(self):
for conn in self._children:
direction = self.orientation.rotate(np.array(conn.value))
position = self.position + (direction * self.size) / 2.
self._children[conn].update(self, position, direction)
def spawn(self):
orient = self.orientation.as_quat()
cuid = B2D.b2CircleShape
cuid.m_p.Set(self.position)
if (self.parent):
self.joint = B2D.b2RevoluteJoint()
return cuid
def get_global_position_of_connection_site(self,con=None, parent_component = None):
if con is None:
con = Connection.left # get intersection of rectangle from width and height
local_position = [] # 2d array
local_angle = (con.value[0] * (self.angle)) # positive for left, negative for right
# position relative to y directional vector
if parent_component:
local_angle+=parent_component.angle
x = math.cos(local_angle+ math.pi/2)*self.radius
y = math.sin(local_angle+ math.pi/2)*self.radius
local_position.append(x)
local_position.append(y)
if parent_component is None:
return local_position,local_angle
global_position = [local_position[0]+parent_component.position[0],
local_position[1]+parent_component.position[1]]
return global_position, local_angle
def create(self,world,TERRAIN_HEIGHT,module=None,node=None,connection_site=None, p_c=None, module_list=None, position = None):
# get module height and width
if p_c is not None and connection_site is None:
raise("When you want to attach a new component to a parent component, you have to supply",
"a connection_site object with it. This connection_site object defines where to anchor",
"the joint in between to components")
n_radius = self.radius
angle = 0
pos = [7,10,0];
if position is not None:
pos = position
if (p_c is not None):
local_pos_x =math.cos(connection_site.orientation.x+ math.pi/2) * n_radius
local_pos_y =math.sin(connection_site.orientation.x+ math.pi/2) * n_radius
pos[0] = (local_pos_x) + connection_site.position.x
pos[1] = (local_pos_y) + connection_site.position.y
# This module will create one component that will be temporarily stored in ncomponent
new_component = None
# This module will create one joint (if a parent component is present) that will be temporarily stored in njoint
njoint = None
components = []
joints = []
if connection_site:
angle += connection_site.orientation.x
if (pos[1] - n_radius < TERRAIN_HEIGHT): #TODO CHANGE TO TERRAIN_HEIGT OR DO CHECK ELSEWHERE
if node is not None:
node.component = None
return components,joints
else:
fixture = fixtureDef(
shape=B2D.b2CircleShape(radius =n_radius),
density=1,
friction=0.1,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001
)
new_component = world.CreateDynamicBody(
position=(pos[0],pos[1]),
angle = angle,
fixtures = fixture)
color = [255,255,255]
if node is not None and module_list is not None:
color = world.cmap(node.type/len(module_list))
elif node is not None and module_list is None:
print("Note: cannot assign a color to the module since the 'module_list' is not passed as an argument")
# move to component creator
new_component.color1 = (color[0],color[1],color[2])
new_component.color2 = (color[0],color[1],color[2])
components.append(new_component)
if node is not None:
node.component = [new_component]
if connection_site is not None:
joint = mu.create_joint(world, p_c,new_component,connection_site, angle, self.torque)
joints.append(joint)
return components, joints
| 3.109375 | 3 |
level_05.py | katsukaree/chapter-weasel | 0 | 12788929 | #!/usr/bin/env python3
import requests
import base64
import re
from levels_credentials import credentials
level_url = credentials[5]["url"]
level_username = credentials[5]["level"]
level_password = credentials[5]["password"]
next_level_url = credentials[6]["url"]
next_level_username = credentials[6]["level"]
credentials = "%s:%s" % (level_username, level_password)
auth_creds = base64.b64encode(credentials.encode("ascii"))
heads = {"Authorization": "Basic %s" % auth_creds.decode("ascii"), "Referer": next_level_url}
cooks = {"loggedin": "1"}
response = requests.get(level_url, headers=heads, cookies=cooks)
data = response.text
strings = re.split('\n|:|\s|<|>', data)
next_password = strings[strings.index(next_level_username) + 2]
print(next_password)
| 2.71875 | 3 |
python/testData/refactoring/rename/referencesInsideFStringsNotReportedAsStringOccurrences.py | tgodzik/intellij-community | 2 | 12788930 | <filename>python/testData/refactoring/rename/referencesInsideFStringsNotReportedAsStringOccurrences.py
def func():
v<caret>ar = 42
s = f'{var}' | 1.46875 | 1 |
libs/modules/trader.py | meetri/cryptolib | 0 | 12788931 | <reponame>meetri/cryptolib
import os,sys,talib,numpy,math,time,datetime
from influxdbwrapper import InfluxDbWrapper
from coincalc import CoinCalc
from exchange import Exchange
class Trader(object):
def __init__(self, market = None, exchange=None, currency=None):
self.influxdb = InfluxDbWrapper.getInstance()
self.market = market
if exchange is None:
exchange = "bittrex"
self.exchange = exchange
self.cs = None
self.indicators = None
self.timeframe = None
self.cssize = None
self.candle_seconds = 0
self.candle_remaining = 0
self.candle_last_time = None
if currency is not None:
self.market = CoinCalc.getInstance().get_market(currency)
def set_currency(self,currency):
self.market = CoinCalc.getInstance().get_market(currency)
return self
    def project_volume(self, volkey="basevolume"):
size = self.cssize
m = 1
if size[-1] == "m":
m = 60
elif size[-1] == "h":
m = 3600
elif size[-1] == "d":
m = 86400
sec_ofs = float(size[0:-1]) * m
ts = time.time() % sec_ofs
remaining = sec_ofs - ts
rem = sec_ofs / ( sec_ofs - remaining)
        return self.cs[volkey][-1] * rem
def getCandleRemaining(self):
rem = None
if self.candle_last_time is not None:
ts = time.time() - self.candle_last_time
if ts < self.candle_remaining:
return self.candle_remaining - ts
return rem
def get_candlesticks(self, timeframe = "1h", size = "1m", dateOffset = "now()" , base_size="1m"):
self.timeframe = timeframe
self.cssize = size
m = 1
if size[-1] == "m":
m = 60
elif size[-1] == "h":
m = 3600
elif size[-1] == "d":
m = 86400
sec_ofs = float(size[0:-1]) * m
ts = time.time() % sec_ofs
if len(base_size) > 0:
dateOffset = (datetime.datetime.utcnow() - datetime.timedelta(seconds=ts) + datetime.timedelta(seconds=sec_ofs)).strftime('%Y-%m-%dT%H:%M:%SZ')
pres = self.influxdb.raw_query("""SELECT SUM(base_volume) AS base_volume, SUM(volume) AS volume, MAX(high) as high, MIN(low) as low, FIRST(open) as open, LAST(close) AS close FROM "market_ohlc" WHERE market='{0}' AND exchange='{5}' AND time < '{1}' AND time > '{1}' - {2} AND period='{4}' GROUP BY time({3})""".format(self.market,dateOffset,timeframe,size,base_size,self.exchange))
points = pres.get_points()
else:
points = self.influxdb.raw_query("""select base_volume, volume, open, high, low, close FROM "market_ohlc" WHERE market='{0}' AND exchange='{4}' AND time < {1} AND time > {1} - {2} AND period='{3}'""".format(self.market,dateOffset,timeframe,size,self.exchange)).get_points()
cs = self.clear_candlesticks()
psize = 0
for point in points:
if point["volume"] == None:
continue
#point["volume"] = 0
#if point["base_volume"] == None:
# point["base_volume"] = 0
psize += 1
cs["low"].extend([point["low"]])
cs["high"].extend([point["high"]])
cs["closed"].extend([point["close"]])
cs["open"].extend([point["open"]])
cs["volume"].extend([float(point["volume"])])
cs["basevolume"].extend([float(point["base_volume"])])
cs["time"].extend([point["time"]])
self.candle_remaining = sec_ofs - ts
self.candle_seconds = sec_ofs
self.candle_last_time = time.time()
if psize == 0:
raise Exception("no market data for {} at {}".format(self.market,dateOffset))
self.cs = {
"low": numpy.array(cs["low"]),
"high": numpy.array(cs["high"]),
"closed": numpy.array(cs["closed"]),
"volume": numpy.array(cs["volume"]),
"basevolume": numpy.array(cs["basevolume"]),
"open": numpy.array(cs["open"]),
"time": cs["time"],
"remaining": numpy.array(cs["remaining"]),
"projected_volume": numpy.array(cs["projected_volume"]),
"projected_basevolume": numpy.array(cs["projected_basevolume"]),
}
Exchange.getInstance().set_market_value(self.market, self.cs["closed"][-1] )
return self.cs
def x_get_candlesticks(self, timeframe = "1h", size = "5m", dateOffset = "now()" ):
self.timeframe = timeframe
self.cssize = size
points = self.influxdb.raw_query("""select LAST(basevolume) as basevolume, LAST(volume) as volume, FIRST(last) as open, LAST(last) as closed, MAX(last) as high, MIN(last) as low FROM "market_summary" WHERE marketname='{0}' and time < {1} and time > {1} - {2} group by time({3})""".format(self.market,dateOffset,timeframe,size)).get_points()
cs = self.clear_candlesticks()
psize = 0
for point in points:
psize += 1
cs["low"].extend([point["low"]])
cs["high"].extend([point["high"]])
cs["closed"].extend([point["closed"]])
cs["open"].extend([point["open"]])
cs["volume"].extend([point["volume"]])
cs["basevolume"].extend([point["basevolume"]])
cs["time"].extend([point["time"]])
if psize == 0:
raise Exception("no market data for {} at {}".format(self.market,dateOffset))
def fix_gaps(lst):
for idx,val in enumerate(lst):
if val == None:
if idx > 0:
lst[idx] = lst[idx-1]
if idx == 0:
lst[idx] = 0
fix_gaps(cs["low"])
fix_gaps(cs["high"])
fix_gaps(cs["closed"])
fix_gaps(cs["open"])
fix_gaps(cs["volume"])
fix_gaps(cs["basevolume"])
fix_gaps(cs["time"])
self.cs = {
"low": numpy.array(cs["low"]),
"high": numpy.array(cs["high"]),
"closed": numpy.array(cs["closed"]),
"volume": numpy.array(cs["volume"]),
"basevolume": numpy.array(cs["basevolume"]),
"open": numpy.array(cs["open"]),
"time": cs["time"]
}
Exchange.getInstance().set_market_value(self.market, self.cs["closed"][-1] )
return self.cs
def xget_candlesticks(self, timeframe = "1h", size = "5m" ):
self.timeframe = timeframe
self.cssize = size
points = self.influxdb.raw_query("""select FIRST(last) as open, LAST(last) as closed, MAX(last) as high, MIN(last) as low, (LAST(basevolume)+LAST(volume)) as volume FROM "market_summary" WHERE marketname='{}' and time > now() - {} group by time({})""".format(self.market,timeframe,size)).get_points()
cs = self.clear_candlesticks()
for point in points:
cs["low"].extend([point["low"]])
cs["high"].extend([point["high"]])
cs["closed"].extend([point["closed"]])
cs["open"].extend([point["open"]])
cs["volume"].extend([point["volume"]])
cs["basevolume"].extend([point["basevolume"]])
cs["time"].extend([point["time"]])
def fix_gaps(lst):
for idx,val in enumerate(lst):
if val == None:
if idx > 0:
lst[idx] = lst[idx-1]
if idx == 0:
lst[idx] = 0
fix_gaps(cs["low"])
fix_gaps(cs["high"])
fix_gaps(cs["closed"])
fix_gaps(cs["open"])
fix_gaps(cs["volume"])
fix_gaps(cs["basevolume"])
fix_gaps(cs["time"])
self.cs = {
"low": numpy.array(cs["low"]),
"high": numpy.array(cs["high"]),
"closed": numpy.array(cs["closed"]),
"volume": numpy.array(cs["volume"]),
"basevolume": numpy.array(cs["basevolume"]),
"open": numpy.array(cs["open"]),
"time": cs["time"]
}
Exchange.getInstance().set_market_value(self.market, self.cs["closed"][-1] )
return self.cs
def clear_candlesticks(self):
return { "open": [], "closed": [], "high": [], "low": [], "volume": [], "basevolume": [], "time":[], "opening":[],"closing":[],"remaining":[],"projected_volume":[],"projected_basevolume":[] }
| 2.53125 | 3 |
__init__.py | merialdo/research.mojito | 0 | 12788932 | <gh_stars>0
from mojito.mojito import Mojito
from mojito.chart import chart
| 1.140625 | 1 |
conf/__init__.py | FredrikM97/Medical-ROI | 0 | 12788933 | <filename>conf/__init__.py
"""This package includes all the modules related to data loading and preprocessing.
To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
"""
import os
import json
from utils import load_configs
"""
def load_configs():
configs = {}
for pos_json in os.listdir('.'):
if pos_json.endswith('.json'):
with open('configs/' +pos_json) as json_file:
for name, config in json.load(json_file).items():
if name in configs:
raise Exception(f"Config from {pos_json} with name {name} already exists!")
configs.update({name:config})
return configs
"""
def load_config(name):
return load_configs('configs/')[name] | 2.71875 | 3 |
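# A minimal usage sketch; 'baseline' is a hypothetical configuration name that would need
# to exist in one of the JSON files under configs/ for this call to succeed.
if __name__ == '__main__':
    print(load_config('baseline'))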
sharpy-sc2/sharpy/plans/require/gas.py | etzhang416/sharpy-bot-eco | 0 | 12788934 | import warnings
import sc2
from sharpy.plans.require.require_base import RequireBase
class Gas(RequireBase):
"""Require that a specific number of minerals are "in the bank"."""
def __init__(self, vespene_requirement: int):
assert vespene_requirement is not None and isinstance(vespene_requirement, int)
super().__init__()
self.vespene_requirement = vespene_requirement
def check(self) -> bool:
if self.ai.vespene > self.vespene_requirement:
return True
return False
class RequiredGas(Gas):
def __init__(self, vespene_requirement: int):
warnings.warn("'RequiredGas' is deprecated, use 'Gas' instead", DeprecationWarning, 2)
super().__init__(vespene_requirement)
| 2.84375 | 3 |
src/__init__.py | bkatwal/distributed-kafka-consumer-python | 2 | 12788935 | import logging
import os
# set the default logging level to info
logging.basicConfig(level=logging.INFO)
ROOT_SRC_DIR = os.path.dirname(os.path.abspath(__file__))
USERNAME = os.environ.get('APP_USERNAME', 'admin')
PASSWORD = os.environ.get('APP_PASSWORD', '<PASSWORD>')
WORKER_NUM_CPUS = os.environ.get('WORKER_NUM_CPUS', .25)
SASL_USERNAME = os.environ.get('SASL_USERNAME', None)
SASL_PASSWORD = os.environ.get('SASL_PASSWORD', None)
SECURITY_PROTOCOL = os.environ.get('SECURITY_PROTOCOL', 'PLAINTEXT')
SASL_MECHANISM = os.environ.get('SASL_MECHANISM')
WORKER_CONFIG_PATH = os.environ.get('WORKER_CONFIG_PATH', '/../config/consumer_config.json')
RAY_HEAD_ADDRESS = os.environ.get('RAY_HEAD_ADDRESS', 'auto')
LOCAL_MODE = os.environ.get('LOCAL_MODE', 'Y')
| 1.890625 | 2 |
ddweb/apps/images/views.py | neic/ddweb | 0 | 12788936 | <gh_stars>0
import os
from django.contrib.auth.decorators import permission_required
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.views.decorators.http import require_POST
from jfu.http import upload_receive, UploadResponse, JFUResponse
from ddweb.apps.images.models import Image
@permission_required("image.add_image")
def uploadForm(request, content_type, object_id):
ct = ContentType.objects.get(model=content_type)
associatedObject = ct.get_object_for_this_type(pk=object_id)
context = {
"associatedObject": associatedObject,
"content_type": content_type,
"object_id": object_id,
}
return render(request, "upload.html", context)
@require_POST
@permission_required("image.add_image", raise_exception=True)
def upload(request):
# The assumption here is that jQuery File Upload
# has been configured to send files one at a time.
    # If multiple files can be uploaded simultaneously,
# 'file' may be a list of files.
image = upload_receive(request)
content_type = ContentType.objects.get(model=request.POST["content_type"])
object_id = request.POST["object_id"]
instance = Image(image=image, content_type=content_type, object_id=object_id)
instance.save()
basename = os.path.basename(instance.image.path)
file_dict = {
"name": basename,
"size": image.size,
"url": instance.image.url,
"deleteUrl": reverse("jfu_delete", kwargs={"pk": instance.pk}),
"deleteType": "POST",
}
return UploadResponse(request, file_dict)
@require_POST
@permission_required("image.delete_image", raise_exception=True)
def upload_delete(request, pk):
success = True
try:
instance = Image.objects.get(pk=pk)
os.unlink(instance.image.path)
instance.delete()
except Image.DoesNotExist:
success = False
return JFUResponse(request, success)
| 2.125 | 2 |
mmdet/det_core/utils/mAP_utils.py | Karybdis/mmdetection-mini | 834 | 12788937 | import numpy as np
from multiprocessing import Pool
from ..bbox import bbox_overlaps
# https://zhuanlan.zhihu.com/p/34655990
def calc_PR_curve(pred, label):
    pos = label[label == 1]  # positive samples
    threshold = np.sort(pred)[::-1]  # predicted positive-class probabilities, sorted in descending order
label = label[pred.argsort()[::-1]]
precision = []
recall = []
tp = 0
fp = 0
    ap = 0  # average precision
for i in range(len(threshold)):
if label[i] == 1:
tp += 1
recall.append(tp / len(pos))
precision.append(tp / (tp + fp))
            # accumulate an approximation of the area under the curve
ap += (recall[i] - recall[i - 1]) * precision[i]
else:
fp += 1
recall.append(tp / len(pos))
precision.append(tp / (tp + fp))
return precision, recall, ap
def tpfp_voc(det_bboxes, gt_bboxes, iou_thr=0.5):
num_dets = det_bboxes.shape[0]
num_gts = gt_bboxes.shape[0]
    # tp and fp are indexed per prediction, not per ground-truth box
tp = np.zeros(num_dets, dtype=np.float32)
fp = np.zeros(num_dets, dtype=np.float32)
    # if there are no gt boxes, every prediction is a false positive, so set fp to 1 everywhere
if gt_bboxes.shape[0] == 0:
fp[...] = 1
return tp, fp
if num_dets == 0:
return tp, fp
ious = bbox_overlaps(det_bboxes[:, :4], gt_bboxes).numpy()
# print(ious)
    # for each predicted box, the IoU with its best-matching gt box
    ious_max = ious.max(axis=1)
    # for each predicted box, the index of that best-matching gt box
    ious_argmax = ious.argmax(axis=1)
    # sort predictions by confidence score in descending order
    sort_inds = np.argsort(-det_bboxes[:, -1])
    gt_covered = np.zeros(num_gts, dtype=bool)
    # when several predictions match one gt box, only the highest-scoring one above the
    # IoU threshold counts as a tp; all the others count as fp
    for i in sort_inds:
        # a match if the IoU exceeds the threshold
        if ious_max[i] >= iou_thr:
            matched_gt = ious_argmax[i]
            # each gt box is matched at most once, to the highest-scoring prediction rather than the highest IoU
if not gt_covered[matched_gt]:
gt_covered[matched_gt] = True
tp[i] = 1
else:
fp[i] = 1
else:
fp[i] = 1
return tp, fp
def _average_precision(recalls, precisions, mode='voc2007'):
recalls = recalls[np.newaxis, :]
precisions = precisions[np.newaxis, :]
assert recalls.shape == precisions.shape and recalls.ndim == 2
num_scales = recalls.shape[0]
ap = np.zeros(num_scales, dtype=np.float32)
    if mode == 'voc2012':  # after precision smoothing this is the standard PR-curve area computation
zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
ones = np.ones((num_scales, 1), dtype=recalls.dtype)
mrec = np.hstack((zeros, recalls, ones))
mpre = np.hstack((zeros, precisions, zeros))
        # vectorized, efficient implementation
for i in range(mpre.shape[1] - 1, 0, -1):
            mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])  # take the running maximum precision within each interval (the horizontal envelope)
for i in range(num_scales):
            ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]  # indices where recall changes, i.e. the x-axis step points
ap[i] = np.sum(
                (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])  # sum of the area of each segment
    elif mode == 'voc2007':  # 11-point interpolation, which needs the same precision smoothing idea
for i in range(num_scales):
for thr in np.arange(0, 1 + 1e-3, 0.1):
precs = precisions[i, recalls[i, :] >= thr]
prec = precs.max() if precs.size > 0 else 0
ap[i] += prec
ap /= 11
else:
raise ValueError(
            'Unrecognized mode, only "voc2007" and "voc2012" are supported')
return ap
# code ref from mmdetection
def voc_eval_map(results, annotations, iou_thr=0.5, name='voc2007', nproc=4):
"""
    :param results: list[list]; the outer list indexes images and the inner list indexes classes.
        With 20 classes, each inner list has length 20 and each entry is an n x 5 numpy array of
        detections for that image and class in (x1, y1, x2, y2, conf) format.
    :param annotations: same structure as results, holding the ground-truth boxes per image and class.
    :param iou_thr: IoU threshold for counting a detection as a TP; the VOC default is 0.5.
    :param name: evaluation protocol; 'voc2007' uses 11-point interpolation, 'voc2012' the full PR-curve area.
    :return: mean AP over all classes that have at least one ground-truth box.
"""
assert len(results) == len(annotations)
    num_imgs = len(results)  # number of images
num_classes = len(results[0]) # positive class num
pool = Pool(nproc)
eval_results = []
for i in range(num_classes):
cls_dets = [img_res[i] for img_res in results]
cls_gts = [img_res[i] for img_res in annotations]
tpfp = pool.starmap(
tpfp_voc,
zip(cls_dets, cls_gts, [iou_thr for _ in range(num_imgs)]))
        # tp/fp flags for every predicted box of this class
tp, fp = tuple(zip(*tpfp))
        # count the ground-truth boxes
num_gts = 0
for j, bbox in enumerate(cls_gts):
num_gts += bbox.shape[0]
        # concatenate the predictions of all images
cls_dets = np.vstack(cls_dets)
        num_dets = cls_dets.shape[0]  # number of detections
        # tp and fp for every predicted box were computed above;
        # now compute precision and recall in a vectorized way
        sort_inds = np.argsort(-cls_dets[:, -1])  # sort by confidence score, descending
        # note: this is a vectorized, faster variant of calc_PR_curve (see c3_pr_roc.py)
tp = np.hstack(tp)[sort_inds][None]
fp = np.hstack(fp)[sort_inds][None]
tp = np.cumsum(tp, axis=1)
fp = np.cumsum(fp, axis=1)
eps = np.finfo(np.float32).eps
recalls = tp / np.maximum(num_gts, eps)
precisions = tp / np.maximum((tp + fp), eps)
recalls = recalls[0, :]
precisions = precisions[0, :]
# print('recalls', recalls, 'precisions', precisions)
ap = _average_precision(recalls, precisions, name)[0]
eval_results.append({
'num_gts': num_gts,
'num_dets': num_dets,
'recall': recalls,
'precision': precisions,
'ap': ap
})
pool.close()
aps = []
for cls_result in eval_results:
if cls_result['num_gts'] > 0:
aps.append(cls_result['ap'])
mean_ap = np.array(aps).mean().item() if aps else 0.0
return mean_ap
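# A minimal usage sketch with hypothetical scores and labels for calc_PR_curve defined above;
# voc_eval_map follows the same idea but takes per-image, per-class (n x 5) detection arrays
# and (n x 4) ground-truth arrays as described in its docstring.
if __name__ == '__main__':
    scores = np.array([0.9, 0.8, 0.7, 0.6, 0.5])
    labels = np.array([1, 0, 1, 1, 0])
    precision, recall, ap = calc_PR_curve(scores, labels)
    print('precision:', precision)
    print('recall:', recall)
    print('AP:', ap)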
| 2.140625 | 2 |
azad/utils.py | CoAxLab/azad | 6 | 12788938 | import cloudpickle
import numpy as np
import torch
from typing import List, Tuple
from scipy.linalg import eigh
def save_checkpoint(state, filename='checkpoint.pkl'):
data = cloudpickle.dumps(state)
with open(filename, 'wb') as fi:
fi.write(data)
def load_checkpoint(filename='checkpoint.pkl'):
with open(filename, 'rb') as fi:
return cloudpickle.load(fi)
| 2.34375 | 2 |
python/chapter2/sshcmd.py | xiaostar2016/KaliTest | 1 | 12788939 | <gh_stars>1-10
#!/usr/bin/env python
# coding=utf-8
import threading
import paramiko
import subprocess
def ssh_command(ip, user, passwd, command):
client = paramiko.SSHClient()
    # paramiko also supports key-based authentication, which is recommended for real
    # environments; here we authenticate with a username and password instead
    # client.load_host_keys('/home/justin/.ssh/known_hosts')
    # automatically add and save the target SSH server's host key
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(ip, username=user, password=passwd)  # connect to the target
    ssh_session = client.get_transport().open_session()  # open a session
if ssh_session.active:
ssh_session.exec_command(command)
print ssh_session.recv(1024)
return
# Call the function: connect to the Raspberry Pi as user 'pi' with its password and run the 'id' command
ssh_command('192.168.3.11', 'pi', 'raspberry', 'id')
| 2.515625 | 3 |
Interview/evenNumberList.py | dnootana/Python | 1 | 12788940 | <reponame>dnootana/Python
# Enter your code here. Read input from STDIN. Print output to STDOUT
def evenlist():
list = input("Enter numbers : ")
L = list.split()
E = []
for i in range(len(L)):
num = int(L[i],10)
if(num%2==0):
E.append(L[i])
print("Even numbers are : ",E)
| 3.734375 | 4 |
impstall/core.py | ryanniehaus/impstall | 2 | 12788941 | <reponame>ryanniehaus/impstall
#!/usr/bin/env python
'''
This module can be used to import python packages and install them if not already installed.
'''
import os
import sys
import subprocess
import tempfile
import urllib
_pipSetupUrl = 'https://bootstrap.pypa.io/get-pip.py'
PIP_OPTIONS=[]
INSTALL_PIP_OPTIONS=[]
PYTHON_EXE_PATH=sys.executable
HTTP_PROXY=None
HTTPS_PROXY=None
def _updateModVarsFromEnv():
if os.environ.get('PYTHON_EXE_PATH') is not None:
global PYTHON_EXE_PATH
PYTHON_EXE_PATH=os.environ['PYTHON_EXE_PATH']
if os.environ.get('INSTALL_PIP_OPTIONS') is not None:
global INSTALL_PIP_OPTIONS
INSTALL_PIP_OPTIONS=os.environ['INSTALL_PIP_OPTIONS']
if os.environ.get('PIP_OPTIONS') is not None:
global PIP_OPTIONS
PIP_OPTIONS=os.environ['PIP_OPTIONS']
global HTTPS_PROXY
if os.environ.get('https_proxy') is not None and os.environ['https_proxy']!='':
HTTPS_PROXY=os.environ['https_proxy']
elif os.environ.get('HTTPS_PROXY') is not None and os.environ['HTTPS_PROXY']!='':
HTTPS_PROXY=os.environ['HTTPS_PROXY']
if HTTPS_PROXY is not None:
if os.environ.get('https_proxy') is None:
os.environ['https_proxy']=HTTPS_PROXY
if os.environ.get('HTTPS_PROXY') is None:
os.environ['HTTPS_PROXY']=HTTPS_PROXY
global HTTP_PROXY
if os.environ.get('http_proxy') is not None and os.environ['http_proxy']!='':
HTTP_PROXY=os.environ['http_proxy']
elif os.environ.get('HTTP_PROXY') is not None and os.environ['HTTP_PROXY']!='':
HTTP_PROXY=os.environ['HTTP_PROXY']
if HTTP_PROXY is not None:
if os.environ.get('http_proxy') is None:
os.environ['http_proxy']=HTTP_PROXY
if os.environ.get('HTTP_PROXY') is None:
os.environ['HTTP_PROXY']=HTTP_PROXY
def _installWithPip(pipName, pythonExePath=eval('PYTHON_EXE_PATH'), getPipOpts=eval('INSTALL_PIP_OPTIONS'), pipOpts=eval('PIP_OPTIONS')):
'''
:param pipName:
:return:
'''
pipAvail = False
try:
import pip as pip
pipAvail = True
except ImportError:
pass
proxyArgs = []
if HTTP_PROXY is not None:
proxyArgs.append('--proxy='+HTTP_PROXY)
elif HTTPS_PROXY is not None:
proxyArgs.append('--proxy='+HTTPS_PROXY)
if not pipAvail:
print 'Downloading pip installer:', _pipSetupUrl
tmpDir = tempfile.gettempdir()
pipSetupFilePath = os.path.join(tmpDir, os.path.basename(_pipSetupUrl))
urllib.urlretrieve(_pipSetupUrl, pipSetupFilePath)
pipSetupArgs = [pythonExePath, pipSetupFilePath]
pipSetupArgs.extend(proxyArgs)
pipSetupArgs.extend(getPipOpts)
print 'Executing pip installer:', ' '.join(pipSetupArgs)
subprocess.Popen(pipSetupArgs)
pipAvail = False
try:
import pip as pip
pipAvail = True
except ImportError:
pass
if pipAvail:
pipArgs=proxyArgs
pipArgs.extend(pipOpts)
pipArgs.extend(['install', pipName])
print 'Installing', pipName + ':', 'pip', ' '.join(pipArgs)
pip.main(pipArgs)
else:
print 'Pip not available...'
#Look at pypi repo for installers
#Download and use installer if available
def set_pip_options(pipOptions=[]):
global PIP_OPTIONS
PIP_OPTIONS=pipOptions
def get_pip_options():
return PIP_OPTIONS
def set_pip_installer_options(pipInstallerOptions=[]):
global INSTALL_PIP_OPTIONS
INSTALL_PIP_OPTIONS=pipInstallerOptions
def get_pip_installer_options():
return INSTALL_PIP_OPTIONS
def set_custom_python_exe_path(pythonExePath=sys.executable):
global PYTHON_EXE_PATH
PYTHON_EXE_PATH=pythonExePath
def get_current_python_exe_path():
return PYTHON_EXE_PATH
def set_http_proxy(httpProxy=None):
global HTTP_PROXY
HTTP_PROXY=httpProxy
def get_http_proxy():
return HTTP_PROXY
def set_https_proxy(httpsProxy=None):
global HTTPS_PROXY
HTTPS_PROXY=httpsProxy
def get_https_proxy():
return HTTPS_PROXY
def impstall(module, items={}, pipPackage=None):
'''
	This is the main function of the module. It will import `module` if it can. If not, it will try to install it.
	First, it tries to import the module. If that fails, it tries to install the package with pip, installing pip itself first when it is not already available.
If the pip install fails or the module fails to install from pip, we try to find a module installer on the internet.
If that fails, an exception is raised.
:param module: str
This is the name of the module that we want to import or import from. It should be the name that would be used in a standard import statement.
:param pipPackage: str, optional
This is the name of the module as it would be requested through pip. If not provided, it is set to `module`
:return: N/A
'''
baseModule=module.split('.')[0]
packageAlreadyInstalled = False
try:
exec('import '+baseModule)
packageAlreadyInstalled = True
except ImportError:
pass
if not packageAlreadyInstalled:
if pipPackage is None:
pipPackage = baseModule
_updateModVarsFromEnv()
_installWithPip(pipPackage)
if len(items) == 0:
builtImportString = 'import ' + module
else:
builtImportString = 'from ' + module + ' import '
tempIdx = 0
for key in items:
if tempIdx > 0:
builtImportString += ', '
builtImportString += key
if items[key] is not None and items[key] != '':
builtImportString += ' as ' + items[key]
tempIdx += 1
exec (builtImportString, sys._getframe(1).f_globals)
| 2.140625 | 2 |
protopigeon/__init__.py | gregorynicholas/proto-pigeon | 0 | 12788942 | <filename>protopigeon/__init__.py<gh_stars>0
from protorpc.messages import *
from protorpc.protojson import *
from .translators import *
| 1.140625 | 1 |
src/tests/test_inference.py | Islandora-Image-Segmentation/Newspaper-Navigator-API | 0 | 12788943 | <filename>src/tests/test_inference.py
import os
from PIL import Image
from inference import predict
from . import TEST_ASSETS_DIR
def test_inference_one():
""" Test for the segmentation ML model.
This test requires the model weights `model_final.pth` to be present in src/resources.
"""
img = Image.open(os.path.join(TEST_ASSETS_DIR, "test_image_one.png"))
result = predict(img)
assert len(result.bounding_boxes) > 0
def test_inference_two():
""" Test for the segmentation ML model.
This test requires the model weights `model_final.pth` to be present in src/resources.
"""
img = Image.open(os.path.join(TEST_ASSETS_DIR, "test_image_two.png"))
result = predict(img)
assert len(result.bounding_boxes) > 0
| 2.5 | 2 |
Python learnings/Django projects/advcbv/basic_app/admin.py | warpalatino/public | 1 | 12788944 | from django.contrib import admin
from .models import School,Student
# Register your models here.
admin.site.register(School)
admin.site.register(Student)
| 1.46875 | 1 |
mp_gui/layout/__init__.py | kerryeon/mp_python | 1 | 12788945 | <filename>mp_gui/layout/__init__.py
__all__ = ['MpGui']
from mp_gui.layout.main import MpGuiLinker as MpGui
| 1.25 | 1 |
exps/Baseline-Complement/models/model.py | Championchess/Generative-3D-Part-Assembly | 80 | 12788946 | """
B-Complement
Input:
part point clouds: B x P x N x 3
Output:
    R and T: B x P x (3 + 4)
Losses:
    Center L2 Loss, Rotation L2 Loss, Rotation Chamfer-Distance Loss
"""
import torch
from torch import nn
import torch.nn.functional as F
import sys, os
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '../utils'))
from cd.chamfer import chamfer_distance
from quaternion import qrot
import ipdb
from scipy.optimize import linear_sum_assignment
# PointNet Front-end
class PartPointNet(nn.Module):
def __init__(self, feat_len):
super(PartPointNet, self).__init__()
self.conv1 = nn.Conv1d(3, 64, 1)
self.conv2 = nn.Conv1d(64, 64, 1)
self.conv3 = nn.Conv1d(64, 64, 1)
self.conv4 = nn.Conv1d(64, 128, 1)
#self.conv5 = nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(64)
self.bn3 = nn.BatchNorm1d(64)
self.bn4 = nn.BatchNorm1d(128)
#self.bn5 = nn.BatchNorm1d(1024)
self.mlp1 = nn.Linear(128, feat_len)
self.bn6 = nn.BatchNorm1d(feat_len)
"""
Input: B x N x 3
Output: B x F
"""
def forward(self, x):
x = x.permute(0, 2, 1)
x = torch.relu(self.bn1(self.conv1(x)))
x = torch.relu(self.bn2(self.conv2(x)))
x = torch.relu(self.bn3(self.conv3(x)))
x = torch.relu(self.bn4(self.conv4(x)))
#x = torch.relu(self.bn5(self.conv5(x)))
x = x.max(dim=-1)[0]
x = torch.relu(self.bn6(self.mlp1(x)))
return x
# PointNet Back-end
class PoseDecoder(nn.Module):
def __init__(self, feat_len):
super(PoseDecoder, self).__init__()
self.mlp1 = nn.Linear(feat_len, 512)
self.mlp2 = nn.Linear(512, 256)
self.trans = nn.Linear(256, 3)
self.quat = nn.Linear(256, 4)
self.quat.bias.data.zero_()
"""
Input: B x (2F + P + 16)
Output: B x 7
"""
def forward(self, feat):
feat = torch.relu(self.mlp1(feat))
feat = torch.relu(self.mlp2(feat))
trans = torch.tanh(self.trans(feat)) # consider to remove torch.tanh if not using PartNet normalization
quat_bias = feat.new_tensor([[[1.0, 0.0, 0.0, 0.0]]])
quat = self.quat(feat).add(quat_bias)
quat = quat / (1e-12 + quat.pow(2).sum(dim=-1, keepdim=True)).sqrt()
out = torch.cat([trans, quat.squeeze(0)], dim=-1)
return out
class Network(nn.Module):
def __init__(self, conf):
super(Network, self).__init__()
self.conf = conf
self.part_pointnet = PartPointNet(conf.feat_len)
self.pose_decoder = PoseDecoder(2 * conf.feat_len + conf.max_num_part + 16)
"""
Input: B x P x N x 3, B x P, B x P x P, B x 7
Output: B x P x (3 + 4)
"""
def forward(self,seq, part_pcs, part_valids, instance_label, gt_part_pose):
batch_size = part_pcs.shape[0]
num_part = part_pcs.shape[1]
num_point = part_pcs.shape[2]
pred_part_poses = np.zeros((batch_size, num_part, 7))
pred_part_poses = torch.tensor(pred_part_poses).to(self.conf.device)
# generate random_noise
random_noise = np.random.normal(loc=0.0, scale=1.0, size=[batch_size, num_part, 16]).astype(
np.float32) # B x P x 16
random_noise = torch.tensor(random_noise).to(self.conf.device)
for iter in range(num_part):
select_ind = seq[:,iter].int().tolist()
batch_ind = [i for i in range(len(select_ind))]
if iter == 0:
cur_pred_pose = gt_part_pose # B x 7
pred_part_poses= pred_part_poses.float()
pred_part_poses[batch_ind,select_ind,:] = cur_pred_pose
cur_pred_center = cur_pred_pose[:, :3].unsqueeze(1).repeat(1, num_point, 1) # B x N x 3
cur_pred_qrot = cur_pred_pose[:, 3:].unsqueeze(1).repeat(1, num_point, 1) # B x N x 4
cur_part = cur_pred_center + qrot(cur_pred_qrot, part_pcs[batch_ind,select_ind, :, :])# B x N x 3
cur_part = cur_part.unsqueeze(1) # B x 1 x N x 3
cur_shape = cur_part # B x batch_ind,select_ind x N x 3
else:
cur_shape_feat = self.part_pointnet(cur_shape.view(batch_size, -1, 3)) # B x F
cur_part_feat = self.part_pointnet(part_pcs[batch_ind,select_ind, :, :])# B x F
cat_feat = torch.cat([cur_shape_feat, cur_part_feat, instance_label[batch_ind,select_ind, :].contiguous(), random_noise[batch_ind,select_ind, :].contiguous()], dim=-1) # B x (2F + P + 16)
cur_pred_pose = self.pose_decoder(cat_feat) # B x 7
pred_part_poses[batch_ind,select_ind, :] = cur_pred_pose
cur_pred_center = cur_pred_pose[:, :3].unsqueeze(1).repeat(1, num_point, 1) # B x N x 3
cur_pred_qrot = cur_pred_pose[:, 3:].unsqueeze(1).repeat(1, num_point, 1) # B x N x 4
cur_part = cur_pred_center + qrot(cur_pred_qrot, part_pcs[batch_ind,select_ind, :, :]) # B x N x 3
cur_part = cur_part.unsqueeze(1) # B x 1 x N x 3
cur_shape = torch.cat([cur_shape, cur_part], dim=1) # B x select_ind x N x 3
pred_part_poses = pred_part_poses.double() * part_valids.unsqueeze(2).double()
return pred_part_poses.float()
"""
Input: * x N x 3, * x 3, * x 4, * x 3, * x 4,
Output: *, * (two lists)
"""
def linear_assignment(self, pts, centers1, quats1, centers2, quats2):
cur_part_cnt = pts.shape[0]
num_point = pts.shape[1]
with torch.no_grad():
cur_quats1 = quats1.unsqueeze(1).repeat(1, num_point, 1)
cur_centers1 = centers1.unsqueeze(1).repeat(1, num_point, 1)
cur_pts1 = qrot(cur_quats1, pts) + cur_centers1
cur_quats2 = quats2.unsqueeze(1).repeat(1, num_point, 1)
cur_centers2 = centers2.unsqueeze(1).repeat(1, num_point, 1)
cur_pts2 = qrot(cur_quats2, pts) + cur_centers2
cur_pts1 = cur_pts1.unsqueeze(1).repeat(1, cur_part_cnt, 1, 1).view(-1, num_point, 3)
cur_pts2 = cur_pts2.unsqueeze(0).repeat(cur_part_cnt, 1, 1, 1).view(-1, num_point, 3)
dist1, dist2 = chamfer_distance(cur_pts1, cur_pts2, transpose=False)
dist_mat = (dist1.mean(1) + dist2.mean(1)).view(cur_part_cnt, cur_part_cnt)
rind, cind = linear_sum_assignment(dist_mat.cpu().numpy())
return rind, cind
"""
Input: B x P x 3, B x P x 3, B x P
Output: B
"""
def get_trans_l2_loss(self, trans1, trans2, valids):
loss_per_data = (trans1 - trans2).pow(2).sum(dim=-1)
loss_per_data = (loss_per_data * valids).sum(1) / valids.sum(1)
return loss_per_data
"""
Input: B x P x N x 3, B x P x 4, B x P x 4, B x P
Output: B
"""
def get_rot_l2_loss(self, pts, quat1, quat2, valids):
batch_size = pts.shape[0]
num_point = pts.shape[2]
pts1 = qrot(quat1.unsqueeze(2).repeat(1, 1, num_point, 1), pts)
pts2 = qrot(quat2.unsqueeze(2).repeat(1, 1, num_point, 1), pts)
loss_per_data = (pts1 - pts2).pow(2).sum(-1).mean(-1)
loss_per_data = (loss_per_data * valids).sum(1) / valids.sum(1)
return loss_per_data
"""
Input: B x P x N x 3, B x P x 4, B x P x 4, B x P
Output: B
"""
def get_rot_cd_loss(self, pts, quat1, quat2, valids, device):
batch_size = pts.shape[0]
num_point = pts.shape[2]
pts1 = qrot(quat1.unsqueeze(2).repeat(1, 1, num_point, 1), pts)
pts2 = qrot(quat2.unsqueeze(2).repeat(1, 1, num_point, 1), pts)
dist1, dist2 = chamfer_distance(pts1.view(-1, num_point, 3), pts2.view(-1, num_point, 3), transpose=False)
loss_per_data = torch.mean(dist1, dim=1) + torch.mean(dist2, dim=1)
loss_per_data = loss_per_data.view(batch_size, -1)
loss_per_data = loss_per_data.to(device)
loss_per_data = (loss_per_data * valids).sum(1) / valids.sum(1)
return loss_per_data #
def get_total_cd_loss(self, pts, quat1, quat2, valids, center1, center2, device):
batch_size = pts.shape[0]
num_part = pts.shape[1]
num_point = pts.shape[2]
center1 = center1.unsqueeze(2).repeat(1,1,num_point,1)
center2 = center2.unsqueeze(2).repeat(1,1,num_point,1)
pts1 = qrot(quat1.unsqueeze(2).repeat(1, 1, num_point, 1), pts) + center1
pts2 = qrot(quat2.unsqueeze(2).repeat(1, 1, num_point, 1), pts) + center2
dist1, dist2 = chamfer_distance(pts1.view(-1, num_point, 3), pts2.view(-1, num_point, 3), transpose=False)
loss_per_data = torch.mean(dist1, dim=1) + torch.mean(dist2, dim=1)
loss_per_data = loss_per_data.view(batch_size, -1)
thre = 0.01
loss_per_data = loss_per_data.to(device)
acc = [[0 for i in range(num_part)]for j in range(batch_size)]
for i in range(batch_size):
for j in range(num_part):
if loss_per_data[i,j] < thre and valids[i,j]:
acc[i][j] = 1
loss_per_data = (loss_per_data * valids).sum(1) / valids.sum(1)
return loss_per_data , acc
def get_shape_cd_loss(self, pts, quat1, quat2, valids, center1, center2, device):
batch_size = pts.shape[0]
num_part = pts.shape[1]
num_point = pts.shape[2]
center1 = center1.unsqueeze(2).repeat(1,1,num_point,1)
center2 = center2.unsqueeze(2).repeat(1,1,num_point,1)
pts1 = qrot(quat1.unsqueeze(2).repeat(1, 1, num_point, 1), pts) + center1
pts2 = qrot(quat2.unsqueeze(2).repeat(1, 1, num_point, 1), pts) + center2
pts1 = pts1.view(batch_size,num_part*num_point,3)
pts2 = pts2.view(batch_size,num_part*num_point,3)
dist1, dist2 = chamfer_distance(pts1, pts2, transpose=False)
        valids = valids.unsqueeze(2).repeat(1,1,num_point).view(batch_size,-1)  # one mask entry per point (was hard-coded to 1000 points)
dist1 = dist1 * valids
dist2 = dist2 * valids
loss_per_data = torch.mean(dist1, dim=1) + torch.mean(dist2, dim=1)
loss_per_data = loss_per_data.to(device)
return loss_per_data
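    """
    Input: 3, bool, bool, bool
    Output: 3 (Python list) - the point reflected across each axis whose flag is set
    """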
    def get_sym_point(self, point, x, y, z):
        # work on a copy so repeated calls from get_possible_point_list do not flip the same tensor in place
        point = point.clone()
        if x:
            point[0] = - point[0]
        if y:
            point[1] = - point[1]
        if z:
            point[2] = - point[2]
        return point.tolist()
def get_possible_point_list(self, point, sym):
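        # NOTE: the passed-in symmetry flags are overridden on the next line, so every
        # contact point is expanded into all eight axis reflections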
sym = torch.tensor([1.0, 1.0, 1.0])
point_list = []
if sym.equal(torch.tensor([0.0, 0.0, 0.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
elif sym.equal(torch.tensor([1.0, 0.0, 0.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 1, 0, 0))
elif sym.equal(torch.tensor([0.0, 1.0, 0.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 0, 1, 0))
elif sym.equal(torch.tensor([0.0, 0.0, 1.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 0, 0, 1))
elif sym.equal(torch.tensor([1.0, 1.0, 0.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 1, 0, 0))
point_list.append(self.get_sym_point(point, 0, 1, 0))
point_list.append(self.get_sym_point(point, 1, 1, 0))
elif sym.equal(torch.tensor([1.0, 0.0, 1.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 1, 0, 0))
point_list.append(self.get_sym_point(point, 0, 0, 1))
point_list.append(self.get_sym_point(point, 1, 0, 1))
elif sym.equal(torch.tensor([0.0, 1.0, 1.0])):
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 0, 1, 0))
point_list.append(self.get_sym_point(point, 0, 0, 1))
point_list.append(self.get_sym_point(point, 0, 1, 1))
else:
point_list.append(self.get_sym_point(point, 0, 0, 0))
point_list.append(self.get_sym_point(point, 1, 0, 0))
point_list.append(self.get_sym_point(point, 0, 1, 0))
point_list.append(self.get_sym_point(point, 0, 0, 1))
point_list.append(self.get_sym_point(point, 1, 1, 0))
point_list.append(self.get_sym_point(point, 1, 0, 1))
point_list.append(self.get_sym_point(point, 0, 1, 1))
point_list.append(self.get_sym_point(point, 1, 1, 1))
return point_list
def get_min_l2_dist(self, list1, list2, center1, center2, quat1, quat2):
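        # pose both candidate point lists in world coordinates, then return the smallest
        # squared distance between any point of list1 and any point of list2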
list1 = torch.tensor(list1) # m x 3
list2 = torch.tensor(list2) # n x 3
len1 = list1.shape[0]
len2 = list2.shape[0]
center1 = center1.unsqueeze(0).repeat(len1, 1)
center2 = center2.unsqueeze(0).repeat(len2, 1)
quat1 = quat1.unsqueeze(0).repeat(len1, 1)
quat2 = quat2.unsqueeze(0).repeat(len2, 1)
list1 = list1.to(self.conf.device)
list2 = list2.to(self.conf.device)
list1 = center1 + qrot(quat1, list1)
list2 = center2 + qrot(quat2, list2)
mat1 = list1.unsqueeze(1).repeat(1, len2, 1)
mat2 = list2.unsqueeze(0).repeat(len1, 1, 1)
mat = (mat1 - mat2) * (mat1 - mat2)
mat = mat.sum(dim=-1)
return mat.min()
"""
Contact point loss metric
Date: 2020/5/22
        Input: B x P x 3, B x P x 4, B x P x P x 4, B x P x 3
        Output: B (per-shape loss), plus the number of contacts within threshold and the total contact count
"""
def get_contact_point_loss(self, center, quat, contact_points, sym_info):
batch_size = center.shape[0]
num_part = center.shape[1]
contact_point_loss = torch.zeros(batch_size)
total_num = 0
count = 0
for b in range(batch_size):
sum_loss = 0
for i in range(num_part):
for j in range(num_part):
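                    # contact_points[b, i, j, 0] flags whether parts i and j are in contact;
                    # the remaining three entries are the contact point itself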
if contact_points[b, i, j, 0]:
contact_point_1 = contact_points[b, i, j, 1:]
contact_point_2 = contact_points[b, j, i, 1:]
sym1 = sym_info[b, i]
sym2 = sym_info[b, j]
point_list_1 = self.get_possible_point_list(contact_point_1, sym1)
point_list_2 = self.get_possible_point_list(contact_point_2, sym2)
dist = self.get_min_l2_dist(point_list_1, point_list_2, center[b, i, :], center[b, j, :],
quat[b, i, :], quat[b, j, :]) # 1
if dist < 0.01:
count += 1
total_num += 1
sum_loss += dist
contact_point_loss[b] = sum_loss
return contact_point_loss, count, total_num
| 2.21875 | 2 |
tests/test_dynamo.py | FernandoGarzon/dmwmclient | 1 | 12788947 | import pytest
from dmwmclient import Client
@pytest.mark.asyncio
async def test_cycle():
client = Client()
dynamo = client.dynamo
cycle = await dynamo.latest_cycle()
assert type(cycle) is dict
assert set(cycle.keys()) == {'cycle', 'partition_id', 'timestamp', 'comment'}
@pytest.mark.asyncio
async def test_detail():
client = Client()
dynamo = client.dynamo
df = await dynamo.site_detail('T2_PK_NCP', 34069)
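    # expected columns and total size for site T2_PK_NCP in cycle 34069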
assert set(df.columns) == {'condition', 'condition_id', 'decision', 'name', 'site', 'size'}
assert df.sum()['size'] == 99787.18272119202
| 2.109375 | 2 |
tests/cases/base.py | chop-dbhi/varify | 6 | 12788948 | <gh_stars>1-10
from django.contrib.auth.models import User
from django.core.cache import cache
from django_rq import get_queue, get_connection
from rq.queue import get_failed_queue
from django.test import TestCase, TransactionTestCase
class AuthenticatedBaseTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='test', password='<PASSWORD>')
self.client.login(username='test', password='<PASSWORD>')
class QueueTestCase(TransactionTestCase):
def setUp(self):
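        # start every test from a clean slate: empty cache, empty RQ queues, empty failed queue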
cache.clear()
get_queue('variants').empty()
get_queue('default').empty()
get_failed_queue(get_connection()).empty()
| 2.015625 | 2 |
avalonbot/cards.py | AvantiShri/avalon-bot | 0 | 12788949 | from __future__ import division, print_function, absolute_import
import random
from collections import OrderedDict
#Mimicking Enums, but having the values be strings
class CardType(object):
LOYAL_SERVANT_OF_ARTHUR="LOYAL_SERVANT_OF_ARTHUR"
MERLIN="MERLIN"
PERCIVAL="PERCIVAL"
MINION_OF_MORDRED="MINION_OF_MORDRED"
ASSASSIN="ASSASSIN"
MORGANA="MORGANA"
MORDRED="MORDRED"
OBERON="OBERON"
#Mimicking Enums, but having the values be strings
class Team(object):
GOOD="GOOD"
EVIL="EVIL"
class Card(object):
def __init__(self, team, card_type, special_abilities):
self.team = team
self.card_type = card_type
self.special_abilities = special_abilities
def get_additional_info_to_provide_to_player(self, game):
raise NotImplementedError()
def get_card_summary(self):
return OrderedDict([("Team", str(self.team)),
("Special abilities", self.special_abilities)])
class LoyalServantOfArthur(Card):
def __init__(self):
Card.__init__(self,
team=Team.GOOD,
card_type=CardType.LOYAL_SERVANT_OF_ARTHUR,
special_abilities="This card has no special abilities.")
def get_additional_info_to_provide_to_player(self, game):
return ("As a loyal servant, you don't have any additional info"
+" beyond what the other cards in the game are. Review those"
+" cards and their abilities to strategize.")
class Merlin(Card):
def __init__(self):
Card.__init__(self,
team=Team.GOOD,
card_type=CardType.MERLIN,
special_abilities=("You will be given information on who"
+" the players on the evil team are, *with the exception of"
+" MORDRED* (if MORDRED is present in the game)."
+" You will not know the specific roles of the players on the"
+" evil team you are told about. If PERCIVAL is in the game, your"
+" identity will be made known to them - however, if"
+" MORGANA is also in the game, PERCIVAL will also be given"
+" MORGANA's name and won't be told which of the two of you"
+" is the real MERLIN. You should not be too"
+" obvious about being MERLIN or else the evil team will win by"
+" assassinating you at the end."))
def get_additional_info_to_provide_to_player(self, game):
evil_team_players = []
for player in game.players:
if (player.card.team==Team.EVIL and
player.card.card_type != CardType.MORDRED):
evil_team_players.append(player)
return ("You know that the following players are on the evil team: "
+", ".join(sorted(str(x) for x in evil_team_players)))
class Percival(Card):
def __init__(self):
Card.__init__(self,
team=Team.GOOD,
card_type=CardType.PERCIVAL,
special_abilities=(
"You will be given the names of players who are either"
+" MORGANA or MERLIN, but you will not be told who is who."
+" Note that if MORGANA is absent from the game, you will just"
+" be given the name of MERLIN."))
def get_additional_info_to_provide_to_player(self, game):
morgana_or_merlin = []
for player in game.players:
if (player.card.card_type==CardType.MORGANA
or player.card.card_type==CardType.MERLIN):
morgana_or_merlin.append(player)
return ("These players are either MORGANA or MERLIN"
+" (if MORGANA is absent from the game, this is just"
+" the name of MERLIN): "
+" & ".join(sorted(str(x) for x in morgana_or_merlin)))
class BadGuy(Card):
def __init__(self, card_type, special_abilities):
Card.__init__(self, team=Team.EVIL,
card_type=card_type,
special_abilities=special_abilities)
def get_additional_info_to_provide_to_player(self, game):
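        # evil players learn who their teammates are, except that OBERON is never listed
        # (and OBERON itself overrides this method to receive no names at all)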
evil_team_players = []
for player in game.players:
if (player.card.team==Team.EVIL
and player.card.card_type != CardType.OBERON):
evil_team_players.append(player)
return ("You know that the following players are on the evil team: "
+", ".join(sorted(str(x) for x in evil_team_players)))
class Assassin(BadGuy):
def __init__(self):
super(Assassin, self).__init__(
card_type=CardType.ASSASSIN,
special_abilities = (
"At the end of the game, you will take the"
+" final call on who MERLIN is likely to be. If you guess right,"
+" the evil team wins."))
class MinionOfMordred(BadGuy):
def __init__(self):
super(MinionOfMordred, self).__init__(
card_type=CardType.MINION_OF_MORDRED,
special_abilities="This card has no special abilities.")
class Morgana(BadGuy):
def __init__(self):
super(Morgana, self).__init__(
card_type=CardType.MORGANA,
special_abilities = (
"PERCIVAL will be given your"
+" name along with the name of the person playing MERLIN, but"
+" PERCIVAL will not be told who is who. You should try to figure"
+" out who PERCIVAL is and convince PERCIVAL that you are MERLIN."))
class Mordred(BadGuy):
def __init__(self):
super(Mordred, self).__init__(
card_type=CardType.MORDRED,
special_abilities=(
"MERLIN does not know you are on the evil team"
+" (this is a major advantage for the evil team)."))
class Oberon(BadGuy):
def __init__(self):
super(Oberon, self).__init__(
card_type=CardType.OBERON,
special_abilities = (
"The other players on the evil team won't know that"
+" you are also on the evil team, and similarly, you don't"
+" know who the other players on the evil team are (this is a"
+" disadvantage for the evil team)."))
def get_additional_info_to_provide_to_player(self, game):
return ("None - unlike all the other players on the evil team,"
+" you don't have any information about who is on the"
+" evil team!")
card_type_to_class = {
"LOYAL_SERVANT_OF_ARTHUR": LoyalServantOfArthur,
"MERLIN": Merlin,
"PERCIVAL": Percival,
"MINION_OF_MORDRED": MinionOfMordred,
"ASSASSIN": Assassin,
"MORGANA": Morgana,
"MORDRED": Mordred,
"OBERON": Oberon
}
| 3.46875 | 3 |
utils/__init__.py | mpi2/impc-reference-harvester | 0 | 12788950 | import configparser
import os
import logging
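# resolve config.ini relative to this package so it is found regardless of the working directory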
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, '../config.ini')
config = configparser.ConfigParser()
with open(filename) as config_file:
    config.read_file(config_file)
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
| 2.265625 | 2 |